hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 958k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f72af6a3f7871c38684b0e461069b71876226a9b | 157 | py | Python | tests/model_control/detailed/transf_None/model_control_one_enabled_None_MovingAverage_Seasonal_Second_MLP.py | shaido987/pyaf | b9afd089557bed6b90b246d3712c481ae26a1957 | [
"BSD-3-Clause"
] | 377 | 2016-10-13T20:52:44.000Z | 2022-03-29T18:04:14.000Z | tests/model_control/detailed/transf_None/model_control_one_enabled_None_MovingAverage_Seasonal_Second_MLP.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 160 | 2016-10-13T16:11:53.000Z | 2022-03-28T04:21:34.000Z | tests/model_control/detailed/transf_None/model_control_one_enabled_None_MovingAverage_Seasonal_Second_MLP.py | ysdede/pyaf | b5541b8249d5a1cfdc01f27fdfd99b6580ed680b | [
"BSD-3-Clause"
] | 63 | 2017-03-09T14:51:18.000Z | 2022-03-27T20:52:57.000Z | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['MovingAverage'] , ['Seasonal_Second'] , ['MLP'] ); | 39.25 | 84 | 0.751592 | import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['None'] , ['MovingAverage'] , ['Seasonal_Second'] , ['MLP'] ); | true | true |
f72af6e888f158710810a4b5ed837ab592f4f7f4 | 3,251 | py | Python | tests/toolkit/utils.py | Devtography/ibpy_native | e3e2a406a8db9bb338953be6dc195b8099379acb | [
"Apache-2.0"
] | 6 | 2020-07-09T20:55:41.000Z | 2022-01-22T15:43:29.000Z | tests/toolkit/utils.py | Devtography/ibpy_native | e3e2a406a8db9bb338953be6dc195b8099379acb | [
"Apache-2.0"
] | 1 | 2021-02-28T13:37:43.000Z | 2021-02-28T13:37:43.000Z | tests/toolkit/utils.py | Devtography/ibpy_native | e3e2a406a8db9bb338953be6dc195b8099379acb | [
"Apache-2.0"
] | 5 | 2020-05-24T19:15:06.000Z | 2022-01-22T15:43:35.000Z | """Utilities for making unittests easier to write."""
# pylint: disable=protected-access
import asyncio
import os
import queue
from typing import Dict, List, Optional, Union
from ibapi import wrapper
from ibpy_native import error
from ibpy_native import models
from ibpy_native.interfaces import delegates
from ibpy_native.interfaces import listeners
from ibpy_native.utils import finishable_queue as fq
#region - General utils
def async_test(fn):
# pylint: disable=invalid-name
"""Decorator for testing the async functions."""
def fn_wrapper(*args, **kwargs):
loop = asyncio.new_event_loop()
return loop.run_until_complete(fn(*args, **kwargs))
return fn_wrapper
#endregion - General utils
#region - ibpy_native specific
# Constants
IB_HOST: str = os.getenv("IB_HOST", "127.0.0.1")
IB_PORT: int = int(os.getenv("IB_PORT", "4002"))
IB_CLIENT_ID: int = int(os.getenv("IB_CLIENT_ID", "1001"))
IB_ACC_ID: str = os.getenv("IB_ACC_ID", "")
class MockConnectionListener(listeners.ConnectionListener):
"""Mock connection listener."""
def __init__(self):
self.connected: Optional[bool] = None
def on_connected(self):
self.connected = True
def on_disconnected(self):
self.connected = False
class MockNotificationListener(listeners.NotificationListener):
"""Mock notification listener."""
def __init__(self):
self.msg_code = -1
self.msg = ""
def on_notify(self, msg_code: int, msg: str):
"""Mock callback implementation."""
self.msg_code = msg_code
self.msg = msg
class MockAccountsManagementDelegate(delegates.AccountsManagementDelegate):
"""Mock accounts delegate"""
def __init__(self):
self._account_list: Dict[str, models.Account] = {}
self._account_updates_queue: fq.FinishableQueue = fq.FinishableQueue(
queue_to_finish=queue.Queue()
)
@property
def accounts(self) -> Dict[str, models.Account]:
return self._account_list
@property
def account_updates_queue(self) -> fq.FinishableQueue:
return self._account_updates_queue
def on_account_list_update(self, account_list: List[str]):
for account_id in account_list:
self._account_list[account_id] = models.Account(account_id)
async def sub_account_updates(self, account: models.Account):
pass
async def unsub_account_updates(self):
pass
def on_disconnected(self):
pass
class MockLiveTicksListener(listeners.LiveTicksListener):
"""Mock notification listener"""
def __init__(self):
self.ticks: List[Union[wrapper.HistoricalTick,
wrapper.HistoricalTickBidAsk,
wrapper.HistoricalTickLast]] = []
self.finished = False
def on_tick_receive(self, req_id: int,
tick: Union[wrapper.HistoricalTick,
wrapper.HistoricalTickBidAsk,
wrapper.HistoricalTickLast,]):
self.ticks.append(tick)
def on_finish(self, req_id: int):
self.finished = True
def on_err(self, err: error.IBError):
raise err
#endregion - ibpy_native specific
| 30.669811 | 77 | 0.671486 |
import asyncio
import os
import queue
from typing import Dict, List, Optional, Union
from ibapi import wrapper
from ibpy_native import error
from ibpy_native import models
from ibpy_native.interfaces import delegates
from ibpy_native.interfaces import listeners
from ibpy_native.utils import finishable_queue as fq
def async_test(fn):
def fn_wrapper(*args, **kwargs):
loop = asyncio.new_event_loop()
return loop.run_until_complete(fn(*args, **kwargs))
return fn_wrapper
IB_HOST: str = os.getenv("IB_HOST", "127.0.0.1")
IB_PORT: int = int(os.getenv("IB_PORT", "4002"))
IB_CLIENT_ID: int = int(os.getenv("IB_CLIENT_ID", "1001"))
IB_ACC_ID: str = os.getenv("IB_ACC_ID", "")
class MockConnectionListener(listeners.ConnectionListener):
def __init__(self):
self.connected: Optional[bool] = None
def on_connected(self):
self.connected = True
def on_disconnected(self):
self.connected = False
class MockNotificationListener(listeners.NotificationListener):
def __init__(self):
self.msg_code = -1
self.msg = ""
def on_notify(self, msg_code: int, msg: str):
self.msg_code = msg_code
self.msg = msg
class MockAccountsManagementDelegate(delegates.AccountsManagementDelegate):
def __init__(self):
self._account_list: Dict[str, models.Account] = {}
self._account_updates_queue: fq.FinishableQueue = fq.FinishableQueue(
queue_to_finish=queue.Queue()
)
@property
def accounts(self) -> Dict[str, models.Account]:
return self._account_list
@property
def account_updates_queue(self) -> fq.FinishableQueue:
return self._account_updates_queue
def on_account_list_update(self, account_list: List[str]):
for account_id in account_list:
self._account_list[account_id] = models.Account(account_id)
async def sub_account_updates(self, account: models.Account):
pass
async def unsub_account_updates(self):
pass
def on_disconnected(self):
pass
class MockLiveTicksListener(listeners.LiveTicksListener):
def __init__(self):
self.ticks: List[Union[wrapper.HistoricalTick,
wrapper.HistoricalTickBidAsk,
wrapper.HistoricalTickLast]] = []
self.finished = False
def on_tick_receive(self, req_id: int,
tick: Union[wrapper.HistoricalTick,
wrapper.HistoricalTickBidAsk,
wrapper.HistoricalTickLast,]):
self.ticks.append(tick)
def on_finish(self, req_id: int):
self.finished = True
def on_err(self, err: error.IBError):
raise err
| true | true |
f72af7d6e7b04db16a0baa10f553c130371e0a1e | 1,561 | py | Python | __scraping__/comics.panini.it - scrapy/main-itemloader.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | __scraping__/comics.panini.it - scrapy/main-itemloader.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | __scraping__/comics.panini.it - scrapy/main-itemloader.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | #!/usr/bin/env python3
# date: 2019.08.06
# https://stackoverflow.com/questions/57366488/how-to-pass-the-single-link-in-a-nested-url-scrape
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
def clean(text):
    """Normalize scraped text.

    Replaces non-breaking spaces with regular spaces, strips each line,
    and joins all lines into a single space-separated string.
    """
    without_nbsp = text.replace('\xa0', ' ')
    lines = without_nbsp.strip().split('\n')
    return ' '.join(segment.strip() for segment in lines)
class ComicscraperItem(scrapy.Item):
    """Scraped item holding a comic's title, detail-page link, and price."""
    # Text fields run through clean() on load (NBSP removal, whitespace collapse).
    title = scrapy.Field(input_processor=MapCompose(clean))
    link = scrapy.Field()
    price = scrapy.Field(input_processor=MapCompose(clean))
class PaniniSpider(scrapy.Spider):
    """Spider that lists magazines on comics.panini.it and follows each
    entry's detail page to collect its price."""
    name = "spiderP"
    start_urls = ["http://comics.panini.it/store/pub_ita_it/magazines.html"]
    def parse(self, response):
        # One ItemLoader per magazine anchor in the listing page.
        for sel in response.xpath("//div[@class='list-group']//h3/a"):
            l = ItemLoader(item=ComicscraperItem(), selector=sel)
            l.add_xpath('title', './text()')
            l.add_xpath('link', './@href')
            # Follow the detail page for the price; the partially filled
            # loader travels along in request.meta.
            request = scrapy.Request(sel.xpath('./@href').extract_first(), callback=self.parse_isbn, dont_filter=True)
            request.meta['l'] = l
            yield request
    def parse_isbn(self, response):
        # Finish the loader started in parse() with the detail-page price.
        l = response.meta['l']
        l.add_value('price', response.xpath("//p[@class='special-price']//span/text()").get())
        return l.load_item()
from scrapy.crawler import CrawlerProcess
# Run the spider in-process and export the scraped items to output.csv.
c = CrawlerProcess({
    'USER_AGENT': 'Mozilla/5.0',
    'FEED_FORMAT': 'csv',  # csv, json, xml
    'FEED_URI': 'output.csv',  #
})
c.crawl(PaniniSpider)
c.start()
| 31.22 | 118 | 0.643177 |
import scrapy
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
def clean(text):
text = text.replace('\xa0', ' ')
text = text.strip().split('\n')
text = ' '.join(x.strip() for x in text)
return text
class ComicscraperItem(scrapy.Item):
title = scrapy.Field(input_processor=MapCompose(clean))
link = scrapy.Field()
price = scrapy.Field(input_processor=MapCompose(clean))
class PaniniSpider(scrapy.Spider):
name = "spiderP"
start_urls = ["http://comics.panini.it/store/pub_ita_it/magazines.html"]
def parse(self, response):
for sel in response.xpath("//div[@class='list-group']//h3/a"):
l = ItemLoader(item=ComicscraperItem(), selector=sel)
l.add_xpath('title', './text()')
l.add_xpath('link', './@href')
request = scrapy.Request(sel.xpath('./@href').extract_first(), callback=self.parse_isbn, dont_filter=True)
request.meta['l'] = l
yield request
def parse_isbn(self, response):
l = response.meta['l']
l.add_value('price', response.xpath("//p[@class='special-price']//span/text()").get())
return l.load_item()
from scrapy.crawler import CrawlerProcess
c = CrawlerProcess({
'USER_AGENT': 'Mozilla/5.0',
'FEED_FORMAT': 'csv',
'FEED_URI': 'output.csv',
})
c.crawl(PaniniSpider)
c.start()
| true | true |
f72af7e4a722a6457a4e5bb9862634b05fb4b74c | 3,915 | py | Python | sendSMSSkillLambda/package/ask_sdk_model/interfaces/geolocation/altitude.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | null | null | null | sendSMSSkillLambda/package/ask_sdk_model/interfaces/geolocation/altitude.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | null | null | null | sendSMSSkillLambda/package/ask_sdk_model/interfaces/geolocation/altitude.py | shneydor/aws-alexa-lambda-workshop | 0fa6b7067b04fc85c46b9ce1c2cc04554ed5baf4 | [
"Apache-2.0"
] | 1 | 2019-10-11T17:15:20.000Z | 2019-10-11T17:15:20.000Z | # coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class Altitude(object):
    """
    An object containing the altitude information of the device.
    :param altitude_in_meters: A double representing the altitude of the device in meters.
    :type altitude_in_meters: (optional) float
    :param accuracy_in_meters: A double representing the accuracy of the altitude measurement in meters.
    :type accuracy_in_meters: (optional) float
    """
    # Attribute name -> type string used by the SDK's deserializer.
    deserialized_types = {
        'altitude_in_meters': 'float',
        'accuracy_in_meters': 'float'
    } # type: Dict
    # snake_case attribute -> camelCase JSON key on the wire.
    attribute_map = {
        'altitude_in_meters': 'altitudeInMeters',
        'accuracy_in_meters': 'accuracyInMeters'
    } # type: Dict
    def __init__(self, altitude_in_meters=None, accuracy_in_meters=None):
        # type: (Optional[float], Optional[float]) -> None
        """An object containing the altitude information of the device.
        :param altitude_in_meters: A double representing the altitude of the device in meters.
        :type altitude_in_meters: (optional) float
        :param accuracy_in_meters: A double representing the accuracy of the altitude measurement in meters.
        :type accuracy_in_meters: (optional) float
        """
        self.__discriminator_value = None # type: str
        self.altitude_in_meters = altitude_in_meters
        self.accuracy_in_meters = accuracy_in_meters
    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {} # type: Dict
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements (nested models and enums).
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                # Nested model object.
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values that are nested models or enums.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, Altitude):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
import pprint
import re
import six
import typing
from enum import Enum
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
class Altitude(object):
deserialized_types = {
'altitude_in_meters': 'float',
'accuracy_in_meters': 'float'
}
attribute_map = {
'altitude_in_meters': 'altitudeInMeters',
'accuracy_in_meters': 'accuracyInMeters'
}
def __init__(self, altitude_in_meters=None, accuracy_in_meters=None):
self.__discriminator_value = None
self.altitude_in_meters = altitude_in_meters
self.accuracy_in_meters = accuracy_in_meters
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.deserialized_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else
x.value if isinstance(x, Enum) else x,
value
))
elif isinstance(value, Enum):
result[attr] = value.value
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else
(item[0], item[1].value)
if isinstance(item[1], Enum) else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, Altitude):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f72af7f234a3a7aaf0e57fc752f62d4dd0d648af | 38 | py | Python | frontend/GUI/ROOT_AND_MAIN/USER_WINDOW/USER_FRAME/callbacks.py | Lucianofc138/smart_scheduler_usm | 0ac50d71cfd1947b889a9551c31a3a67ecabfb88 | [
"MIT"
] | null | null | null | frontend/GUI/ROOT_AND_MAIN/USER_WINDOW/USER_FRAME/callbacks.py | Lucianofc138/smart_scheduler_usm | 0ac50d71cfd1947b889a9551c31a3a67ecabfb88 | [
"MIT"
] | null | null | null | frontend/GUI/ROOT_AND_MAIN/USER_WINDOW/USER_FRAME/callbacks.py | Lucianofc138/smart_scheduler_usm | 0ac50d71cfd1947b889a9551c31a3a67ecabfb88 | [
"MIT"
] | null | null | null | def new_user(user_stringvar):
pass | 19 | 29 | 0.763158 | def new_user(user_stringvar):
pass | true | true |
f72af8f3d31d026bd4517c8b3a0509701311dff5 | 4,016 | py | Python | netmiko/exercise4.py | Tes3awy/DevNet-DC | 03b4c7dc82221943bc25d0ab9d74ee2697fcc34c | [
"MIT"
] | null | null | null | netmiko/exercise4.py | Tes3awy/DevNet-DC | 03b4c7dc82221943bc25d0ab9d74ee2697fcc34c | [
"MIT"
] | null | null | null | netmiko/exercise4.py | Tes3awy/DevNet-DC | 03b4c7dc82221943bc25d0ab9d74ee2697fcc34c | [
"MIT"
] | null | null | null | # Export Nexus device show interface brief command output to
# an Excel file
import json
import xlsxwriter
from netmiko import ConnectHandler
# Devices to SSH into
# NOTE(review): credentials are hardcoded below; consider loading them from
# environment variables or a secrets store before sharing this script.
devices = [
    {
        "device_type": "cisco_nxos",
        "ip": "sbx-nxos-mgmt.cisco.com",
        "username": "admin",
        "password": "Admin_1234!",
        "port": 8181,
        "fast_cli": False,
        "session_log": "nxos-exercise4.log",
    },
    {
        "device_type": "cisco_nxos",
        "ip": "192.168.90.46",
        "username": "admin",
        "password": "P@ssw0rd",
        "fast_cli": False,
        "session_log": "nxos-exercise4-1.log",
        "verbose": True,
    },
    {
        "device_type": "cisco_nxos",
        "ip": "192.168.90.47",
        "username": "admin",
        "password": "P@ssw0rd",
        "fast_cli": False,
        "session_log": "nxos-exercise4-2.log",
        "verbose": True,
    },
]
# Create an Excel file
with xlsxwriter.Workbook(filename="Ex4-Nexus-Interfaces-Brief.xlsx") as workbook:
    # Loop over each device
    for device in devices:
        # Connect to each device
        with ConnectHandler(**device) as net_connect:
            # Parse hostname of each device
            hostname = net_connect.send_command(
                command_string="show hostname", use_textfsm=True
            )[0]["hostname"]
            # Parse show interface brief of each device
            intfs = net_connect.send_command(
                command_string="show interface brief", use_textfsm=True
            )
            # Export interfaces to a JSON file for readability (Comment out if you don't need it)
            with open(file=f"{hostname}-intfs-brief.json", mode="w") as outfile:
                json.dump(obj=intfs, fp=outfile, indent=4, sort_keys=True)
            # Create worksheets with the hostname of each device
            # NOTE(review): Excel caps sheet names at 31 chars, so a long
            # hostname would make add_worksheet() raise — TODO confirm hostnames stay short.
            worksheet = workbook.add_worksheet(f"{hostname} Interface Brief")
            # Auto Filter for header line
            worksheet.autofilter("A1:L1")
            # Freeze top row and very left column only
            worksheet.freeze_panes(1, 1)
            # Header line
            header_line = {
                "A1": "Interface Name", # 1
                "B1": "IP Address", # 2
                "C1": "Interface Type", # 3
                "D1": "Mode", # 4
                "E1": "VLAN", # 5
                "F1": "Port-Channel", # 6
                "G1": "Speed", # 7
                "H1": "Status", # 8
                "I1": "MTU", # 9
                "J1": "VRF", # 10
                "K1": "Reason", # 11
                "L1": "Description", # 12
            }
            # Format header line text
            header_line_frmt = workbook.add_format(
                {
                    "bold": True,
                    "align": "center",
                    "valign": "vcenter",
                    "bg_color": "#0058a0",
                    "font_color": "#FFFFFF",
                }
            )
            # Write header line
            for key, value in header_line.items():
                worksheet.write(key, value, header_line_frmt)
            # Initial Values for row and col
            row = 1
            col = 0
            # Place data according to header line
            for intf in intfs:
                worksheet.write(row, col + 0, intf["interface"]) # Interface Name
                worksheet.write(row, col + 1, intf["ip"]) # IP
                worksheet.write(row, col + 2, intf["type"]) # Type
                worksheet.write(row, col + 3, intf["mode"]) # Mode
                worksheet.write(row, col + 4, intf["vlan"]) # VLAN
                worksheet.write(row, col + 5, intf["portch"]) # Port-Channel
                worksheet.write(row, col + 6, intf["speed"]) # Speed
                worksheet.write(row, col + 7, intf["status"]) # Status
                worksheet.write(row, col + 8, intf["mtu"]) # MTU
                worksheet.write(row, col + 9, intf["vrf"]) # VRF
                worksheet.write(row, col + 10, intf["reason"]) # Reason
                worksheet.write(row, col + 11, intf["description"]) # Description
                # Jump to next row
                row += 1
print("Done")
| 34.033898 | 93 | 0.528884 |
import json
import xlsxwriter
from netmiko import ConnectHandler
devices = [
{
"device_type": "cisco_nxos",
"ip": "sbx-nxos-mgmt.cisco.com",
"username": "admin",
"password": "Admin_1234!",
"port": 8181,
"fast_cli": False,
"session_log": "nxos-exercise4.log",
},
{
"device_type": "cisco_nxos",
"ip": "192.168.90.46",
"username": "admin",
"password": "P@ssw0rd",
"fast_cli": False,
"session_log": "nxos-exercise4-1.log",
"verbose": True,
},
{
"device_type": "cisco_nxos",
"ip": "192.168.90.47",
"username": "admin",
"password": "P@ssw0rd",
"fast_cli": False,
"session_log": "nxos-exercise4-2.log",
"verbose": True,
},
]
with xlsxwriter.Workbook(filename="Ex4-Nexus-Interfaces-Brief.xlsx") as workbook:
for device in devices:
with ConnectHandler(**device) as net_connect:
hostname = net_connect.send_command(
command_string="show hostname", use_textfsm=True
)[0]["hostname"]
intfs = net_connect.send_command(
command_string="show interface brief", use_textfsm=True
)
with open(file=f"{hostname}-intfs-brief.json", mode="w") as outfile:
json.dump(obj=intfs, fp=outfile, indent=4, sort_keys=True)
# Create worksheets with the hostname of each device
worksheet = workbook.add_worksheet(f"{hostname} Interface Brief")
# Auto Filter for header line
worksheet.autofilter("A1:L1")
# Freeze top row and very left column only
worksheet.freeze_panes(1, 1)
# Header line
header_line = {
"A1": "Interface Name", # 1
"B1": "IP Address", # 2
"C1": "Interface Type", # 3
"D1": "Mode", # 4
"E1": "VLAN", # 5
"F1": "Port-Channel", # 6
"G1": "Speed", # 7
"H1": "Status", # 8
"I1": "MTU", # 9
"J1": "VRF", # 10
"K1": "Reason", # 11
"L1": "Description", # 12
}
# Format header line text
header_line_frmt = workbook.add_format(
{
"bold": True,
"align": "center",
"valign": "vcenter",
"bg_color": "#0058a0",
"font_color": "#FFFFFF",
}
)
# Write header line
for key, value in header_line.items():
worksheet.write(key, value, header_line_frmt)
# Initial Values for row and col
row = 1
col = 0
# Place data according to header line
for intf in intfs:
worksheet.write(row, col + 0, intf["interface"]) # Interface Name
worksheet.write(row, col + 1, intf["ip"]) # IP
worksheet.write(row, col + 2, intf["type"]) # Type
worksheet.write(row, col + 3, intf["mode"]) # Mode
worksheet.write(row, col + 4, intf["vlan"]) # VLAN
worksheet.write(row, col + 5, intf["portch"]) # Port-Channel
worksheet.write(row, col + 6, intf["speed"]) # Speed
worksheet.write(row, col + 7, intf["status"]) # Status
worksheet.write(row, col + 8, intf["mtu"]) # MTU
worksheet.write(row, col + 9, intf["vrf"]) # VRF
worksheet.write(row, col + 10, intf["reason"]) # Reason
worksheet.write(row, col + 11, intf["description"]) # Description
# Jump to next row
row += 1
print("Done")
| true | true |
f72af970ed2aadceab74dc301a14ce7e5a191b93 | 2,414 | py | Python | examples/adspygoogle/dfp/v201101/delete_custom_targeting_keys.py | hockeyprincess/google-api-dfp-python | efa82a8d85cbdc90f030db9d168790c55bd8b12a | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/dfp/v201101/delete_custom_targeting_keys.py | hockeyprincess/google-api-dfp-python | efa82a8d85cbdc90f030db9d168790c55bd8b12a | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/dfp/v201101/delete_custom_targeting_keys.py | hockeyprincess/google-api-dfp-python | efa82a8d85cbdc90f030db9d168790c55bd8b12a | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example deletes a custom targeting key by its name. To determine which
custom targeting keys exist, run get_all_custom_targeting_keys_and_values.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle.dfp.DfpClient import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service. By default, the request is always made against
# sandbox environment.
custom_targeting_service = client.GetCustomTargetingService(
    'https://sandbox.google.com', 'v201101')
# Name of the custom targeting key to delete (replace before running).
key_name = 'INSERT_CUSTOM_TARGETING_KEY_NAME_HERE'
# Bind the :name placeholder used in the PQL filter statement below.
values = [{
    'key': 'name',
    'value': {
        'xsi_type': 'TextValue',
        'value': key_name
    }
}]
filter_statement = {'query': 'WHERE name = :name',
                    'values': values}
# Get custom targeting keys.
keys = custom_targeting_service.GetCustomTargetingKeysByStatement(
    filter_statement)[0]['results']
print 'Number of custom targeting keys to be deleted: %s' % len(keys)
if keys:
  key_ids = [key['id'] for key in keys]
  # NOTE(review): ', '.join() requires string elements — presumably the API
  # returns key ids as strings here; verify before changing.
  action = {'type': 'DeleteCustomTargetingKeyAction'}
  filter_statement = {'query': 'WHERE id IN (%s)' % ', '.join(key_ids)}
  # Delete custom targeting keys.
  result = custom_targeting_service.PerformCustomTargetingKeyAction(
      action, filter_statement)[0]
  # Display results.
  if result and result['numChanges'] > 0:
    print 'Number of custom targeting keys deleted: %s' % result['numChanges']
  else:
    print 'No custom targeting keys were deleted.'
| 34.485714 | 80 | 0.718724 |
"""This example deletes a custom targeting key by its name. To determine which
custom targeting keys exist, run get_all_custom_targeting_keys_and_values.py."""
__author__ = 'api.sgrinberg@gmail.com (Stan Grinberg)'
import os
import sys
sys.path.append(os.path.join('..', '..', '..', '..'))
from adspygoogle.dfp.DfpClient import DfpClient
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
custom_targeting_service = client.GetCustomTargetingService(
'https://sandbox.google.com', 'v201101')
key_name = 'INSERT_CUSTOM_TARGETING_KEY_NAME_HERE'
values = [{
'key': 'name',
'value': {
'xsi_type': 'TextValue',
'value': key_name
}
}]
filter_statement = {'query': 'WHERE name = :name',
'values': values}
keys = custom_targeting_service.GetCustomTargetingKeysByStatement(
filter_statement)[0]['results']
print 'Number of custom targeting keys to be deleted: %s' % len(keys)
if keys:
key_ids = [key['id'] for key in keys]
action = {'type': 'DeleteCustomTargetingKeyAction'}
filter_statement = {'query': 'WHERE id IN (%s)' % ', '.join(key_ids)}
result = custom_targeting_service.PerformCustomTargetingKeyAction(
action, filter_statement)[0]
if result and result['numChanges'] > 0:
print 'Number of custom targeting keys deleted: %s' % result['numChanges']
else:
print 'No custom targeting keys were deleted.'
| false | true |
f72afbb1ae862f6cc33248e2ecf5c95000d6017c | 7,390 | py | Python | server/opendp_apps/dataset/dataset_formatter.py | opendifferentialprivacy/opendp-ux | 2669602d0a65f6a83d9e9916cbf753c38fd64c94 | [
"MIT"
] | null | null | null | server/opendp_apps/dataset/dataset_formatter.py | opendifferentialprivacy/opendp-ux | 2669602d0a65f6a83d9e9916cbf753c38fd64c94 | [
"MIT"
] | 82 | 2020-08-06T17:11:12.000Z | 2021-02-07T21:01:05.000Z | server/opendp_apps/dataset/dataset_formatter.py | opendifferentialprivacy/opendp-ux | 2669602d0a65f6a83d9e9916cbf753c38fd64c94 | [
"MIT"
] | 2 | 2020-10-16T22:03:24.000Z | 2020-11-15T22:45:19.000Z | """
Format a DataSetInfo for use in a JSON Release
"""
import json
from opendp_apps.dataset.models import DataSetInfo
from opendp_apps.dataset import static_vals as dstatic
from opendp_apps.model_helpers.basic_err_check import BasicErrCheck
from opendp_apps.model_helpers.basic_response import ok_resp, err_resp, BasicResponse
class DataSetFormatter(BasicErrCheck):
    def __init__(self, dataset_info: DataSetInfo):
        """Init with a DataSetInfo object and immediately build the formatted dict.

        After construction, callers must check ``.has_error()`` before reading
        the result via ``get_formatted_info()``.
        """
        assert isinstance(dataset_info, DataSetInfo), '"dataset_info" must be a DataSetInfo instance.'
        self.dataset = dataset_info
        # Populated by run_formatter(); stays empty on error.
        self.formatted_info = {}
        self.run_formatter()
def run_formatter(self):
"""
Format the dataset info
"""
if self.dataset.source == DataSetInfo.SourceChoices.UserUpload:
self.dataset = self.dataset.uploadfileinfo # Get the UploadFileInfo object
self.format_user_upload()
elif self.dataset.source == DataSetInfo.SourceChoices.Dataverse:
self.dataset = self.dataset.dataversefileinfo # Get the DataverseFileInfo object
self.format_dataverse_dataset()
else:
self.add_err_msg('Unknown dataset type: {self.dataset.source}')
return
def get_formatted_info(self, as_json=False):
"""
Return the formatted data
"""
assert self.has_error() is False,\
"Do not call this method before checking if \".has_error()\" is False"
if as_json:
return json.dumps(self.formatted_info, indent=4)
return self.formatted_info
def format_user_upload(self):
"""Format UserUpload dataset"""
if self.has_error():
return
ds_dict = {
'type': self.dataset.source,
'name': self.dataset.name,
'creator': self.dataset.creator,
'created': self.dataset.created,
}
self.formatted_info = ds_dict
    def format_dataverse_dataset(self):
        """Build the release dict for a Dataverse dataset.

        Pulls the citation and name from dataset_schema_info and the file
        details from file_schema_info; bails out (recording an error) if any
        lookup fails.
        """
        if self.has_error():
            return
        # Pull citation from self.dataset.dataset_schema_info
        #
        citation_info = self.get_citation_from_dataset_schema_or_None()
        if citation_info.success:
            citation = citation_info.data
        else:
            self.add_err_msg(citation_info.message)
            return
        # Pull name from self.dataset.dataset_schema_info
        #
        name_info = self.get_name_from_dataset_schema()
        if name_info.success:
            # NOTE(review): ds_name is validated but never used below; the
            # dict uses self.dataset.name instead — confirm which is intended.
            ds_name = name_info.data
        else:
            self.add_err_msg(name_info.message)
            return
        # Format info in self.dataset.file_schema_info
        #
        file_info = self.get_file_info()
        if file_info.success:
            file_dict = file_info.data
        else:
            self.add_err_msg(file_info.message)
            return
        ds_dict = {
            'type': self.dataset.source,
            'name': self.dataset.name,
            "citation": citation,
            "doi": self.dataset.dataset_doi,
            "identifier": self.get_dataset_identifier_or_none(),
            'release_deposit_info': {
                "deposited": False,
                # if True, add: "release_url": "some-url"
                # update with https://github.com/opendp/dpcreator/issues/34
                # "release_urls": {
                #     "release_json": "http://dataverse.edu/some.json",
                #     "release_pdf": "http://dataverse.edu/some.pdf"
                # }
            },
            'installation': {
                "name": self.dataset.dv_installation.name,
                "url": self.dataset.dv_installation.dataverse_url
            },
            "file_information": file_dict
        }
        self.formatted_info = ds_dict
def get_name_from_dataset_schema(self) -> BasicResponse:
"""
Return the "name" text from self.dataset_schema_info (a bit ugly...)
Trying to return string from: self.dataset.dataset_schema_info['name']
"""
if self.has_error():
# Shouldn't happen...
return err_resp(self.get_err_msg())
if not self.dataset.dataset_schema_info:
return err_resp('".dataset_schema_info" is empty')
if not 'name' in self.dataset.dataset_schema_info:
return err_resp('"name" not found in ".dataset_schema_info" not found')
ds_name = self.dataset.dataset_schema_info['name']
if not ds_name:
return err_resp('"name" within ".dataset_schema_info" is empty')
return ok_resp(ds_name)
def get_dataset_identifier_or_none(self):
"""Return the identifer within dataset_schema_info['identifer']"""
if '@id' in self.dataset.dataset_schema_info['@id']:
return elf.dataset.dataset_schema_info['@id']
return None
def get_citation_from_dataset_schema_or_None(self):
"""
Return the citation text from self.dataset_schema_info (a bit ugly...)
Trying to return string from: self.dataset.dataset_schema_info['citation'][0]
"""
if self.has_error():
# Shouldn't happen...
return err_resp(self.get_err_msg())
if not self.dataset.dataset_schema_info:
return err_resp('".dataset_schema_info" is empty')
if not 'citation' in self.dataset.dataset_schema_info:
return ok_resp(None)
# If the citation key is found, then do error checking....
if (not self.dataset.dataset_schema_info['citation']) or \
(not isinstance(self.dataset.dataset_schema_info['citation'], list)):
return err_resp('"citation" within ".dataset_schema_info" is empty or not a list')
if not 'text' in self.dataset.dataset_schema_info['citation'][0]:
return err_resp('"[\'citation\'][0][\'text\']" not found in ".dataset_schema_info"')
return ok_resp(self.dataset.dataset_schema_info['citation'][0]['text'])
def get_file_info(self):
"""
Return information from the "DataverseFileInfo.file_schema_info" field
Ideal:
{
"name": "crisis.tab"
"identifier": "https://doi.org/10.7910/DVN/OLD7MB/ZI4N3J",
"fileFormat": "text/tab-separated-values",
}
"""
if self.has_error():
# Shouldn't happen!
return err_resp(self.get_err_msg())
if not self.dataset.file_schema_info:
return err_resp('".file_schema_info" is empty')
file_dict = {}
if 'name' in self.dataset.file_schema_info:
file_dict['name'] = self.dataset.file_schema_info['name']
else:
return err_resp('"name" not found in ".file_schema_info" not found')
if 'identifier' in self.dataset.file_schema_info:
file_dict['identifier'] = self.dataset.file_schema_info['identifier']
else:
file_dict['identifier'] = None
if 'fileFormat' in self.dataset.file_schema_info:
file_dict['fileFormat'] = self.dataset.file_schema_info['fileFormat']
else:
file_dict['fileFormat'] = None
return ok_resp(file_dict)
| 34.858491 | 102 | 0.604195 | import json
from opendp_apps.dataset.models import DataSetInfo
from opendp_apps.dataset import static_vals as dstatic
from opendp_apps.model_helpers.basic_err_check import BasicErrCheck
from opendp_apps.model_helpers.basic_response import ok_resp, err_resp, BasicResponse
class DataSetFormatter(BasicErrCheck):
def __init__(self, dataset_info: DataSetInfo):
assert isinstance(dataset_info, DataSetInfo), '"dataset_info" must be a DataSetInfo instance.'
self.dataset = dataset_info
self.formatted_info = {}
self.run_formatter()
def run_formatter(self):
if self.dataset.source == DataSetInfo.SourceChoices.UserUpload:
self.dataset = self.dataset.uploadfileinfo
self.format_user_upload()
elif self.dataset.source == DataSetInfo.SourceChoices.Dataverse:
self.dataset = self.dataset.dataversefileinfo
self.format_dataverse_dataset()
else:
self.add_err_msg('Unknown dataset type: {self.dataset.source}')
return
def get_formatted_info(self, as_json=False):
assert self.has_error() is False,\
"Do not call this method before checking if \".has_error()\" is False"
if as_json:
return json.dumps(self.formatted_info, indent=4)
return self.formatted_info
def format_user_upload(self):
if self.has_error():
return
ds_dict = {
'type': self.dataset.source,
'name': self.dataset.name,
'creator': self.dataset.creator,
'created': self.dataset.created,
}
self.formatted_info = ds_dict
def format_dataverse_dataset(self):
if self.has_error():
return
citation_info = self.get_citation_from_dataset_schema_or_None()
if citation_info.success:
citation = citation_info.data
else:
self.add_err_msg(citation_info.message)
return
name_info = self.get_name_from_dataset_schema()
if name_info.success:
ds_name = name_info.data
else:
self.add_err_msg(name_info.message)
return
file_info = self.get_file_info()
if file_info.success:
file_dict = file_info.data
else:
self.add_err_msg(file_info.message)
return
ds_dict = {
'type': self.dataset.source,
'name': self.dataset.name,
"citation": citation,
"doi": self.dataset.dataset_doi,
"identifier": self.get_dataset_identifier_or_none(),
'release_deposit_info': {
"deposited": False,
},
'installation': {
"name": self.dataset.dv_installation.name,
"url": self.dataset.dv_installation.dataverse_url
},
"file_information": file_dict
}
self.formatted_info = ds_dict
def get_name_from_dataset_schema(self) -> BasicResponse:
if self.has_error():
return err_resp(self.get_err_msg())
if not self.dataset.dataset_schema_info:
return err_resp('".dataset_schema_info" is empty')
if not 'name' in self.dataset.dataset_schema_info:
return err_resp('"name" not found in ".dataset_schema_info" not found')
ds_name = self.dataset.dataset_schema_info['name']
if not ds_name:
return err_resp('"name" within ".dataset_schema_info" is empty')
return ok_resp(ds_name)
def get_dataset_identifier_or_none(self):
if '@id' in self.dataset.dataset_schema_info['@id']:
return elf.dataset.dataset_schema_info['@id']
return None
def get_citation_from_dataset_schema_or_None(self):
if self.has_error():
# Shouldn't happen...
return err_resp(self.get_err_msg())
if not self.dataset.dataset_schema_info:
return err_resp('".dataset_schema_info" is empty')
if not 'citation' in self.dataset.dataset_schema_info:
return ok_resp(None)
if (not self.dataset.dataset_schema_info['citation']) or \
(not isinstance(self.dataset.dataset_schema_info['citation'], list)):
return err_resp('"citation" within ".dataset_schema_info" is empty or not a list')
if not 'text' in self.dataset.dataset_schema_info['citation'][0]:
return err_resp('"[\'citation\'][0][\'text\']" not found in ".dataset_schema_info"')
return ok_resp(self.dataset.dataset_schema_info['citation'][0]['text'])
def get_file_info(self):
if self.has_error():
return err_resp(self.get_err_msg())
if not self.dataset.file_schema_info:
return err_resp('".file_schema_info" is empty')
file_dict = {}
if 'name' in self.dataset.file_schema_info:
file_dict['name'] = self.dataset.file_schema_info['name']
else:
return err_resp('"name" not found in ".file_schema_info" not found')
if 'identifier' in self.dataset.file_schema_info:
file_dict['identifier'] = self.dataset.file_schema_info['identifier']
else:
file_dict['identifier'] = None
if 'fileFormat' in self.dataset.file_schema_info:
file_dict['fileFormat'] = self.dataset.file_schema_info['fileFormat']
else:
file_dict['fileFormat'] = None
return ok_resp(file_dict)
| true | true |
f72afc6fd07bcfad6b0ce2194a5a5dfd54a13f25 | 9,191 | py | Python | 04_test.py | 500kg/learn2branch | 693d6f68def3ce290a0f5f289820e708019c019a | [
"MIT"
] | 248 | 2019-01-10T21:58:46.000Z | 2022-03-30T07:55:34.000Z | 04_test.py | 500kg/learn2branch | 693d6f68def3ce290a0f5f289820e708019c019a | [
"MIT"
] | 17 | 2018-10-09T19:17:25.000Z | 2022-02-27T07:33:11.000Z | 04_test.py | 500kg/learn2branch | 693d6f68def3ce290a0f5f289820e708019c019a | [
"MIT"
] | 66 | 2019-06-08T12:18:43.000Z | 2022-03-29T07:44:18.000Z | import os
import sys
import importlib
import argparse
import csv
import numpy as np
import time
import pickle
import pathlib
import gzip
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import svmrank
import utilities
from utilities_tf import load_batch_gcnn
def load_batch_flat(sample_files, feats_type, augment_feats, normalize_feats):
    """Load and concatenate flat candidate-feature samples from disk.

    Returns (cand_features, n_cands_per_sample, cand_choices, cand_scoress)
    as numpy arrays, where n_cands_per_sample records how many candidate
    rows each sample contributed to the concatenated feature matrix.
    """
    features_list = []
    choices_list = []
    scores_list = []
    for filename in sample_files:
        states, scores, choice = utilities.load_flat_samples(
            filename, feats_type, 'scores', augment_feats, normalize_feats)
        features_list.append(states)
        choices_list.append(choice)
        scores_list.append(scores)

    counts = [states.shape[0] for states in features_list]

    cand_features = np.concatenate(features_list, axis=0).astype(np.float32, copy=False)
    cand_choices = np.asarray(choices_list).astype(np.int32, copy=False)
    cand_scoress = np.concatenate(scores_list, axis=0).astype(np.float32, copy=False)
    n_cands_per_sample = np.asarray(counts).astype(np.int32, copy=False)

    return cand_features, n_cands_per_sample, cand_choices, cand_scoress
def padding(output, n_vars_per_sample, fill=-1e8):
    """Pad per-sample score vectors to a common width.

    Splits `output` (shape 1 x total_vars) into one chunk per sample,
    right-pads each chunk with `fill` up to the size of the largest
    sample, and stacks the chunks into an (n_samples x max_vars) tensor.
    """
    n_vars_max = tf.reduce_max(n_vars_per_sample)
    chunks = tf.split(
        value=output,
        num_or_size_splits=n_vars_per_sample,
        axis=1,
    )
    padded = [
        tf.pad(
            chunk,
            paddings=[[0, 0], [0, n_vars_max - tf.shape(chunk)[1]]],
            mode='CONSTANT',
            constant_values=fill)
        for chunk in chunks
    ]
    return tf.concat(padded, axis=0)
def process(policy, dataloader, top_k):
    """Evaluate a branching policy over a dataset and return mean top-k accuracy.

    :param policy: dict with 'type' ('gcnn' or 'ml-competitor'), 'model', and
        for ml-competitors 'feat_shift'/'feat_scale' normalization values
    :param dataloader: iterable of batches (TF eager tensors)
    :param top_k: list of k values to compute accuracy@k for
    :return: numpy array of mean accuracies, one per entry in top_k
    """
    mean_kacc = np.zeros(len(top_k))

    n_samples_processed = 0
    for batch in dataloader:
        if policy['type'] == 'gcnn':
            c, ei, ev, v, n_cs, n_vs, n_cands, cands, best_cands, cand_scores = batch

            pred_scores = policy['model']((c, ei, ev, v, tf.reduce_sum(n_cs, keepdims=True), tf.reduce_sum(n_vs, keepdims=True)), tf.convert_to_tensor(False))

            # filter candidate variables
            pred_scores = tf.expand_dims(tf.gather(tf.squeeze(pred_scores, 0), cands), 0)

        elif policy['type'] == 'ml-competitor':
            cand_feats, n_cands, best_cands, cand_scores = batch

            # move to numpy
            cand_feats = cand_feats.numpy()
            n_cands = n_cands.numpy()

            # feature normalization
            cand_feats = (cand_feats - policy['feat_shift']) / policy['feat_scale']

            pred_scores = policy['model'].predict(cand_feats)

            # move back to TF
            pred_scores = tf.convert_to_tensor(pred_scores.reshape((1, -1)), dtype=tf.float32)

        # padding
        pred_scores = padding(pred_scores, n_cands)
        true_scores = padding(tf.reshape(cand_scores, (1, -1)), n_cands)
        true_bestscore = tf.reduce_max(true_scores, axis=-1, keepdims=True)

        # Sanity check: the recorded best candidate must attain the best score
        assert all(true_bestscore.numpy() == np.take_along_axis(true_scores.numpy(), best_cands.numpy().reshape((-1, 1)), axis=1))

        # accuracy@k: did any of the k highest-predicted candidates attain
        # the true best score?
        kacc = []
        for k in top_k:
            pred_top_k = tf.nn.top_k(pred_scores, k=k)[1].numpy()
            pred_top_k_true_scores = np.take_along_axis(true_scores.numpy(), pred_top_k, axis=1)
            kacc.append(np.mean(np.any(pred_top_k_true_scores == true_bestscore.numpy(), axis=1)))
        kacc = np.asarray(kacc)

        # Weight the running mean by the number of samples in this batch
        batch_size = int(n_cands.shape[0])
        mean_kacc += kacc * batch_size
        n_samples_processed += batch_size

    mean_kacc /= n_samples_processed

    return mean_kacc
if __name__ == '__main__':
    # Evaluate all trained branching policies on the test set and write
    # accuracy@k results to a timestamped CSV.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        'problem',
        help='MILP instance type to process.',
        choices=['setcover', 'cauctions', 'facilities', 'indset'],
    )
    parser.add_argument(
        '-g', '--gpu',
        help='CUDA GPU id (-1 for CPU).',
        type=int,
        default=0,
    )
    args = parser.parse_args()

    print(f"problem: {args.problem}")
    print(f"gpu: {args.gpu}")

    seeds = [0, 1, 2, 3, 4]
    gcnn_models = ['baseline']
    other_models = ['extratrees_gcnn_agg', 'lambdamart_khalil', 'svmrank_khalil']
    test_batch_size = 128
    top_k = [1, 3, 5, 10]

    problem_folders = {
        'setcover': 'setcover/500r_1000c_0.05d',
        'cauctions': 'cauctions/100_500',
        'facilities': 'facilities/100_100_5',
        'indset': 'indset/500_4',
    }
    problem_folder = problem_folders[args.problem]

    if args.problem == 'setcover':
        gcnn_models += ['mean_convolution', 'no_prenorm']

    # Removed dead code: result_file was first set to a "_validation_" name
    # that was always overwritten here before being used.
    result_file = f"results/{args.problem}_test_{time.strftime('%Y%m%d-%H%M%S')}.csv"
    os.makedirs('results', exist_ok=True)

    ### TENSORFLOW SETUP ###
    if args.gpu == -1:
        os.environ['CUDA_VISIBLE_DEVICES'] = ''
    else:
        os.environ['CUDA_VISIBLE_DEVICES'] = f'{args.gpu}'
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    tf.enable_eager_execution(config)
    tf.executing_eagerly()

    test_files = list(pathlib.Path(f"data/samples/{problem_folder}/test").glob('sample_*.pkl'))
    test_files = [str(x) for x in test_files]

    print(f"{len(test_files)} test samples")

    evaluated_policies = [['gcnn', model] for model in gcnn_models] + \
                         [['ml-competitor', model] for model in other_models]

    fieldnames = [
        'policy',
        'seed',
    ] + [
        f'acc@{k}' for k in top_k
    ]
    with open(result_file, 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for policy_type, policy_name in evaluated_policies:
            print(f"{policy_type}:{policy_name}...")
            for seed in seeds:
                rng = np.random.RandomState(seed)
                tf.set_random_seed(rng.randint(np.iinfo(int).max))

                policy = {}
                policy['name'] = policy_name
                policy['type'] = policy_type

                if policy['type'] == 'gcnn':
                    # load model (module import path depends on the model name)
                    sys.path.insert(0, os.path.abspath(f"models/{policy['name']}"))
                    import model
                    importlib.reload(model)
                    del sys.path[0]
                    policy['model'] = model.GCNPolicy()
                    policy['model'].restore_state(f"trained_models/{args.problem}/{policy['name']}/{seed}/best_params.pkl")
                    policy['model'].call = tfe.defun(policy['model'].call, input_signature=policy['model'].input_signature)
                    policy['batch_datatypes'] = [tf.float32, tf.int32, tf.float32,
                                                 tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32]
                    policy['batch_fun'] = load_batch_gcnn
                else:
                    # load feature normalization parameters (best-effort:
                    # fall back to identity scaling if unavailable).
                    # Narrowed from a bare `except:` so Ctrl-C still works.
                    try:
                        with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/normalization.pkl", 'rb') as f:
                            policy['feat_shift'], policy['feat_scale'] = pickle.load(f)
                    except Exception:
                        policy['feat_shift'], policy['feat_scale'] = 0, 1

                    # load model
                    if policy_name.startswith('svmrank'):
                        policy['model'] = svmrank.Model().read(f"trained_models/{args.problem}/{policy['name']}/{seed}/model.txt")
                    else:
                        with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/model.pkl", 'rb') as f:
                            policy['model'] = pickle.load(f)

                    # load feature specifications
                    with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/feat_specs.pkl", 'rb') as f:
                        feat_specs = pickle.load(f)

                    policy['batch_datatypes'] = [tf.float32, tf.int32, tf.int32, tf.float32]
                    policy['batch_fun'] = lambda x: load_batch_flat(x, feat_specs['type'], feat_specs['augment'], feat_specs['qbnorm'])

                test_data = tf.data.Dataset.from_tensor_slices(test_files)
                test_data = test_data.batch(test_batch_size)
                test_data = test_data.map(lambda x: tf.py_func(
                    policy['batch_fun'], [x], policy['batch_datatypes']))
                test_data = test_data.prefetch(2)

                test_kacc = process(policy, test_data, top_k)
                print(f" {seed} " + " ".join([f"acc@{k}: {100*acc:4.1f}" for k, acc in zip(top_k, test_kacc)]))

                writer.writerow({
                    **{
                        'policy': f"{policy['type']}:{policy['name']}",
                        'seed': seed,
                    },
                    **{
                        f'acc@{k}': test_kacc[i] for i, k in enumerate(top_k)
                    },
                })
                csvfile.flush()
| 37.060484 | 158 | 0.586878 | import os
import sys
import importlib
import argparse
import csv
import numpy as np
import time
import pickle
import pathlib
import gzip
import tensorflow as tf
import tensorflow.contrib.eager as tfe
import svmrank
import utilities
from utilities_tf import load_batch_gcnn
def load_batch_flat(sample_files, feats_type, augment_feats, normalize_feats):
cand_features = []
cand_choices = []
cand_scoress = []
for i, filename in enumerate(sample_files):
cand_states, cand_scores, cand_choice = utilities.load_flat_samples(filename, feats_type, 'scores', augment_feats, normalize_feats)
cand_features.append(cand_states)
cand_choices.append(cand_choice)
cand_scoress.append(cand_scores)
n_cands_per_sample = [v.shape[0] for v in cand_features]
cand_features = np.concatenate(cand_features, axis=0).astype(np.float32, copy=False)
cand_choices = np.asarray(cand_choices).astype(np.int32, copy=False)
cand_scoress = np.concatenate(cand_scoress, axis=0).astype(np.float32, copy=False)
n_cands_per_sample = np.asarray(n_cands_per_sample).astype(np.int32, copy=False)
return cand_features, n_cands_per_sample, cand_choices, cand_scoress
def padding(output, n_vars_per_sample, fill=-1e8):
n_vars_max = tf.reduce_max(n_vars_per_sample)
output = tf.split(
value=output,
num_or_size_splits=n_vars_per_sample,
axis=1,
)
output = tf.concat([
tf.pad(
x,
paddings=[[0, 0], [0, n_vars_max - tf.shape(x)[1]]],
mode='CONSTANT',
constant_values=fill)
for x in output
], axis=0)
return output
def process(policy, dataloader, top_k):
mean_kacc = np.zeros(len(top_k))
n_samples_processed = 0
for batch in dataloader:
if policy['type'] == 'gcnn':
c, ei, ev, v, n_cs, n_vs, n_cands, cands, best_cands, cand_scores = batch
pred_scores = policy['model']((c, ei, ev, v, tf.reduce_sum(n_cs, keepdims=True), tf.reduce_sum(n_vs, keepdims=True)), tf.convert_to_tensor(False))
pred_scores = tf.expand_dims(tf.gather(tf.squeeze(pred_scores, 0), cands), 0)
elif policy['type'] == 'ml-competitor':
cand_feats, n_cands, best_cands, cand_scores = batch
cand_feats = cand_feats.numpy()
n_cands = n_cands.numpy()
cand_feats = (cand_feats - policy['feat_shift']) / policy['feat_scale']
pred_scores = policy['model'].predict(cand_feats)
pred_scores = tf.convert_to_tensor(pred_scores.reshape((1, -1)), dtype=tf.float32)
pred_scores = padding(pred_scores, n_cands)
true_scores = padding(tf.reshape(cand_scores, (1, -1)), n_cands)
true_bestscore = tf.reduce_max(true_scores, axis=-1, keepdims=True)
assert all(true_bestscore.numpy() == np.take_along_axis(true_scores.numpy(), best_cands.numpy().reshape((-1, 1)), axis=1))
kacc = []
for k in top_k:
pred_top_k = tf.nn.top_k(pred_scores, k=k)[1].numpy()
pred_top_k_true_scores = np.take_along_axis(true_scores.numpy(), pred_top_k, axis=1)
kacc.append(np.mean(np.any(pred_top_k_true_scores == true_bestscore.numpy(), axis=1)))
kacc = np.asarray(kacc)
batch_size = int(n_cands.shape[0])
mean_kacc += kacc * batch_size
n_samples_processed += batch_size
mean_kacc /= n_samples_processed
return mean_kacc
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'problem',
help='MILP instance type to process.',
choices=['setcover', 'cauctions', 'facilities', 'indset'],
)
parser.add_argument(
'-g', '--gpu',
help='CUDA GPU id (-1 for CPU).',
type=int,
default=0,
)
args = parser.parse_args()
print(f"problem: {args.problem}")
print(f"gpu: {args.gpu}")
os.makedirs("results", exist_ok=True)
result_file = f"results/{args.problem}_validation_{time.strftime('%Y%m%d-%H%M%S')}.csv"
seeds = [0, 1, 2, 3, 4]
gcnn_models = ['baseline']
other_models = ['extratrees_gcnn_agg', 'lambdamart_khalil', 'svmrank_khalil']
test_batch_size = 128
top_k = [1, 3, 5, 10]
problem_folders = {
'setcover': 'setcover/500r_1000c_0.05d',
'cauctions': 'cauctions/100_500',
'facilities': 'facilities/100_100_5',
'indset': 'indset/500_4',
}
problem_folder = problem_folders[args.problem]
if args.problem == 'setcover':
gcnn_models += ['mean_convolution', 'no_prenorm']
result_file = f"results/{args.problem}_test_{time.strftime('%Y%m%d-%H%M%S')}"
result_file = result_file + '.csv'
os.makedirs('results', exist_ok=True)
SIBLE_DEVICES'] = ''
else:
os.environ['CUDA_VISIBLE_DEVICES'] = f'{args.gpu}'
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
tf.enable_eager_execution(config)
tf.executing_eagerly()
test_files = list(pathlib.Path(f"data/samples/{problem_folder}/test").glob('sample_*.pkl'))
test_files = [str(x) for x in test_files]
print(f"{len(test_files)} test samples")
evaluated_policies = [['gcnn', model] for model in gcnn_models] + \
[['ml-competitor', model] for model in other_models]
fieldnames = [
'policy',
'seed',
] + [
f'acc@{k}' for k in top_k
]
with open(result_file, 'w', newline='') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for policy_type, policy_name in evaluated_policies:
print(f"{policy_type}:{policy_name}...")
for seed in seeds:
rng = np.random.RandomState(seed)
tf.set_random_seed(rng.randint(np.iinfo(int).max))
policy = {}
policy['name'] = policy_name
policy['type'] = policy_type
if policy['type'] == 'gcnn':
sys.path.insert(0, os.path.abspath(f"models/{policy['name']}"))
import model
importlib.reload(model)
del sys.path[0]
policy['model'] = model.GCNPolicy()
policy['model'].restore_state(f"trained_models/{args.problem}/{policy['name']}/{seed}/best_params.pkl")
policy['model'].call = tfe.defun(policy['model'].call, input_signature=policy['model'].input_signature)
policy['batch_datatypes'] = [tf.float32, tf.int32, tf.float32,
tf.float32, tf.int32, tf.int32, tf.int32, tf.int32, tf.int32, tf.float32]
policy['batch_fun'] = load_batch_gcnn
else:
try:
with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/normalization.pkl", 'rb') as f:
policy['feat_shift'], policy['feat_scale'] = pickle.load(f)
except:
policy['feat_shift'], policy['feat_scale'] = 0, 1
if policy_name.startswith('svmrank'):
policy['model'] = svmrank.Model().read(f"trained_models/{args.problem}/{policy['name']}/{seed}/model.txt")
else:
with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/model.pkl", 'rb') as f:
policy['model'] = pickle.load(f)
with open(f"trained_models/{args.problem}/{policy['name']}/{seed}/feat_specs.pkl", 'rb') as f:
feat_specs = pickle.load(f)
policy['batch_datatypes'] = [tf.float32, tf.int32, tf.int32, tf.float32]
policy['batch_fun'] = lambda x: load_batch_flat(x, feat_specs['type'], feat_specs['augment'], feat_specs['qbnorm'])
test_data = tf.data.Dataset.from_tensor_slices(test_files)
test_data = test_data.batch(test_batch_size)
test_data = test_data.map(lambda x: tf.py_func(
policy['batch_fun'], [x], policy['batch_datatypes']))
test_data = test_data.prefetch(2)
test_kacc = process(policy, test_data, top_k)
print(f" {seed} " + " ".join([f"acc@{k}: {100*acc:4.1f}" for k, acc in zip(top_k, test_kacc)]))
writer.writerow({
**{
'policy': f"{policy['type']}:{policy['name']}",
'seed': seed,
},
**{
f'acc@{k}': test_kacc[i] for i, k in enumerate(top_k)
},
})
csvfile.flush()
| true | true |
f72afd17d996c315fc23e466eee5e411f1188c6d | 5,980 | py | Python | duoSpider.py | susemm/books | 80e96dd0ef7309707b37b036c991d4b11a9bed0a | [
"Apache-2.0"
] | null | null | null | duoSpider.py | susemm/books | 80e96dd0ef7309707b37b036c991d4b11a9bed0a | [
"Apache-2.0"
] | null | null | null | duoSpider.py | susemm/books | 80e96dd0ef7309707b37b036c991d4b11a9bed0a | [
"Apache-2.0"
] | null | null | null | __author__ = 'vin@misday.com'
import sys, re, os, wx
from datetime import *
from urlparse import urlparse
from bs4 import BeautifulSoup
from pyvin.spider import Spider
from pyvin.core import Callbacks
reload(sys)
sys.setdefaultencoding('utf8')
class Special(Callbacks):
    """Spider for Duokan "special offers" pages.

    Crawls www.duokan.com, follows the limited-time-free links, and
    dispatches EVT_FIND_LINK / EVT_FIND_BOOK events as pages are parsed.
    """
    siteRoot = 'http://www.duokan.com'
    # Event ids dispatched to registered listeners
    (EVT_FIND_LINK, EVT_FIND_BOOK) = range(0, 2)
    def __init__(self, proxyHost='', proxyAuthUser='', proxyAuthPswd=''):
        """Set up URL->handler callbacks and start URLs; optional HTTP proxy."""
        Callbacks.__init__(self)
        self.init([Special.EVT_FIND_LINK, Special.EVT_FIND_BOOK])
        self.titles = {}
        self.links = {}
        self.authors = {}
        # URL-prefix to handler mapping consumed by the Spider
        self.callbacks = {'http://www.duokan.com/special': self.findBooks,
                          'http://www.duokan.com/book': self.findBook,
                          'http://www.duokan.com': self.findLinks,
                          # 'http://www.duokan.com/r/%E5%85%8D%E8%B4%B9%E4%B8%93%E5%8C%BA': self.finfLimitFree,
                          }
        self.spider = Spider('Duokan Special')
        if len(proxyHost) > 0:
            self.spider.set_proxy(proxyHost, proxyAuthUser, proxyAuthPswd)
        self.spider.add_callbacks(self.callbacks)
        self.spider.add_urls([Special.siteRoot,
                              # 'http://www.duokan.com/r/%E5%85%8D%E8%B4%B9%E4%B8%93%E5%8C%BA'
                              ])
    def findLinks(self, url, response):
        """Parse the site front page and queue the limited-free-read link."""
        self.soup = BeautifulSoup(response, from_encoding='utf8')
        list_nodes = self.soup.findAll('div', attrs={'class': 'u-aimg'})
        if len(list_nodes) > 0:
            list_node = list_nodes[0]
            links = list_node.findAll('a')
            # limit free read
            link = links[0]
            link = [Special.siteRoot + link['href']]
            self.spider.add_urls(link)
            self.dispatch(Special.EVT_FIND_LINK, link[0])
            # limit free buy
            # link = links[2]
            # link = [Special.siteRoot + link['href']]
            # self.spider.add_urls(link)
    def finfLimitFree(self, url, response):
        """Parse a limited-free listing page and queue its first book link.

        NOTE(review): name looks like a typo for findLimitFree; kept as-is
        since it is referenced (commented out) in the callback table.
        """
        self.soup = BeautifulSoup(response, from_encoding='utf8')
        list_nodes = self.soup.findAll('li', attrs={'class': 'u-bookitm1 j-bookitm'})
        if len(list_nodes) > 0:
            list_node = list_nodes[0]
            links = list_node.findAll('a')
            # limit free read
            link = links[0]
            link = [Special.siteRoot + link['href']]
            self.spider.add_urls(link)
            self.dispatch(Special.EVT_FIND_LINK, link[0])
    def findBooks(self, url, response):
        """Parse a /special listing page, caching title/link/author per book id."""
        self.soup = BeautifulSoup(response, from_encoding='utf8')
        book_nodes = self.soup.findAll('li', attrs={'class': 'u-bookitm1 j-bookitm'})
        for item in book_nodes:
            id = item['data-id']
            if id:
                title = item.find('a', attrs={'class': 'title'}).string
                link = item.find('a', attrs={'class': 'title'})['href']
                author = item.find('div', attrs={'class': 'u-author'}).find('span').string
                self.titles[id] = title
                self.links[id] = Special.siteRoot + link
                self.authors[id] = author
                self.dispatch(Special.EVT_FIND_BOOK, id, self.titles[id], self.authors[id], self.links[id])
        return self.titles
    def findBook(self, url, response):
        """Parse a /book detail page by extracting the inline window.dk_data JS."""
        self.soup = BeautifulSoup(response, from_encoding='utf8')
        # id
        # content = self.soup.find('meta', attrs={'name':'apple-itunes-app'})['content'].split('/')
        # id = content[len(content) - 1]
        # title
        # descNode = self.soup.findAll('div', attrs={'class':'desc'})
        # title = descNode[0].find('h3').string
        # author
        author = ''
        # author = descNode[0].find('td', attrs={'class':'author'}).find('a').string
        # link
        # link = self.soup.find('div', attrs={'class':'cover', 'id':'cover-img'}).find('a')['href']
        # link = DuokanSpecial.siteRoot + link
        # self.dispatch(DuokanSpecial.ON_FIND_BOOK, id, title, author, link)
        scriptNodes = self.soup.findAll('script', attrs={'type': 'text/javascript'})
        for node in scriptNodes:
            str = node.string
            if str:
                if str.find('window.dk_data') > 0:
                    # Slice out the JS object literal assigned to window.dk_data
                    start = str.index('=') + len('=')
                    end = str.index('window.dk_data.comments_url')
                    str = str[start:end]
                    # str = str.strip().lstrip()
                    # Quote the bare JS keys so the literal parses as Python
                    str = str.replace('book_id :', '\'book_id\' :')
                    str = str.replace('book :', '\'book\' :')
                    str = str.replace('sid :', '\'sid\' :')
                    str = str.replace('id :', '\'id\' :')
                    str = str.replace('title : ', '\'title\' : u')
                    str = str.replace('old_price :', '\'old_price\' :')
                    str = str.replace('price :', '\'price\' :')
                    str = str.replace('cover :', '\'cover\' :')
                    str = str.replace('url :', '\'url\' :')
                    str = str.replace('webreader :', '\'webreader\' :')
                    str = str.replace('limited_time :', '\'limited_time\' :')
                    str = str.replace('authors : ', '\'authors\' : u')
                    # print str
                    # SECURITY(review): eval() of remote page content -- a
                    # malicious/changed page could execute arbitrary code.
                    # Consider ast.literal_eval or a JS-object parser instead.
                    dk_data = eval(str)
                    id = dk_data['book']['id']
                    title = dk_data['book']['title']
                    author = dk_data['book']['authors']
                    link = Special.siteRoot + dk_data['book']['url']
                    self.dispatch(Special.EVT_FIND_BOOK, id, title, author, link)
    def start(self):
        """Start the underlying spider."""
        self.spider.start()
    def stop(self):
        """Stop the underlying spider."""
        self.spider.stop()
    def getTitle(self):
        # Returns the id -> title cache built by findBooks
        return self.titles
    def getLinks(self):
        # Returns the id -> absolute-URL cache built by findBooks
        return self.links
    def getAuthors(self):
        # Returns the id -> author cache built by findBooks
        return self.authors
if __name__ == "__main__":
    # Run the Duokan special-offers spider with default (no-proxy) settings.
    Special().start()
| 40.958904 | 111 | 0.527926 | __author__ = 'vin@misday.com'
import sys, re, os, wx
from datetime import *
from urlparse import urlparse
from bs4 import BeautifulSoup
from pyvin.spider import Spider
from pyvin.core import Callbacks
reload(sys)
sys.setdefaultencoding('utf8')
class Special(Callbacks):
siteRoot = 'http://www.duokan.com'
(EVT_FIND_LINK, EVT_FIND_BOOK) = range(0, 2)
def __init__(self, proxyHost='', proxyAuthUser='', proxyAuthPswd=''):
Callbacks.__init__(self)
self.init([Special.EVT_FIND_LINK, Special.EVT_FIND_BOOK])
self.titles = {}
self.links = {}
self.authors = {}
self.callbacks = {'http://www.duokan.com/special': self.findBooks,
'http://www.duokan.com/book': self.findBook,
'http://www.duokan.com': self.findLinks,
}
self.spider = Spider('Duokan Special')
if len(proxyHost) > 0:
self.spider.set_proxy(proxyHost, proxyAuthUser, proxyAuthPswd)
self.spider.add_callbacks(self.callbacks)
self.spider.add_urls([Special.siteRoot,
])
def findLinks(self, url, response):
self.soup = BeautifulSoup(response, from_encoding='utf8')
list_nodes = self.soup.findAll('div', attrs={'class': 'u-aimg'})
if len(list_nodes) > 0:
list_node = list_nodes[0]
links = list_node.findAll('a')
link = links[0]
link = [Special.siteRoot + link['href']]
self.spider.add_urls(link)
self.dispatch(Special.EVT_FIND_LINK, link[0])
def finfLimitFree(self, url, response):
self.soup = BeautifulSoup(response, from_encoding='utf8')
list_nodes = self.soup.findAll('li', attrs={'class': 'u-bookitm1 j-bookitm'})
if len(list_nodes) > 0:
list_node = list_nodes[0]
links = list_node.findAll('a')
link = links[0]
link = [Special.siteRoot + link['href']]
self.spider.add_urls(link)
self.dispatch(Special.EVT_FIND_LINK, link[0])
def findBooks(self, url, response):
self.soup = BeautifulSoup(response, from_encoding='utf8')
book_nodes = self.soup.findAll('li', attrs={'class': 'u-bookitm1 j-bookitm'})
for item in book_nodes:
id = item['data-id']
if id:
title = item.find('a', attrs={'class': 'title'}).string
link = item.find('a', attrs={'class': 'title'})['href']
author = item.find('div', attrs={'class': 'u-author'}).find('span').string
self.titles[id] = title
self.links[id] = Special.siteRoot + link
self.authors[id] = author
self.dispatch(Special.EVT_FIND_BOOK, id, self.titles[id], self.authors[id], self.links[id])
return self.titles
def findBook(self, url, response):
self.soup = BeautifulSoup(response, from_encoding='utf8')
author = ''
scriptNodes = self.soup.findAll('script', attrs={'type': 'text/javascript'})
for node in scriptNodes:
str = node.string
if str:
if str.find('window.dk_data') > 0:
start = str.index('=') + len('=')
end = str.index('window.dk_data.comments_url')
str = str[start:end]
str = str.replace('book_id :', '\'book_id\' :')
str = str.replace('book :', '\'book\' :')
str = str.replace('sid :', '\'sid\' :')
str = str.replace('id :', '\'id\' :')
str = str.replace('title : ', '\'title\' : u')
str = str.replace('old_price :', '\'old_price\' :')
str = str.replace('price :', '\'price\' :')
str = str.replace('cover :', '\'cover\' :')
str = str.replace('url :', '\'url\' :')
str = str.replace('webreader :', '\'webreader\' :')
str = str.replace('limited_time :', '\'limited_time\' :')
str = str.replace('authors : ', '\'authors\' : u')
dk_data = eval(str)
id = dk_data['book']['id']
title = dk_data['book']['title']
author = dk_data['book']['authors']
link = Special.siteRoot + dk_data['book']['url']
self.dispatch(Special.EVT_FIND_BOOK, id, title, author, link)
def start(self):
self.spider.start()
def stop(self):
self.spider.stop()
def getTitle(self):
return self.titles
def getLinks(self):
return self.links
def getAuthors(self):
return self.authors
if __name__ == "__main__":
special = Special()
special.start()
| false | true |
f72afdb37d0bc3631c2708300be0110723f46ee0 | 4,090 | py | Python | src/python/pants/ivy/ivy_subsystem.py | SergeKireev/pants | cd92c65aeb3dfdcee3e0946f2b68a301ef2f4541 | [
"Apache-2.0"
] | 1 | 2020-08-26T03:30:31.000Z | 2020-08-26T03:30:31.000Z | src/python/pants/ivy/ivy_subsystem.py | SergeKireev/pants | cd92c65aeb3dfdcee3e0946f2b68a301ef2f4541 | [
"Apache-2.0"
] | 1 | 2021-09-02T21:06:31.000Z | 2021-09-02T21:06:31.000Z | src/python/pants/ivy/ivy_subsystem.py | SergeKireev/pants | cd92c65aeb3dfdcee3e0946f2b68a301ef2f4541 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import urllib
from pants.java.distribution.distribution import DistributionLocator
from pants.subsystem.subsystem import Subsystem
class IvySubsystem(Subsystem):
"""Common configuration items for ivy tasks.
:API: public
"""
options_scope = 'ivy'
_DEFAULT_VERSION = '2.4.0'
_DEFAULT_URL = ('https://repo1.maven.org/maven2/'
'org/apache/ivy/ivy/'
'{version}/ivy-{version}.jar'.format(version=_DEFAULT_VERSION))
@classmethod
def register_options(cls, register):
super().register_options(register)
register('--http-proxy', advanced=True,
help='Specify a proxy URL for http requests.')
register('--https-proxy', advanced=True,
help='Specify a proxy URL for https requests.')
register('--bootstrap-jar-url', advanced=True, default=cls._DEFAULT_URL,
help='Location to download a bootstrap version of Ivy.')
register('--bootstrap-fetch-timeout-secs', type=int, advanced=True, default=10,
help='Timeout the fetch if the connection is idle for longer than this value.')
register('--ivy-profile', advanced=True, default=cls._DEFAULT_VERSION,
help='The version of ivy to fetch.')
register('--cache-dir', advanced=True, default=os.path.expanduser('~/.ivy2/pants'),
help='The default directory used for both the Ivy resolution and repository caches.'
'If you want to isolate the resolution cache from the repository cache, we '
'recommend setting both the --resolution-cache-dir and --repository-cache-dir '
'instead of using --cache-dir')
register('--resolution-cache-dir', advanced=True,
help='Directory to store Ivy resolution artifacts.')
register('--repository-cache-dir', advanced=True,
help='Directory to store Ivy repository artifacts.')
register('--ivy-settings', advanced=True,
help='Location of XML configuration file for Ivy settings.')
register('--bootstrap-ivy-settings', advanced=True,
help='Bootstrap Ivy XML configuration file.')
@classmethod
def subsystem_dependencies(cls):
return super().subsystem_dependencies() + (DistributionLocator,)
def http_proxy(self):
"""Set ivy to use an http proxy.
Expects a string of the form http://<host>:<port>
"""
if os.getenv('HTTP_PROXY'):
return os.getenv('HTTP_PROXY')
if os.getenv('http_proxy'):
return os.getenv('http_proxy')
return self.get_options().http_proxy
def https_proxy(self):
"""Set ivy to use an https proxy.
Expects a string of the form http://<host>:<port>
"""
if os.getenv('HTTPS_PROXY'):
return os.getenv('HTTPS_PROXY')
if os.getenv('https_proxy'):
return os.getenv('https_proxy')
return self.get_options().https_proxy
def extra_jvm_options(self):
extra_options = []
http_proxy = self.http_proxy()
if http_proxy:
host, port = self._parse_proxy_string(http_proxy)
extra_options.extend([
"-Dhttp.proxyHost={}".format(host),
"-Dhttp.proxyPort={}".format(port),
])
https_proxy = self.https_proxy()
if https_proxy:
host, port = self._parse_proxy_string(https_proxy)
extra_options.extend([
"-Dhttps.proxyHost={}".format(host),
"-Dhttps.proxyPort={}".format(port),
])
return extra_options
def _parse_proxy_string(self, proxy_string):
parse_result = urllib.parse.urlparse(proxy_string)
return parse_result.hostname, parse_result.port
def resolution_cache_dir(self):
if self.get_options().resolution_cache_dir:
return self.get_options().resolution_cache_dir
else:
return self.get_options().cache_dir
def repository_cache_dir(self):
if self.get_options().repository_cache_dir:
return self.get_options().repository_cache_dir
else:
return self.get_options().cache_dir
| 37.181818 | 97 | 0.674817 |
import os
import urllib
from pants.java.distribution.distribution import DistributionLocator
from pants.subsystem.subsystem import Subsystem
class IvySubsystem(Subsystem):
options_scope = 'ivy'
_DEFAULT_VERSION = '2.4.0'
_DEFAULT_URL = ('https://repo1.maven.org/maven2/'
'org/apache/ivy/ivy/'
'{version}/ivy-{version}.jar'.format(version=_DEFAULT_VERSION))
@classmethod
def register_options(cls, register):
super().register_options(register)
register('--http-proxy', advanced=True,
help='Specify a proxy URL for http requests.')
register('--https-proxy', advanced=True,
help='Specify a proxy URL for https requests.')
register('--bootstrap-jar-url', advanced=True, default=cls._DEFAULT_URL,
help='Location to download a bootstrap version of Ivy.')
register('--bootstrap-fetch-timeout-secs', type=int, advanced=True, default=10,
help='Timeout the fetch if the connection is idle for longer than this value.')
register('--ivy-profile', advanced=True, default=cls._DEFAULT_VERSION,
help='The version of ivy to fetch.')
register('--cache-dir', advanced=True, default=os.path.expanduser('~/.ivy2/pants'),
help='The default directory used for both the Ivy resolution and repository caches.'
'If you want to isolate the resolution cache from the repository cache, we '
'recommend setting both the --resolution-cache-dir and --repository-cache-dir '
'instead of using --cache-dir')
register('--resolution-cache-dir', advanced=True,
help='Directory to store Ivy resolution artifacts.')
register('--repository-cache-dir', advanced=True,
help='Directory to store Ivy repository artifacts.')
register('--ivy-settings', advanced=True,
help='Location of XML configuration file for Ivy settings.')
register('--bootstrap-ivy-settings', advanced=True,
help='Bootstrap Ivy XML configuration file.')
@classmethod
def subsystem_dependencies(cls):
return super().subsystem_dependencies() + (DistributionLocator,)
def http_proxy(self):
if os.getenv('HTTP_PROXY'):
return os.getenv('HTTP_PROXY')
if os.getenv('http_proxy'):
return os.getenv('http_proxy')
return self.get_options().http_proxy
def https_proxy(self):
if os.getenv('HTTPS_PROXY'):
return os.getenv('HTTPS_PROXY')
if os.getenv('https_proxy'):
return os.getenv('https_proxy')
return self.get_options().https_proxy
def extra_jvm_options(self):
extra_options = []
http_proxy = self.http_proxy()
if http_proxy:
host, port = self._parse_proxy_string(http_proxy)
extra_options.extend([
"-Dhttp.proxyHost={}".format(host),
"-Dhttp.proxyPort={}".format(port),
])
https_proxy = self.https_proxy()
if https_proxy:
host, port = self._parse_proxy_string(https_proxy)
extra_options.extend([
"-Dhttps.proxyHost={}".format(host),
"-Dhttps.proxyPort={}".format(port),
])
return extra_options
def _parse_proxy_string(self, proxy_string):
parse_result = urllib.parse.urlparse(proxy_string)
return parse_result.hostname, parse_result.port
def resolution_cache_dir(self):
if self.get_options().resolution_cache_dir:
return self.get_options().resolution_cache_dir
else:
return self.get_options().cache_dir
def repository_cache_dir(self):
if self.get_options().repository_cache_dir:
return self.get_options().repository_cache_dir
else:
return self.get_options().cache_dir
| true | true |
f72afdfc03221196ea9ceaf1098c9e1569cc1366 | 808 | py | Python | sampling/text.py | YoannDupont/corpus-sampling | 20fd993bc967fd499e88444d882472ba7598c197 | [
"MIT"
] | null | null | null | sampling/text.py | YoannDupont/corpus-sampling | 20fd993bc967fd499e88444d882472ba7598c197 | [
"MIT"
] | null | null | null | sampling/text.py | YoannDupont/corpus-sampling | 20fd993bc967fd499e88444d882472ba7598c197 | [
"MIT"
] | null | null | null | from pathlib import Path
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.RegexpTokenizer(r"([A-Z][A-Z0-9.]+|[0-9]+[,.][0-9]+|[cdjlmnst]'|qu'|[\w'-]+|\S)")
class Sentence:
def __init__(self, text, nth):
self.text = text
self.nth = nth
def __len__(self):
return len(tokenizer.tokenize(self.text))
@property
def id(self):
return self.nth
def contains_pos(self, postag):
return False
def count_pos(self, postag):
return 0
def read_corpus(path):
corpus = []
with open(path) as input_stream:
content = input_stream.read()
sents = [item.replace("\n", " ") for item in sent_tokenize(content)]
for nth, sent in enumerate(sents):
corpus.append(Sentence(sent, nth))
return corpus
| 22.444444 | 98 | 0.62005 | from pathlib import Path
import nltk
from nltk.tokenize import sent_tokenize
tokenizer = nltk.RegexpTokenizer(r"([A-Z][A-Z0-9.]+|[0-9]+[,.][0-9]+|[cdjlmnst]'|qu'|[\w'-]+|\S)")
class Sentence:
def __init__(self, text, nth):
self.text = text
self.nth = nth
def __len__(self):
return len(tokenizer.tokenize(self.text))
@property
def id(self):
return self.nth
def contains_pos(self, postag):
return False
def count_pos(self, postag):
return 0
def read_corpus(path):
corpus = []
with open(path) as input_stream:
content = input_stream.read()
sents = [item.replace("\n", " ") for item in sent_tokenize(content)]
for nth, sent in enumerate(sents):
corpus.append(Sentence(sent, nth))
return corpus
| true | true |
f72afeec1ac6435e1b1eedcbe12ee9db89f07d10 | 8,129 | py | Python | TDETestCases.py | GGSimmons1992/timeDelayEstimation | 007a04cbf02ef168d9ebfd4ac72fbbed1dc7cb2a | [
"MIT"
] | 3 | 2019-03-01T00:16:01.000Z | 2021-12-15T00:00:48.000Z | TDETestCases.py | GGSimmons1992/timeDelayEstimation | 007a04cbf02ef168d9ebfd4ac72fbbed1dc7cb2a | [
"MIT"
] | null | null | null | TDETestCases.py | GGSimmons1992/timeDelayEstimation | 007a04cbf02ef168d9ebfd4ac72fbbed1dc7cb2a | [
"MIT"
] | 2 | 2021-12-03T11:49:32.000Z | 2021-12-15T00:11:29.000Z | """
Compilation of functions used to make test cases
"""
import numpy as np
import random
pi=np.pi
Debugger=0
def PlaneWavePacket(Amp,k,omega,theta,sigma,x,y,SNR,length):
Vx=(omega/k)*np.cos(theta)
Vy=(omega/k)*np.sin(theta)
kx=k*np.cos(theta)
ky=k*np.sin(theta)
t=np.arange(length)-int(length/2)
sigmaPart=2*np.square(sigma)
sine=Amp*np.cos((kx*x)+(ky*y)-(omega*t))
#sine=(len(t)*[1.0]) (Uncomment to get wavepacket. Comment to just get gaussian)
packet=np.exp((-np.square(x-(Vx*t))/sigmaPart))*np.exp((-np.square(y-(Vy*t))/sigmaPart))
wavePacket=sine*packet
maxAmp=max(wavePacket)
noise=NoiseMaker(length,maxAmp,SNR)
wavePacket=wavePacket+noise
"""
t=np.arange(length)-int(length/2)
tshiftX=t-shiftX
tshiftY=t-shiftY
sine=Amp*np.sin(2*pi*fX*tshiftX)*np.sin(2*pi*fY*tshiftY)
Norm=1
packet=Norm*np.exp(-np.square(tshiftX)/(2*np.square(sigmaX)))*np.exp(-np.square(tshiftY)/(2*np.square(sigmaY)))
wavePacket=sine*packet
maxAmp=max(wavePacket)
noise=NoiseMaker(length,maxAmp,SNR)
if (Debugger!=0):
print (len(noise)-len(wavePacket))
wavePacket=wavePacket+noise
"""
return wavePacket
def ThreePointGenerator():
Amp=100.0
Base=20.0
V=Base/50.0
x=Base
#print "point 1:({},0)".format(x)
y=Base
#print "point 2:(0,{})".format(x)
theta=pi/4
f=100.0
omega=f
sigma=10.0
k=omega/V
Vx=V*np.cos(theta)
Vy=V*np.sin(theta)
#print "Vx={}".format(Vx)
#print "Vy={}".format(Vy)
SNR=10.0
length=1000
t=np.arange(length)-(int(length/2))
originPacket=PlaneWavePacket(Amp,k,omega,theta,sigma,0,0,SNR,length)
dxPointPacket=PlaneWavePacket(Amp,k,omega,theta,sigma,x,0,SNR,length)
dyPointPacket=PlaneWavePacket(Amp,k,omega,theta,sigma,0,y,SNR,length)
return originPacket,dxPointPacket,dyPointPacket,Vx,Vy,x,y,t
def NoiseMaker(length,Amp,SNR):
noise=np.array([0.0]*length)
for x in range(0,len(noise)):
noise[x]=random.gauss(0,Amp/SNR)
return noise
def ThreePoint_NRunGenerator(N):
#Creates an Nxlength matrix for 3 data points
#Meant to simulate a concatination of N runs for 3 points
for i in range(0,N):
origin,dxPoint,dyPoint,Vx,Vy,x,y,t=ThreePointGenerator()
if (i==0):
originMatrix=origin
xMatrix=dxPoint
yMatrix=dyPoint
else:
originMatrix=np.vstack((originMatrix,origin))
xMatrix=np.vstack((xMatrix,dxPoint))
yMatrix=np.vstack((yMatrix,dyPoint))
return originMatrix,xMatrix,yMatrix,Vx,Vy,x,y,t
class pixel:
def __init__(self,xCoor,yCoor,timeData):
#Initial Conditions
self.xCoor=xCoor
self.yCoor=yCoor
self.timeData=timeData
self.averageData=np.mean(timeData,axis=0)
if (isinstance(self.averageData,(list,tuple,np.ndarray))==0):
self.averageData=timeData
#dt and Correlation
self.dt=0.0
self.errorDT=0.0
self.Correlation=0.0
self.errorCorrelation=0.0
#Velocity
self.Vx=0.0
self.Vy=0.0
self.errorVX=0.0
self.errorVY=0.0
def dtAndCorrelation(self,dt,errorDT,Correlation,errorCorrelation):
self.dt=dt
self.errorDT=errorDT
self.Correlation=Correlation
self.errorCorrelation=errorCorrelation
def velocityRecorder(self,Vx,Vy,errorVX,errorVY):
self.Vx=Vx
self.Vy=Vy
self.errorVX=errorVX
self.errorVY=errorVY
def Printer(self):
x=self.xCoor
y=self.yCoor
dt=self.dt
errorDT=self.errorDT
Corre=self.Correlation
eCorre=self.errorCorrelation
Vx=self.Vx
errorVX=self.errorVX
Vy=self.Vy
errorVY=self.errorVY
print "Measurement at ({},{})".format(x,y)
print "dt={}+/-{}".format(dt,errorDT)
print "correlation={}+/-{}".format(Corre,eCorre)
print "Vx={}+/-{}".format(Vx,errorVX)
print "Vy={}+/-{}".format(Vy,errorVY)
def dtAnalyzer(self,theoryDT,maxDT,maxErrorDT):
theoryDiff=self.dt-theoryDT
maxDiff=self.dt-maxDT
if (theoryDT==0.0 and theoryDiff!=0):
theoryDT=0.001
if (maxDT==0.0 and maxDiff!=0):
maxDT=0.001
if (self.errorDT>=self.dt):
print "Imprecise dt Measurement"
if (self.Correlation<51):
print "Low Correlation"
if (self.errorCorrelation>=self.Correlation):
print "Imprecise Correlation"
if (theoryDiff==0.0):
print "dt measurement is exact to theory"
else:
print "dt measurement is {}% from theoryDT".format(theoryDiff*(100.0/theoryDT))
if (theoryDT<(self.dt-self.errorDT) or theoryDT>(self.dt+self.errorDT)):
print "theoryDT is outside of errorbars"
if (self.errorDT!=0):
print "{} errorbars from theoryDT".format(abs(theoryDiff)/self.errorDT)
if (maxDiff==0.0):
print "dt measurement is exact to maxChecker"
else:
print "dt measurement is {}% from maxDT".format((self.dt-maxDT)*(100.0/maxDT))
if ((maxDT+maxErrorDT)<(self.dt-self.errorDT) or (maxDT-maxErrorDT)>(self.dt+self.errorDT)):
print "maxDT is outside of errorbars"
if (self.errorDT!=0):
print "{} errorbars from <maxDT>".format(abs(maxDiff)/self.errorDT)
def velocityAnalyzer(self,theoryVX,maxVX,maxErrorVX,theoryVY,maxVY,maxErrorVY):
theoryXDiff=self.Vx-theoryVX
maxXDiff=self.Vx-maxVX
theoryYDiff=self.Vy-theoryVY
maxYDiff=self.Vy-maxVY
if (theoryVX==0.0 and theoryXDiff!=0):
theoryVX=0.001
if (maxVX==0.0 and maxXDiff!=0):
maxVX=0.001
if (theoryVY==0.0 and theoryYDiff!=0):
theoryVY=0.001
if (maxVY==0.0 and maxYDiff!=0):
maxVY=0.001
if (self.errorVX>=self.Vx):
print "Imprecise Vx Measurement"
if (theoryXDiff==0.0):
print "Vx measurement is exact to theory"
else:
print "Vx measurement is {}% from theoryVX".format((theoryXDiff)*(100.0/theoryVX))
if (theoryVX<(self.Vx-self.errorVX) or theoryVX>(self.Vx+self.errorVX)):
print "theoryVX is outside of errorbars"
if (self.errorVX!=0):
print "{} errorbars from theoryVX".format(abs(theoryXDiff)/self.errorVX)
if (maxXDiff==0.0):
print "Vx measurement is exact to maxChecker"
else:
print "Vx measurement is {}% from maxVX".format((maxXDiff)*(100.0/maxVX))
if ((maxVX+maxErrorVX)<(self.Vx-self.errorVX) or (maxVX-maxErrorVX)>(self.Vx+self.errorVX)):
print "maxVX is outside of errorbars"
if (self.errorVX!=0):
print "{} errorbars from <maxVX>".format(abs(maxXDiff)/self.errorVX)
if (self.errorVY>=self.Vy):
print "Imprecise Vy Measurement"
if (theoryYDiff==0.0):
print "Vy measurement is exact to theory"
else:
print "Vy measurement is {}% from theoryVY".format((theoryYDiff)*(100.0/theoryVY))
if (theoryVY<(self.Vy-self.errorVY) or theoryVY>(self.Vy+self.errorVY)):
print "theoryVY is outside of errorbars"
if (self.errorVY!=0):
print "{} errorbars from theoryVY".format(abs(theoryYDiff)/self.errorVY)
if (maxYDiff==0.0):
print "Vy measurement is exact to maxChecker"
else:
print "Vy measurement is {}% from maxVY".format((maxYDiff)*(100.0/maxVY))
if ((maxVY+maxErrorVY)<(self.Vy-self.errorVY) or (maxVY-maxErrorVY)>(self.Vy+self.errorVY)):
print "maxVY is outside of errorbars"
if (self.errorVY!=0):
print "{} errorbars from <maxVY>".format(abs(maxYDiff)/self.errorVY)
| 36.452915 | 115 | 0.598352 | """
Compilation of functions used to make test cases
"""
import numpy as np
import random
pi=np.pi
Debugger=0
def PlaneWavePacket(Amp,k,omega,theta,sigma,x,y,SNR,length):
Vx=(omega/k)*np.cos(theta)
Vy=(omega/k)*np.sin(theta)
kx=k*np.cos(theta)
ky=k*np.sin(theta)
t=np.arange(length)-int(length/2)
sigmaPart=2*np.square(sigma)
sine=Amp*np.cos((kx*x)+(ky*y)-(omega*t))
packet=np.exp((-np.square(x-(Vx*t))/sigmaPart))*np.exp((-np.square(y-(Vy*t))/sigmaPart))
wavePacket=sine*packet
maxAmp=max(wavePacket)
noise=NoiseMaker(length,maxAmp,SNR)
wavePacket=wavePacket+noise
"""
t=np.arange(length)-int(length/2)
tshiftX=t-shiftX
tshiftY=t-shiftY
sine=Amp*np.sin(2*pi*fX*tshiftX)*np.sin(2*pi*fY*tshiftY)
Norm=1
packet=Norm*np.exp(-np.square(tshiftX)/(2*np.square(sigmaX)))*np.exp(-np.square(tshiftY)/(2*np.square(sigmaY)))
wavePacket=sine*packet
maxAmp=max(wavePacket)
noise=NoiseMaker(length,maxAmp,SNR)
if (Debugger!=0):
print (len(noise)-len(wavePacket))
wavePacket=wavePacket+noise
"""
return wavePacket
def ThreePointGenerator():
Amp=100.0
Base=20.0
V=Base/50.0
x=Base
y=Base
theta=pi/4
f=100.0
omega=f
sigma=10.0
k=omega/V
Vx=V*np.cos(theta)
Vy=V*np.sin(theta)
SNR=10.0
length=1000
t=np.arange(length)-(int(length/2))
originPacket=PlaneWavePacket(Amp,k,omega,theta,sigma,0,0,SNR,length)
dxPointPacket=PlaneWavePacket(Amp,k,omega,theta,sigma,x,0,SNR,length)
dyPointPacket=PlaneWavePacket(Amp,k,omega,theta,sigma,0,y,SNR,length)
return originPacket,dxPointPacket,dyPointPacket,Vx,Vy,x,y,t
def NoiseMaker(length,Amp,SNR):
noise=np.array([0.0]*length)
for x in range(0,len(noise)):
noise[x]=random.gauss(0,Amp/SNR)
return noise
def ThreePoint_NRunGenerator(N):
for i in range(0,N):
origin,dxPoint,dyPoint,Vx,Vy,x,y,t=ThreePointGenerator()
if (i==0):
originMatrix=origin
xMatrix=dxPoint
yMatrix=dyPoint
else:
originMatrix=np.vstack((originMatrix,origin))
xMatrix=np.vstack((xMatrix,dxPoint))
yMatrix=np.vstack((yMatrix,dyPoint))
return originMatrix,xMatrix,yMatrix,Vx,Vy,x,y,t
class pixel:
def __init__(self,xCoor,yCoor,timeData):
self.xCoor=xCoor
self.yCoor=yCoor
self.timeData=timeData
self.averageData=np.mean(timeData,axis=0)
if (isinstance(self.averageData,(list,tuple,np.ndarray))==0):
self.averageData=timeData
self.dt=0.0
self.errorDT=0.0
self.Correlation=0.0
self.errorCorrelation=0.0
self.Vx=0.0
self.Vy=0.0
self.errorVX=0.0
self.errorVY=0.0
def dtAndCorrelation(self,dt,errorDT,Correlation,errorCorrelation):
self.dt=dt
self.errorDT=errorDT
self.Correlation=Correlation
self.errorCorrelation=errorCorrelation
def velocityRecorder(self,Vx,Vy,errorVX,errorVY):
self.Vx=Vx
self.Vy=Vy
self.errorVX=errorVX
self.errorVY=errorVY
def Printer(self):
x=self.xCoor
y=self.yCoor
dt=self.dt
errorDT=self.errorDT
Corre=self.Correlation
eCorre=self.errorCorrelation
Vx=self.Vx
errorVX=self.errorVX
Vy=self.Vy
errorVY=self.errorVY
print "Measurement at ({},{})".format(x,y)
print "dt={}+/-{}".format(dt,errorDT)
print "correlation={}+/-{}".format(Corre,eCorre)
print "Vx={}+/-{}".format(Vx,errorVX)
print "Vy={}+/-{}".format(Vy,errorVY)
def dtAnalyzer(self,theoryDT,maxDT,maxErrorDT):
theoryDiff=self.dt-theoryDT
maxDiff=self.dt-maxDT
if (theoryDT==0.0 and theoryDiff!=0):
theoryDT=0.001
if (maxDT==0.0 and maxDiff!=0):
maxDT=0.001
if (self.errorDT>=self.dt):
print "Imprecise dt Measurement"
if (self.Correlation<51):
print "Low Correlation"
if (self.errorCorrelation>=self.Correlation):
print "Imprecise Correlation"
if (theoryDiff==0.0):
print "dt measurement is exact to theory"
else:
print "dt measurement is {}% from theoryDT".format(theoryDiff*(100.0/theoryDT))
if (theoryDT<(self.dt-self.errorDT) or theoryDT>(self.dt+self.errorDT)):
print "theoryDT is outside of errorbars"
if (self.errorDT!=0):
print "{} errorbars from theoryDT".format(abs(theoryDiff)/self.errorDT)
if (maxDiff==0.0):
print "dt measurement is exact to maxChecker"
else:
print "dt measurement is {}% from maxDT".format((self.dt-maxDT)*(100.0/maxDT))
if ((maxDT+maxErrorDT)<(self.dt-self.errorDT) or (maxDT-maxErrorDT)>(self.dt+self.errorDT)):
print "maxDT is outside of errorbars"
if (self.errorDT!=0):
print "{} errorbars from <maxDT>".format(abs(maxDiff)/self.errorDT)
def velocityAnalyzer(self,theoryVX,maxVX,maxErrorVX,theoryVY,maxVY,maxErrorVY):
theoryXDiff=self.Vx-theoryVX
maxXDiff=self.Vx-maxVX
theoryYDiff=self.Vy-theoryVY
maxYDiff=self.Vy-maxVY
if (theoryVX==0.0 and theoryXDiff!=0):
theoryVX=0.001
if (maxVX==0.0 and maxXDiff!=0):
maxVX=0.001
if (theoryVY==0.0 and theoryYDiff!=0):
theoryVY=0.001
if (maxVY==0.0 and maxYDiff!=0):
maxVY=0.001
if (self.errorVX>=self.Vx):
print "Imprecise Vx Measurement"
if (theoryXDiff==0.0):
print "Vx measurement is exact to theory"
else:
print "Vx measurement is {}% from theoryVX".format((theoryXDiff)*(100.0/theoryVX))
if (theoryVX<(self.Vx-self.errorVX) or theoryVX>(self.Vx+self.errorVX)):
print "theoryVX is outside of errorbars"
if (self.errorVX!=0):
print "{} errorbars from theoryVX".format(abs(theoryXDiff)/self.errorVX)
if (maxXDiff==0.0):
print "Vx measurement is exact to maxChecker"
else:
print "Vx measurement is {}% from maxVX".format((maxXDiff)*(100.0/maxVX))
if ((maxVX+maxErrorVX)<(self.Vx-self.errorVX) or (maxVX-maxErrorVX)>(self.Vx+self.errorVX)):
print "maxVX is outside of errorbars"
if (self.errorVX!=0):
print "{} errorbars from <maxVX>".format(abs(maxXDiff)/self.errorVX)
if (self.errorVY>=self.Vy):
print "Imprecise Vy Measurement"
if (theoryYDiff==0.0):
print "Vy measurement is exact to theory"
else:
print "Vy measurement is {}% from theoryVY".format((theoryYDiff)*(100.0/theoryVY))
if (theoryVY<(self.Vy-self.errorVY) or theoryVY>(self.Vy+self.errorVY)):
print "theoryVY is outside of errorbars"
if (self.errorVY!=0):
print "{} errorbars from theoryVY".format(abs(theoryYDiff)/self.errorVY)
if (maxYDiff==0.0):
print "Vy measurement is exact to maxChecker"
else:
print "Vy measurement is {}% from maxVY".format((maxYDiff)*(100.0/maxVY))
if ((maxVY+maxErrorVY)<(self.Vy-self.errorVY) or (maxVY-maxErrorVY)>(self.Vy+self.errorVY)):
print "maxVY is outside of errorbars"
if (self.errorVY!=0):
print "{} errorbars from <maxVY>".format(abs(maxYDiff)/self.errorVY)
| false | true |
f72aff11df732c260aca806b126e282388a93204 | 4,897 | py | Python | seahub/api2/authentication.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 2 | 2017-06-21T09:46:55.000Z | 2018-05-30T10:07:32.000Z | seahub/api2/authentication.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | null | null | null | seahub/api2/authentication.py | saukrIppl/newsea | 0fd5ab2ade9a8fb16b1e7b43ba13dac32eb39603 | [
"Apache-2.0"
] | 1 | 2020-10-01T04:11:41.000Z | 2020-10-01T04:11:41.000Z | import datetime
import logging
from rest_framework import status
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import APIException
import seaserv
from seahub.base.accounts import User
from seahub.constants import GUEST_USER
from seahub.api2.models import Token, TokenV2
from seahub.api2.utils import get_client_ip
from seahub.utils import within_time_range
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
logger = logging.getLogger(__name__)
HEADER_CLIENT_VERSION = 'HTTP_X_SEAFILE_CLIENT_VERSION'
HEADER_PLATFORM_VERSION = 'HTTP_X_SEAFILE_PLATFORM_VERSION'
class AuthenticationFailed(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_detail = 'Incorrect authentication credentials.'
def __init__(self, detail=None):
self.detail = detail or self.default_detail
class TokenAuthentication(BaseAuthentication):
"""
Simple token based authentication.
Clients should authenticate by passing the token key in the "Authorization"
HTTP header, prepended with the string "Token ". For example:
Authorization: Token 401f7ac837da42b97f613d789819ff93537bee6a
A custom token model may be used, but must have the following properties.
* key -- The string identifying the token
* user -- The user to which the token belongs
"""
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if not auth or auth[0].lower() != 'token':
return None
if len(auth) == 1:
msg = 'Invalid token header. No credentials provided.'
raise AuthenticationFailed(msg)
elif len(auth) > 2:
msg = 'Invalid token header. Token string should not contain spaces.'
raise AuthenticationFailed(msg)
key = auth[1]
ret = self.authenticate_v2(request, key)
if ret:
return ret
return self.authenticate_v1(request, key)
def _populate_user_permissions(self, user):
"""Disable some operations if ``user`` is a guest.
"""
if user.role == GUEST_USER:
user.permissions.can_add_repo = lambda: False
user.permissions.can_add_group = lambda: False
user.permissions.can_view_org = lambda: False
user.permissions.can_use_global_address_book = lambda: False
user.permissions.can_generate_shared_link = lambda: False
def authenticate_v1(self, request, key):
try:
token = Token.objects.get(key=key)
except Token.DoesNotExist:
raise AuthenticationFailed('Invalid token')
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
raise AuthenticationFailed('User inactive or deleted')
if MULTI_TENANCY:
orgs = seaserv.get_orgs_by_user(token.user)
if orgs:
user.org = orgs[0]
self._populate_user_permissions(user)
if user.is_active:
return (user, token)
def authenticate_v2(self, request, key):
try:
token = TokenV2.objects.get(key=key)
except TokenV2.DoesNotExist:
return None # Continue authentication in token v1
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
raise AuthenticationFailed('User inactive or deleted')
if MULTI_TENANCY:
orgs = seaserv.get_orgs_by_user(token.user)
if orgs:
user.org = orgs[0]
self._populate_user_permissions(user)
if user.is_active:
need_save = False
# We update the device's last_login_ip, client_version, platform_version if changed
ip = get_client_ip(request)
if ip and ip != token.last_login_ip:
token.last_login_ip = ip
need_save = True
client_version = request.META.get(HEADER_CLIENT_VERSION, '')
if client_version and client_version != token.client_version:
token.client_version = client_version
need_save = True
platform_version = request.META.get(HEADER_PLATFORM_VERSION, '')
if platform_version and platform_version != token.platform_version:
token.platform_version = platform_version
need_save = True
if not within_time_range(token.last_accessed, datetime.datetime.now(), 10 * 60):
# We only need 10min precision for the last_accessed field
need_save = True
if need_save:
try:
token.save()
except:
logger.exception('error when save token v2:')
return (user, token)
| 33.772414 | 95 | 0.647131 | import datetime
import logging
from rest_framework import status
from rest_framework.authentication import BaseAuthentication
from rest_framework.exceptions import APIException
import seaserv
from seahub.base.accounts import User
from seahub.constants import GUEST_USER
from seahub.api2.models import Token, TokenV2
from seahub.api2.utils import get_client_ip
from seahub.utils import within_time_range
try:
from seahub.settings import MULTI_TENANCY
except ImportError:
MULTI_TENANCY = False
logger = logging.getLogger(__name__)
HEADER_CLIENT_VERSION = 'HTTP_X_SEAFILE_CLIENT_VERSION'
HEADER_PLATFORM_VERSION = 'HTTP_X_SEAFILE_PLATFORM_VERSION'
class AuthenticationFailed(APIException):
status_code = status.HTTP_401_UNAUTHORIZED
default_detail = 'Incorrect authentication credentials.'
def __init__(self, detail=None):
self.detail = detail or self.default_detail
class TokenAuthentication(BaseAuthentication):
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', '').split()
if not auth or auth[0].lower() != 'token':
return None
if len(auth) == 1:
msg = 'Invalid token header. No credentials provided.'
raise AuthenticationFailed(msg)
elif len(auth) > 2:
msg = 'Invalid token header. Token string should not contain spaces.'
raise AuthenticationFailed(msg)
key = auth[1]
ret = self.authenticate_v2(request, key)
if ret:
return ret
return self.authenticate_v1(request, key)
def _populate_user_permissions(self, user):
if user.role == GUEST_USER:
user.permissions.can_add_repo = lambda: False
user.permissions.can_add_group = lambda: False
user.permissions.can_view_org = lambda: False
user.permissions.can_use_global_address_book = lambda: False
user.permissions.can_generate_shared_link = lambda: False
def authenticate_v1(self, request, key):
try:
token = Token.objects.get(key=key)
except Token.DoesNotExist:
raise AuthenticationFailed('Invalid token')
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
raise AuthenticationFailed('User inactive or deleted')
if MULTI_TENANCY:
orgs = seaserv.get_orgs_by_user(token.user)
if orgs:
user.org = orgs[0]
self._populate_user_permissions(user)
if user.is_active:
return (user, token)
def authenticate_v2(self, request, key):
try:
token = TokenV2.objects.get(key=key)
except TokenV2.DoesNotExist:
return None
try:
user = User.objects.get(email=token.user)
except User.DoesNotExist:
raise AuthenticationFailed('User inactive or deleted')
if MULTI_TENANCY:
orgs = seaserv.get_orgs_by_user(token.user)
if orgs:
user.org = orgs[0]
self._populate_user_permissions(user)
if user.is_active:
need_save = False
ip = get_client_ip(request)
if ip and ip != token.last_login_ip:
token.last_login_ip = ip
need_save = True
client_version = request.META.get(HEADER_CLIENT_VERSION, '')
if client_version and client_version != token.client_version:
token.client_version = client_version
need_save = True
platform_version = request.META.get(HEADER_PLATFORM_VERSION, '')
if platform_version and platform_version != token.platform_version:
token.platform_version = platform_version
need_save = True
if not within_time_range(token.last_accessed, datetime.datetime.now(), 10 * 60):
# We only need 10min precision for the last_accessed field
need_save = True
if need_save:
try:
token.save()
except:
logger.exception('error when save token v2:')
return (user, token)
| true | true |
f72affbaf63edad2e1efdfe81604b7c4734c0339 | 405 | py | Python | setup.py | mstroud/python-matrix-gfyrslf | 0375bfb12d1cd50611f01101917d2cd2123543e4 | [
"MIT"
] | null | null | null | setup.py | mstroud/python-matrix-gfyrslf | 0375bfb12d1cd50611f01101917d2cd2123543e4 | [
"MIT"
] | null | null | null | setup.py | mstroud/python-matrix-gfyrslf | 0375bfb12d1cd50611f01101917d2cd2123543e4 | [
"MIT"
] | null | null | null | from distutils.core import setup
DESC='A simple, extensible chatbot for Matrix'
setup(
name='python-matrix-gfyrslf',
version='0.1',
author='Matt Stroud',
author_email='see github',
url='https://github.com/mstroud/python-matrix-gfyrslf',
packages=['python-matrix-gfyrslf'],
install_requires=['matrix_client'],
license='MIT',
summary=DESC,
long_description=DESC,
)
| 23.823529 | 59 | 0.688889 | from distutils.core import setup
DESC='A simple, extensible chatbot for Matrix'
setup(
name='python-matrix-gfyrslf',
version='0.1',
author='Matt Stroud',
author_email='see github',
url='https://github.com/mstroud/python-matrix-gfyrslf',
packages=['python-matrix-gfyrslf'],
install_requires=['matrix_client'],
license='MIT',
summary=DESC,
long_description=DESC,
)
| true | true |
f72b00a5286e87e05ac8c588aa0072278e0c0565 | 30 | py | Python | bot/__init__.py | Sc2-AI-Cup/example-bot-workerrush | 6a4ddcc4c22018bcd64d07ba405b7ef13ed634f2 | [
"MIT"
] | null | null | null | bot/__init__.py | Sc2-AI-Cup/example-bot-workerrush | 6a4ddcc4c22018bcd64d07ba405b7ef13ed634f2 | [
"MIT"
] | null | null | null | bot/__init__.py | Sc2-AI-Cup/example-bot-workerrush | 6a4ddcc4c22018bcd64d07ba405b7ef13ed634f2 | [
"MIT"
] | null | null | null | from .bot import WorkerRushBot | 30 | 30 | 0.866667 | from .bot import WorkerRushBot | true | true |
f72b00c52fc98e9202a373c7817029e4bb84f7b4 | 8,185 | py | Python | controllers.py | Yoshiyuki-Su/FastAPITodo | d9efcc2793eb5191f70923eb669eb9a1a3fcc427 | [
"MIT"
] | null | null | null | controllers.py | Yoshiyuki-Su/FastAPITodo | d9efcc2793eb5191f70923eb669eb9a1a3fcc427 | [
"MIT"
] | 6 | 2020-11-23T14:38:55.000Z | 2021-01-10T16:55:57.000Z | controllers.py | Yoshiyuki-Su/FastAPITodo | d9efcc2793eb5191f70923eb669eb9a1a3fcc427 | [
"MIT"
] | null | null | null | from fastapi import FastAPI, Depends, Form
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from starlette.templating import Jinja2Templates
from starlette.requests import Request
from starlette.responses import RedirectResponse
from datetime import datetime, timedelta
import db
import hashlib
from mycalendar import MyCalendar
import re
from auth import auth
from models import User, Task
# Application instance; title/description/version appear in the auto-generated
# OpenAPI docs (these are user-facing runtime strings, intentionally left in
# Japanese).
app = FastAPI(
    title='FastAPIでつくるToDoアプリケーション',
    description='FastAPIチュートリアル:FastAPI(とstarlette)でシンプルなToDoアプリの作成',
    version='0.0.1'
)
# HTTP Basic auth scheme, injected into protected endpoints via Depends(security).
security = HTTPBasic()
# Jinja2 template renderer rooted at ./templates; ``jinja_env`` exposes the
# underlying Jinja environment (e.g. for registering filters elsewhere).
templates = Jinja2Templates(directory="templates")
jinja_env = templates.env
# Validation patterns used by the /register handler (checked with ``.match()``,
# which anchors only at the START of the string).  ``\w`` matches letters,
# digits and underscore.
#
# Bug fix: the originals were unanchored at the end, so ``.match()`` accepted
# over-long input — e.g. a 30-character username matched its first 20 chars
# and passed validation despite the "4-20 characters" rule.  ``\Z`` anchors
# the match at the true end of the string ("$" would still allow a trailing
# newline).
pattern = re.compile(r'\w{4,20}\Z')        # username: 4-20 word characters
pattern_pw = re.compile(r'\w{6,20}\Z')     # password: 6-20 word characters
pattern_mail = re.compile(r'^\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*\Z')  # e-mail address
def index(request: Request):
    """Render the landing page."""
    context = {'request': request}
    return templates.TemplateResponse('index.html', context)
def admin(request: Request, credentials: HTTPBasicCredentials = Depends(security)):
    """Render the authenticated user's dashboard.

    Shows a year calendar that highlights the days with scheduled tasks,
    plus the tasks due within the next seven days with links to their
    per-day detail pages.
    """
    username = auth(credentials)
    user = db.session.query(User).filter(User.username == username).first()
    all_tasks = db.session.query(Task).filter(Task.user_id == user.id).all()
    db.session.close()

    # Today's date and the date one week from now.
    now = datetime.now()
    week_ahead = now + timedelta(days=7)

    # Build the HTML calendar; days with a task map to their "done" flag.
    day_status = {t.deadline.strftime('%Y%m%d'): t.done for t in all_tasks}
    calendar_html = MyCalendar(username, day_status).formatyear(now.year, 4)

    # Keep only the tasks due within the coming week and build their links.
    upcoming = [t for t in all_tasks if now <= t.deadline <= week_ahead]
    links = [t.deadline.strftime('/todo/' + username + '/%Y/%m/%d')
             for t in upcoming]

    context = {'request': request,
               'user': user,
               'task': upcoming,
               'links': links,
               'calender': calendar_html}
    return templates.TemplateResponse('admin.html', context)
async def register(request: Request):
    """Show the registration form (GET) or register a new user (POST).

    POST validates the submitted username, password and e-mail address and
    re-renders the form with error messages when validation fails; on
    success the user is stored and a completion page is rendered.
    """
    if request.method == 'GET':
        return templates.TemplateResponse('register.html',
                                          {'request': request,
                                           'username': '',
                                           'error': []})
    if request.method == 'POST':
        data = await request.form()
        # Default missing fields to '' so the validators below report a
        # normal validation error instead of raising TypeError on None.
        username = data.get('username', '')
        password = data.get('password', '')
        password_tmp = data.get('password_tmp', '')
        mail = data.get('mail', '')
        error = []
        tmp_user = db.session.query(User).filter(User.username == username).first()
        if tmp_user is not None:
            error.append('同じユーザ名のユーザが存在します。')
        if password != password_tmp:
            error.append('入力したパスワードが一致しません。')
        # BUG FIX: match() only anchors at the start of the string, so
        # over-long values passed the length checks; fullmatch() enforces
        # the documented 4-20 / 6-20 character limits.
        if pattern.fullmatch(username) is None:
            error.append('ユーザ名は4~20文字の半角英数字にしてください。')
        if pattern_pw.fullmatch(password) is None:
            error.append('パスワードは6~20文字の半角英数字にしてください。')
        # pattern_mail is already anchored with ^...$ so match() is correct.
        if pattern_mail.match(mail) is None:
            error.append('正しくメールアドレスを入力してください。')
        # Re-render the registration page when validation failed.
        if error:
            return templates.TemplateResponse('register.html',
                                              {'request': request,
                                               'username': username,
                                               'error': error})
        # All checks passed: create and store the user.
        user = User(username, password, mail)
        db.session.add(user)
        db.session.commit()
        db.session.close()
        return templates.TemplateResponse('complete.html',
                                          {'request': request,
                                           'username': username})
def detail(request: Request, username, year, month, day,
           credentials: HTTPBasicCredentials = Depends(security)):
    """Render the task list of one user for one specific day."""
    username_tmp = auth(credentials)
    if username_tmp != username:  # reject visitors who are not the page owner
        return RedirectResponse('/')
    # Fetch the logged-in user.
    user = db.session.query(User).filter(User.username == username).first()
    # Fetch the logged-in user's tasks.
    task = db.session.query(Task).filter(Task.user_id == user.id).all()
    db.session.close()
    # Keep only the tasks whose deadline falls on the requested day.
    theday = f'{year}{month.zfill(2)}{day.zfill(2)}'  # zero-pad month and day
    task = [t for t in task if t.deadline.strftime('%Y%m%d') == theday]
    return templates.TemplateResponse('detail.html',
                                      {'request': request,
                                       'username': username,
                                       'task': task,
                                       'year': year,
                                       'month': month,
                                       'day': day})
async def done(request: Request, credentials: HTTPBasicCredentials = Depends(security)):
    """Mark the tasks checked in the submitted form as completed."""
    username = auth(credentials)
    # Fetch the authenticated user.
    user = db.session.query(User).filter(User.username == username).first()
    # Fetch the authenticated user's tasks.
    task = db.session.query(Task).filter(Task.user_id == user.id).all()
    # Flip the "done" flag of every task whose id was checked in the form.
    data = await request.form()
    t_dones = data.getlist('done[]')  # ids submitted as completed, as a list
    for t in task:
        if str(t.id) in t_dones:  # id matches a checked checkbox
            t.done = True
    db.session.commit()  # persist the updates
    db.session.close()
    return RedirectResponse('/admin')
async def add(request: Request, credentials: HTTPBasicCredentials = Depends(security)):
    """Create a new task for the authenticated user from the posted form.

    The form supplies the task content plus the deadline split into
    year/month/day/hour/minute fields.
    """
    username = auth(credentials)
    user = db.session.query(User).filter(User.username == username).first()
    # Fetch the posted form data.
    # FIX: removed leftover debug `print(data)` which logged raw form input.
    data = await request.form()
    # Form fields arrive as strings; assemble the deadline timestamp.
    year = int(data['year'])
    month = int(data['month'])
    day = int(data['day'])
    hour = int(data['hour'])
    minute = int(data['minute'])
    deadline = datetime(year=year, month=month, day=day,
                        hour=hour, minute=minute)
    # Create the task and commit it.
    task = Task(user.id, data['content'], deadline)
    db.session.add(task)
    db.session.commit()
    db.session.close()
    return RedirectResponse('/admin')
def delete(request: Request, t_id, credentials: HTTPBasicCredentials = Depends(security)):
    """Delete the task with id `t_id`, but only if it belongs to the caller."""
    username = auth(credentials)
    user = db.session.query(User).filter(User.username == username).first()
    task = db.session.query(Task).filter(Task.id == t_id).first()
    # If the task belongs to a different user, redirect without deleting.
    if task.user_id != user.id:
        return RedirectResponse('/admin')
    # Delete the task and commit.
    db.session.delete(task)
    db.session.commit()
    db.session.close()
    return RedirectResponse('/admin')
def get(request: Request, credentials: HTTPBasicCredentials = Depends(security)):
    """Return all of the authenticated user's tasks as JSON-serializable dicts."""
    username = auth(credentials)
    user = db.session.query(User).filter(User.username == username).first()
    task = db.session.query(Task).filter(Task.user_id == user.id).all()
    db.session.close()
    # Shape each task as a plain dict for the JSON response.
    task = [{
        'id': t.id,
        'content': t.content,
        'deadline': t.deadline.strftime('%Y-%m-%d %H:%M:%S'),
        'published': t.date.strftime('%Y-%m-%d %H:%M:%S'),
        'done': t.done,
    } for t in task]
    return task
async def insert(request: Request,
                 content: str = Form(...), deadline: str = Form(...),
                 credentials: HTTPBasicCredentials = Depends(security)):
    """
    Add a task and return the new task as JSON. "deadline" uses the format
    %Y-%m-%d_%H:%M:%S (e.g. 2019-11-03_12:30:00).
    """
    username = auth(credentials)
    user = db.session.query(User).filter(User.username == username).first()
    task = Task(user.id, content, datetime.strptime(deadline, '%Y-%m-%d_%H:%M:%S'))
    db.session.add(task)
    db.session.commit()
    # Re-read the task just appended to the table so generated fields
    # (id, creation date) are populated.
    task = db.session.query(Task).all()[-1]
    db.session.close()
    # Return the new task as JSON.
    return {
        'id': task.id,
        'content': task.content,
        'deadline': task.deadline.strftime('%Y-%m-%d %H:%M:%S'),
        'published': task.date.strftime('%Y-%m-%d %H:%M:%S'),
        'done': task.done,
    }
| 32.871486 | 94 | 0.579475 | from fastapi import FastAPI, Depends, Form
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from starlette.templating import Jinja2Templates
from starlette.requests import Request
from starlette.responses import RedirectResponse
from datetime import datetime, timedelta
import db
import hashlib
from mycalendar import MyCalendar
import re
from auth import auth
from models import User, Task
app = FastAPI(
title='FastAPIでつくるToDoアプリケーション',
description='FastAPIチュートリアル:FastAPI(とstarlette)でシンプルなToDoアプリの作成',
version='0.0.1'
)
security = HTTPBasic()
templates = Jinja2Templates(directory="templates")
jinja_env = templates.env
pattern = re.compile(r'\w{4,20}')
pattern_pw = re.compile(r'\w{6,20}')
pattern_mail = re.compile(r'^\w+([-+.]\w+)*@\w+([-.]\w+)*\.\w+([-.]\w+)*$')
def index(request: Request):
return templates.TemplateResponse('index.html',
{'request': request})
def admin(request: Request, credentials: HTTPBasicCredentials = Depends(security)):
username = auth(credentials)
user = db.session.query(User).filter(User.username == username).first()
task = db.session.query(Task).filter(Task.user_id == user.id).all()
db.session.close()
today = datetime.now()
next_w = today + timedelta(days=7)
cal = MyCalendar(username,
{t.deadline.strftime('%Y%m%d'): t.done for t in task})
cal = cal.formatyear(today.year, 4)
task = [t for t in task if today <= t.deadline <= next_w]
links = [t.deadline.strftime('/todo/'+username+'/%Y/%m/%d') for t in task]
return templates.TemplateResponse('admin.html',
{'request': request,
'user': user,
'task': task,
'links': links,
'calender': cal})
async def register(request: Request):
if request.method == 'GET':
return templates.TemplateResponse('register.html',
{'request': request,
'username': '',
'error': []})
if request.method == 'POST':
data = await request.form()
username = data.get('username')
password = data.get('password')
password_tmp = data.get('password_tmp')
mail = data.get('mail')
error = []
tmp_user = db.session.query(User).filter(User.username == username).first()
if tmp_user is not None:
error.append('同じユーザ名のユーザが存在します。')
if password != password_tmp:
error.append('入力したパスワードが一致しません。')
if pattern.match(username) is None:
error.append('ユーザ名は4~20文字の半角英数字にしてください。')
if pattern_pw.match(password) is None:
error.append('パスワードは6~20文字の半角英数字にしてください。')
if pattern_mail.match(mail) is None:
error.append('正しくメールアドレスを入力してください。')
if error:
return templates.TemplateResponse('register.html',
{'request': request,
'username': username,
'error': error})
user = User(username, password, mail)
db.session.add(user)
db.session.commit()
db.session.close()
return templates.TemplateResponse('complete.html',
{'request': request,
'username': username})
def detail(request: Request, username, year, month, day,
credentials: HTTPBasicCredentials = Depends(security)):
username_tmp = auth(credentials)
if username_tmp != username:
return RedirectResponse('/')
user = db.session.query(User).filter(User.username == username).first()
task = db.session.query(Task).filter(Task.user_id == user.id).all()
db.session.close()
theday = f'{year}{month.zfill(2)}{day.zfill(2)}'
task = [t for t in task if t.deadline.strftime('%Y%m%d') == theday]
return templates.TemplateResponse('detail.html',
{'request': request,
'username': username,
'task': task,
'year': year,
'month': month,
'day': day})
async def done(request: Request, credentials: HTTPBasicCredentials = Depends(security)):
username = auth(credentials)
user = db.session.query(User).filter(User.username == username).first()
task = db.session.query(Task).filter(Task.user_id == user.id).all()
data = await request.form()
t_dones = data.getlist('done[]')
for t in task:
if str(t.id) in t_dones:
t.done = True
db.session.commit()
db.session.close()
return RedirectResponse('/admin')
async def add(request: Request, credentials: HTTPBasicCredentials = Depends(security)):
username = auth(credentials)
user = db.session.query(User).filter(User.username == username).first()
data = await request.form()
print(data)
year = int(data['year'])
month = int(data['month'])
day = int(data['day'])
hour = int(data['hour'])
minute = int(data['minute'])
deadline = datetime(year=year, month=month, day=day,
hour=hour, minute=minute)
task = Task(user.id, data['content'], deadline)
db.session.add(task)
db.session.commit()
db.session.close()
return RedirectResponse('/admin')
def delete(request: Request, t_id, credentials: HTTPBasicCredentials = Depends(security)):
username = auth(credentials)
user = db.session.query(User).filter(User.username == username).first()
task = db.session.query(Task).filter(Task.id == t_id).first()
if task.user_id != user.id:
return RedirectResponse('/admin')
db.session.delete(task)
db.session.commit()
db.session.close()
return RedirectResponse('/admin')
def get(request: Request, credentials: HTTPBasicCredentials = Depends(security)):
username = auth(credentials)
user = db.session.query(User).filter(User.username == username).first()
task = db.session.query(Task).filter(Task.user_id == user.id).all()
db.session.close()
task = [{
'id': t.id,
'content': t.content,
'deadline': t.deadline.strftime('%Y-%m-%d %H:%M:%S'),
'published': t.date.strftime('%Y-%m-%d %H:%M:%S'),
'done': t.done,
} for t in task]
return task
async def insert(request: Request,
content: str = Form(...), deadline: str = Form(...),
credentials: HTTPBasicCredentials = Depends(security)):
username = auth(credentials)
user = db.session.query(User).filter(User.username == username).first()
task = Task(user.id, content, datetime.strptime(deadline, '%Y-%m-%d_%H:%M:%S'))
db.session.add(task)
db.session.commit()
task = db.session.query(Task).all()[-1]
db.session.close()
return {
'id': task.id,
'content': task.content,
'deadline': task.deadline.strftime('%Y-%m-%d %H:%M:%S'),
'published': task.date.strftime('%Y-%m-%d %H:%M:%S'),
'done': task.done,
}
| true | true |
f72b00ff538cfdf542ff5ed70d45d7fe2e7d661e | 2,499 | py | Python | bin/old/findUnannotated.py | PapenfussLab/Srtools | 6dff62cd8d1615d4f7d4e5b8a0de9ba8eebab90e | [
"Artistic-2.0"
] | null | null | null | bin/old/findUnannotated.py | PapenfussLab/Srtools | 6dff62cd8d1615d4f7d4e5b8a0de9ba8eebab90e | [
"Artistic-2.0"
] | null | null | null | bin/old/findUnannotated.py | PapenfussLab/Srtools | 6dff62cd8d1615d4f7d4e5b8a0de9ba8eebab90e | [
"Artistic-2.0"
] | null | null | null | #!/usr/bin/env python
"""
findUnannotated.py
Author: Tony Papenfuss
Date: Fri Aug 15 12:19:24 EST 2008
"""
import os, sys
from bx.intervals.intersection import *
from fasta import FastaFile
from blast import BlastFile
from useful import progressMessage
print "Load Solexa contigs & store as Intervals in an Intersecter object"
contigData = {}
for h,seq in FastaFile('../solexa/solexa_contigs.fa'):
tokens = h.split()
name = tokens[0]
chrom,se = tokens[1].split(':')
start,end = [int(x) for x in se.split('-')]
contig = Interval(start, end, value=(name,chrom,start,end,seq))
try:
contigData[chrom].add_interval(contig)
except KeyError:
contigData[chrom] = Intersecter()
contigData[chrom].add_interval(contig)
# print "Load 454 contig HSPs & store"
# for b in BlastFile('../454/blastn_contigs_v_genome.txt'):
# b.convertBlockToGenomeCoords()
# contig = Interval(b.sStart, b.sEnd, value=(name,b.subjectId,b.sStart,b.sEnd,''))
# try:
# contigData[chrom].add_interval(contig)
# except KeyError:
# contigData[chrom] = Intersecter()
# contigData[chrom].add_interval(contig)
print 'Parse genes'
iFilename = '/Users/papenfuss/databases/platypus/ensembl/Release50/mart_names_locations.txt'
iFile = open(iFilename)
headers = iFile.readline()
annotated = set()
for i,line in enumerate(iFile):
if (i % 1000)==0:
progressMessage('# genes %s', i)
tokens = line.strip().split('\t')
geneId = tokens[0]
transId = tokens[1]
name = tokens[3]
chrom = tokens[5]
start = int(tokens[6])
end = int(tokens[7])
strand = {'1': '+', '-1': '-'}[tokens[8]]
try:
for contig in contigData[chrom].find(start-500, end+500):
annotated.add(contig.value[0])
except:
pass
print 'Parse toxprot alignments'
iFilename = '../toxprot/tblastn_toxprot_v_genome.txt'
for b in BlastFile(iFilename):
chrom = b.subjectId.split(':')[0]
try:
for contig in contigData[chrom].find(b.sStart, b.sEnd):
annotated.add(contig.value[0])
except:
pass
print "Write out what's left over"
writer = FastaFile('unannotated_contigs.fa', 'w')
for chrom in contigData:
for contig in contigData[chrom].intervals:
if not contig.value[0] in annotated and len(contig.value[4])>60:
name,chrom,start,end,seq = contig.value
writer('%s %s:%i-%i' % (name,chrom,start,end), seq)
writer.close()
| 27.163043 | 92 | 0.648259 |
"""
findUnannotated.py
Author: Tony Papenfuss
Date: Fri Aug 15 12:19:24 EST 2008
"""
import os, sys
from bx.intervals.intersection import *
from fasta import FastaFile
from blast import BlastFile
from useful import progressMessage
print "Load Solexa contigs & store as Intervals in an Intersecter object"
contigData = {}
for h,seq in FastaFile('../solexa/solexa_contigs.fa'):
tokens = h.split()
name = tokens[0]
chrom,se = tokens[1].split(':')
start,end = [int(x) for x in se.split('-')]
contig = Interval(start, end, value=(name,chrom,start,end,seq))
try:
contigData[chrom].add_interval(contig)
except KeyError:
contigData[chrom] = Intersecter()
contigData[chrom].add_interval(contig)
print 'Parse genes'
iFilename = '/Users/papenfuss/databases/platypus/ensembl/Release50/mart_names_locations.txt'
iFile = open(iFilename)
headers = iFile.readline()
annotated = set()
for i,line in enumerate(iFile):
if (i % 1000)==0:
progressMessage('# genes %s', i)
tokens = line.strip().split('\t')
geneId = tokens[0]
transId = tokens[1]
name = tokens[3]
chrom = tokens[5]
start = int(tokens[6])
end = int(tokens[7])
strand = {'1': '+', '-1': '-'}[tokens[8]]
try:
for contig in contigData[chrom].find(start-500, end+500):
annotated.add(contig.value[0])
except:
pass
print 'Parse toxprot alignments'
iFilename = '../toxprot/tblastn_toxprot_v_genome.txt'
for b in BlastFile(iFilename):
chrom = b.subjectId.split(':')[0]
try:
for contig in contigData[chrom].find(b.sStart, b.sEnd):
annotated.add(contig.value[0])
except:
pass
print "Write out what's left over"
writer = FastaFile('unannotated_contigs.fa', 'w')
for chrom in contigData:
for contig in contigData[chrom].intervals:
if not contig.value[0] in annotated and len(contig.value[4])>60:
name,chrom,start,end,seq = contig.value
writer('%s %s:%i-%i' % (name,chrom,start,end), seq)
writer.close()
| false | true |
f72b01644b9c24e4ff1dde34645ffd6b1aec9355 | 2,765 | py | Python | Contrib/LEF/ClusterFps.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [
"BSD-3-Clause"
] | 1,609 | 2015-01-05T02:41:13.000Z | 2022-03-30T21:57:24.000Z | Contrib/LEF/ClusterFps.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [
"BSD-3-Clause"
] | 3,412 | 2015-01-06T12:13:33.000Z | 2022-03-31T17:25:41.000Z | Contrib/LEF/ClusterFps.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [
"BSD-3-Clause"
] | 811 | 2015-01-11T03:33:48.000Z | 2022-03-28T11:57:49.000Z | #
# Copyright (c) 2009, Novartis Institutes for BioMedical Research Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Novartis Institutes for BioMedical Research Inc.
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Created by Greg Landrum and Anna Vulpetti, March 2009
from rdkit.ML.Cluster import Butina
from rdkit import DataStructs
import sys, pickle

# sims is the list of similarity thresholds used to generate clusters.
sims = [.9, .8, .7, .6]

# Collect unique molecules (by SMILES) across all input pickle files and
# label each distinct fingerprint as FP_<index>.
smis = []
uniq = []
uFps = []
for fileN in sys.argv[1:]:
  # BUG FIX: the original always reopened sys.argv[1] (ignoring fileN) via
  # the Python-2-only file() builtin; open each argument instead, in binary
  # mode as pickle requires.
  inF = open(fileN, 'rb')
  cols = pickle.load(inF)
  fps = pickle.load(inF)
  for row in fps:
    nm, smi, fp = row[:3]
    if smi not in smis:
      try:
        fpIdx = uFps.index(fp)
      except ValueError:
        fpIdx = len(uFps)
        uFps.append(fp)
      uniq.append([fp, nm, smi, 'FP_%d' % fpIdx] + row[3:])
      smis.append(smi)


def distFunc(a, b):
  """Distance between two rows: 1 - Dice similarity of their fingerprints."""
  return 1. - DataStructs.DiceSimilarity(a[0], b[0])


# Cluster at each threshold and append the cluster id as a new column.
for sim in sims:
  clusters = Butina.ClusterData(uniq, len(uniq), 1. - sim, False, distFunc)
  print('Sim: %.2f, nClusters: %d' % (sim, len(clusters)), file=sys.stderr)
  for i, cluster in enumerate(clusters):
    for pt in cluster:
      uniq[pt].append(str(i + 1))
  cols.append('cluster_thresh_%d' % (int(100 * sim)))

# Emit the augmented table (fingerprint object itself is dropped).
print(' '.join(cols))
for row in uniq:
  print(' '.join(row[1:]))
| 37.364865 | 86 | 0.707052 |
from rdkit.ML.Cluster import Butina
from rdkit import DataStructs
import sys, pickle
sims = [.9, .8, .7, .6]
smis = []
uniq = []
uFps = []
for fileN in sys.argv[1:]:
inF = file(sys.argv[1], 'r')
cols = pickle.load(inF)
fps = pickle.load(inF)
for row in fps:
nm, smi, fp = row[:3]
if smi not in smis:
try:
fpIdx = uFps.index(fp)
except ValueError:
fpIdx = len(uFps)
uFps.append(fp)
uniq.append([fp, nm, smi, 'FP_%d' % fpIdx] + row[3:])
smis.append(smi)
def distFunc(a, b):
return 1. - DataStructs.DiceSimilarity(a[0], b[0])
for sim in sims:
clusters = Butina.ClusterData(uniq, len(uniq), 1. - sim, False, distFunc)
print('Sim: %.2f, nClusters: %d' % (sim, len(clusters)), file=sys.stderr)
for i, cluster in enumerate(clusters):
for pt in cluster:
uniq[pt].append(str(i + 1))
cols.append('cluster_thresh_%d' % (int(100 * sim)))
print(' '.join(cols))
for row in uniq:
print(' '.join(row[1:]))
| true | true |
f72b01a7f0fb8665343e290a8c45dfabc5c03f99 | 801 | py | Python | predictability_utils/utils/helpers.py | marpyr/forecast_predictability | 2285b37e20095ae6f67533595bcb0580882924a2 | [
"MIT"
] | 2 | 2020-10-23T08:58:18.000Z | 2021-05-03T17:30:03.000Z | predictability_utils/utils/helpers.py | marpyr/forecast_predictability | 2285b37e20095ae6f67533595bcb0580882924a2 | [
"MIT"
] | null | null | null | predictability_utils/utils/helpers.py | marpyr/forecast_predictability | 2285b37e20095ae6f67533595bcb0580882924a2 | [
"MIT"
] | 1 | 2020-10-23T09:07:19.000Z | 2020-10-23T09:07:19.000Z | import numpy as np
def compute_anomaly_corrs(out_true, out_pred):
    """Per-column Pearson correlation between predictions and ground truth.

    Args:
        out_true: array of shape (time, series) with reference values.
        out_pred: array of the same shape with predicted values.

    Returns:
        1-D float array with one correlation coefficient per column.
    """
    n_series = out_pred.shape[1]
    corrs = [np.corrcoef(out_pred[:, col], out_true[:, col])[0, 1]
             for col in range(n_series)]
    return np.asarray(corrs)
def split_train_data(train_months, test_months, train_years, test_years):
    """Build the four source/target time-index grids.

    Each index grid is month + 12 * year, broadcast so that rows vary over
    months and columns over years.

    Returns:
        Tuple (idx_source_train, idx_target_train, idx_source_test,
        idx_target_test) of integer index arrays.
    """
    def _indices(months, years):
        # Column vector of months broadcast against a flat vector of years.
        month_col = np.asarray(months).reshape(-1, 1)
        year_row = np.asarray(years).flatten()
        return month_col + 12 * year_row

    return (_indices(train_months, train_years),
            _indices(test_months, train_years),
            _indices(train_months, test_years),
            _indices(test_months, test_years))
def compute_anomaly_corrs(out_true, out_pred):
anomaly_corrs = np.zeros(out_pred.shape[1])
for i in range(anomaly_corrs.size):
anomaly_corrs[i] = np.corrcoef(out_pred[:,i], out_true[:,i])[0,1]
return anomaly_corrs
def split_train_data(train_months, test_months, train_years, test_years):
def make_idx(months, years):
return np.asarray(months).reshape(-1,1)+(12*np.asarray(years).flatten())
idx_source_train = make_idx(train_months, train_years)
idx_target_train = make_idx(test_months, train_years)
idx_source_test = make_idx(train_months, test_years)
idx_target_test = make_idx(test_months, test_years)
return idx_source_train, idx_target_train, idx_source_test, idx_target_test | true | true |
f72b01c050db440e10771a348c74c4d89b91660f | 19,971 | py | Python | dfvfs/lib/gzipfile.py | dfjxs/dfvfs | a4154b07bb08c3c86afa2847f3224189dd80c138 | [
"Apache-2.0"
] | 176 | 2015-01-02T13:55:39.000Z | 2022-03-12T11:44:37.000Z | dfvfs/lib/gzipfile.py | dfjxs/dfvfs | a4154b07bb08c3c86afa2847f3224189dd80c138 | [
"Apache-2.0"
] | 495 | 2015-01-13T06:47:06.000Z | 2022-03-12T11:07:03.000Z | dfvfs/lib/gzipfile.py | dfjxs/dfvfs | a4154b07bb08c3c86afa2847f3224189dd80c138 | [
"Apache-2.0"
] | 62 | 2015-02-23T08:19:38.000Z | 2022-03-18T06:01:22.000Z | # -*- coding: utf-8 -*-
"""Gzip compressed stream file."""
# Note: do not rename file to gzip.py this can cause the exception:
# AttributeError: 'module' object has no attribute 'GzipFile'
# when using pip.
import collections
import os
from dtfabric.runtime import fabric as dtfabric_fabric
from dfvfs.compression import zlib_decompressor
from dfvfs.lib import data_format
from dfvfs.lib import errors
class _GzipDecompressorState(object):
  """Deflate decompressor wrapper for reading a gzip member.

  Tracks both the deflate decompression object and the offset in the
  containing file of the next compressed bytes to feed it.

  Attributes:
    uncompressed_offset (int): offset into the uncompressed data in a gzip
        member last emitted by the state object.
  """

  # Number of compressed bytes read from the file per Read() call.
  _MAXIMUM_READ_SIZE = 16 * 1024 * 1024

  def __init__(self, stream_start):
    """Initializes a gzip member decompressor wrapper.

    Args:
      stream_start (int): offset to the compressed stream within the
          containing file object.
    """
    self._compressed_data = b''
    self._decompressor = zlib_decompressor.DeflateDecompressor()
    self._last_read = stream_start
    self.uncompressed_offset = 0

  def Read(self, file_object):
    """Reads the next uncompressed data from the gzip stream.

    Args:
      file_object (FileIO): file object that contains the compressed stream.

    Returns:
      bytes: next uncompressed data from the compressed stream.
    """
    file_object.seek(self._last_read, os.SEEK_SET)
    fresh_data = file_object.read(self._MAXIMUM_READ_SIZE)
    self._last_read = file_object.get_offset()

    # Prepend compressed bytes left over from the previous call so deflate
    # blocks that were split across reads decompress correctly.
    buffered_data = self._compressed_data + fresh_data
    uncompressed_data, leftover_data = self._decompressor.Decompress(
        buffered_data)
    self._compressed_data = leftover_data

    self.uncompressed_offset += len(uncompressed_data)
    return uncompressed_data

  def GetUnusedData(self):
    """Retrieves any bytes past the end of the compressed data.

    See https://docs.python.org/2/library/zlib.html#zlib.Decompress.unused_data

    Unused data can be any bytes after a Deflate compressed block (or chunk).

    Returns:
      bytes: data past the end of the compressed data, if any has been read
          from the gzip file.
    """
    return self._decompressor.unused_data
class GzipMember(data_format.DataFormat):
"""Gzip member.
Gzip files have no index of members, so each member must be read
sequentially before metadata and random seeks are possible. This class
provides caching of gzip member data during the initial read of each member.
Attributes:
comment (str): comment stored in the member.
member_end_offset (int): offset to the end of the member in the parent file
object.
member_start_offset (int): offset to the start of the member in the parent
file object.
operating_system (int): type of file system on which the compression
took place.
original_filename (str): original filename of the uncompressed file.
uncompressed_data_offset (int): offset of the start of the uncompressed
data in this member relative to the whole gzip file's uncompressed data.
uncompressed_data_size (int): total size of the data in this gzip member
after decompression.
"""
_DATA_TYPE_FABRIC_DEFINITION_FILE = os.path.join(
os.path.dirname(__file__), 'gzipfile.yaml')
with open(_DATA_TYPE_FABRIC_DEFINITION_FILE, 'rb') as file_object:
_DATA_TYPE_FABRIC_DEFINITION = file_object.read()
_DATA_TYPE_FABRIC = dtfabric_fabric.DataTypeFabric(
yaml_definition=_DATA_TYPE_FABRIC_DEFINITION)
_MEMBER_HEADER = _DATA_TYPE_FABRIC.CreateDataTypeMap(
'gzip_member_header')
_MEMBER_HEADER_SIZE = _MEMBER_HEADER.GetByteSize()
_MEMBER_FOOTER = _DATA_TYPE_FABRIC.CreateDataTypeMap(
'gzip_member_footer')
_MEMBER_FOOTER_SIZE = _MEMBER_FOOTER.GetByteSize()
_UINT16LE = _DATA_TYPE_FABRIC.CreateDataTypeMap('uint16le')
_UINT16LE_SIZE = _UINT16LE.GetByteSize()
_CSTRING = _DATA_TYPE_FABRIC.CreateDataTypeMap('cstring')
_GZIP_SIGNATURE = 0x8b1f
_COMPRESSION_METHOD_DEFLATE = 8
_FLAG_FTEXT = 0x01
_FLAG_FHCRC = 0x02
_FLAG_FEXTRA = 0x04
_FLAG_FNAME = 0x08
_FLAG_FCOMMENT = 0x10
# The maximum size of the uncompressed data cache.
_UNCOMPRESSED_DATA_CACHE_SIZE = 2 * 1024 * 1024
  def __init__(
      self, file_object, member_start_offset, uncompressed_data_offset):
    """Initializes a gzip member.

    Because gzip files carry no member index, the member's compressed data
    is scanned once here to learn its uncompressed size and end offset.

    Args:
      file_object (FileIO): file-like object, containing the gzip member.
      member_start_offset (int): offset to the beginning of the gzip member
          in the containing file.
      uncompressed_data_offset (int): offset of the start of the uncompressed
          data in this member relative to the whole gzip file's uncompressed
          data.

    Raises:
      FileFormatError: if the member header cannot be read or uses an
          unsupported signature or compression method.
    """
    self._cache = b''
    # End offset of the cached uncompressed data of the member.
    self._cache_end_offset = None
    # Start offset of the cached uncompressed data of the member.
    self._cache_start_offset = None
    self.comment = None
    self.modification_time = None
    self.operating_system = None
    self.original_filename = None
    file_size = file_object.get_size()
    file_object.seek(member_start_offset, os.SEEK_SET)
    self._ReadMemberHeader(file_object)
    data_offset = 0
    uncompressed_data_size = 0
    compressed_data_offset = file_object.get_offset()
    decompressor_state = _GzipDecompressorState(compressed_data_offset)
    # Read the member data to determine the uncompressed data size and
    # the offset of the member footer.
    file_offset = compressed_data_offset
    while file_offset < file_size:
      data_offset += uncompressed_data_size
      decompressed_data = decompressor_state.Read(file_object)
      uncompressed_data_size += len(decompressed_data)
      # Note that unused data will be set when the decompressor reads beyond
      # the end of the compressed data stream; rewind to just past the
      # compressed data so the footer can be read next.
      unused_data = decompressor_state.GetUnusedData()
      if unused_data:
        file_object.seek(-len(unused_data), os.SEEK_CUR)
        file_offset = file_object.get_offset()
        break
      file_offset = file_object.get_offset()
    # Do not read the last member footer if it is missing, which is
    # a common corruption scenario.
    if file_offset < file_size:
      self._ReadStructure(
          file_object, file_offset, self._MEMBER_FOOTER_SIZE,
          self._MEMBER_FOOTER, 'member footer')
    member_end_offset = file_object.get_offset()
    # Initialize the member with data.
    self._file_object = file_object
    self._file_object.seek(member_start_offset, os.SEEK_SET)
    # Cache uncompressed data of gzip files that fit entirely in the cache.
    # data_offset == 0 means the scan above finished in a single read, so
    # decompressed_data holds the member's complete uncompressed data.
    if (data_offset == 0 and
        uncompressed_data_size < self._UNCOMPRESSED_DATA_CACHE_SIZE):
      self._cache = decompressed_data
      self._cache_start_offset = 0
      self._cache_end_offset = uncompressed_data_size
    # Offset to the beginning of the compressed data in the file object.
    self._compressed_data_start = compressed_data_offset
    self._decompressor_state = _GzipDecompressorState(compressed_data_offset)
    # Offset to the start of the member in the parent file object.
    self.member_start_offset = member_start_offset
    # Offset to the end of the member in the parent file object.
    self.member_end_offset = member_end_offset
    # Total size of the data in this gzip member after decompression.
    self.uncompressed_data_size = uncompressed_data_size
    # Offset of the start of the uncompressed data in this member relative to
    # the whole gzip file's uncompressed data.
    self.uncompressed_data_offset = uncompressed_data_offset
def _GetCacheSize(self):
"""Determines the size of the uncompressed cached data.
Returns:
int: number of cached bytes.
"""
if None in (self._cache_start_offset, self._cache_end_offset):
return 0
return self._cache_end_offset - self._cache_start_offset
def _IsCacheFull(self):
"""Checks whether the uncompressed data cache is full.
Returns:
bool: True if the cache is full.
"""
return self._GetCacheSize() >= self._UNCOMPRESSED_DATA_CACHE_SIZE
  def _LoadDataIntoCache(self, file_object, minimum_offset):
    """Reads and decompresses the data in the member.

    Loads as much data as possible into the cache, up to
    _UNCOMPRESSED_DATA_CACHE_SIZE bytes, starting at minimum_offset in the
    member's uncompressed data.

    Args:
      file_object (FileIO): file-like object.
      minimum_offset (int): offset into this member's uncompressed data at
          which the cache should start.
    """
    # Decompression can only be performed from beginning to end of the stream.
    # So, if data before the current position of the decompressor in the stream
    # is required, it's necessary to throw away the current decompression
    # state and start again.
    if minimum_offset < self._decompressor_state.uncompressed_offset:
      self._ResetDecompressorState()
    cache_is_full = self._IsCacheFull()
    while not cache_is_full:
      decompressed_data = self._decompressor_state.Read(file_object)
      # Note that decompressed_data will be empty if there is no data left
      # to read and decompress.
      if not decompressed_data:
        break
      decompressed_data_length = len(decompressed_data)
      decompressed_end_offset = self._decompressor_state.uncompressed_offset
      decompressed_start_offset = (
          decompressed_end_offset - decompressed_data_length)
      data_to_add = decompressed_data
      added_data_start_offset = decompressed_start_offset
      # Chunks entirely before minimum_offset are skipped; a chunk straddling
      # minimum_offset is trimmed so caching starts exactly at that offset.
      if decompressed_start_offset < minimum_offset:
        data_to_add = None
      if decompressed_start_offset < minimum_offset < decompressed_end_offset:
        data_add_offset = decompressed_end_offset - minimum_offset
        data_to_add = decompressed_data[-data_add_offset:]
        added_data_start_offset = decompressed_end_offset - data_add_offset
      if data_to_add and not cache_is_full:
        self._cache = b''.join([self._cache, data_to_add])
        if self._cache_start_offset is None:
          self._cache_start_offset = added_data_start_offset
        if self._cache_end_offset is None:
          self._cache_end_offset = self._cache_start_offset + len(data_to_add)
        else:
          self._cache_end_offset += len(data_to_add)
        cache_is_full = self._IsCacheFull()
      # If there's no more data in the member, the unused_data value is
      # populated in the decompressor. When this situation arises, we rewind
      # to the end of the compressed_data section.
      unused_data = self._decompressor_state.GetUnusedData()
      if unused_data:
        seek_offset = -len(unused_data)
        file_object.seek(seek_offset, os.SEEK_CUR)
        self._ResetDecompressorState()
        break
def _ReadMemberHeader(self, file_object):
"""Reads a member header.
Args:
file_object (FileIO): file-like object to read from.
Raises:
FileFormatError: if the member header cannot be read.
"""
file_offset = file_object.get_offset()
member_header = self._ReadStructure(
file_object, file_offset, self._MEMBER_HEADER_SIZE,
self._MEMBER_HEADER, 'member header')
if member_header.signature != self._GZIP_SIGNATURE:
raise errors.FileFormatError(
'Unsupported signature: 0x{0:04x}.'.format(member_header.signature))
if member_header.compression_method != self._COMPRESSION_METHOD_DEFLATE:
raise errors.FileFormatError(
'Unsupported compression method: {0:d}.'.format(
member_header.compression_method))
self.modification_time = member_header.modification_time
self.operating_system = member_header.operating_system
if member_header.flags & self._FLAG_FEXTRA:
file_offset = file_object.get_offset()
extra_field_data_size = self._ReadStructure(
file_object, file_offset, self._UINT16LE_SIZE,
self._UINT16LE, 'extra field data size')
file_object.seek(extra_field_data_size, os.SEEK_CUR)
if member_header.flags & self._FLAG_FNAME:
file_offset = file_object.get_offset()
string_value = self._ReadString(
file_object, file_offset, self._CSTRING, 'original filename')
self.original_filename = string_value.rstrip('\x00')
if member_header.flags & self._FLAG_FCOMMENT:
file_offset = file_object.get_offset()
string_value = self._ReadString(
file_object, file_offset, self._CSTRING, 'comment')
self.comment = string_value.rstrip('\x00')
if member_header.flags & self._FLAG_FHCRC:
file_object.read(2)
  def _ResetDecompressorState(self):
    """Resets the state of the internal decompression object.

    Deflate can only decompress forward, so rewinding requires throwing the
    current state away and restarting from the start of the compressed data.
    """
    self._decompressor_state = _GzipDecompressorState(
        self._compressed_data_start)
def FlushCache(self):
"""Empties the cache that holds cached decompressed data."""
self._cache = b''
self._cache_start_offset = None
self._cache_end_offset = None
self._ResetDecompressorState()
def ReadAtOffset(self, offset, size=None):
"""Reads a byte string from the gzip member at the specified offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
offset (int): offset within the uncompressed data in this member to
read from.
size (Optional[int]): maximum number of bytes to read, where None
represents all remaining data, to a maximum of the uncompressed
cache size.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
ValueError: if a negative read size or offset is specified.
"""
if size is not None and size < 0:
raise ValueError('Invalid size value {0!s}'.format(size))
if offset < 0:
raise ValueError('Invalid offset value {0!s}'.format(offset))
if size == 0 or offset >= self.uncompressed_data_size:
return b''
if self._cache_start_offset is None:
self._LoadDataIntoCache(self._file_object, offset)
if offset > self._cache_end_offset or offset < self._cache_start_offset:
self.FlushCache()
self._LoadDataIntoCache(self._file_object, offset)
cache_offset = offset - self._cache_start_offset
if not size:
return self._cache[cache_offset:]
data_end_offset = cache_offset + size
if data_end_offset > self._cache_end_offset:
return self._cache[cache_offset:]
return self._cache[cache_offset:data_end_offset]
class GzipCompressedStream(object):
  """File-like object of a gzip compressed stream (file).

  The gzip file format is defined in RFC1952: http://www.zlib.org/rfc-gzip.html

  Attributes:
    uncompressed_data_size (int): total size of the decompressed data stored
        in the gzip file.
  """

  def __init__(self):
    """Initializes a file-like object."""
    super(GzipCompressedStream, self).__init__()
    self._compressed_data_size = -1
    self._current_offset = 0
    self._file_object = None
    # Maps the end offset (exclusive) of each member's uncompressed data to
    # the member, in increasing offset order.
    self._members_by_end_offset = collections.OrderedDict()

    self.uncompressed_data_size = 0

  @property
  def members(self):
    """list(GzipMember): members in the gzip file."""
    return list(self._members_by_end_offset.values())

  def _GetMemberForOffset(self, offset):
    """Finds the member whose data includes the provided offset.

    Args:
      offset (int): offset in the uncompressed data to find the
          containing member for.

    Returns:
      GzipMember: gzip file member or None if not available.

    Raises:
      ValueError: if the provided offset is outside of the bounds of the
          uncompressed data.
    """
    if offset < 0 or offset >= self.uncompressed_data_size:
      raise ValueError('Offset {0:d} is larger than file size {1:d}.'.format(
          offset, self.uncompressed_data_size))

    # Members are keyed by increasing end offset, so the first member whose
    # end offset exceeds the requested offset contains it.
    for end_offset, member in self._members_by_end_offset.items():
      if offset < end_offset:
        return member

    return None

  def Open(self, file_object):
    """Opens the file-like object defined by path specification.

    Args:
      file_object (FileIO): file-like object that contains the gzip compressed
          stream.

    Raises:
      IOError: if the file-like object could not be opened.
      OSError: if the file-like object could not be opened.
    """
    file_size = file_object.get_size()

    file_object.seek(0, os.SEEK_SET)

    uncompressed_data_offset = 0
    next_member_offset = 0

    # Members are stored back-to-back; read each one to build the index.
    while next_member_offset < file_size:
      member = GzipMember(
          file_object, next_member_offset, uncompressed_data_offset)
      uncompressed_data_offset = (
          uncompressed_data_offset + member.uncompressed_data_size)
      self._members_by_end_offset[uncompressed_data_offset] = member
      self.uncompressed_data_size += member.uncompressed_data_size
      next_member_offset = member.member_end_offset

    self._file_object = file_object

  # Note: that the following functions do not follow the style guide
  # because they are part of the file-like object interface.
  # pylint: disable=invalid-name

  def close(self):
    """Closes the file-like object."""
    # Reset to an empty OrderedDict instead of a list so that the members
    # property and _GetMemberForOffset keep working after close().
    self._members_by_end_offset = collections.OrderedDict()
    if self._file_object:
      self._file_object = None

  def read(self, size=None):
    """Reads a byte string from the gzip file at the current offset.

    The function will read a byte string up to the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    data = b''
    while self._current_offset < self.uncompressed_data_size:
      if size is not None and len(data) >= size:
        break

      member = self._GetMemberForOffset(self._current_offset)
      member_offset = self._current_offset - member.uncompressed_data_offset

      # Only request the bytes still missing so that a read spanning
      # multiple members cannot return more than the requested size.
      if size is None:
        read_size = None
      else:
        read_size = size - len(data)
      data_read = member.ReadAtOffset(member_offset, read_size)

      if not data_read:
        break

      self._current_offset += len(data_read)
      data = b''.join([data, data_read])

    return data

  def seek(self, offset, whence=os.SEEK_SET):
    """Seeks to an offset within the file-like object.

    Args:
      offset (int): offset to seek to.
      whence (Optional(int)): value that indicates whether offset is an absolute
          or relative position within the file.

    Raises:
      IOError: if the seek failed or the file has not been opened.
      OSError: if the seek failed or the file has not been opened.
    """
    if not self._file_object:
      raise IOError('Not opened.')

    if whence == os.SEEK_CUR:
      offset += self._current_offset
    elif whence == os.SEEK_END:
      offset += self.uncompressed_data_size
    elif whence != os.SEEK_SET:
      raise IOError('Unsupported whence.')
    if offset < 0:
      raise IOError('Invalid offset value less than zero.')

    self._current_offset = offset

  def get_offset(self):
    """Retrieves the current offset into the file-like object.

    Returns:
      int: current offset into the file-like object.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._file_object:
      raise IOError('Not opened.')

    return self._current_offset

  def get_size(self):
    """Retrieves the size of the file-like object.

    Returns:
      int: size of the file-like object data.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._file_object:
      raise IOError('Not opened.')

    return self.uncompressed_data_size
| 33.452261 | 80 | 0.714286 |
import collections
import os
from dtfabric.runtime import fabric as dtfabric_fabric
from dfvfs.compression import zlib_decompressor
from dfvfs.lib import data_format
from dfvfs.lib import errors
class _GzipDecompressorState(object):
  """Deflate decompressor wrapper for reading a gzip member.

  Tracks the position of the decompressor in the compressed stream so that
  decompression can be resumed across successive Read() calls.
  """
  # Maximum number of compressed bytes read from the file per Read() call.
  _MAXIMUM_READ_SIZE = 16 * 1024 * 1024
  def __init__(self, stream_start):
    """Initializes the decompressor state.

    Args:
      stream_start (int): offset in the file object where the compressed
          data starts.
    """
    self._compressed_data = b''
    self._decompressor = zlib_decompressor.DeflateDecompressor()
    self._last_read = stream_start
    self.uncompressed_offset = 0
  def Read(self, file_object):
    """Reads and decompresses the next chunk of compressed data.

    Args:
      file_object (FileIO): file-like object to read the compressed data
          from.

    Returns:
      bytes: next chunk of uncompressed data, empty when there is no data
          left to read and decompress.
    """
    file_object.seek(self._last_read, os.SEEK_SET)
    read_data = file_object.read(self._MAXIMUM_READ_SIZE)
    self._last_read = file_object.get_offset()
    # Compressed data left over from the previous call is fed back into the
    # decompressor together with the newly read data.
    compressed_data = b''.join([self._compressed_data, read_data])
    decompressed_data, remaining_compressed_data = (
        self._decompressor.Decompress(compressed_data))
    self._compressed_data = remaining_compressed_data
    self.uncompressed_offset += len(decompressed_data)
    return decompressed_data
  def GetUnusedData(self):
    """Retrieves data read past the end of the compressed stream, if any.

    Returns:
      bytes: data the decompressor consumed beyond the end of the deflate
          stream, as reported by the underlying decompressor.
    """
    return self._decompressor.unused_data
class GzipMember(data_format.DataFormat):
  """Gzip member.

  Gzip files have no index of members, so each member must be read
  sequentially. The uncompressed data of small members is cached so that
  repeated reads do not require decompressing the member again.

  Attributes:
    comment (str): comment stored in the member header.
    member_end_offset (int): offset to the end of the member in the parent
        file object.
    member_start_offset (int): offset to the start of the member in the
        parent file object.
    modification_time (int): modification time read from the member header.
    operating_system (int): operating system value read from the member
        header.
    original_filename (str): original filename stored in the member header.
    uncompressed_data_offset (int): offset of the start of the member's
        uncompressed data in the whole, concatenated stream.
    uncompressed_data_size (int): size of the member's uncompressed data.
  """

  _DATA_TYPE_FABRIC_DEFINITION_FILE = os.path.join(
      os.path.dirname(__file__), 'gzipfile.yaml')

  with open(_DATA_TYPE_FABRIC_DEFINITION_FILE, 'rb') as file_object:
    _DATA_TYPE_FABRIC_DEFINITION = file_object.read()

  _DATA_TYPE_FABRIC = dtfabric_fabric.DataTypeFabric(
      yaml_definition=_DATA_TYPE_FABRIC_DEFINITION)

  _MEMBER_HEADER = _DATA_TYPE_FABRIC.CreateDataTypeMap(
      'gzip_member_header')
  _MEMBER_HEADER_SIZE = _MEMBER_HEADER.GetByteSize()

  _MEMBER_FOOTER = _DATA_TYPE_FABRIC.CreateDataTypeMap(
      'gzip_member_footer')
  _MEMBER_FOOTER_SIZE = _MEMBER_FOOTER.GetByteSize()

  _UINT16LE = _DATA_TYPE_FABRIC.CreateDataTypeMap('uint16le')
  _UINT16LE_SIZE = _UINT16LE.GetByteSize()

  _CSTRING = _DATA_TYPE_FABRIC.CreateDataTypeMap('cstring')

  # Value of the first two bytes of a gzip member header.
  _GZIP_SIGNATURE = 0x8b1f

  _COMPRESSION_METHOD_DEFLATE = 8

  # Flag values of the member header flags field.
  _FLAG_FTEXT = 0x01
  _FLAG_FHCRC = 0x02
  _FLAG_FEXTRA = 0x04
  _FLAG_FNAME = 0x08
  _FLAG_FCOMMENT = 0x10

  # The maximum size of the uncompressed data cache.
  _UNCOMPRESSED_DATA_CACHE_SIZE = 2 * 1024 * 1024

  def __init__(
      self, file_object, member_start_offset, uncompressed_data_offset):
    """Initializes a gzip member.

    Args:
      file_object (FileIO): file-like object that contains the gzip member.
      member_start_offset (int): offset of the start of the member in the
          file object.
      uncompressed_data_offset (int): offset of the start of the member's
          uncompressed data in the whole, concatenated stream.
    """
    self._cache = b''
    # Start and end offsets of the cached uncompressed data; the end offset
    # is exclusive.
    self._cache_end_offset = None
    self._cache_start_offset = None

    self.comment = None
    self.modification_time = None
    self.operating_system = None
    self.original_filename = None

    file_size = file_object.get_size()

    file_object.seek(member_start_offset, os.SEEK_SET)
    self._ReadMemberHeader(file_object)

    data_offset = 0
    uncompressed_data_size = 0

    compressed_data_offset = file_object.get_offset()
    decompressor_state = _GzipDecompressorState(compressed_data_offset)

    # Initialize so that a (malformed) member whose header ends exactly at
    # the end of the file cannot cause an undefined name below.
    decompressed_data = b''

    file_offset = compressed_data_offset
    while file_offset < file_size:
      data_offset += uncompressed_data_size
      decompressed_data = decompressor_state.Read(file_object)
      uncompressed_data_size += len(decompressed_data)
      unused_data = decompressor_state.GetUnusedData()
      if unused_data:
        # The decompressor read past the end of the member, rewind to the
        # end of the compressed data.
        file_object.seek(-len(unused_data), os.SEEK_CUR)
        file_offset = file_object.get_offset()
        break
      file_offset = file_object.get_offset()

    if file_offset < file_size:
      self._ReadStructure(
          file_object, file_offset, self._MEMBER_FOOTER_SIZE,
          self._MEMBER_FOOTER, 'member footer')

    member_end_offset = file_object.get_offset()

    self._file_object = file_object
    self._file_object.seek(member_start_offset, os.SEEK_SET)

    # data_offset is 0 only when the member was decompressed in a single
    # read, in which case decompressed_data holds all of its data.
    if (data_offset == 0 and
        uncompressed_data_size < self._UNCOMPRESSED_DATA_CACHE_SIZE):
      self._cache = decompressed_data
      self._cache_start_offset = 0
      self._cache_end_offset = uncompressed_data_size

    self._compressed_data_start = compressed_data_offset
    self._decompressor_state = _GzipDecompressorState(compressed_data_offset)

    self.member_start_offset = member_start_offset
    self.member_end_offset = member_end_offset
    self.uncompressed_data_size = uncompressed_data_size
    self.uncompressed_data_offset = uncompressed_data_offset

  def _GetCacheSize(self):
    """Determines the size of the uncompressed cached data.

    Returns:
      int: number of cached bytes, 0 when the cache is unset.
    """
    if None in (self._cache_start_offset, self._cache_end_offset):
      return 0
    return self._cache_end_offset - self._cache_start_offset

  def _IsCacheFull(self):
    """Checks whether the uncompressed data cache is full.

    Returns:
      bool: True if the cache holds at least the maximum cache size.
    """
    return self._GetCacheSize() >= self._UNCOMPRESSED_DATA_CACHE_SIZE

  def _LoadDataIntoCache(self, file_object, minimum_offset):
    """Reads and decompresses data into the cache.

    Args:
      file_object (FileIO): file-like object to read the compressed data
          from.
      minimum_offset (int): offset into this member's uncompressed data at
          which the cache should start.
    """
    # Decompression can only be performed from beginning to end of the stream.
    # So, if data before the current position of the decompressor in the stream
    # is required, it's necessary to throw away the current decompression
    # state and start again.
    if minimum_offset < self._decompressor_state.uncompressed_offset:
      self._ResetDecompressorState()
    cache_is_full = self._IsCacheFull()
    while not cache_is_full:
      decompressed_data = self._decompressor_state.Read(file_object)
      # decompressed_data is empty when there is no data left to read and
      # decompress.
      if not decompressed_data:
        break
      decompressed_data_length = len(decompressed_data)
      decompressed_end_offset = self._decompressor_state.uncompressed_offset
      decompressed_start_offset = (
          decompressed_end_offset - decompressed_data_length)
      data_to_add = decompressed_data
      added_data_start_offset = decompressed_start_offset
      # Discard data that lies entirely before the requested minimum offset
      # and trim data that straddles it.
      if decompressed_start_offset < minimum_offset:
        data_to_add = None
      if decompressed_start_offset < minimum_offset < decompressed_end_offset:
        data_add_offset = decompressed_end_offset - minimum_offset
        data_to_add = decompressed_data[-data_add_offset:]
        added_data_start_offset = decompressed_end_offset - data_add_offset
      if data_to_add and not cache_is_full:
        self._cache = b''.join([self._cache, data_to_add])
        if self._cache_start_offset is None:
          self._cache_start_offset = added_data_start_offset
        if self._cache_end_offset is None:
          self._cache_end_offset = self._cache_start_offset + len(data_to_add)
        else:
          self._cache_end_offset += len(data_to_add)
        cache_is_full = self._IsCacheFull()
      # If there is no more data in the member, the unused_data value is
      # populated in the decompressor. When this situation arises, rewind
      # to the end of the compressed data.
      unused_data = self._decompressor_state.GetUnusedData()
      if unused_data:
        seek_offset = -len(unused_data)
        file_object.seek(seek_offset, os.SEEK_CUR)
        self._ResetDecompressorState()
        break

  def _ReadMemberHeader(self, file_object):
    """Reads a member header.

    Args:
      file_object (FileIO): file-like object to read from.

    Raises:
      FileFormatError: if the member header cannot be read.
    """
    file_offset = file_object.get_offset()
    member_header = self._ReadStructure(
        file_object, file_offset, self._MEMBER_HEADER_SIZE,
        self._MEMBER_HEADER, 'member header')
    if member_header.signature != self._GZIP_SIGNATURE:
      raise errors.FileFormatError(
          'Unsupported signature: 0x{0:04x}.'.format(member_header.signature))
    if member_header.compression_method != self._COMPRESSION_METHOD_DEFLATE:
      raise errors.FileFormatError(
          'Unsupported compression method: {0:d}.'.format(
              member_header.compression_method))
    self.modification_time = member_header.modification_time
    self.operating_system = member_header.operating_system
    # Optional fields follow in the order defined by RFC 1952.
    if member_header.flags & self._FLAG_FEXTRA:
      file_offset = file_object.get_offset()
      extra_field_data_size = self._ReadStructure(
          file_object, file_offset, self._UINT16LE_SIZE,
          self._UINT16LE, 'extra field data size')
      file_object.seek(extra_field_data_size, os.SEEK_CUR)
    if member_header.flags & self._FLAG_FNAME:
      file_offset = file_object.get_offset()
      string_value = self._ReadString(
          file_object, file_offset, self._CSTRING, 'original filename')
      self.original_filename = string_value.rstrip('\x00')
    if member_header.flags & self._FLAG_FCOMMENT:
      file_offset = file_object.get_offset()
      string_value = self._ReadString(
          file_object, file_offset, self._CSTRING, 'comment')
      self.comment = string_value.rstrip('\x00')
    if member_header.flags & self._FLAG_FHCRC:
      # Skip over the 16-bit header CRC, it is not validated.
      file_object.read(2)

  def _ResetDecompressorState(self):
    """Resets the state of the internal decompression object."""
    self._decompressor_state = _GzipDecompressorState(
        self._compressed_data_start)

  def FlushCache(self):
    """Empties the cache that holds cached decompressed data."""
    self._cache = b''
    self._cache_start_offset = None
    self._cache_end_offset = None
    self._ResetDecompressorState()

  def ReadAtOffset(self, offset, size=None):
    """Reads a byte string from the gzip member at the specified offset.

    Args:
      offset (int): offset within the uncompressed data in this member to
          read from.
      size (Optional[int]): maximum number of bytes to read, where None
          represents all remaining data, to a maximum of the uncompressed
          cache size.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      ValueError: if a negative read size or offset is specified.
    """
    if size is not None and size < 0:
      raise ValueError('Invalid size value {0!s}'.format(size))
    if offset < 0:
      raise ValueError('Invalid offset value {0!s}'.format(offset))
    if size == 0 or offset >= self.uncompressed_data_size:
      return b''
    if self._cache_start_offset is None:
      self._LoadDataIntoCache(self._file_object, offset)
    # _cache_end_offset is exclusive, hence data at an offset equal to it is
    # not cached and the cache must be rebuilt. The previous strict
    # greater-than check returned no data in that case, truncating
    # sequential reads of members larger than the cache.
    if offset < self._cache_start_offset or offset >= self._cache_end_offset:
      self.FlushCache()
      self._LoadDataIntoCache(self._file_object, offset)
    cache_offset = offset - self._cache_start_offset
    if not size:
      return self._cache[cache_offset:]
    # The slice is truncated by Python when it extends beyond the cached
    # data, so at most the remaining cached bytes are returned.
    return self._cache[cache_offset:cache_offset + size]
class GzipCompressedStream(object):
  """File-like object of a gzip compressed stream (file).

  Attributes:
    uncompressed_data_size (int): total size of the decompressed data stored
        in the gzip file.
  """

  def __init__(self):
    """Initializes a file-like object."""
    super(GzipCompressedStream, self).__init__()
    self._compressed_data_size = -1
    self._current_offset = 0
    self._file_object = None
    # Maps the end offset (exclusive) of each member's uncompressed data to
    # the member, in increasing offset order.
    self._members_by_end_offset = collections.OrderedDict()
    self.uncompressed_data_size = 0

  @property
  def members(self):
    """list(GzipMember): members in the gzip file."""
    return list(self._members_by_end_offset.values())

  def _GetMemberForOffset(self, offset):
    """Finds the member whose uncompressed data includes the given offset.

    Args:
      offset (int): offset in the uncompressed data.

    Returns:
      GzipMember: gzip file member or None if not available.

    Raises:
      ValueError: if the offset is outside the bounds of the uncompressed
          data.
    """
    if offset < 0 or offset >= self.uncompressed_data_size:
      raise ValueError('Offset {0:d} is larger than file size {1:d}.'.format(
          offset, self.uncompressed_data_size))
    # The first member whose end offset exceeds the offset contains it.
    for end_offset, member in self._members_by_end_offset.items():
      if offset < end_offset:
        return member
    return None

  def Open(self, file_object):
    """Opens the gzip compressed stream contained in file_object.

    Args:
      file_object (FileIO): file-like object that contains the gzip
          compressed stream.

    Raises:
      IOError: if the file-like object could not be opened.
      OSError: if the file-like object could not be opened.
    """
    file_size = file_object.get_size()
    file_object.seek(0, os.SEEK_SET)
    uncompressed_data_offset = 0
    next_member_offset = 0
    # Members are stored back-to-back; read each one to build the index.
    while next_member_offset < file_size:
      member = GzipMember(
          file_object, next_member_offset, uncompressed_data_offset)
      uncompressed_data_offset = (
          uncompressed_data_offset + member.uncompressed_data_size)
      self._members_by_end_offset[uncompressed_data_offset] = member
      self.uncompressed_data_size += member.uncompressed_data_size
      next_member_offset = member.member_end_offset
    self._file_object = file_object

  # Note: that the following functions do not follow the style guide
  # because they are part of the file-like object interface.
  # pylint: disable=invalid-name

  def close(self):
    """Closes the file-like object."""
    # Reset to an empty OrderedDict instead of a list so that the members
    # property and _GetMemberForOffset keep working after close().
    self._members_by_end_offset = collections.OrderedDict()
    if self._file_object:
      self._file_object = None

  def read(self, size=None):
    """Reads a byte string from the gzip file at the current offset.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    data = b''
    while self._current_offset < self.uncompressed_data_size:
      if size is not None and len(data) >= size:
        break
      member = self._GetMemberForOffset(self._current_offset)
      member_offset = self._current_offset - member.uncompressed_data_offset
      # Only request the bytes still missing so that a read spanning
      # multiple members cannot return more than the requested size.
      if size is None:
        read_size = None
      else:
        read_size = size - len(data)
      data_read = member.ReadAtOffset(member_offset, read_size)
      if not data_read:
        break
      self._current_offset += len(data_read)
      data = b''.join([data, data_read])
    return data

  def seek(self, offset, whence=os.SEEK_SET):
    """Seeks to an offset within the file-like object.

    Args:
      offset (int): offset to seek to.
      whence (Optional[int]): indicates whether offset is absolute or
          relative.

    Raises:
      IOError: if the seek failed or the file has not been opened.
      OSError: if the seek failed or the file has not been opened.
    """
    if not self._file_object:
      raise IOError('Not opened.')
    if whence == os.SEEK_CUR:
      offset += self._current_offset
    elif whence == os.SEEK_END:
      offset += self.uncompressed_data_size
    elif whence != os.SEEK_SET:
      raise IOError('Unsupported whence.')
    if offset < 0:
      raise IOError('Invalid offset value less than zero.')
    self._current_offset = offset

  def get_offset(self):
    """Retrieves the current offset into the file-like object.

    Returns:
      int: current offset into the file-like object.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._file_object:
      raise IOError('Not opened.')
    return self._current_offset

  def get_size(self):
    """Retrieves the size of the file-like object.

    Returns:
      int: size of the uncompressed data.

    Raises:
      IOError: if the file-like object has not been opened.
      OSError: if the file-like object has not been opened.
    """
    if not self._file_object:
      raise IOError('Not opened.')
    return self.uncompressed_data_size
| true | true |
f72b027333bbe2d8bc09150e018d4e2a3f9db7df | 11,472 | py | Python | vspk/v4_0/nustaticroute.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nustaticroute.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | vspk/v4_0/nustaticroute.py | mohaimenhasan/vspk-python | 4c7b297427048340b250cc3c74d9214dc0d4bde1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUStaticRoute(NURESTObject):
    """ Represents a StaticRoute in the VSD.
        Notes:
            Static routes allow end users to define how traffic is routed through the dVRS in addition to the routes learned by VSC through VM activation. For example, all traffic destined to a specific subnet can be forwarded to a specific VM attached in the dVRS (e.g. a firewall).
    """
    __rest_name__ = "staticroute"
    __resource_name__ = "staticroutes"
    ## Constants used as values for entity_scope, ip_type and type.
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_TYPE_OVERLAY = "OVERLAY"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_IP_TYPE_IPV6 = "IPV6"
    CONST_IP_TYPE_IPV4 = "IPV4"
    CONST_TYPE_EXIT_DOMAIN = "EXIT_DOMAIN"
    CONST_IP_TYPE_DUALSTACK = "DUALSTACK"
    def __init__(self, **kwargs):
        """ Initializes a StaticRoute instance.
            Notes:
                All exposed attributes can be passed as keyword arguments. A
                special argument named `data` loads the object from a Python
                dictionary.
            Examples:
                >>> staticroute = NUStaticRoute(id=u'xxxx-xxx-xxx-xxx', name=u'StaticRoute')
                >>> staticroute = NUStaticRoute(data=my_dict)
        """
        super(NUStaticRoute, self).__init__()
        # Read/Write Attributes
        self._ip_type = None
        self._ipv6_address = None
        self._last_updated_by = None
        self._address = None
        self._netmask = None
        self._next_hop_ip = None
        self._entity_scope = None
        self._route_distinguisher = None
        self._external_id = None
        self._type = None
        self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'DUALSTACK', u'IPV4', u'IPV6'])
        self.expose_attribute(local_name="ipv6_address", remote_name="IPv6Address", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="netmask", remote_name="netmask", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="next_hop_ip", remote_name="nextHopIp", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="route_distinguisher", remote_name="routeDistinguisher", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False, choices=[u'EXIT_DOMAIN', u'OVERLAY'])
        # Fetchers for child objects.
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self._compute_args(**kwargs)
    # Properties
    @property
    def ip_type(self):
        """ IP type of the route: IPv4 or IPv6 (`IPType` in the VSD API). """
        return self._ip_type
    @ip_type.setter
    def ip_type(self, value):
        """ Set the `IPType` value. """
        self._ip_type = value
    @property
    def ipv6_address(self):
        """ IPv6 address of the route (`IPv6Address` in the VSD API). """
        return self._ipv6_address
    @ipv6_address.setter
    def ipv6_address(self, value):
        """ Set the `IPv6Address` value. """
        self._ipv6_address = value
    @property
    def last_updated_by(self):
        """ ID of the user who last updated the object (`lastUpdatedBy` in the VSD API). """
        return self._last_updated_by
    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set the `lastUpdatedBy` value. """
        self._last_updated_by = value
    @property
    def address(self):
        """ IP address of the route. """
        return self._address
    @address.setter
    def address(self, value):
        """ Set the `address` value. """
        self._address = value
    @property
    def netmask(self):
        """ Netmask associated with the route. """
        return self._netmask
    @netmask.setter
    def netmask(self, value):
        """ Set the `netmask` value. """
        self._netmask = value
    @property
    def next_hop_ip(self):
        """ IP address of the next hop; must be a VM attached to the dVRS (`nextHopIp` in the VSD API). """
        return self._next_hop_ip
    @next_hop_ip.setter
    def next_hop_ip(self, value):
        """ Set the `nextHopIp` value. """
        self._next_hop_ip = value
    @property
    def entity_scope(self):
        """ Scope of the entity: Data center or Enterprise level (`entityScope` in the VSD API). """
        return self._entity_scope
    @entity_scope.setter
    def entity_scope(self, value):
        """ Set the `entityScope` value. """
        self._entity_scope = value
    @property
    def route_distinguisher(self):
        """ Route distinguisher associated with the next hop, generated by the system (`routeDistinguisher` in the VSD API). """
        return self._route_distinguisher
    @route_distinguisher.setter
    def route_distinguisher(self, value):
        """ Set the `routeDistinguisher` value. """
        self._route_distinguisher = value
    @property
    def external_id(self):
        """ External object ID, used for integration with third party systems (`externalID` in the VSD API). """
        return self._external_id
    @external_id.setter
    def external_id(self, value):
        """ Set the `externalID` value. """
        self._external_id = value
    @property
    def type(self):
        """ Type flag for static-route provisioning for exit-domain (break-to-underlay) prefixes. """
        return self._type
    @type.setter
    def type(self, value):
        """ Set the `type` value. """
        self._type = value
| 29.720207 | 369 | 0.602772 |
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher
from .fetchers import NUEventLogsFetcher
from bambou import NURESTObject
class NUStaticRoute(NURESTObject):
    """ Represents a StaticRoute in the VSD.

        Static routes define how traffic is routed through the dVRS in
        addition to the routes learned by VSC through VM activation.
    """

    __rest_name__ = "staticroute"
    __resource_name__ = "staticroutes"

    # Constants used as values for entity_scope, ip_type and type.
    # Note: the leading 'C' of CONST_ENTITY_SCOPE_GLOBAL was missing
    # ("ONST_..."), which left the constant undefined under its conventional
    # name while all sibling constants use the CONST_ prefix.
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_TYPE_OVERLAY = "OVERLAY"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_IP_TYPE_IPV6 = "IPV6"
    CONST_IP_TYPE_IPV4 = "IPV4"
    CONST_TYPE_EXIT_DOMAIN = "EXIT_DOMAIN"
    CONST_IP_TYPE_DUALSTACK = "DUALSTACK"

    def __init__(self, **kwargs):
        """ Initializes a StaticRoute instance.

            All exposed attributes can be passed as keyword arguments. A
            special argument named `data` loads the object from a Python
            dictionary.
        """
        super(NUStaticRoute, self).__init__()
        # Read/write attributes backing the exposed properties.
        self._ip_type = None
        self._ipv6_address = None
        self._last_updated_by = None
        self._address = None
        self._netmask = None
        self._next_hop_ip = None
        self._entity_scope = None
        self._route_distinguisher = None
        self._external_id = None
        self._type = None
        self.expose_attribute(local_name="ip_type", remote_name="IPType", attribute_type=str, is_required=False, is_unique=False, choices=[u'DUALSTACK', u'IPV4', u'IPV6'])
        self.expose_attribute(local_name="ipv6_address", remote_name="IPv6Address", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="address", remote_name="address", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="netmask", remote_name="netmask", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="next_hop_ip", remote_name="nextHopIp", attribute_type=str, is_required=True, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="route_distinguisher", remote_name="routeDistinguisher", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)
        self.expose_attribute(local_name="type", remote_name="type", attribute_type=str, is_required=False, is_unique=False, choices=[u'EXIT_DOMAIN', u'OVERLAY'])
        # Fetchers for child objects.
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.event_logs = NUEventLogsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self._compute_args(**kwargs)

    @property
    def ip_type(self):
        """ IP type of the route: IPv4 or IPv6 (`IPType` in the VSD API). """
        return self._ip_type

    @ip_type.setter
    def ip_type(self, value):
        """ Set the `IPType` value. """
        self._ip_type = value

    @property
    def ipv6_address(self):
        """ IPv6 address of the route (`IPv6Address` in the VSD API). """
        return self._ipv6_address

    @ipv6_address.setter
    def ipv6_address(self, value):
        """ Set the `IPv6Address` value. """
        self._ipv6_address = value

    @property
    def last_updated_by(self):
        """ ID of the user who last updated the object (`lastUpdatedBy` in the VSD API). """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set the `lastUpdatedBy` value. """
        self._last_updated_by = value

    @property
    def address(self):
        """ IP address of the route. """
        return self._address

    @address.setter
    def address(self, value):
        """ Set the `address` value. """
        self._address = value

    @property
    def netmask(self):
        """ Netmask associated with the route. """
        return self._netmask

    @netmask.setter
    def netmask(self, value):
        """ Set the `netmask` value. """
        self._netmask = value

    @property
    def next_hop_ip(self):
        """ IP address of the next hop; must be a VM attached to the dVRS (`nextHopIp` in the VSD API). """
        return self._next_hop_ip

    @next_hop_ip.setter
    def next_hop_ip(self, value):
        """ Set the `nextHopIp` value. """
        self._next_hop_ip = value

    @property
    def entity_scope(self):
        """ Scope of the entity: Data center or Enterprise level (`entityScope` in the VSD API). """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set the `entityScope` value. """
        self._entity_scope = value

    @property
    def route_distinguisher(self):
        """ Route distinguisher associated with the next hop, generated by the system (`routeDistinguisher` in the VSD API). """
        return self._route_distinguisher

    @route_distinguisher.setter
    def route_distinguisher(self, value):
        """ Set the `routeDistinguisher` value. """
        self._route_distinguisher = value

    @property
    def external_id(self):
        """ External object ID, used for integration with third party systems (`externalID` in the VSD API). """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set the `externalID` value. """
        self._external_id = value

    @property
    def type(self):
        """ Type flag for static-route provisioning for exit-domain (break-to-underlay) prefixes. """
        return self._type

    @type.setter
    def type(self, value):
        """ Set the `type` value. """
        self._type = value
| true | true |
f72b045654dc44f3155f6d877133a3202b759449 | 5,054 | py | Python | python-lib/dku_error_analysis_mpp/dku_error_visualizer.py | dataiku/dss-plugin-model-error-analysis | 4c0f42a5c0aa1710005db3d81ca9bd9d7f829e6b | [
"Apache-2.0"
] | null | null | null | python-lib/dku_error_analysis_mpp/dku_error_visualizer.py | dataiku/dss-plugin-model-error-analysis | 4c0f42a5c0aa1710005db3d81ca9bd9d7f829e6b | [
"Apache-2.0"
] | 2 | 2021-09-29T15:08:25.000Z | 2022-01-13T11:20:58.000Z | python-lib/dku_error_analysis_mpp/dku_error_visualizer.py | dataiku/dss-plugin-model-error-analysis | 4c0f42a5c0aa1710005db3d81ca9bd9d7f829e6b | [
"Apache-2.0"
] | 1 | 2021-09-10T12:25:08.000Z | 2021-09-10T12:25:08.000Z | # -*- coding: utf-8 -*-
import numpy as np
from graphviz import Source
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from dku_error_analysis_mpp.dku_error_analyzer import DkuErrorAnalyzer
from mealy import _BaseErrorVisualizer, ErrorAnalyzerConstants
from dku_error_analysis_utils import safe_str, format_float
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='Error Analysis Plugin | %(levelname)s - %(message)s')
plt.rc('font', family="sans-serif")
SMALL_SIZE, MEDIUM_SIZE, BIGGER_SIZE = 8, 10, 12
plt.rc('axes', titlesize=BIGGER_SIZE, labelsize=MEDIUM_SIZE)
plt.rc('xtick', labelsize=SMALL_SIZE)
plt.rc('ytick', labelsize=SMALL_SIZE)
plt.rc('legend', fontsize=SMALL_SIZE)
plt.rc("hatch", color="white", linewidth=4)
class DkuErrorVisualizer(_BaseErrorVisualizer):
    """Visual utilities for inspecting the error classifier of a
    :class:`DkuErrorAnalyzer`.

    Wraps the analyzer's interactive error tree and renders it either as
    a graphviz diagram (``plot_error_tree``) or as per-leaf feature
    distribution plots compared against the global baseline
    (``plot_feature_distributions_on_leaves``).
    """
    def __init__(self, error_analyzer):
        # Only DkuErrorAnalyzer exposes the interactive ``tree`` used below.
        if not isinstance(error_analyzer, DkuErrorAnalyzer):
            raise TypeError('You need to input a DkuErrorAnalyzer object.')
        super(DkuErrorVisualizer, self).__init__(error_analyzer)
        self._tree = error_analyzer.tree
    def plot_error_tree(self, size=(50, 50)):
        """Return the error decision tree as a graphviz ``Source`` object.

        Args:
            size (tuple): Size of the output plot as (width, length), in inches.
        """
        return Source(self._tree.to_dot_string(size))
    def plot_feature_distributions_on_leaves(self, leaf_selector=None, top_k_features=ErrorAnalyzerConstants.TOP_K_FEATURES,
                                             show_global=True, show_class=False, rank_leaves_by="total_error_fraction", nr_bins=10, figsize=(15, 10)):
        """Plot, for each selected leaf, the distribution of the top-ranked
        features, optionally overlaid with the global (root) distribution.

        Args:
            leaf_selector: which leaves to plot (passed to
                ``_get_ranked_leaf_ids``); None selects per the ranking.
            top_k_features (int): number of ranked features to plot per leaf.
            show_global (bool): overlay the root-node distribution as baseline.
            show_class (bool): split histograms per target class instead of
                per leaf prediction.
            rank_leaves_by (str): ranking criterion for leaf selection.
            nr_bins (int): number of histogram bins.
            figsize (tuple): matplotlib figure size per plot.
        """
        leaf_nodes = self._get_ranked_leaf_ids(leaf_selector, rank_leaves_by)
        ranked_features = self._tree.ranked_features[:top_k_features]
        nr_leaves, nr_features = len(leaf_nodes), len(ranked_features)
        logger.info("{} lea{} selected: {}".format(nr_leaves,
                                                   "f" if nr_leaves == 1 else "ves",
                                                   leaf_nodes))
        logger.info("{} feature distribution{} plotted: {}".format(nr_features,
                                                                   "" if nr_features == 1 else "s",
                                                                   [f["name"] for f in ranked_features]))
        for leaf_id in leaf_nodes:
            leaf = self._tree.get_node(leaf_id)
            # NOTE(review): assumes every leaf carries at least two entries in
            # ``probabilities`` (binary error/no-error split) -- confirm.
            suptitle = 'Leaf {} ({}: {}'.format(leaf.id, leaf.probabilities[0][0], format_float(leaf.probabilities[0][1], 3))
            suptitle += ', {}: {})'.format(leaf.probabilities[1][0], format_float(leaf.probabilities[1][1], 3))
            for feature in ranked_features:
                feature_name = feature["name"]
                leaf_stats = self._tree.get_stats(leaf.id, feature_name, nr_bins)
                feature_is_numerical = feature["numerical"]
                # Numerical features are binned by edges, categoricals by value.
                bins = leaf_stats["bin_edge"] if feature_is_numerical else leaf_stats["bin_value"]
                if show_global:
                    root_samples = self._tree.get_node(0).samples[0]
                    # Root stats are re-binned with the leaf's bins so the two
                    # histograms are directly comparable.
                    root_stats = self._tree.get_stats(0, feature_name, nr_bins, bins) # TODO: optimize
                    if show_class:
                        root_hist_data = {}
                        for class_value, bar_heights in root_stats["target_distrib"].items():
                            root_hist_data[class_value] = np.array(bar_heights)/root_samples
                    else:
                        root_hist_data, root_prediction = {}, self._tree.get_node(0).prediction
                        root_hist_data[root_prediction] = np.array(root_stats["count"])/root_samples
                else:
                    root_hist_data = None
                if bins:
                    leaf_hist_data = {}
                    if show_class:
                        for class_value, bar_heights in leaf_stats["target_distrib"].items():
                            leaf_hist_data[class_value] = np.array(bar_heights)/leaf.samples[0]
                    else:
                        leaf_hist_data = {leaf.prediction: np.array(leaf_stats["count"])/leaf.samples[0]}
                else:
                    # Leaf has no values for this feature: plot baseline only.
                    leaf_hist_data = None
                    logger.info("No values for the feature {} at the leaf {}".format(feature_name, leaf.id))
                if show_global:
                    # Fall back to the root's bins (covers the empty-leaf case).
                    bins = root_stats["bin_edge"] if feature_is_numerical else root_stats["bin_value"]
                x_ticks = range(len(bins))
                _BaseErrorVisualizer._add_new_plot(figsize, bins, x_ticks, feature_name, suptitle)
                _BaseErrorVisualizer._plot_feature_distribution(x_ticks, feature_is_numerical, leaf_hist_data, root_hist_data)
        plt.show()
| 49.54902 | 149 | 0.609616 |
import numpy as np
from graphviz import Source
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
from dku_error_analysis_mpp.dku_error_analyzer import DkuErrorAnalyzer
from mealy import _BaseErrorVisualizer, ErrorAnalyzerConstants
from dku_error_analysis_utils import safe_str, format_float
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO, format='Error Analysis Plugin | %(levelname)s - %(message)s')
plt.rc('font', family="sans-serif")
SMALL_SIZE, MEDIUM_SIZE, BIGGER_SIZE = 8, 10, 12
plt.rc('axes', titlesize=BIGGER_SIZE, labelsize=MEDIUM_SIZE)
plt.rc('xtick', labelsize=SMALL_SIZE)
plt.rc('ytick', labelsize=SMALL_SIZE)
plt.rc('legend', fontsize=SMALL_SIZE)
plt.rc("hatch", color="white", linewidth=4)
class DkuErrorVisualizer(_BaseErrorVisualizer):
def __init__(self, error_analyzer):
if not isinstance(error_analyzer, DkuErrorAnalyzer):
raise TypeError('You need to input a DkuErrorAnalyzer object.')
super(DkuErrorVisualizer, self).__init__(error_analyzer)
self._tree = error_analyzer.tree
def plot_error_tree(self, size=(50, 50)):
return Source(self._tree.to_dot_string(size))
def plot_feature_distributions_on_leaves(self, leaf_selector=None, top_k_features=ErrorAnalyzerConstants.TOP_K_FEATURES,
show_global=True, show_class=False, rank_leaves_by="total_error_fraction", nr_bins=10, figsize=(15, 10)):
leaf_nodes = self._get_ranked_leaf_ids(leaf_selector, rank_leaves_by)
ranked_features = self._tree.ranked_features[:top_k_features]
nr_leaves, nr_features = len(leaf_nodes), len(ranked_features)
logger.info("{} lea{} selected: {}".format(nr_leaves,
"f" if nr_leaves == 1 else "ves",
leaf_nodes))
logger.info("{} feature distribution{} plotted: {}".format(nr_features,
"" if nr_features == 1 else "s",
[f["name"] for f in ranked_features]))
for leaf_id in leaf_nodes:
leaf = self._tree.get_node(leaf_id)
suptitle = 'Leaf {} ({}: {}'.format(leaf.id, leaf.probabilities[0][0], format_float(leaf.probabilities[0][1], 3))
suptitle += ', {}: {})'.format(leaf.probabilities[1][0], format_float(leaf.probabilities[1][1], 3))
for feature in ranked_features:
feature_name = feature["name"]
leaf_stats = self._tree.get_stats(leaf.id, feature_name, nr_bins)
feature_is_numerical = feature["numerical"]
bins = leaf_stats["bin_edge"] if feature_is_numerical else leaf_stats["bin_value"]
if show_global:
root_samples = self._tree.get_node(0).samples[0]
root_stats = self._tree.get_stats(0, feature_name, nr_bins, bins)
if show_class:
root_hist_data = {}
for class_value, bar_heights in root_stats["target_distrib"].items():
root_hist_data[class_value] = np.array(bar_heights)/root_samples
else:
root_hist_data, root_prediction = {}, self._tree.get_node(0).prediction
root_hist_data[root_prediction] = np.array(root_stats["count"])/root_samples
else:
root_hist_data = None
if bins:
leaf_hist_data = {}
if show_class:
for class_value, bar_heights in leaf_stats["target_distrib"].items():
leaf_hist_data[class_value] = np.array(bar_heights)/leaf.samples[0]
else:
leaf_hist_data = {leaf.prediction: np.array(leaf_stats["count"])/leaf.samples[0]}
else:
leaf_hist_data = None
logger.info("No values for the feature {} at the leaf {}".format(feature_name, leaf.id))
if show_global:
bins = root_stats["bin_edge"] if feature_is_numerical else root_stats["bin_value"]
x_ticks = range(len(bins))
_BaseErrorVisualizer._add_new_plot(figsize, bins, x_ticks, feature_name, suptitle)
_BaseErrorVisualizer._plot_feature_distribution(x_ticks, feature_is_numerical, leaf_hist_data, root_hist_data)
plt.show()
| true | true |
f72b0480495825ee249d8a39b4e17d79b9ad98f0 | 1,812 | py | Python | scan_meta.py | wangzishuo111/bk_zhangdan | 30be7d92c53de4f18d90c00aba1ee73073f47029 | [
"MIT"
] | null | null | null | scan_meta.py | wangzishuo111/bk_zhangdan | 30be7d92c53de4f18d90c00aba1ee73073f47029 | [
"MIT"
] | null | null | null | scan_meta.py | wangzishuo111/bk_zhangdan | 30be7d92c53de4f18d90c00aba1ee73073f47029 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import base64
import httplib
import json
from config import *
from base.log import *
import happybase
from base.timer import Timer
from base import util
__thrift_host = GET_CONF('hbase_thrift', 'host')
__thrift_port = int(GET_CONF('hbase_thrift', 'port'))
thrift_conn = None
def get_thrift_conn():
    """Return the module-wide HBase Thrift connection, creating it lazily."""
    global thrift_conn
    if thrift_conn:
        return thrift_conn
    # First use: open a connection to the configured Thrift endpoint.
    thrift_conn = happybase.Connection(__thrift_host, __thrift_port)
    return thrift_conn
def thrift_reconn():
    """Tear down the cached Thrift connection and open a fresh one.

    Called by ``get()`` after a failed row fetch, on the assumption the
    failure was a stale connection.
    NOTE(review): assumes ``get_thrift_conn()`` ran at least once;
    otherwise ``thrift_conn`` is None and ``.close()`` raises -- confirm.
    """
    global thrift_conn
    thrift_conn.close()
    thrift_conn = happybase.Connection(__thrift_host, __thrift_port)
def _get(table, row, column_family, column):
    """Fetch one HBase row, optionally restricted to a column family or a
    single ``family:qualifier`` column.

    Args:
        table: table name (str) to open on the shared connection.
        row: rowkey to fetch.
        column_family: optional column family filter.
        column: optional qualifier; only honored together with the family.
    Returns:
        dict mapping column names to cell values (happybase row format).
    """
    conn = get_thrift_conn()
    # Renamed local: the original rebound the *table* parameter to the
    # happybase Table object, shadowing the table-name argument.
    htable = conn.table(table)
    if column_family and column:
        return htable.row(row, columns=['%s:%s' % (column_family, column)])
    if column_family:
        return htable.row(row, columns=['%s' % column_family])
    return htable.row(row)
def get(table, row, column_family=None, column=None):
    """Read a row via :func:`_get`, retrying once on a fresh connection.

    A stale Thrift connection (e.g. after an HBase/Thrift restart) raises
    on the first attempt; reconnect and retry exactly once, then let any
    second failure propagate.
    """
    try:
        ret = _get(table, row, column_family, column)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed by the retry path.
        thrift_reconn()
        ret = _get(table, row, column_family, column)
    return ret
def get_col(table, row, column_family, column):
    """Convenience wrapper around :func:`get` with family and column required."""
    return get(table, row, column_family, column)
def main(task_id):
conn = get_thrift_conn()
table = conn.table('file_meta_prd_v1')
count = 0
for i in range(10):
start_row = str(i) + '-' + task_id
stop_row = str(i) + '-' + task_id + '~'
for row_data in table.scan(row_start = start_row, row_stop = stop_row, batch_size = 1):
rowkey = row_data[0]
if rowkey.endswith('09_000_png'):
print rowkey
if rowkey.endswith('006_webp'):
print rowkey
print 'total count:', count
if __name__ == '__main__':
    # NOTE(review): ``sys`` is not imported explicitly in this file; it is
    # presumably re-exported by one of the ``from ... import *`` lines
    # above (config / base.log) -- confirm, else this raises NameError.
    task_id = sys.argv[1]
    main(task_id);
| 24.16 | 89 | 0.712472 |
import base64
import httplib
import json
from config import *
from base.log import *
import happybase
from base.timer import Timer
from base import util
__thrift_host = GET_CONF('hbase_thrift', 'host')
__thrift_port = int(GET_CONF('hbase_thrift', 'port'))
thrift_conn = None
def get_thrift_conn():
global thrift_conn
if not thrift_conn:
thrift_conn = happybase.Connection(__thrift_host, __thrift_port)
return thrift_conn
def thrift_reconn():
global thrift_conn
thrift_conn.close()
thrift_conn = happybase.Connection(__thrift_host, __thrift_port)
def _get(table, row, column_family, column):
conn = get_thrift_conn()
table = conn.table(table)
if column_family and column:
columns = []
columns.append('%s:%s' % (column_family, column))
return table.row(row, columns = columns)
elif column_family:
columns = []
columns.append('%s' % (column_family))
return table.row(row, columns = columns)
else:
return table.row(row)
def get(table, row, column_family = None, column = None):
try:
ret = _get(table, row, column_family, column)
except:
thrift_reconn()
ret = _get(table, row, column_family, column)
return ret
def get_col(table, row, column_family, column):
return get(table, row, column_family, column)
def main(task_id):
conn = get_thrift_conn()
table = conn.table('file_meta_prd_v1')
count = 0
for i in range(10):
start_row = str(i) + '-' + task_id
stop_row = str(i) + '-' + task_id + '~'
for row_data in table.scan(row_start = start_row, row_stop = stop_row, batch_size = 1):
rowkey = row_data[0]
if rowkey.endswith('09_000_png'):
print rowkey
if rowkey.endswith('006_webp'):
print rowkey
print 'total count:', count
if __name__ == '__main__':
task_id = sys.argv[1]
main(task_id);
| false | true |
f72b04ab534d3991395505fbd9524526beed8f88 | 5,288 | py | Python | seahub/api2/endpoints/draft_reviewer.py | odontomachus/seahub | 5b6f2153921da21a473d9ff20ce443d40efc93ab | [
"Apache-2.0"
] | null | null | null | seahub/api2/endpoints/draft_reviewer.py | odontomachus/seahub | 5b6f2153921da21a473d9ff20ce443d40efc93ab | [
"Apache-2.0"
] | 6 | 2019-12-13T09:55:45.000Z | 2022-03-11T23:47:29.000Z | seahub/api2/endpoints/draft_reviewer.py | odontomachus/seahub | 5b6f2153921da21a473d9ff20ce443d40efc93ab | [
"Apache-2.0"
] | 1 | 2019-05-16T06:58:16.000Z | 2019-05-16T06:58:16.000Z | # Copyright (c) 2012-2016 Seafile Ltd.
import posixpath
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.utils.translation import ugettext as _
from seaserv import seafile_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error, user_to_dict
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.base.accounts import User
from seahub.tags.models import FileUUIDMap
from seahub.views import check_folder_permission
from seahub.utils import is_valid_username
from seahub.drafts.models import Draft, DraftReviewer
from seahub.drafts.signals import request_reviewer_successful
class DraftReviewerView(APIView):
    """List, add and remove the reviewers of a draft.

    GET    -- return the draft's reviewers as serialized user dicts.
    POST   -- ask one or more users to review the draft.
    DELETE -- withdraw the review request from a single user.
    """
    authentication_classes = (TokenAuthentication, SessionAuthentication)
    permission_classes = (IsAuthenticated, )
    throttle_classes = (UserRateThrottle, )

    def get(self, request, pk, format=None):
        """Return all reviewers of draft ``pk``."""
        try:
            d = Draft.objects.get(pk=pk)
        except Draft.DoesNotExist:
            return api_error(status.HTTP_404_NOT_FOUND,
                             'Draft %s not found' % pk)
        # Optional avatar size for the serialized user info; tolerate junk.
        try:
            avatar_size = int(request.GET.get('avatar_size', 32))
        except ValueError:
            avatar_size = 32
        reviewers = [user_to_dict(r.reviewer, request=request, avatar_size=avatar_size)
                     for r in d.draftreviewer_set.all()]
        return Response({'reviewers': reviewers})

    def post(self, request, pk, format=None):
        """Add draft reviewers.

        Accepts repeated ``reviewer`` form values; each candidate is
        validated independently and reported under ``success``/``failed``.
        """
        try:
            d = Draft.objects.get(pk=pk)
        except Draft.DoesNotExist:
            return api_error(status.HTTP_404_NOT_FOUND,
                             'Draft %s not found' % pk)
        result = {'failed': [], 'success': []}
        # The origin file path is identical for every candidate, so resolve
        # it lazily on first need instead of once per reviewer (the lookup
        # hits the database).
        origin_file_path = None
        for reviewer in request.data.getlist('reviewer'):
            if not is_valid_username(reviewer):
                result['failed'].append({
                    'email': reviewer,
                    'error_msg': _(u'username invalid.')
                })
                continue
            try:
                User.objects.get(email=reviewer)
            except User.DoesNotExist:
                result['failed'].append({
                    'email': reviewer,
                    'error_msg': _(u'User %s not found.') % reviewer
                })
                continue
            # The draft owner cannot be asked to review their own draft.
            if reviewer == d.username:
                result['failed'].append({
                    'email': reviewer,
                    'error_msg': 'Draft can not be asked owner to review.'
                })
                continue
            if origin_file_path is None:
                uuid = FileUUIDMap.objects.get_fileuuidmap_by_uuid(d.origin_file_uuid)
                origin_file_path = posixpath.join(uuid.parent_path, uuid.filename)
            # Reviewers need read-write access to the original file.
            if seafile_api.check_permission_by_path(d.origin_repo_id, origin_file_path, reviewer) != 'rw':
                result['failed'].append({
                    'email': reviewer,
                    'error_msg': _(u'Permission denied.')
                })
                continue
            if DraftReviewer.objects.filter(draft=d, reviewer=reviewer):
                result['failed'].append({
                    'email': reviewer,
                    'error_msg': u'Reviewer %s has existed.' % reviewer
                })
                continue
            result['success'].append({
                "user_info": {
                    "name": reviewer,
                    "nickname": email2nickname(reviewer)
                }
            })
            DraftReviewer.objects.add(reviewer, d)
            request_reviewer_successful.send(sender=None, from_user=request.user.username,
                                             to_user=reviewer, draft_id=d.id)
        return Response(result)

    def delete(self, request, pk):
        """Remove the reviewer given by the ``username`` query parameter."""
        try:
            d = Draft.objects.get(pk=pk)
        except Draft.DoesNotExist:
            return api_error(status.HTTP_404_NOT_FOUND,
                             'Draft %s not found' % pk)
        if check_folder_permission(request, d.origin_repo_id, '/') is None:
            return api_error(status.HTTP_403_FORBIDDEN, 'Permission denied.')
        reviewer = request.GET.get('username')
        if reviewer is None:
            return api_error(status.HTTP_400_BAD_REQUEST, 'Email %s invalid.' % reviewer)
        try:
            reviewer = DraftReviewer.objects.get(reviewer=reviewer, draft=d)
        except DraftReviewer.DoesNotExist:
            # Already absent: treat as success.
            # BUGFIX: was ``Response(status.HTTP_200_OK)``, which passed the
            # integer 200 as the response *body* (DRF's first positional
            # argument is ``data``); pass it as the status instead.
            return Response(status=status.HTTP_200_OK)
        reviewer.delete()
        return Response(status=status.HTTP_200_OK)
| 34.562092 | 106 | 0.587368 |
import posixpath
from rest_framework import status
from rest_framework.authentication import SessionAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from django.utils.translation import ugettext as _
from seaserv import seafile_api
from seahub.api2.authentication import TokenAuthentication
from seahub.api2.throttling import UserRateThrottle
from seahub.api2.utils import api_error, user_to_dict
from seahub.base.templatetags.seahub_tags import email2nickname
from seahub.base.accounts import User
from seahub.tags.models import FileUUIDMap
from seahub.views import check_folder_permission
from seahub.utils import is_valid_username
from seahub.drafts.models import Draft, DraftReviewer
from seahub.drafts.signals import request_reviewer_successful
class DraftReviewerView(APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated, )
throttle_classes = (UserRateThrottle, )
def get(self, request, pk, format=None):
try:
d = Draft.objects.get(pk=pk)
except Draft.DoesNotExist:
return api_error(status.HTTP_404_NOT_FOUND,
'Draft %s not found' % pk)
try:
avatar_size = int(request.GET.get('avatar_size', 32))
except ValueError:
avatar_size = 32
reviewers = []
for x in d.draftreviewer_set.all():
reviewer = user_to_dict(x.reviewer, request=request, avatar_size=avatar_size)
reviewers.append(reviewer)
return Response({'reviewers': reviewers})
def post(self, request, pk, format=None):
try:
d = Draft.objects.get(pk=pk)
except Draft.DoesNotExist:
return api_error(status.HTTP_404_NOT_FOUND,
'Draft %s not found' % pk)
result = {}
result['failed'] = []
result['success'] = []
reviewers = request.data.getlist('reviewer')
for reviewer in reviewers:
if not is_valid_username(reviewer):
result['failed'].append({
'email': reviewer,
'error_msg': _(u'username invalid.')
})
continue
try:
User.objects.get(email=reviewer)
except User.DoesNotExist:
result['failed'].append({
'email': reviewer,
'error_msg': _(u'User %s not found.') % reviewer
})
continue
if reviewer == d.username:
error_msg = 'Draft can not be asked owner to review.'
result['failed'].append({
'email': reviewer,
'error_msg': error_msg
})
continue
uuid = FileUUIDMap.objects.get_fileuuidmap_by_uuid(d.origin_file_uuid)
origin_file_path = posixpath.join(uuid.parent_path, uuid.filename)
# check perm
if seafile_api.check_permission_by_path(d.origin_repo_id, origin_file_path, reviewer) != 'rw':
error_msg = _(u'Permission denied.')
result['failed'].append({
'email': reviewer,
'error_msg': error_msg
})
continue
if DraftReviewer.objects.filter(draft=d, reviewer=reviewer):
error_msg = u'Reviewer %s has existed.' % reviewer
result['failed'].append({
'email': reviewer,
'error_msg': error_msg
})
continue
result['success'].append({
"user_info": {
"name": reviewer,
"nickname": email2nickname(reviewer)
}
})
DraftReviewer.objects.add(reviewer, d)
request_reviewer_successful.send(sender=None, from_user=request.user.username,
to_user=reviewer, draft_id=d.id)
return Response(result)
def delete(self, request, pk):
try:
d = Draft.objects.get(pk=pk)
except Draft.DoesNotExist:
return api_error(status.HTTP_404_NOT_FOUND,
'Draft %s not found' % pk)
perm = check_folder_permission(request, d.origin_repo_id, '/')
if perm is None:
error_msg = 'Permission denied.'
return api_error(status.HTTP_403_FORBIDDEN, error_msg)
reviewer = request.GET.get('username')
if reviewer is None:
return api_error(status.HTTP_400_BAD_REQUEST, 'Email %s invalid.' % reviewer)
try:
reviewer = DraftReviewer.objects.get(reviewer=reviewer, draft=d)
except DraftReviewer.DoesNotExist:
return Response(status.HTTP_200_OK)
reviewer.delete()
return Response(status.HTTP_200_OK)
| true | true |
f72b04c22d26af35d88e3f843c7d2b7c9e606c26 | 120 | py | Python | module_2/lab2_1_1_7.py | dzooli/pcep_prepare | ddf34991a2d6ef2cfe3bda706ec333e9caa2aea5 | [
"MIT"
] | null | null | null | module_2/lab2_1_1_7.py | dzooli/pcep_prepare | ddf34991a2d6ef2cfe3bda706ec333e9caa2aea5 | [
"MIT"
] | null | null | null | module_2/lab2_1_1_7.py | dzooli/pcep_prepare | ddf34991a2d6ef2cfe3bda706ec333e9caa2aea5 | [
"MIT"
] | null | null | null | print("Hello, Python!")
print("Zoltan")
#print(Zoltan)
#print "Zoltan"
print('Zoltan')
print('''
Alma
on the
tree
'''
)
| 10 | 23 | 0.65 | print("Hello, Python!")
print("Zoltan")
print('Zoltan')
print('''
Alma
on the
tree
'''
)
| true | true |
f72b058123386b2f12effdfae7010abf516ca956 | 13,314 | py | Python | Lib/json/__init__.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | null | null | null | Lib/json/__init__.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | null | null | null | Lib/json/__init__.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | null | null | null | r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`json` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is derived from a
version of the externally maintained simplejson library.
Encoding basic Python object hierarchies::
>>> import json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print(json.dumps("\"foo\bar"))
"\"foo\bar"
>>> print(json.dumps('\u1234'))
"\u1234"
>>> print(json.dumps('\\'))
"\\"
>>> print(json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True))
{"a": 0, "b": 0, "c": 0}
>>> from io import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import json
>>> from collections import OrderedDict
>>> mydict = OrderedDict([('4', 5), ('6', 7)])
>>> json.dumps([1,2,3,mydict], separators=(',', ':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import json
>>> print(json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4))
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import json
>>> obj = ['foo', {'bar': ['baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == '"foo\x08ar'
True
>>> from io import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(o) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using json.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m json.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m json.tool
Expecting property name enclosed in double quotes: line 1 column 3 (char 2)
"""
# Public package metadata.
__version__ = '2.0.9'
# Names exported by ``from json import *``.
__all__ = [
    'dump', 'dumps', 'load', 'loads',
    'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from .decoder import JSONDecoder, JSONDecodeError
from .encoder import JSONEncoder
# Shared encoder reused by dump()/dumps() whenever the caller passes only
# default arguments -- avoids re-instantiating JSONEncoder on every call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        default=None, sort_keys=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp``, a
    ``.write()``-supporting file-like object.

    Keyword arguments:

    - ``skipkeys``: skip (rather than raise ``TypeError`` on) dict keys
      that are not of a basic type (str, int, float, bool, None).
    - ``ensure_ascii``: escape non-ASCII characters in output strings;
      if false, such characters are written as-is.
    - ``check_circular``: guard against circular container references
      (disabling risks an ``OverflowError`` or worse).
    - ``allow_nan``: permit out-of-range floats (``nan``, ``inf``,
      ``-inf``), emitted as the JavaScript constants ``NaN``,
      ``Infinity``, ``-Infinity``; if false they raise ``ValueError``.
    - ``indent``: non-negative int pretty-print indent level; 0 inserts
      newlines only; ``None`` (default) is the most compact form.
    - ``separators``: ``(item_separator, key_separator)`` pair; defaults
      to ``(', ', ': ')`` when *indent* is ``None`` and ``(',', ': ')``
      otherwise; use ``(',', ':')`` for the most compact output.
    - ``default``: callable returning a serializable substitute for an
      otherwise unserializable object (default: raise ``TypeError``).
    - ``sort_keys``: emit dictionary keys in sorted order.
    - ``cls``: a ``JSONEncoder`` subclass to use instead of the default.
    """
    all_defaults = (not skipkeys and ensure_ascii and check_circular
                    and allow_nan and cls is None and indent is None
                    and separators is None and default is None
                    and not sort_keys and not kw)
    if all_defaults:
        # Reuse the shared module-level encoder instead of building one.
        encoder = _default_encoder
    else:
        encoder_cls = JSONEncoder if cls is None else cls
        encoder = encoder_cls(
            skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan,
            indent=indent, separators=separators, default=default,
            sort_keys=sort_keys, **kw)
    # Stream chunk by chunk; writelines could batch this on some Python
    # versions but at a debuggability cost.
    for chunk in encoder.iterencode(obj):
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        default=None, sort_keys=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    Keyword arguments:

    - ``skipkeys``: skip (rather than raise ``TypeError`` on) dict keys
      that are not of a basic type (str, int, float, bool, None).
    - ``ensure_ascii``: escape non-ASCII characters; if false, they may
      appear verbatim in the returned string.
    - ``check_circular``: guard against circular container references
      (disabling risks an ``OverflowError`` or worse).
    - ``allow_nan``: permit out-of-range floats (``nan``, ``inf``,
      ``-inf``), emitted as ``NaN``/``Infinity``/``-Infinity``; if false
      they raise ``ValueError``.
    - ``indent``: non-negative int pretty-print indent level; 0 inserts
      newlines only; ``None`` (default) is the most compact form.
    - ``separators``: ``(item_separator, key_separator)`` pair; defaults
      to ``(', ', ': ')`` when *indent* is ``None`` and ``(',', ': ')``
      otherwise; use ``(',', ':')`` for the most compact output.
    - ``default``: callable returning a serializable substitute for an
      otherwise unserializable object (default: raise ``TypeError``).
    - ``sort_keys``: emit dictionary keys in sorted order.
    - ``cls``: a ``JSONEncoder`` subclass to use instead of the default.
    """
    if (skipkeys or not ensure_ascii or not check_circular or not allow_nan
            or cls is not None or indent is not None
            or separators is not None or default is not None
            or sort_keys or kw):
        encoder_cls = JSONEncoder if cls is None else cls
        return encoder_cls(
            skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan,
            indent=indent, separators=separators, default=default,
            sort_keys=sort_keys, **kw).encode(obj)
    # All arguments at their defaults: reuse the shared encoder.
    return _default_encoder.encode(obj)
# Shared decoder reused by loads() when no customization is requested.
_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)
def load(fp, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object.

    All keyword arguments carry the same meaning as in :func:`loads`,
    to which this function delegates after reading ``fp`` in full:
    ``object_hook``/``object_pairs_hook`` customize object-literal
    decoding (the pairs hook wins when both are given), the ``parse_*``
    callables intercept number and constant parsing, and ``cls`` selects
    a ``JSONDecoder`` subclass.
    """
    document = fp.read()
    return loads(document, cls=cls,
                 object_hook=object_hook,
                 object_pairs_hook=object_pairs_hook,
                 parse_float=parse_float, parse_int=parse_int,
                 parse_constant=parse_constant, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``s`` (a ``str`` instance containing a JSON document)
    to a Python object.

    ``object_hook`` is called with each decoded object literal (a dict)
    and its return value used instead; ``object_pairs_hook`` receives an
    ordered list of pairs and takes priority when both are given.
    ``parse_float``/``parse_int`` are called with the string form of each
    JSON float/int (defaults equivalent to ``float``/``int``);
    ``parse_constant`` is called for ``-Infinity``/``Infinity``/``NaN``.
    ``cls`` selects a ``JSONDecoder`` subclass.  The ``encoding``
    argument is ignored and deprecated.
    """
    if not isinstance(s, str):
        raise TypeError('the JSON object must be str, not {!r}'.format(
            s.__class__.__name__))
    if s.startswith(u'\ufeff'):
        # A BOM is not legal JSON; point the caller at the right codec.
        raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)",
                              s, 0)
    overrides = {'object_hook': object_hook,
                 'object_pairs_hook': object_pairs_hook,
                 'parse_float': parse_float,
                 'parse_int': parse_int,
                 'parse_constant': parse_constant}
    if cls is None and not kw and all(v is None for v in overrides.values()):
        # Nothing customized: reuse the shared decoder.
        return _default_decoder.decode(s)
    for name, value in overrides.items():
        if value is not None:
            kw[name] = value
    decoder_cls = JSONDecoder if cls is None else cls
    return decoder_cls(**kw).decode(s)
| 39.981982 | 81 | 0.653372 | __version__ = '2.0.9'
__all__ = [
'dump', 'dumps', 'load', 'loads',
'JSONDecoder', 'JSONDecodeError', 'JSONEncoder',
]
__author__ = 'Bob Ippolito <bob@redivi.com>'
from .decoder import JSONDecoder, JSONDecodeError
from .encoder import JSONEncoder
# Module-level encoder shared by dump()/dumps() when the caller passes
# only default options -- avoids constructing a new JSONEncoder per call.
_default_encoder = JSONEncoder(
    skipkeys=False,
    ensure_ascii=True,
    check_circular=True,
    allow_nan=True,
    indent=None,
    separators=None,
    default=None,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        default=None, sort_keys=False, **kw):
    """Serialize ``obj`` as a JSON formatted stream to ``fp``.

    ``fp`` is a ``.write()``-supporting file-like object.  The keyword
    arguments mirror ``JSONEncoder``'s constructor; ``cls`` may name a
    custom encoder subclass.
    """
    # Fast path: with all-default options, reuse the shared module-level
    # encoder instead of building a fresh JSONEncoder for every call.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        default is None and not sort_keys and not kw):
        iterable = _default_encoder.iterencode(obj)
    else:
        if cls is None:
            cls = JSONEncoder
        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
            separators=separators,
            default=default, sort_keys=sort_keys, **kw).iterencode(obj)
    # Stream chunk by chunk rather than joining into one big string first.
    for chunk in iterable:
        fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
        allow_nan=True, cls=None, indent=None, separators=None,
        default=None, sort_keys=False, **kw):
    """Serialize ``obj`` to a JSON formatted ``str``.

    The keyword arguments mirror ``JSONEncoder``'s constructor; ``cls``
    may name a custom encoder subclass.
    """
    # Fast path: all defaults -> reuse the shared module-level encoder.
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        default is None and not sort_keys and not kw):
        return _default_encoder.encode(obj)
    if cls is None:
        cls = JSONEncoder
    return cls(
        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
        separators=separators, default=default, sort_keys=sort_keys,
        **kw).encode(obj)
# Shared decoder reused by loads() when only default options are given.
_default_decoder = JSONDecoder(object_hook=None, object_pairs_hook=None)
def load(fp, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``fp`` (a ``.read()``-supporting file-like object
    containing a JSON document) to a Python object."""
    # Read the whole document eagerly and delegate option handling to loads().
    return loads(fp.read(),
        cls=cls, object_hook=object_hook,
        parse_float=parse_float, parse_int=parse_int,
        parse_constant=parse_constant, object_pairs_hook=object_pairs_hook, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None, **kw):
    """Deserialize ``s`` (a ``str`` containing a JSON document) to a
    Python object.

    ``encoding`` is ignored and deprecated.  With all-default options the
    shared module-level decoder is used; otherwise a decoder (``cls`` or
    ``JSONDecoder``) is built from the supplied hooks and parsers.
    """
    if not isinstance(s, str):
        raise TypeError('the JSON object must be str, not {!r}'.format(
            s.__class__.__name__))
    # A leading BOM is rejected: the caller should decode bytes with
    # 'utf-8-sig' before passing the text in.
    if s.startswith(u'\ufeff'):
        raise JSONDecodeError("Unexpected UTF-8 BOM (decode using utf-8-sig)",
                              s, 0)
    if (cls is None and object_hook is None and
            parse_int is None and parse_float is None and
            parse_constant is None and object_pairs_hook is None and not kw):
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Only forward hooks that were actually supplied so the decoder's own
    # defaults stay in effect for the rest.
    if object_hook is not None:
        kw['object_hook'] = object_hook
    if object_pairs_hook is not None:
        kw['object_pairs_hook'] = object_pairs_hook
    if parse_float is not None:
        kw['parse_float'] = parse_float
    if parse_int is not None:
        kw['parse_int'] = parse_int
    if parse_constant is not None:
        kw['parse_constant'] = parse_constant
    return cls(**kw).decode(s)
| true | true |
f72b05a1e16676d1178d4682bdc7c44175562994 | 3,192 | py | Python | scripts/loadelastic-aurora.py | dbmi-pitt/aurora-meta | a0d3d3963fce2639081cb55715b5357cd0e21902 | [
"Apache-2.0"
] | null | null | null | scripts/loadelastic-aurora.py | dbmi-pitt/aurora-meta | a0d3d3963fce2639081cb55715b5357cd0e21902 | [
"Apache-2.0"
] | null | null | null | scripts/loadelastic-aurora.py | dbmi-pitt/aurora-meta | a0d3d3963fce2639081cb55715b5357cd0e21902 | [
"Apache-2.0"
] | null | null | null | import requests, json, os
import argparse
import pandas as pd
import ijson
import time
# Elasticsearch python libs
from elasticsearch import Elasticsearch
from elasticsearch import helpers
# Runtime configuration -- overwritten by the CLI flags in __main__ below.
directory = ""  # directory holding the *.json input files (trailing '/' added)
indexName = "aurora-meta2"  # target Elasticsearch index
typeName = "patient"  # legacy document type (unused by newer Elasticsearch)
THRESHOLD = 10000 # this regulates how much data gets loaded then is processed in a bulk group
PK = "ID"  # field inside rec['clin'] used as the Elasticsearch document _id
json_root = "item"  # ijson prefix of the record array inside each file
errors = []  # names of files whose bulk load failed (reported at exit)
def loadit():
    """Bulk-load every ``*.json`` file in ``directory`` into Elasticsearch.

    Records are streamed with ijson (so huge files need not fit in memory),
    grouped into batches of up to THRESHOLD documents, and indexed with
    ``helpers.bulk``.  Each record's ``rec['clin'][PK]`` value becomes the
    document ``_id``.  Files whose bulk request raises are recorded in the
    module-level ``errors`` list; loading then continues with the next file.
    """
    es = Elasticsearch([{'host': 'localhost', 'port': '9200'}])

    def _flush(batch, json_filename):
        # Send one bulk batch; on failure remember the file name and carry on.
        try:
            rtn_status = helpers.bulk(es, batch)
            if rtn_status:
                print(rtn_status)
        except Exception as ex:
            print("Loading failed for " + json_filename)
            errors.append(json_filename)
            print('Error:' + str(ex))

    for filename in os.listdir(directory):
        if not filename.endswith(".json"):
            continue
        json_filename = directory + filename
        print("Loading " + json_filename)
        with open(json_filename, 'r') as input_file:
            bulk_action = []
            for rec in ijson.items(input_file, json_root):
                pk = rec['clin'][PK]
                bulk_action.append({
                    "_index": indexName,
                    # "_type" (typeName) is deprecated in modern Elasticsearch
                    # and intentionally omitted, as in the original code.
                    "_id": pk,
                    "_source": rec,
                })
                if len(bulk_action) >= THRESHOLD:
                    _flush(bulk_action, json_filename)
                    bulk_action = []
            # BUG FIX: the original only flushed the remainder when the file
            # held fewer than THRESHOLD records in total, so for larger files
            # the trailing partial batch was silently dropped.  Flush whatever
            # is left regardless of the total count.
            if bulk_action:
                _flush(bulk_action, json_filename)
if __name__ == "__main__":
    # Parse CLI flags and override the module-level configuration globals
    # before running the load.
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", required=True, help="dir path to json file(s)")
    parser.add_argument("-thres", help="set the batch threshold")
    parser.add_argument("-i", help="set the index name")
    parser.add_argument("-t", help="set the type")
    parser.add_argument("-pk", help="primary key of the record, default 'ID'")
    parser.add_argument("-r", help="json root node, default 'item', passing 'NOROOT' will ignore the root item")
    args = parser.parse_args()
    print("Args:")
    print(args)
    if args.d:
        directory = args.d
        # loadit() concatenates directory + filename, so ensure a trailing '/'.
        if directory[-1] != '/':
            directory = directory + '/'
    if args.thres:
        THRESHOLD = int(args.thres)
        print ("Batch threshold: " + str(THRESHOLD))
        print(type(THRESHOLD))
    if args.i:
        indexName = args.i
    if args.t:
        typeName = args.t
    if args.pk:
        PK = args.pk
    if args.r:
        if args.r == "NOROOT":
            json_root = "" # ignore the root
        else:
            json_root = args.r
    # Time the whole import run and report any failed files at the end.
    start = time.time()
    loadit()
    end = time.time()
    print("Elapsed time: {}".format((end-start)))
    if len(errors) > 0:
        print("The following files failed:")
        print(errors)
| 25.95122 | 109 | 0.628446 | import requests, json, os
import argparse
import pandas as pd
import ijson
import time
from elasticsearch import Elasticsearch
from elasticsearch import helpers
directory = ""
indexName = "aurora-meta2"
typeName = "patient"
THRESHOLD = 10000
PK = "ID"
json_root = "item"
errors = []
def loadit():
es = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
for filename in os.listdir(directory):
if filename.endswith(".json"):
json_filename = directory+filename
print("Loading " + json_filename)
with open(json_filename, 'r') as input_file:
i = 1
batchCtr = 1
bulk_action = []
bulkCount = 0
ij = ijson.items(input_file, json_root)
print(ij)
for rec in ij:
print(rec)
pk = rec['clin'][PK]
print(pk)
bulk = {
"_index" : indexName,
"_id" : pk,
"_source" : rec,
}
bulk_action.append(bulk)
i = i + 1
batchCtr = batchCtr + 1
if batchCtr > THRESHOLD:
try:
bulkCount = bulkCount + batchCtr
rtn_status = helpers.bulk(es, bulk_action)
if rtn_status:
print(rtn_status)
batchCtr = 1
bulk_action = []
except Exception as ex:
print ("Loading failed for " + json_filename)
errors.append(json_filename)
print ('Error:' + str(ex))
if i < THRESHOLD:
try:
rtn_status = helpers.bulk(es, bulk_action)
if rtn_status:
print(rtn_status)
batchCtr = 1
bulk_action = []
except Exception as ex:
print ('Error:' + str(ex))
print ("Loading failed for " + json_filename)
errors.append(json_filename)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", required=True, help="dir path to json file(s)")
parser.add_argument("-thres", help="set the batch threshold")
parser.add_argument("-i", help="set the index name")
parser.add_argument("-t", help="set the type")
parser.add_argument("-pk", help="primary key of the record, default 'ID'")
parser.add_argument("-r", help="json root node, default 'item', passing 'NOROOT' will ignore the root item")
args = parser.parse_args()
print("Args:")
print(args)
if args.d:
directory = args.d
if directory[-1] != '/':
directory = directory + '/'
if args.thres:
THRESHOLD = int(args.thres)
print ("Batch threshold: " + str(THRESHOLD))
print(type(THRESHOLD))
if args.i:
indexName = args.i
if args.t:
typeName = args.t
if args.pk:
PK = args.pk
if args.r:
if args.r == "NOROOT":
json_root = ""
else:
json_root = args.r
start = time.time()
loadit()
end = time.time()
print("Elapsed time: {}".format((end-start)))
if len(errors) > 0:
print("The following files failed:")
print(errors)
| true | true |
f72b05a397836379cf15a5545dc470a6f2762a91 | 5,781 | py | Python | smoke/data/build.py | SmallMunich/Smoke | 591a03bdb5cad962999914c9a97c7a8bed9e529b | [
"MIT"
] | 2 | 2022-03-08T02:54:57.000Z | 2022-03-10T09:09:40.000Z | smoke/data/build.py | SmallMunich/Smoke | 591a03bdb5cad962999914c9a97c7a8bed9e529b | [
"MIT"
] | null | null | null | smoke/data/build.py | SmallMunich/Smoke | 591a03bdb5cad962999914c9a97c7a8bed9e529b | [
"MIT"
] | null | null | null | import logging
import copy
import bisect
import numpy as np
import torch.utils.data
from smoke.utils.comm import get_world_size
from smoke.utils.imports import import_file
from smoke.utils.envs import seed_all_rng
from . import datasets as D
from . import samplers
from .transforms import build_transforms
from .collate_batch import BatchCollator
def build_dataset(cfg, transforms, dataset_catalog, is_train=True):
    """Instantiate the datasets named in the config.

    Args:
        cfg: global config node; ``cfg.DATASETS.TRAIN`` / ``cfg.DATASETS.TEST``
            lists the dataset names to build.
        transforms (callable): transforms to apply to each (image, target) sample.
        dataset_catalog (DatasetCatalog): maps a dataset name to its factory
            class name and constructor arguments.
        is_train (bool): whether to setup the dataset for training or testing.

    Returns:
        list: several datasets for testing; for training a single (possibly
        concatenated) dataset wrapped in a one-element list.
    """
    dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST
    if not isinstance(dataset_list, (list, tuple)):
        raise RuntimeError(
            "dataset_list should be a list of strings, got {}".format(dataset_list)
        )
    datasets = []
    for dataset_name in dataset_list:
        data = dataset_catalog.get(dataset_name)
        factory = getattr(D, data["factory"])
        args = data["args"]
        args["cfg"] = cfg
        args["is_train"] = is_train
        args["transforms"] = transforms
        # make dataset from factory
        dataset = factory(**args)
        datasets.append(dataset)
    # for testing, return a list of datasets
    if not is_train:
        return datasets
    # for training, concatenate all datasets into a single one
    dataset = datasets[0]
    if len(datasets) > 1:
        dataset = D.ConcatDataset(datasets)
    return [dataset]
def make_data_loader(cfg, is_train=True):
    """Build DataLoaders for training or testing.

    Splits the global batch size across GPUs, builds the configured
    datasets/transforms, and wraps each dataset in a DataLoader with a
    TrainingSampler (infinite shuffled stream) and per-worker RNG seeding.

    Returns:
        A single DataLoader when ``is_train`` is True (datasets were
        concatenated), otherwise a list of DataLoaders.
    """
    num_gpus = get_world_size()
    if is_train:
        images_per_batch = cfg.SOLVER.IMS_PER_BATCH
        assert images_per_batch % num_gpus == 0, \
            "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used." \
            .format(images_per_batch, num_gpus)
        images_per_gpu = images_per_batch // num_gpus
    else:
        images_per_batch = cfg.TEST.IMS_PER_BATCH
        assert images_per_batch % num_gpus == 0, \
            "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used." \
            .format(images_per_batch, num_gpus)
        images_per_gpu = images_per_batch // num_gpus
    # Group images with similar aspect ratio (width/height > 1 vs < 1).
    # NOTE(review): this value is currently computed but never consumed
    # below -- confirm whether aspect-ratio grouping was meant to be wired
    # into the sampler.
    aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []
    path_catalog = import_file(
        "smoke.config.paths_catalog", cfg.PATHS_CATALOG, True
    )
    DatasetCatalog = path_catalog.DatasetCatalog
    transforms = build_transforms(cfg, is_train)
    datasets = build_dataset(cfg, transforms, DatasetCatalog, is_train)

    data_loaders = []
    for dataset in datasets:
        sampler = samplers.TrainingSampler(len(dataset))
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, images_per_gpu, drop_last=True
        )
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            collate_fn=collator,
            # Reseed python/numpy/torch RNGs per worker so augmentations differ.
            worker_init_fn=worker_init_reset_seed,
        )
        data_loaders.append(data_loader)
    if is_train:
        # during training, a single (possibly concatenated) data_loader is returned
        assert len(data_loaders) == 1
        return data_loaders[0]
    return data_loaders
def build_test_loader(cfg, is_train=False):
    """Build inference DataLoaders (batch size 1) for the test datasets.

    Each configured test dataset gets its own DataLoader driven by an
    InferenceSampler, which enumerates every sample exactly once in order.

    Returns:
        list: one ``torch.utils.data.DataLoader`` per test dataset,
        mirroring ``make_data_loader``'s behaviour for ``is_train=False``.
    """
    path_catalog = import_file(
        "smoke.config.paths_catalog", cfg.PATHS_CATALOG, True
    )
    DatasetCatalog = path_catalog.DatasetCatalog
    transforms = build_transforms(cfg, is_train)
    datasets = build_dataset(cfg, transforms, DatasetCatalog, is_train)

    data_loaders = []
    for dataset in datasets:
        sampler = samplers.InferenceSampler(len(dataset))
        batch_sampler = torch.utils.data.sampler.BatchSampler(
            sampler, 1, drop_last=False
        )
        collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
        num_workers = cfg.DATALOADER.NUM_WORKERS
        data_loader = torch.utils.data.DataLoader(
            dataset,
            num_workers=num_workers,
            batch_sampler=batch_sampler,
            collate_fn=collator,
        )
        data_loaders.append(data_loader)
    # BUG FIX (flagged by the original in-source comment): the function
    # previously returned only the loader from the last loop iteration
    # (`data_loader`).  Return the full list so multi-dataset test configs
    # do not silently lose loaders.
    return data_loaders
def trivial_batch_collator(batch):
    """Identity collator: hand the list of dataset samples through
    to the model untouched, without stacking or padding."""
    return batch
def worker_init_reset_seed(worker_id):
    # DataLoader worker_init_fn: derive a distinct seed per worker from a
    # fresh random draw plus the worker id, so workers do not produce
    # identical augmentation streams (seed_all_rng is a project utility --
    # presumably it seeds python/numpy/torch RNGs; verify in smoke.utils.envs).
    seed_all_rng(np.random.randint(2 ** 31) + worker_id)
| 34.825301 | 145 | 0.669088 | import logging
import copy
import bisect
import numpy as np
import torch.utils.data
from smoke.utils.comm import get_world_size
from smoke.utils.imports import import_file
from smoke.utils.envs import seed_all_rng
from . import datasets as D
from . import samplers
from .transforms import build_transforms
from .collate_batch import BatchCollator
def build_dataset(cfg, transforms, dataset_catalog, is_train=True):
dataset_list = cfg.DATASETS.TRAIN if is_train else cfg.DATASETS.TEST
if not isinstance(dataset_list, (list, tuple)):
raise RuntimeError(
"dataset_list should be a list of strings, got {}".format(dataset_list)
)
datasets = []
for dataset_name in dataset_list:
data = dataset_catalog.get(dataset_name)
factory = getattr(D, data["factory"])
args = data["args"]
args["cfg"] = cfg
args["is_train"] = is_train
args["transforms"] = transforms
dataset = factory(**args)
datasets.append(dataset)
if not is_train:
return datasets
dataset = datasets[0]
if len(datasets) > 1:
dataset = D.ConcatDataset(datasets)
return [dataset]
def make_data_loader(cfg, is_train=True):
num_gpus = get_world_size()
if is_train:
images_per_batch = cfg.SOLVER.IMS_PER_BATCH
assert images_per_batch % num_gpus == 0, \
"SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used." \
.format(images_per_batch, num_gpus)
images_per_gpu = images_per_batch // num_gpus
else:
images_per_batch = cfg.TEST.IMS_PER_BATCH
assert images_per_batch % num_gpus == 0, \
"SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of GPUs ({}) used." \
.format(images_per_batch, num_gpus)
images_per_gpu = images_per_batch // num_gpus
aspect_grouping = [1] if cfg.DATALOADER.ASPECT_RATIO_GROUPING else []
path_catalog = import_file(
"smoke.config.paths_catalog", cfg.PATHS_CATALOG, True
)
DatasetCatalog = path_catalog.DatasetCatalog
transforms = build_transforms(cfg, is_train)
datasets = build_dataset(cfg, transforms, DatasetCatalog, is_train)
data_loaders = []
for dataset in datasets:
sampler = samplers.TrainingSampler(len(dataset))
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, images_per_gpu, drop_last=True
)
collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
num_workers = cfg.DATALOADER.NUM_WORKERS
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=collator,
worker_init_fn=worker_init_reset_seed,
)
data_loaders.append(data_loader)
if is_train:
assert len(data_loaders) == 1
return data_loaders[0]
return data_loaders
def build_test_loader(cfg, is_train=False):
path_catalog = import_file(
"smoke.config.paths_catalog", cfg.PATHS_CATALOG, True
)
DatasetCatalog = path_catalog.DatasetCatalog
transforms = build_transforms(cfg, is_train)
datasets = build_dataset(cfg, transforms, DatasetCatalog, is_train)
data_loaders = []
for dataset in datasets:
sampler = samplers.InferenceSampler(len(dataset))
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, 1, drop_last=False
)
collator = BatchCollator(cfg.DATALOADER.SIZE_DIVISIBILITY)
num_workers = cfg.DATALOADER.NUM_WORKERS
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=num_workers,
batch_sampler=batch_sampler,
collate_fn=collator,
)
data_loaders.append(data_loader)
return data_loader
def trivial_batch_collator(batch):
return batch
def worker_init_reset_seed(worker_id):
seed_all_rng(np.random.randint(2 ** 31) + worker_id)
| true | true |
f72b0684f170d3fddc3fc47d05fff76101d188b3 | 1,072 | py | Python | i3wsgroups/cli.py | damani42/i3-workspace-groups | 13fe8e22e829166eb22df031b4c39f3501dfb362 | [
"MIT"
] | null | null | null | i3wsgroups/cli.py | damani42/i3-workspace-groups | 13fe8e22e829166eb22df031b4c39f3501dfb362 | [
"MIT"
] | null | null | null | i3wsgroups/cli.py | damani42/i3-workspace-groups | 13fe8e22e829166eb22df031b4c39f3501dfb362 | [
"MIT"
] | null | null | null | import argparse
def add_common_args(parser: argparse.ArgumentParser):
    """Register the flags shared by all commands on ``parser``."""
    parser.add_argument('--dry-run',
                        action='store_true',
                        default=False,
                        help='If true, will not actually do any changes to i3 workspaces.')
    parser.add_argument('--log-level',
                        choices=('debug', 'info', 'warning', 'error', 'critical'),
                        default='warning',
                        help='Logging level for stderr and syslog.')
def add_workspace_naming_args(parser: argparse.ArgumentParser) -> None:
    """Register the flags controlling how workspace names are rendered."""
    for flag, description in (
            ('--window-icons-all-groups',
             'If true, will add the icons of the open windows to workspaces'
             ' in all groups, and not just the active group. Also implies '
             '--window-icons.'),
            ('--renumber-workspaces',
             'If true, will renumber workspaces in every groups so that they '
             'are in numerical order, similar to tmux\'s renumber-windows option.'),
    ):
        parser.add_argument(flag,
                            action='store_true',
                            default=False,
                            help=description)
| 34.580645 | 78 | 0.636194 | import argparse
def add_common_args(parser: argparse.ArgumentParser):
parser.add_argument(
'--dry-run',
action='store_true',
default=False,
help='If true, will not actually do any changes to i3 workspaces.')
parser.add_argument(
'--log-level',
choices=('debug', 'info', 'warning', 'error', 'critical'),
default='warning',
help='Logging level for stderr and syslog.')
def add_workspace_naming_args(parser: argparse.ArgumentParser) -> None:
parser.add_argument(
'--window-icons-all-groups',
action='store_true',
default=False,
help='If true, will add the icons of the open windows to workspaces'
' in all groups, and not just the active group. Also implies '
'--window-icons.')
parser.add_argument(
'--renumber-workspaces',
action='store_true',
default=False,
help='If true, will renumber workspaces in every groups so that they '
'are in numerical order, similar to tmux\'s renumber-windows option.')
| true | true |
f72b073f2c249ce06aea52ce2b03bad057fb64ac | 10,626 | py | Python | src/neqsim/process/processTools.py | kwafafoa/neqsimpython | 2a540297552b39dac2666bbfb7c76eda0f5779db | [
"Apache-2.0"
] | null | null | null | src/neqsim/process/processTools.py | kwafafoa/neqsimpython | 2a540297552b39dac2666bbfb7c76eda0f5779db | [
"Apache-2.0"
] | null | null | null | src/neqsim/process/processTools.py | kwafafoa/neqsimpython | 2a540297552b39dac2666bbfb7c76eda0f5779db | [
"Apache-2.0"
] | null | null | null | import jpype
import jpype.imports
from jpype.types import *
from neqsim.neqsimpython import neqsim
# Shared, module-level process system; every factory function below
# registers its equipment into this single instance.
processoperations = neqsim.processSimulation.processSystem.ProcessSystem()
def stream(thermoSystem, name="stream ?", t=0, p=0):
    """Create a Stream from ``thermoSystem``, optionally overriding its
    temperature/pressure (0 means "leave unchanged"), and register it
    with the shared process system."""
    if t != 0:
        thermoSystem.setTemperature(t)
    if p != 0:
        thermoSystem.setPressure(p)
    # Use a local name distinct from the function to avoid shadowing.
    new_stream = neqsim.processSimulation.processEquipment.stream.Stream(thermoSystem)
    new_stream.setName(name)
    processoperations.add(new_stream)
    return new_stream
def neqstream(thermoSystem, name="stream ?", t=0, p=0):
    """Create a NeqStream (non-equilibrium stream) from ``thermoSystem``,
    optionally overriding its temperature/pressure (0 means "leave
    unchanged"), and register it with the shared process system."""
    if t != 0:
        thermoSystem.setTemperature(t)
    if p != 0:
        thermoSystem.setPressure(p)
    # Use a local name distinct from the function to avoid shadowing.
    neq_stream = neqsim.processSimulation.processEquipment.stream.NeqStream(thermoSystem)
    neq_stream.setName(name)
    processoperations.add(neq_stream)
    return neq_stream
def recycle(teststream, name="recycle ?"):
    """Wrap ``teststream`` in a Recycle unit and register it.

    FIX: the ``name`` argument was previously accepted but silently
    ignored; it is now applied to the unit for consistency with every
    other factory in this module.
    """
    recycle1 = neqsim.processSimulation.processEquipment.util.Recycle()
    recycle1.setName(name)
    recycle1.addStream(teststream)
    processoperations.add(recycle1)
    return recycle1
def saturator(teststream, name="water saturator"):
    """Create a water-saturator utility on ``teststream`` and register it.

    FIX: the ``name`` argument was previously accepted but silently
    ignored; it is now applied to the unit for consistency with every
    other factory in this module.
    """
    streamsaturator = neqsim.processSimulation.processEquipment.util.StreamSaturatorUtil(teststream)
    streamsaturator.setName(name)
    processoperations.add(streamsaturator)
    return streamsaturator
def glycoldehydrationlmodule(teststream, name="TEG process"):
    """Create a TEG glycol dehydration module fed by ``teststream`` and register it."""
    dehydrationlmodule = neqsim.processSimulation.processSystem.processModules.GlycolDehydrationlModule()
    dehydrationlmodule.setName(name)
    dehydrationlmodule.addInputStream("gasStreamToAbsorber", teststream)
    processoperations.add(dehydrationlmodule)
    return dehydrationlmodule
def openprocess(filename):
    """Load a saved ProcessSystem from ``filename`` and return it.

    NOTE(review): this rebinds only a *local* name -- the module-level
    ``processoperations`` used by run()/view() etc. is NOT replaced;
    confirm whether that is intended.
    """
    processoperations = neqsim.processSimulation.processSystem.ProcessSystem.open(filename)
    return processoperations
def separator(teststream, name="separator ?"):
    """Create a two-phase separator on ``teststream`` and register it."""
    separator = neqsim.processSimulation.processEquipment.separator.Separator(teststream)
    separator.setName(name)
    processoperations.add(separator)
    return separator
def GORfitter(teststream, name="GOR fitter ?"):
    """Create a GOR-fitter utility on ``teststream`` and register it."""
    GORfitter1 = neqsim.processSimulation.processEquipment.util.GORfitter(name, teststream)
    GORfitter1.setName(name)
    processoperations.add(GORfitter1)
    return GORfitter1
def simpleTEGAbsorber(name="TEG absorber ?"):
    """Create an empty SimpleTEGAbsorber (streams attached later) and register it."""
    absorber = neqsim.processSimulation.processEquipment.absorber.SimpleTEGAbsorber()
    absorber.setName(name)
    processoperations.add(absorber)
    return absorber
def waterStripperColumn(name="water stripper ?"):
    """Create an empty WaterStripperColumn (streams attached later) and register it."""
    stripper = neqsim.processSimulation.processEquipment.absorber.WaterStripperColumn()
    stripper.setName(name)
    processoperations.add(stripper)
    return stripper
def gasscrubber(teststream, name="scrubber ?"):
    """Create a gas scrubber on ``teststream`` and register it."""
    separator = neqsim.processSimulation.processEquipment.separator.GasScrubber(teststream)
    separator.setName(name)
    processoperations.add(separator)
    return separator
def separator3phase(teststream, name="separator ?"):
    """Create a three-phase (gas/oil/water) separator on ``teststream`` and register it."""
    separator = neqsim.processSimulation.processEquipment.separator.ThreePhaseSeparator(teststream)
    separator.setName(name)
    processoperations.add(separator)
    return separator
def valve(teststream, p=1.0, name="valve ?"):
    """Create a throttling valve with outlet pressure ``p`` and register it."""
    valve = neqsim.processSimulation.processEquipment.valve.ThrottlingValve(teststream)
    valve.setOutletPressure(p)
    valve.setName(name)
    processoperations.add(valve)
    return valve
def recycle2(name="recycle ?"):
    """Create an empty named Recycle unit (streams added by the caller) and register it."""
    recyc = neqsim.processSimulation.processEquipment.util.Recycle(name)
    processoperations.add(recyc)
    return recyc
def calculator(name="calculator ?"):
    """Create a Calculator unit and register it."""
    calc2 = neqsim.processSimulation.processEquipment.util.Calculator(name)
    processoperations.add(calc2)
    return calc2
def setpoint(name1, unit1, name2, unit2):
    """Create a SetPoint tying variable ``name1`` of ``unit1`` to ``name2`` of ``unit2``."""
    setp = neqsim.processSimulation.processEquipment.util.SetPoint(name1, unit1, name2, unit2)
    processoperations.add(setp)
    return setp
def filters(teststream):
    """Create a Filter unit on ``teststream`` and register it."""
    filter2 = neqsim.processSimulation.processEquipment.filter.Filter(teststream)
    processoperations.add(filter2)
    return filter2
def compressor(teststream, pres=10.0, name="compressor ?"):
    """Create a compressor with outlet pressure ``pres`` and register it."""
    compressor = neqsim.processSimulation.processEquipment.compressor.Compressor(teststream)
    compressor.setOutletPressure(pres)
    compressor.setName(name)
    processoperations.add(compressor)
    return compressor
def compressorChart(compressor, curveConditions, speed, flow, head, polyEff ):
    # Attach performance curves (flow/head/polytropic efficiency per speed);
    # JDouble[:] / JDouble[:][:] convert the Python sequences to Java arrays.
    compressor.getCompressorChart().setCurves(JDouble[:](curveConditions), JDouble[:](speed), JDouble[:][:](flow), JDouble[:][:](head), JDouble[:][:](polyEff))
def compressorSurgeCurve(compressor, curveConditions, surgeflow, surgehead):
    # Set the surge (minimum stable flow) curve of the compressor chart.
    compressor.getCompressorChart().getSurgeCurve().setCurve(JDouble[:](curveConditions), JDouble[:](surgeflow), JDouble[:](surgehead))
def compressorStoneWallCurve(compressor, curveConditions, stoneWallflow, stoneWallHead):
    # Set the stone-wall (choke / maximum flow) curve of the compressor chart.
    compressor.getCompressorChart().getStoneWallCurve().setCurve(JDouble[:](curveConditions), JDouble[:](stoneWallflow), JDouble[:](stoneWallHead))
def pump(teststream, p=1.0, name="pump ?"):
    """Create a pump with outlet pressure ``p`` and register it."""
    pump = neqsim.processSimulation.processEquipment.pump.Pump(teststream)
    pump.setOutletPressure(p)
    pump.setName(name)
    processoperations.add(pump)
    return pump
def expander(teststream, p, name="expander ?"):
    """Create an expander with outlet pressure ``p`` and register it."""
    expander = neqsim.processSimulation.processEquipment.expander.Expander(teststream)
    expander.setOutletPressure(p)
    expander.setName(name)
    processoperations.add(expander)
    return expander
def mixer(name=""):
    """Create a static mixer (inlet streams added by the caller) and register it."""
    mixer = neqsim.processSimulation.processEquipment.mixer.StaticMixer()
    mixer.setName(name)
    processoperations.add(mixer)
    return mixer
def phasemixer(name=""):
    """Create a static phase mixer (inlet streams added by the caller) and register it."""
    mixer = neqsim.processSimulation.processEquipment.mixer.StaticPhaseMixer()
    mixer.setName(name)
    processoperations.add(mixer)
    return mixer
def nequnit(teststream, equipment="pipeline", flowpattern="stratified", numberOfNodes=100):
    """Create a non-equilibrium unit of the given equipment/flow-pattern type and register it."""
    neqUn = neqsim.processSimulation.processEquipment.util.NeqSimUnit(teststream, equipment, flowpattern)
    neqUn.setNumberOfNodes(numberOfNodes)
    processoperations.add(neqUn)
    return neqUn
def splitter(teststream, splitfactors, name=""):
    """Split ``teststream`` into ``len(splitfactors)`` branches with the given fractions and register the unit."""
    splitter = neqsim.processSimulation.processEquipment.splitter.Splitter(teststream)
    splitter.setSplitNumber(len(splitfactors))
    # Convert the Python sequence to a Java double[] for the JVM call.
    splitter.setSplitFactors(JDouble[:](splitfactors))
    splitter.setName(name)
    processoperations.add(splitter)
    return splitter
def heater(teststream, name=""):
    """Create a heater on ``teststream`` and register it."""
    heater = neqsim.processSimulation.processEquipment.heatExchanger.Heater(teststream)
    heater.setName(name)
    processoperations.add(heater)
    return heater
def simplereservoir(fluid, name="Reservoir 1", gasvolume=10.0 * 1e7, oilvolume=120.0 * 1e6, watervolume=10.0e6):
    """Create a SimpleReservoir containing ``fluid`` with the given phase volumes and register it."""
    reserv = neqsim.processSimulation.processEquipment.reservoir.SimpleReservoir(name)
    reserv.setReservoirFluid(fluid, gasvolume, oilvolume, watervolume)
    processoperations.add(reserv)
    return reserv
def cooler(teststream, name=""):
    """Create a cooler on ``teststream`` and register it."""
    cooler = neqsim.processSimulation.processEquipment.heatExchanger.Cooler(teststream)
    cooler.setName(name)
    processoperations.add(cooler)
    return cooler
def heatExchanger(stream1, stream2=None, name=""):
    """Create a heat exchanger and register it.

    Single-stream form when ``stream2`` is None, otherwise a two-stream
    (hot/cold side) exchanger.
    """
    if stream2==None:
        heater = neqsim.processSimulation.processEquipment.heatExchanger.HeatExchanger(stream1)
    else:
        heater = neqsim.processSimulation.processEquipment.heatExchanger.HeatExchanger(stream1, stream2)
    heater.setName(name)
    processoperations.add(heater)
    return heater
def distillationColumn(trays=5, reboil=True, condenser=True, name="destColumn"):
    """Create a distillation column with ``trays`` stages and optional reboiler/condenser, and register it."""
    distillationColumn = neqsim.processSimulation.processEquipment.distillation.DistillationColumn(trays, reboil, condenser)
    distillationColumn.setName(name)
    processoperations.add(distillationColumn)
    return distillationColumn
def neqheater(teststream, name=""):
    """Create a non-equilibrium heater on ``teststream`` and register it."""
    neqheater = neqsim.processSimulation.processEquipment.heatExchanger.NeqHeater(teststream)
    neqheater.setName(name)
    processoperations.add(neqheater)
    return neqheater
def twophasepipe(teststream, position, diameter, height, outTemp, rough):
    """Create a two-phase pipeline along the given geometry profile and register it.

    ``position``/``height``/``diameter``/``outTemp``/``rough`` describe the
    legs of the line; one leg spans each pair of consecutive positions.

    NOTE(review): the output path is hard-coded and, unlike pipeline()
    below, the profile setters are called with plain Python lists instead
    of JDouble[:] arrays -- confirm jpype converts these implicitly.
    """
    pipe = neqsim.processSimulation.processEquipment.pipeline.TwoPhasePipeLine(teststream)
    pipe.setOutputFileName("c:/tempNew20.nc")
    pipe.setInitialFlowPattern("annular")
    numberOfLegs = len(position) - 1
    numberOfNodesInLeg = 60
    pipe.setNumberOfLegs(numberOfLegs)
    pipe.setNumberOfNodesInLeg(numberOfNodesInLeg)
    pipe.setLegPositions(position)
    pipe.setHeightProfile(height)
    pipe.setPipeDiameters(diameter)
    pipe.setPipeWallRoughness(rough)
    pipe.setOuterTemperatures(outTemp)
    # 0/1 flags: mass transfer not equilibrated, heat transfer equilibrated.
    pipe.setEquilibriumMassTransfer(0)
    pipe.setEquilibriumHeatTransfer(1)
    processoperations.add(pipe)
    return pipe
def pipe(teststream, length, deltaElevation, diameter, rough):
    """Create a simple adiabatic pipe segment and register it."""
    pipe = neqsim.processSimulation.processEquipment.pipeline.AdiabaticPipe(teststream)
    pipe.setDiameter(diameter)
    pipe.setLength(length)
    pipe.setPipeWallRoughness(rough)
    pipe.setInletElevation(0.0)
    pipe.setOutletElevation(deltaElevation)
    processoperations.add(pipe)
    return pipe
def pipeline(teststream, position, diameter, height, outTemp, rough, outerHeatTransferCoefficients, pipeWallHeatTransferCoefficients, numberOfNodesInLeg = 50):
    """Create a one-phase pipeline with heat transfer along the given profile and register it.

    NOTE(review): output path is hard-coded to "c:/tempNew20.nc".
    """
    pipe = neqsim.processSimulation.processEquipment.pipeline.OnePhasePipeLine(teststream)
    pipe.setOutputFileName("c:/tempNew20.nc")
    numberOfLegs = len(position) - 1
    pipe.setNumberOfLegs(numberOfLegs)
    pipe.setNumberOfNodesInLeg(numberOfNodesInLeg)
    # JDouble[:] converts each Python sequence to a Java double[].
    pipe.setLegPositions(JDouble[:](position))
    pipe.setHeightProfile(JDouble[:](height))
    pipe.setPipeDiameters(JDouble[:](diameter))
    pipe.setPipeWallRoughness(JDouble[:](rough))
    pipe.setPipeOuterHeatTransferCoefficients(JDouble[:](outerHeatTransferCoefficients))
    pipe.setPipeWallHeatTransferCoefficients(JDouble[:](pipeWallHeatTransferCoefficients))
    pipe.setOuterTemperatures(JDouble[:](outTemp))
    processoperations.add(pipe)
    return pipe
def clear():
    """Remove every unit from the shared process system."""
    processoperations.clearAll()
def run():
    """Run a steady-state solve of the shared process system."""
    processoperations.run()
def clearProcess():
    """Alias of clear()."""
    processoperations.clearAll()
def runProcess():
    """Alias of run()."""
    processoperations.run()
def runProcessAsThread(process):
    """Run ``process`` on a java.lang.Thread object and return it.

    NOTE(review): Thread.run() executes synchronously on the calling
    thread (Thread.start() would be needed for background execution),
    and JPackage normally takes a package, not a class, path -- verify
    both before relying on this helper.
    """
    Thread = jpype.JPackage('java.lang.Thread')
    threadProcess = Thread(process)
    threadProcess.run()
    return threadProcess
def getProcess():
    """Return the shared ProcessSystem instance."""
    return processoperations
def runtrans():
    """Run a transient (time-stepping) solve of the shared process system."""
    processoperations.runTransient()
def view():
    """Open the graphical result display of the shared process system."""
    processoperations.displayResult()
def viewProcess():
    """Alias of view()."""
    processoperations.displayResult()
| 36.768166 | 159 | 0.769245 | import jpype
import jpype.imports
from jpype.types import *
from neqsim.neqsimpython import neqsim
processoperations = neqsim.processSimulation.processSystem.ProcessSystem()
def stream(thermoSystem, name="stream ?", t=0, p=0):
if t != 0:
thermoSystem.setTemperature(t)
if p != 0:
thermoSystem.setPressure(p)
stream = neqsim.processSimulation.processEquipment.stream.Stream(thermoSystem)
stream.setName(name)
processoperations.add(stream)
return stream
def neqstream(thermoSystem, name="stream ?", t=0, p=0):
if t != 0:
thermoSystem.setTemperature(t)
if p != 0:
thermoSystem.setPressure(p)
stream = neqsim.processSimulation.processEquipment.stream.NeqStream(thermoSystem)
stream.setName(name)
processoperations.add(stream)
return stream
def recycle(teststream, name="recycle ?"):
recycle1 = neqsim.processSimulation.processEquipment.util.Recycle()
recycle1.addStream(teststream)
processoperations.add(recycle1)
return recycle1
def saturator(teststream, name="water saturator"):
streamsaturator = neqsim.processSimulation.processEquipment.util.StreamSaturatorUtil(teststream)
processoperations.add(streamsaturator)
return streamsaturator
def glycoldehydrationlmodule(teststream, name="TEG process"):
dehydrationlmodule = neqsim.processSimulation.processSystem.processModules.GlycolDehydrationlModule()
dehydrationlmodule.setName(name)
dehydrationlmodule.addInputStream("gasStreamToAbsorber", teststream)
processoperations.add(dehydrationlmodule)
return dehydrationlmodule
def openprocess(filename):
processoperations = neqsim.processSimulation.processSystem.ProcessSystem.open(filename)
return processoperations
def separator(teststream, name="separator ?"):
separator = neqsim.processSimulation.processEquipment.separator.Separator(teststream)
separator.setName(name)
processoperations.add(separator)
return separator
def GORfitter(teststream, name="GOR fitter ?"):
GORfitter1 = neqsim.processSimulation.processEquipment.util.GORfitter(name, teststream)
GORfitter1.setName(name)
processoperations.add(GORfitter1)
return GORfitter1
def simpleTEGAbsorber(name="TEG absorber ?"):
    """Create an empty SimpleTEGAbsorber column, register it, and return it."""
    teg_absorber = neqsim.processSimulation.processEquipment.absorber.SimpleTEGAbsorber()
    teg_absorber.setName(name)
    processoperations.add(teg_absorber)
    return teg_absorber
def waterStripperColumn(name="water stripper ?"):
    """Create an empty WaterStripperColumn, register it, and return it."""
    stripper_column = neqsim.processSimulation.processEquipment.absorber.WaterStripperColumn()
    stripper_column.setName(name)
    processoperations.add(stripper_column)
    return stripper_column
def gasscrubber(teststream, name="scrubber ?"):
    """Create a GasScrubber fed by *teststream*, register it, and return it."""
    scrubber = neqsim.processSimulation.processEquipment.separator.GasScrubber(teststream)
    scrubber.setName(name)
    processoperations.add(scrubber)
    return scrubber
def separator3phase(teststream, name="separator ?"):
    """Create a ThreePhaseSeparator (gas/oil/water) fed by *teststream*, register it, and return it."""
    three_phase_sep = neqsim.processSimulation.processEquipment.separator.ThreePhaseSeparator(teststream)
    three_phase_sep.setName(name)
    processoperations.add(three_phase_sep)
    return three_phase_sep
def valve(teststream, p=1.0, name="valve ?"):
    """Create a ThrottlingValve on *teststream* with outlet pressure *p*, register it, and return it."""
    throttling_valve = neqsim.processSimulation.processEquipment.valve.ThrottlingValve(teststream)
    throttling_valve.setOutletPressure(p)
    throttling_valve.setName(name)
    processoperations.add(throttling_valve)
    return throttling_valve
def recycle2(name="recycle ?"):
    """Create an empty, named Recycle unit, register it, and return it.

    Unlike recycle(), no stream is attached; callers add streams afterwards.
    """
    recycle_unit = neqsim.processSimulation.processEquipment.util.Recycle(name)
    processoperations.add(recycle_unit)
    return recycle_unit
def calculator(name="calculator ?"):
    """Create a named Calculator unit, register it, and return it."""
    calc_unit = neqsim.processSimulation.processEquipment.util.Calculator(name)
    processoperations.add(calc_unit)
    return calc_unit
def setpoint(name1, unit1, name2, unit2):
    """Create a SetPoint coupling variable *name1* of *unit1* to *name2* of *unit2*, register it, and return it."""
    sp = neqsim.processSimulation.processEquipment.util.SetPoint(name1, unit1, name2, unit2)
    processoperations.add(sp)
    return sp
def filters(teststream):
    """Create a Filter unit on *teststream*, register it, and return it."""
    filter_unit = neqsim.processSimulation.processEquipment.filter.Filter(teststream)
    processoperations.add(filter_unit)
    return filter_unit
def compressor(teststream, pres=10.0, name="compressor ?"):
    """Create a Compressor on *teststream* with outlet pressure *pres*, register it, and return it."""
    comp_unit = neqsim.processSimulation.processEquipment.compressor.Compressor(teststream)
    comp_unit.setOutletPressure(pres)
    comp_unit.setName(name)
    processoperations.add(comp_unit)
    return comp_unit
def compressorChart(compressor, curveConditions, speed, flow, head, polyEff):
    """Load performance curves (per-speed flow, head and polytropic efficiency) into *compressor*'s chart."""
    chart = compressor.getCompressorChart()
    chart.setCurves(
        JDouble[:](curveConditions),
        JDouble[:](speed),
        JDouble[:][:](flow),
        JDouble[:][:](head),
        JDouble[:][:](polyEff),
    )
def compressorSurgeCurve(compressor, curveConditions, surgeflow, surgehead):
    """Load the surge limit curve (flow/head pairs) into *compressor*'s chart."""
    surge_curve = compressor.getCompressorChart().getSurgeCurve()
    surge_curve.setCurve(JDouble[:](curveConditions), JDouble[:](surgeflow), JDouble[:](surgehead))
def compressorStoneWallCurve(compressor, curveConditions, stoneWallflow, stoneWallHead):
    """Load the stone-wall (choke) limit curve into *compressor*'s chart."""
    stone_wall = compressor.getCompressorChart().getStoneWallCurve()
    stone_wall.setCurve(JDouble[:](curveConditions), JDouble[:](stoneWallflow), JDouble[:](stoneWallHead))
def pump(teststream, p=1.0, name="pump ?"):
    """Create a Pump on *teststream* with outlet pressure *p*, register it, and return it."""
    pump_unit = neqsim.processSimulation.processEquipment.pump.Pump(teststream)
    pump_unit.setOutletPressure(p)
    pump_unit.setName(name)
    processoperations.add(pump_unit)
    return pump_unit
def expander(teststream, p, name="expander ?"):
    """Create an Expander on *teststream* with outlet pressure *p*, register it, and return it."""
    expander_unit = neqsim.processSimulation.processEquipment.expander.Expander(teststream)
    expander_unit.setOutletPressure(p)
    expander_unit.setName(name)
    processoperations.add(expander_unit)
    return expander_unit
def mixer(name=""):
    """Create an empty StaticMixer, register it, and return it."""
    static_mixer = neqsim.processSimulation.processEquipment.mixer.StaticMixer()
    static_mixer.setName(name)
    processoperations.add(static_mixer)
    return static_mixer
def phasemixer(name=""):
    """Create an empty StaticPhaseMixer, register it, and return it."""
    phase_mixer = neqsim.processSimulation.processEquipment.mixer.StaticPhaseMixer()
    phase_mixer.setName(name)
    processoperations.add(phase_mixer)
    return phase_mixer
def nequnit(teststream, equipment="pipeline", flowpattern="stratified", numberOfNodes=100):
    """Create a NeqSimUnit of the given equipment type and flow pattern, register it, and return it."""
    neq_unit = neqsim.processSimulation.processEquipment.util.NeqSimUnit(teststream, equipment, flowpattern)
    neq_unit.setNumberOfNodes(numberOfNodes)
    processoperations.add(neq_unit)
    return neq_unit
def splitter(teststream, splitfactors, name=""):
    """Create a Splitter dividing *teststream* according to *splitfactors*, register it, and return it."""
    split_unit = neqsim.processSimulation.processEquipment.splitter.Splitter(teststream)
    split_unit.setSplitNumber(len(splitfactors))
    split_unit.setSplitFactors(JDouble[:](splitfactors))
    split_unit.setName(name)
    processoperations.add(split_unit)
    return split_unit
def heater(teststream, name=""):
    """Create a Heater on *teststream*, register it, and return it."""
    heater_unit = neqsim.processSimulation.processEquipment.heatExchanger.Heater(teststream)
    heater_unit.setName(name)
    processoperations.add(heater_unit)
    return heater_unit
def simplereservoir(fluid, name="Reservoir 1", gasvolume=10.0 * 1e7, oilvolume=120.0 * 1e6, watervolume=10.0e6):
    """Create a SimpleReservoir holding *fluid* with the given phase volumes, register it, and return it."""
    reservoir = neqsim.processSimulation.processEquipment.reservoir.SimpleReservoir(name)
    reservoir.setReservoirFluid(fluid, gasvolume, oilvolume, watervolume)
    processoperations.add(reservoir)
    return reservoir
def cooler(teststream, name=""):
    """Create a Cooler on *teststream*, register it, and return it."""
    cooler_unit = neqsim.processSimulation.processEquipment.heatExchanger.Cooler(teststream)
    cooler_unit.setName(name)
    processoperations.add(cooler_unit)
    return cooler_unit
def heatExchanger(stream1, stream2=None, name=""):
    """Create a HeatExchanger on one or two streams, register it, and return it.

    When *stream2* is omitted the single-stream constructor is used; otherwise
    the two-stream (counter-current) constructor is used.
    Fix: compare against None with ``is None`` rather than ``==``, which could
    be hijacked by an __eq__ override and is non-idiomatic.
    """
    if stream2 is None:
        heater = neqsim.processSimulation.processEquipment.heatExchanger.HeatExchanger(stream1)
    else:
        heater = neqsim.processSimulation.processEquipment.heatExchanger.HeatExchanger(stream1, stream2)
    heater.setName(name)
    processoperations.add(heater)
    return heater
def distillationColumn(trays=5, reboil=True, condenser=True, name="destColumn"):
    """Create a DistillationColumn with *trays* trays and optional reboiler/condenser, register it, and return it."""
    column = neqsim.processSimulation.processEquipment.distillation.DistillationColumn(trays, reboil, condenser)
    column.setName(name)
    processoperations.add(column)
    return column
def neqheater(teststream, name=""):
    """Create a non-equilibrium NeqHeater on *teststream*, register it, and return it."""
    neq_heater = neqsim.processSimulation.processEquipment.heatExchanger.NeqHeater(teststream)
    neq_heater.setName(name)
    processoperations.add(neq_heater)
    return neq_heater
def twophasepipe(teststream, position, diameter, height, outTemp, rough, numberOfNodesInLeg=60):
    """Create a TwoPhasePipeLine on *teststream* from leg-wise geometry, register it, and return it.

    *position*, *diameter*, *height*, *outTemp* and *rough* describe the pipe
    legs; the number of legs is derived from len(position) - 1.
    Generalization: *numberOfNodesInLeg* (previously hard-coded to 60) is now a
    keyword parameter with the same default, matching the pipeline() helper.
    Mass transfer is solved in non-equilibrium mode (flag 0) while heat
    transfer uses the equilibrium model (flag 1), as in the original.
    """
    pipe = neqsim.processSimulation.processEquipment.pipeline.TwoPhasePipeLine(teststream)
    # NOTE(review): hard-coded Windows output path kept for backward
    # compatibility -- consider parameterizing it as well.
    pipe.setOutputFileName("c:/tempNew20.nc")
    pipe.setInitialFlowPattern("annular")
    numberOfLegs = len(position) - 1
    pipe.setNumberOfLegs(numberOfLegs)
    pipe.setNumberOfNodesInLeg(numberOfNodesInLeg)
    pipe.setLegPositions(position)
    pipe.setHeightProfile(height)
    pipe.setPipeDiameters(diameter)
    pipe.setPipeWallRoughness(rough)
    pipe.setOuterTemperatures(outTemp)
    pipe.setEquilibriumMassTransfer(0)
    pipe.setEquilibriumHeatTransfer(1)
    processoperations.add(pipe)
    return pipe
def pipe(teststream, length, deltaElevation, diameter, rough):
    """Create an AdiabaticPipe on *teststream* with the given geometry, register it, and return it.

    Inlet elevation is fixed at 0.0; *deltaElevation* sets the outlet elevation.
    """
    adiabatic_pipe = neqsim.processSimulation.processEquipment.pipeline.AdiabaticPipe(teststream)
    adiabatic_pipe.setDiameter(diameter)
    adiabatic_pipe.setLength(length)
    adiabatic_pipe.setPipeWallRoughness(rough)
    adiabatic_pipe.setInletElevation(0.0)
    adiabatic_pipe.setOutletElevation(deltaElevation)
    processoperations.add(adiabatic_pipe)
    return adiabatic_pipe
def pipeline(teststream, position, diameter, height, outTemp, rough, outerHeatTransferCoefficients, pipeWallHeatTransferCoefficients, numberOfNodesInLeg = 50):
    """Create a OnePhasePipeLine on *teststream* from leg-wise geometry, register it, and return it.

    All per-leg profiles are converted to Java double[] arrays before being
    handed to the Java object; the number of legs is len(position) - 1.
    """
    one_phase_pipe = neqsim.processSimulation.processEquipment.pipeline.OnePhasePipeLine(teststream)
    one_phase_pipe.setOutputFileName("c:/tempNew20.nc")
    leg_count = len(position) - 1
    one_phase_pipe.setNumberOfLegs(leg_count)
    one_phase_pipe.setNumberOfNodesInLeg(numberOfNodesInLeg)
    one_phase_pipe.setLegPositions(JDouble[:](position))
    one_phase_pipe.setHeightProfile(JDouble[:](height))
    one_phase_pipe.setPipeDiameters(JDouble[:](diameter))
    one_phase_pipe.setPipeWallRoughness(JDouble[:](rough))
    one_phase_pipe.setPipeOuterHeatTransferCoefficients(JDouble[:](outerHeatTransferCoefficients))
    one_phase_pipe.setPipeWallHeatTransferCoefficients(JDouble[:](pipeWallHeatTransferCoefficients))
    one_phase_pipe.setOuterTemperatures(JDouble[:](outTemp))
    processoperations.add(one_phase_pipe)
    return one_phase_pipe
def clear():
    """Remove all registered unit operations from the shared process system."""
    processoperations.clearAll()
def run():
    """Run a steady-state simulation of the shared process system."""
    processoperations.run()
def clearProcess():
    """Alias of clear(): remove all unit operations from the shared process system."""
    processoperations.clearAll()
def runProcess():
    """Alias of run(): run a steady-state simulation of the shared process system."""
    processoperations.run()
def runProcessAsThread(process):
    """Start *process* on a new Java thread and return the java.lang.Thread.

    Bug fixes: ``jpype.JPackage('java.lang.Thread')`` returned a package
    proxy rather than the Thread class -- ``jpype.JClass`` is the correct way
    to resolve a Java class by name. In addition, ``Thread.run()`` executes
    the runnable synchronously on the calling Python thread; ``start()``
    launches it asynchronously, which is what the function name promises.
    Callers may ``join()`` the returned thread to wait for completion.
    """
    Thread = jpype.JClass('java.lang.Thread')
    threadProcess = Thread(process)
    threadProcess.start()
    return threadProcess
def getProcess():
    """Return the shared module-level ProcessSystem."""
    return processoperations
def runtrans():
    """Run a transient (time-stepping) simulation of the shared process system."""
    processoperations.runTransient()
def view():
    """Display the simulation results of the shared process system in the NeqSim GUI."""
    processoperations.displayResult()
def viewProcess():
    """Alias of view(): display the simulation results of the shared process system."""
    processoperations.displayResult()
| true | true |
f72b0759efafb83d0661f521221014ba2f8d3aab | 7,021 | py | Python | tests/graph/test_floyd_warshall.py | aalekhpatel07/retworkx | ae93fcab17d55bc259476c65a677221b4177870a | [
"Apache-2.0"
] | 1 | 2021-11-29T23:15:07.000Z | 2021-11-29T23:15:07.000Z | tests/graph/test_floyd_warshall.py | aalekhpatel07/retworkx | ae93fcab17d55bc259476c65a677221b4177870a | [
"Apache-2.0"
] | 40 | 2020-08-31T06:09:06.000Z | 2022-03-18T19:02:34.000Z | tests/graph/test_floyd_warshall.py | aalekhpatel07/retworkx | ae93fcab17d55bc259476c65a677221b4177870a | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import numpy
import retworkx
class TestFloydWarshall(unittest.TestCase):
    """Tests for retworkx Floyd-Warshall all-pairs shortest paths on PyGraph.

    parallel_threshold = 300 keeps these graphs (all smaller than 300 nodes
    except the no-edges case) on the serial code path; the subclass
    TestParallelFloydWarshall re-runs the whole suite with threshold 0 to
    force the parallel path.
    """
    # Node-count threshold above which retworkx switches to the parallel
    # implementation; overridden to 0 in the subclass below.
    parallel_threshold = 300
    def test_vs_dijkstra_all_pairs(self):
        """Floyd-Warshall lengths must equal all-pairs Dijkstra lengths plus 0.0 self-distances."""
        graph = retworkx.PyGraph()
        a = graph.add_node("A")
        b = graph.add_node("B")
        c = graph.add_node("C")
        d = graph.add_node("D")
        e = graph.add_node("E")
        f = graph.add_node("F")
        edge_list = [
            (a, b, 7),
            (c, a, 9),
            (a, d, 14),
            (b, c, 10),
            (d, c, 2),
            (d, e, 9),
            (b, f, 15),
            (c, f, 11),
            (e, f, 6),
        ]
        graph.add_edges_from(edge_list)
        dijkstra_lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(
            graph, float
        )
        # Dijkstra omits the trivial self-paths; add them before comparing.
        expected = {k: {**v, k: 0.0} for k, v in dijkstra_lengths.items()}
        result = retworkx.graph_floyd_warshall(
            graph, float, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(result, expected)
    def test_vs_dijkstra_all_pairs_with_node_removal(self):
        """Same Dijkstra comparison after removing a node, exercising non-contiguous node indices."""
        graph = retworkx.PyGraph()
        a = graph.add_node("A")
        b = graph.add_node("B")
        c = graph.add_node("C")
        d = graph.add_node("D")
        e = graph.add_node("E")
        f = graph.add_node("F")
        edge_list = [
            (a, b, 7),
            (c, a, 9),
            (a, d, 14),
            (b, c, 10),
            (d, c, 2),
            (d, e, 9),
            (b, f, 15),
            (c, f, 11),
            (e, f, 6),
        ]
        graph.add_edges_from(edge_list)
        graph.remove_node(d)
        dijkstra_lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(
            graph, float
        )
        expected = {k: {**v, k: 0.0} for k, v in dijkstra_lengths.items()}
        result = retworkx.graph_floyd_warshall(
            graph, float, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(result, expected)
    def test_floyd_warshall_empty_graph(self):
        """An empty graph yields an empty distance mapping."""
        graph = retworkx.PyGraph()
        self.assertEqual({}, retworkx.graph_floyd_warshall(graph, float))
    def test_floyd_warshall_graph_no_edges(self):
        """With nodes but no edges every node maps to an empty neighbor dict."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(1000)))
        expected = {x: {} for x in range(1000)}
        self.assertEqual(
            expected,
            retworkx.graph_floyd_warshall(graph, float),
        )
    def test_floyd_warshall_numpy_three_edges(self):
        """Weighted cycle: shortest 0<->3 distance is symmetric and equals 15."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(6)))
        weights = [2, 12, 1, 5, 1]
        graph.add_edges_from([(i, i + 1, weights[i]) for i in range(5)])
        graph.add_edge(5, 0, 10)
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: x, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 3], 15)
        self.assertEqual(dist[3, 0], 15)
    def test_weighted_numpy_two_edges(self):
        """Weighted 8-node ring: 0<->2 shortest distance is 4 in both directions."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(8)))
        graph.add_edges_from(
            [
                (0, 1, 2),
                (1, 2, 2),
                (2, 3, 1),
                (3, 4, 1),
                (4, 5, 1),
                (5, 6, 1),
                (6, 7, 1),
                (7, 0, 1),
            ]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: x, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 2], 4)
        self.assertEqual(dist[2, 0], 4)
    def test_weighted_numpy_negative_cycle(self):
        """A negative cycle shows up as negative self-distances on the matrix diagonal."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(4)))
        graph.add_edges_from(
            [
                (0, 1, 1),
                (1, 2, -1),
                (2, 3, -1),
                (3, 0, -1),
            ]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: x, parallel_threshold=self.parallel_threshold
        )
        self.assertTrue(numpy.all(numpy.diag(dist) < 0))
    def test_floyd_warshall_numpy_cycle(self):
        """Unit-weight 7-node cycle: hop counts from node 0 to nodes 3 and 4 are both 3."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(7)))
        graph.add_edges_from_no_data(
            [(0, 1), (0, 6), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: 1, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 3], 3)
        self.assertEqual(dist[0, 4], 3)
    def test_numpy_no_edges(self):
        """With no edges the matrix is all-inf except for a zero diagonal."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(4)))
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: x, parallel_threshold=self.parallel_threshold
        )
        expected = numpy.full((4, 4), numpy.inf)
        numpy.fill_diagonal(expected, 0)
        self.assertTrue(numpy.array_equal(dist, expected))
    def test_floyd_warshall_numpy_graph_cycle_with_removals(self):
        """Cycle distances remain correct after a node removal creates an index hole."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(8)))
        graph.remove_node(0)
        graph.add_edges_from_no_data(
            [(1, 2), (1, 7), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: 1, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 3], 3)
        self.assertEqual(dist[0, 4], 3)
    def test_floyd_warshall_numpy_graph_cycle_no_weight_fn(self):
        """Omitting weight_fn falls back to unit edge weights."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(8)))
        graph.remove_node(0)
        graph.add_edges_from_no_data(
            [(1, 2), (1, 7), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)]
        )
        dist = retworkx.graph_floyd_warshall_numpy(graph)
        self.assertEqual(dist[0, 3], 3)
        self.assertEqual(dist[0, 4], 3)
    def test_floyd_warshall_numpy_graph_cycle_default_weight(self):
        """default_weight=2 doubles every hop, so 3-hop paths cost 6."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(8)))
        graph.remove_node(0)
        graph.add_edges_from_no_data(
            [(1, 2), (1, 7), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, default_weight=2, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 3], 6)
        self.assertEqual(dist[0, 4], 6)
class TestParallelFloydWarshall(TestFloydWarshall):
    """Re-run the entire TestFloydWarshall suite with parallel_threshold=0 to force the parallel code path."""
    parallel_threshold = 0
| 32.808411 | 79 | 0.565874 |
import unittest
import numpy
import retworkx
class TestFloydWarshall(unittest.TestCase):
    """Tests for retworkx Floyd-Warshall all-pairs shortest paths on PyGraph.

    parallel_threshold = 300 keeps these graphs (all smaller than 300 nodes
    except the no-edges case) on the serial code path; the subclass
    TestParallelFloydWarshall re-runs the whole suite with threshold 0 to
    force the parallel path.
    """
    # Node-count threshold above which retworkx switches to the parallel
    # implementation; overridden to 0 in the subclass below.
    parallel_threshold = 300
    def test_vs_dijkstra_all_pairs(self):
        """Floyd-Warshall lengths must equal all-pairs Dijkstra lengths plus 0.0 self-distances."""
        graph = retworkx.PyGraph()
        a = graph.add_node("A")
        b = graph.add_node("B")
        c = graph.add_node("C")
        d = graph.add_node("D")
        e = graph.add_node("E")
        f = graph.add_node("F")
        edge_list = [
            (a, b, 7),
            (c, a, 9),
            (a, d, 14),
            (b, c, 10),
            (d, c, 2),
            (d, e, 9),
            (b, f, 15),
            (c, f, 11),
            (e, f, 6),
        ]
        graph.add_edges_from(edge_list)
        dijkstra_lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(
            graph, float
        )
        # Dijkstra omits the trivial self-paths; add them before comparing.
        expected = {k: {**v, k: 0.0} for k, v in dijkstra_lengths.items()}
        result = retworkx.graph_floyd_warshall(
            graph, float, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(result, expected)
    def test_vs_dijkstra_all_pairs_with_node_removal(self):
        """Same Dijkstra comparison after removing a node, exercising non-contiguous node indices."""
        graph = retworkx.PyGraph()
        a = graph.add_node("A")
        b = graph.add_node("B")
        c = graph.add_node("C")
        d = graph.add_node("D")
        e = graph.add_node("E")
        f = graph.add_node("F")
        edge_list = [
            (a, b, 7),
            (c, a, 9),
            (a, d, 14),
            (b, c, 10),
            (d, c, 2),
            (d, e, 9),
            (b, f, 15),
            (c, f, 11),
            (e, f, 6),
        ]
        graph.add_edges_from(edge_list)
        graph.remove_node(d)
        dijkstra_lengths = retworkx.graph_all_pairs_dijkstra_path_lengths(
            graph, float
        )
        expected = {k: {**v, k: 0.0} for k, v in dijkstra_lengths.items()}
        result = retworkx.graph_floyd_warshall(
            graph, float, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(result, expected)
    def test_floyd_warshall_empty_graph(self):
        """An empty graph yields an empty distance mapping."""
        graph = retworkx.PyGraph()
        self.assertEqual({}, retworkx.graph_floyd_warshall(graph, float))
    def test_floyd_warshall_graph_no_edges(self):
        """With nodes but no edges every node maps to an empty neighbor dict."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(1000)))
        expected = {x: {} for x in range(1000)}
        self.assertEqual(
            expected,
            retworkx.graph_floyd_warshall(graph, float),
        )
    def test_floyd_warshall_numpy_three_edges(self):
        """Weighted cycle: shortest 0<->3 distance is symmetric and equals 15."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(6)))
        weights = [2, 12, 1, 5, 1]
        graph.add_edges_from([(i, i + 1, weights[i]) for i in range(5)])
        graph.add_edge(5, 0, 10)
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: x, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 3], 15)
        self.assertEqual(dist[3, 0], 15)
    def test_weighted_numpy_two_edges(self):
        """Weighted 8-node ring: 0<->2 shortest distance is 4 in both directions."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(8)))
        graph.add_edges_from(
            [
                (0, 1, 2),
                (1, 2, 2),
                (2, 3, 1),
                (3, 4, 1),
                (4, 5, 1),
                (5, 6, 1),
                (6, 7, 1),
                (7, 0, 1),
            ]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: x, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 2], 4)
        self.assertEqual(dist[2, 0], 4)
    def test_weighted_numpy_negative_cycle(self):
        """A negative cycle shows up as negative self-distances on the matrix diagonal."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(4)))
        graph.add_edges_from(
            [
                (0, 1, 1),
                (1, 2, -1),
                (2, 3, -1),
                (3, 0, -1),
            ]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: x, parallel_threshold=self.parallel_threshold
        )
        self.assertTrue(numpy.all(numpy.diag(dist) < 0))
    def test_floyd_warshall_numpy_cycle(self):
        """Unit-weight 7-node cycle: hop counts from node 0 to nodes 3 and 4 are both 3."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(7)))
        graph.add_edges_from_no_data(
            [(0, 1), (0, 6), (1, 2), (2, 3), (3, 4), (4, 5), (5, 6)]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: 1, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 3], 3)
        self.assertEqual(dist[0, 4], 3)
    def test_numpy_no_edges(self):
        """With no edges the matrix is all-inf except for a zero diagonal."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(4)))
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: x, parallel_threshold=self.parallel_threshold
        )
        expected = numpy.full((4, 4), numpy.inf)
        numpy.fill_diagonal(expected, 0)
        self.assertTrue(numpy.array_equal(dist, expected))
    def test_floyd_warshall_numpy_graph_cycle_with_removals(self):
        """Cycle distances remain correct after a node removal creates an index hole."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(8)))
        graph.remove_node(0)
        graph.add_edges_from_no_data(
            [(1, 2), (1, 7), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, lambda x: 1, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 3], 3)
        self.assertEqual(dist[0, 4], 3)
    def test_floyd_warshall_numpy_graph_cycle_no_weight_fn(self):
        """Omitting weight_fn falls back to unit edge weights."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(8)))
        graph.remove_node(0)
        graph.add_edges_from_no_data(
            [(1, 2), (1, 7), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)]
        )
        dist = retworkx.graph_floyd_warshall_numpy(graph)
        self.assertEqual(dist[0, 3], 3)
        self.assertEqual(dist[0, 4], 3)
    def test_floyd_warshall_numpy_graph_cycle_default_weight(self):
        """default_weight=2 doubles every hop, so 3-hop paths cost 6."""
        graph = retworkx.PyGraph()
        graph.add_nodes_from(list(range(8)))
        graph.remove_node(0)
        graph.add_edges_from_no_data(
            [(1, 2), (1, 7), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7)]
        )
        dist = retworkx.graph_floyd_warshall_numpy(
            graph, default_weight=2, parallel_threshold=self.parallel_threshold
        )
        self.assertEqual(dist[0, 3], 6)
        self.assertEqual(dist[0, 4], 6)
class TestParallelFloydWarshall(TestFloydWarshall):
    """Re-run the entire TestFloydWarshall suite with parallel_threshold=0 to force the parallel code path."""
    parallel_threshold = 0
| true | true |
f72b08276373a7b8064dc7eb363bb32779d3d0ce | 9,830 | py | Python | anima/ui/widgets/task_dashboard.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | [
"MIT"
] | 101 | 2015-02-08T22:20:11.000Z | 2022-03-21T18:56:42.000Z | anima/ui/widgets/task_dashboard.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | [
"MIT"
] | 23 | 2016-11-30T08:33:21.000Z | 2021-01-26T12:11:12.000Z | anima/ui/widgets/task_dashboard.py | MehmetErer/anima | f92ae599b5a4c181fc8e131a9ccdde537e635303 | [
"MIT"
] | 27 | 2015-01-03T06:49:45.000Z | 2021-12-28T03:30:54.000Z | # -*- coding: utf-8 -*-
from anima.ui.lib import QtCore, QtWidgets
class TaskDashboardWidget(QtWidgets.QWidget):
"""A widget that displays task related information
"""
def __init__(self, task=None, parent=None, **kwargs):
self._task = None
self.parent = parent
super(TaskDashboardWidget, self).__init__(parent=parent)
# storage for UI stuff
self.vertical_layout = None
self.widget_label = None
self.task_thumbnail_widget = None
self.schedule_info_form_layout = None
self.task_detail_widget = None
self.task_timing_widget = None
self.description_label = None
self.description_field = None
self.description_field_is_updating = False
self.responsible_info_widget = None
self.resource_info_widget = None
self.task_versions_usage_info_widget = None
self.watch_task_button = None
self.fix_task_status_button = None
self.task_status_label = None
self.task_progress = None
self.task_notes_widget = None
self._setup_ui()
self.task = task
def _setup_ui(self):
"""create the UI widgets
"""
# we need a main layout
# may be a vertical one
# or a form layout
self.vertical_layout = QtWidgets.QVBoxLayout(self)
# -------------------------
# Dialog Label and buttons
horizontal_layout3 = QtWidgets.QHBoxLayout()
self.vertical_layout.addLayout(horizontal_layout3)
self.widget_label = QtWidgets.QLabel(self)
self.widget_label.setStyleSheet(
"color: rgb(71, 143, 202);\nfont: 18pt;"
)
horizontal_layout3.addWidget(self.widget_label)
horizontal_layout3.addStretch(1)
# Add Watch Task button
self.watch_task_button = QtWidgets.QPushButton(self)
self.watch_task_button.setMaximumWidth(24)
self.watch_task_button.setMaximumHeight(24)
self.watch_task_button.setText("W")
self.watch_task_button.setToolTip("Watch Task")
self.fix_task_status_button = QtWidgets.QPushButton(self)
self.fix_task_status_button.setMaximumWidth(24)
self.fix_task_status_button.setMaximumHeight(24)
self.fix_task_status_button.setText("F")
self.fix_task_status_button.setToolTip("Fix Task Status")
horizontal_layout3.addWidget(self.watch_task_button)
horizontal_layout3.addWidget(self.fix_task_status_button)
QtCore.QObject.connect(
self.fix_task_status_button,
QtCore.SIGNAL("clicked()"),
self.fix_task_status
)
# Add Status Label
vertical_layout3 = QtWidgets.QVBoxLayout()
from anima.ui.widgets.task_status_label import TaskStatusLabel
self.task_status_label = TaskStatusLabel(task=self.task)
self.task_status_label.setMaximumHeight(12)
vertical_layout3.addWidget(self.task_status_label)
# Add ProgressBar
self.task_progress = QtWidgets.QProgressBar(self)
self.task_progress.setMinimum(0)
self.task_progress.setMaximum(100)
self.task_progress.setValue(50)
self.task_progress.setAlignment(QtCore.Qt.AlignCenter)
self.task_progress.setMaximumHeight(12)
self.task_progress.setStyleSheet("""
QProgressBar::chunk {
background-color: #3add36;
width: 1px;
}
""")
vertical_layout3.addWidget(self.task_progress)
# set items closer to each other
vertical_layout3.setSpacing(0)
horizontal_layout3.addLayout(vertical_layout3)
# Add divider
line = QtWidgets.QFrame(self)
line.setFrameShape(QtWidgets.QFrame.HLine)
line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.vertical_layout.addWidget(line)
horizontal_layout1 = QtWidgets.QHBoxLayout()
self.vertical_layout.addLayout(horizontal_layout1)
vertical_layout1 = QtWidgets.QVBoxLayout()
vertical_layout2 = QtWidgets.QVBoxLayout()
horizontal_layout1.addLayout(vertical_layout1)
horizontal_layout1.addLayout(vertical_layout2)
# --------------------------
# Horizontal Layout for thumbnail and detail widgets
horizontal_layout2 = QtWidgets.QHBoxLayout()
vertical_layout1.addLayout(horizontal_layout2)
# --------------------------
# Task Thumbnail
from anima.ui.widgets.entity_thumbnail import EntityThumbnailWidget
self.task_thumbnail_widget = EntityThumbnailWidget(task=self.task, parent=self)
horizontal_layout2.addWidget(self.task_thumbnail_widget)
# --------------------------
# Task Detail Info
from anima.ui.widgets.task_detail import TaskDetailWidget
self.task_detail_widget = TaskDetailWidget(task=self.task, parent=self)
horizontal_layout2.addWidget(self.task_detail_widget)
# --------------------------
# Task Timing Info
from anima.ui.widgets.task_timing import TaskTimingInfoWidget
self.task_timing_widget = TaskTimingInfoWidget(task=self.task, parent=self)
horizontal_layout2.addWidget(self.task_timing_widget)
# add stretcher
# horizontal_layout2.addStretch(1)
# --------------------------
# Description field
self.description_label = QtWidgets.QLabel(self)
self.description_label.setStyleSheet("""
background-color: gray;
color: white;
font-weight: bold;
padding: 0.5em;
""")
self.description_label.setText("Description")
self.description_field = QtWidgets.QTextEdit(self)
self.description_field.setAcceptRichText(True)
vertical_layout1.addWidget(self.description_label)
vertical_layout1.addWidget(self.description_field)
# add stretcher
vertical_layout1.addStretch(1)
# connect signal
self.description_field.textChanged.connect(self.update_description)
# ---------------------------
# Responsible Info
from anima.ui.widgets.responsible_info import ResponsibleInfoWidget
self.responsible_info_widget = ResponsibleInfoWidget(
task=self.task, parent=self
)
vertical_layout2.addWidget(self.responsible_info_widget)
# ---------------------------
# Resource Info
from anima.ui.widgets.resource_info import ResourceInfoWidget
self.resource_info_widget = ResourceInfoWidget(
task=self.task, parent=self
)
vertical_layout2.addWidget(self.resource_info_widget)
# ---------------------------
# Task Versions Usage Info
from anima.ui.widgets.task_version_usage_info import \
TaskVersionUsageInfoWidget
self.task_versions_usage_info_widget = TaskVersionUsageInfoWidget(
task=self.task, parent=self
)
vertical_layout2.addWidget(self.task_versions_usage_info_widget)
vertical_layout2.addStretch(1)
horizontal_layout1.setStretch(0, 2)
horizontal_layout1.setStretch(1, 1)
# ---------------------------
# Task Notes
from anima.ui.widgets.entity_notes import EntityNotesWidgets
self.task_notes_widget = EntityNotesWidgets(entity=self.task, parent=self)
self.vertical_layout.addWidget(self.task_notes_widget)
@property
def task(self):
"""getter for the _task attribute
"""
return self._task
@task.setter
def task(self, task):
"""setter for the task attribute
"""
from stalker import Task
if isinstance(task, Task):
self._task = task
else:
self._task = None
# self.description_label = None
# self.description_field = None
# self.responsible_info_widget = None
# self.resource_info_widget = None
# self.task_versions_usage_info_widget = None
# self.watch_task_button = None
# self.fix_task_status_button = None
# self.task_progress = None
if self._task:
self.description_field_is_updating = True
self.description_field.setText(self._task.description)
self.description_field_is_updating = False
self.task_progress.setValue(self._task.percent_complete)
else:
self.description_field_is_updating = True
self.description_field.setText('')
self.description_field_is_updating = False
self.task_progress.setValue(0)
self.widget_label.setText(self._task.name if self._task else 'Task Name')
self.task_thumbnail_widget.task = self._task
self.task_detail_widget.task = self._task
self.task_timing_widget.task = self._task
self.task_status_label.task = self._task
self.task_notes_widget.task = self._task
def fix_task_status(self):
"""fix current task status
"""
from stalker import Task
assert isinstance(self.task, Task)
from anima import utils
utils.fix_task_statuses(self.task)
utils.fix_task_computed_time(self.task)
from stalker.db.session import DBSession
DBSession.add(self.task)
DBSession.commit()
def update_description(self):
"""runs when description field has changed
"""
if self.description_field_is_updating:
return
self.description_field_is_updating = True
self.task.description = self.description_field.toPlainText()
from stalker.db.session import DBSession
DBSession.add(self.task)
DBSession.commit()
self.description_field_is_updating = False
| 35.487365 | 87 | 0.649135 |
from anima.ui.lib import QtCore, QtWidgets
class TaskDashboardWidget(QtWidgets.QWidget):
def __init__(self, task=None, parent=None, **kwargs):
self._task = None
self.parent = parent
super(TaskDashboardWidget, self).__init__(parent=parent)
self.vertical_layout = None
self.widget_label = None
self.task_thumbnail_widget = None
self.schedule_info_form_layout = None
self.task_detail_widget = None
self.task_timing_widget = None
self.description_label = None
self.description_field = None
self.description_field_is_updating = False
self.responsible_info_widget = None
self.resource_info_widget = None
self.task_versions_usage_info_widget = None
self.watch_task_button = None
self.fix_task_status_button = None
self.task_status_label = None
self.task_progress = None
self.task_notes_widget = None
self._setup_ui()
self.task = task
def _setup_ui(self):
self.vertical_layout = QtWidgets.QVBoxLayout(self)
horizontal_layout3 = QtWidgets.QHBoxLayout()
self.vertical_layout.addLayout(horizontal_layout3)
self.widget_label = QtWidgets.QLabel(self)
self.widget_label.setStyleSheet(
"color: rgb(71, 143, 202);\nfont: 18pt;"
)
horizontal_layout3.addWidget(self.widget_label)
horizontal_layout3.addStretch(1)
self.watch_task_button = QtWidgets.QPushButton(self)
self.watch_task_button.setMaximumWidth(24)
self.watch_task_button.setMaximumHeight(24)
self.watch_task_button.setText("W")
self.watch_task_button.setToolTip("Watch Task")
self.fix_task_status_button = QtWidgets.QPushButton(self)
self.fix_task_status_button.setMaximumWidth(24)
self.fix_task_status_button.setMaximumHeight(24)
self.fix_task_status_button.setText("F")
self.fix_task_status_button.setToolTip("Fix Task Status")
horizontal_layout3.addWidget(self.watch_task_button)
horizontal_layout3.addWidget(self.fix_task_status_button)
QtCore.QObject.connect(
self.fix_task_status_button,
QtCore.SIGNAL("clicked()"),
self.fix_task_status
)
vertical_layout3 = QtWidgets.QVBoxLayout()
from anima.ui.widgets.task_status_label import TaskStatusLabel
self.task_status_label = TaskStatusLabel(task=self.task)
self.task_status_label.setMaximumHeight(12)
vertical_layout3.addWidget(self.task_status_label)
self.task_progress = QtWidgets.QProgressBar(self)
self.task_progress.setMinimum(0)
self.task_progress.setMaximum(100)
self.task_progress.setValue(50)
self.task_progress.setAlignment(QtCore.Qt.AlignCenter)
self.task_progress.setMaximumHeight(12)
self.task_progress.setStyleSheet("""
QProgressBar::chunk {
background-color: #3add36;
width: 1px;
}
""")
vertical_layout3.addWidget(self.task_progress)
vertical_layout3.setSpacing(0)
horizontal_layout3.addLayout(vertical_layout3)
line = QtWidgets.QFrame(self)
line.setFrameShape(QtWidgets.QFrame.HLine)
line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.vertical_layout.addWidget(line)
horizontal_layout1 = QtWidgets.QHBoxLayout()
self.vertical_layout.addLayout(horizontal_layout1)
vertical_layout1 = QtWidgets.QVBoxLayout()
vertical_layout2 = QtWidgets.QVBoxLayout()
horizontal_layout1.addLayout(vertical_layout1)
horizontal_layout1.addLayout(vertical_layout2)
horizontal_layout2 = QtWidgets.QHBoxLayout()
vertical_layout1.addLayout(horizontal_layout2)
from anima.ui.widgets.entity_thumbnail import EntityThumbnailWidget
self.task_thumbnail_widget = EntityThumbnailWidget(task=self.task, parent=self)
horizontal_layout2.addWidget(self.task_thumbnail_widget)
from anima.ui.widgets.task_detail import TaskDetailWidget
self.task_detail_widget = TaskDetailWidget(task=self.task, parent=self)
horizontal_layout2.addWidget(self.task_detail_widget)
from anima.ui.widgets.task_timing import TaskTimingInfoWidget
self.task_timing_widget = TaskTimingInfoWidget(task=self.task, parent=self)
horizontal_layout2.addWidget(self.task_timing_widget)
self.description_label = QtWidgets.QLabel(self)
self.description_label.setStyleSheet("""
background-color: gray;
color: white;
font-weight: bold;
padding: 0.5em;
""")
self.description_label.setText("Description")
self.description_field = QtWidgets.QTextEdit(self)
self.description_field.setAcceptRichText(True)
vertical_layout1.addWidget(self.description_label)
vertical_layout1.addWidget(self.description_field)
vertical_layout1.addStretch(1)
self.description_field.textChanged.connect(self.update_description)
from anima.ui.widgets.responsible_info import ResponsibleInfoWidget
self.responsible_info_widget = ResponsibleInfoWidget(
task=self.task, parent=self
)
vertical_layout2.addWidget(self.responsible_info_widget)
from anima.ui.widgets.resource_info import ResourceInfoWidget
self.resource_info_widget = ResourceInfoWidget(
task=self.task, parent=self
)
vertical_layout2.addWidget(self.resource_info_widget)
from anima.ui.widgets.task_version_usage_info import \
TaskVersionUsageInfoWidget
self.task_versions_usage_info_widget = TaskVersionUsageInfoWidget(
task=self.task, parent=self
)
vertical_layout2.addWidget(self.task_versions_usage_info_widget)
vertical_layout2.addStretch(1)
horizontal_layout1.setStretch(0, 2)
horizontal_layout1.setStretch(1, 1)
from anima.ui.widgets.entity_notes import EntityNotesWidgets
self.task_notes_widget = EntityNotesWidgets(entity=self.task, parent=self)
self.vertical_layout.addWidget(self.task_notes_widget)
@property
def task(self):
    """The ``stalker.Task`` instance this widget displays (may be None)."""
    return self._task
@task.setter
def task(self, task):
    """Set the displayed task and refresh every child widget.

    Anything that is not a ``stalker.Task`` instance (including None)
    resets the widget to its empty/default state.
    """
    from stalker import Task
    if isinstance(task, Task):
        self._task = task
    else:
        self._task = None
    if self._task:
        # The guard flag stops update_description() from writing this
        # programmatic setText() back to the database.
        self.description_field_is_updating = True
        self.description_field.setText(self._task.description)
        self.description_field_is_updating = False
        self.task_progress.setValue(self._task.percent_complete)
    else:
        self.description_field_is_updating = True
        self.description_field.setText('')
        self.description_field_is_updating = False
        self.task_progress.setValue(0)
    self.widget_label.setText(self._task.name if self._task else 'Task Name')
    # Propagate the new task to the sub-widgets so they refresh as well.
    self.task_thumbnail_widget.task = self._task
    self.task_detail_widget.task = self._task
    self.task_timing_widget.task = self._task
    self.task_status_label.task = self._task
    self.task_notes_widget.task = self._task
def fix_task_status(self):
    """Recompute this task's status and computed times, then persist it."""
    from stalker import Task
    assert isinstance(self.task, Task)
    from anima import utils
    utils.fix_task_statuses(self.task)
    utils.fix_task_computed_time(self.task)
    from stalker.db.session import DBSession
    DBSession.add(self.task)
    DBSession.commit()
def update_description(self):
    """Persist the description field's current text to the task.

    Connected to ``description_field.textChanged``.  The
    ``description_field_is_updating`` flag suppresses re-entrant calls
    triggered by programmatic ``setText()`` updates.
    """
    if self.description_field_is_updating:
        return
    self.description_field_is_updating = True
    try:
        self.task.description = self.description_field.toPlainText()
        from stalker.db.session import DBSession
        DBSession.add(self.task)
        DBSession.commit()
    finally:
        # Always clear the guard, even if the commit raises; otherwise
        # every later edit would be silently ignored.
        self.description_field_is_updating = False
| true | true |
f72b08b59e5cb86bba78fc94a90a6d1fa03c18e3 | 6,363 | py | Python | lsdr/envs/analysis.py | melfm/lsdr | 36b0a85e970fdcaae828eeff6c147432aa767c93 | [
"MIT"
] | 3 | 2019-09-20T19:10:50.000Z | 2021-12-30T02:55:21.000Z | lsdr/envs/analysis.py | melfm/lsdr | 36b0a85e970fdcaae828eeff6c147432aa767c93 | [
"MIT"
] | null | null | null | lsdr/envs/analysis.py | melfm/lsdr | 36b0a85e970fdcaae828eeff6c147432aa767c93 | [
"MIT"
] | 1 | 2020-08-01T21:28:12.000Z | 2020-08-01T21:28:12.000Z | import numpy as np
import torch
import matplotlib.pyplot as plt
import os
import math
import scipy.stats as stats
import lsdr.envs.environment_sampler as env_sampler
from enum import IntEnum
############################
# Optimization Loss Opt
############################
class Objectives(IntEnum):
    """Selects which loss optimize_distribution() minimizes."""
    REWARDS = 1      # maximize the expected reward (score-function surrogate)
    KL_OPT = 2       # minimize the sampled KL(p_test || p_train)
    REW_AND_KL = 3   # reward term plus alpha-weighted KL term
def reward_function(x):
    """Gaussian-shaped reward centered at x = 20: exp(-(x - 20)^2)."""
    centered = x - 20
    return np.exp(-centered ** 2)
def reward_function_v2(x):
    """Alternative reward: sin(sqrt(x^2)), i.e. sin(|x|) for real x."""
    magnitude = np.sqrt(np.square(x))
    return np.sin(magnitude)
def calculate_reward(x):
    """Dispatch to the reward function currently used by the analysis."""
    return reward_function(x)
def setup_distributions():
    """Build the train/test Gaussian context distributions for the analysis.

    Both distributions are initialized from the same uniform range
    [-30, 50]: the mean is the range midpoint and the variance is the
    uniform-distribution variance (range^2 / 12).  Returns
    ``(sampler, p_train, p_test)``; only ``p_train`` (owned by the
    sampler) is trainable, ``p_test`` is a fixed target.
    """
    ##############################
    # Initial distribution configs
    ##############################
    test_params = [
        np.array([-30.0, 50.0])
    ]
    # This can be modified for the initial distributions
    # to be different.
    ranges = np.asarray(test_params)
    mean = ranges.mean(-1)
    # Variance of a uniform distribution over [a, b] is (b - a)^2 / 12.
    covar = (((ranges[:, 1] - ranges[:, 0])**2.0) / 12.0) * np.eye(
        ranges.shape[0])
    mu_train, L_train = mean, np.linalg.cholesky(covar)
    dist_params = [mu_train, L_train]
    sampler = env_sampler.init_env_sampler(
        'hopper',
        seed=0,
        experiment_id='test_kl_div_loss_0',
        init_dist_params=dist_params,
        dist_type='gaussian',
        test_dist_params=None)
    ############################
    # Train Distribution
    ############################
    p_train = sampler.train_dist
    ############################
    # Test Distribution
    ############################
    ranges = np.asarray(test_params)
    mean = ranges.mean(-1)
    covar = (((ranges[:, 1] - ranges[:, 0])**2.0) / 12.0) * np.eye(
        ranges.shape[0])
    mu_test, L_test = mean, np.linalg.cholesky(covar)
    mu_test = torch.tensor(mu_test)
    L_test = torch.tensor(L_test)
    # The test distribution is a fixed target, so its parameters must not
    # receive gradients during optimization.
    mu_test = mu_test.float().detach().requires_grad_(False)
    L_test = L_test.float().detach().requires_grad_(False)
    p_test = torch.distributions.MultivariateNormal(mu_test,
                                                    scale_tril=L_test)
    # NOTE(review): _unbroadcasted_scale_tril is the Cholesky factor, so
    # its diagonal is a standard deviation, not a variance.
    train_mean = p_train.mean.detach()
    train_std = (p_train._unbroadcasted_scale_tril).diag().detach()
    test_mean = p_test.mean.detach()
    test_std = (p_test._unbroadcasted_scale_tril).diag().detach()
    print('Initial Distributions')
    print('Train Distribution Mean ', train_mean)
    print('Train Distribution STD ', train_std)
    print('Test Distribution Mean ', test_mean)
    print('Test Distribution STD ', test_std)
    ############################
    # Plot Initial Distribution
    ############################
    plot_distrs(train_mean, train_std,
                test_mean, test_std,
                plot_name='initial_train_distr')
    return sampler, p_train, p_test
def plot_distrs(train_mean, train_var,
                test_mean, test_var,
                plot_name='distributions'):
    """Plot the train/test Gaussian densities and the reward curve to a PNG.

    The figure is written to ``grad_analysis/<plot_name>.png`` (the
    directory is created if needed).

    NOTE(review): the parameters are named ``*_var`` and ``sqrt()`` is
    applied, but the callers in this module pass the diagonal of the
    Cholesky factor (a standard deviation, not a variance) -- confirm
    which is intended before relying on the plotted widths.
    """
    plt.figure()
    mu = train_mean
    variance = train_var
    sigma = math.sqrt(variance)
    # Plot each density over +/- 3 sigma around its mean.
    x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
    plt.plot(x, stats.norm.pdf(x, mu, sigma), color='green',
             label='$p_{\phi}(z)$',
             linestyle='-.')
    mu = test_mean
    variance = test_var
    sigma = math.sqrt(variance)
    x = np.linspace(mu - 3*sigma, mu + 3*sigma, 100)
    plt.plot(x, stats.norm.pdf(x, mu, sigma), color='red', label='$p(z)$')
    rew_func_range = np.arange(-20, 50, 1)
    plt.plot(rew_func_range, calculate_reward(rew_func_range),
             color='orange',
             label='$R(\Theta, z)$')
    plt.legend(loc='upper left')
    res_dir = 'grad_analysis'
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)
    plotname = res_dir + '/' + plot_name + '.png'
    plt.savefig(plotname)
def optimize_distribution(sampler, p_train, p_test, objective_opt):
    """Optimize the trainable distribution's parameters with Adam.

    Depending on ``objective_opt`` the loss is the expected reward
    (REINFORCE-style score-function surrogate), a Monte-Carlo estimate of
    KL(p_test || p_train), or a weighted combination of both.  Prints the
    final distribution parameters and saves a comparison plot.
    """
    epochs, n_samples = 10000, 1000
    alpha = 1e-5  # weight of the KL term in the combined objective
    opt = torch.optim.Adam(sampler.params, 1e-2)
    # NOTE(review): these hook functions are defined but never registered
    # on any tensor, so mu_grads/var_grads always stay empty.
    mu_grads = []
    var_grads = []

    def store_mu_grad_rew(grad):
        mu_grads.append(np.copy(grad))

    def store_tril_grad_rew(grad):
        var_grads.append(np.copy(grad))

    for _ in range(epochs):
        opt.zero_grad()
        ####################
        # Sample from p_test
        ####################
        z = p_test.sample(torch.Size([n_samples]))
        contexts = p_train.sample(torch.Size([n_samples]))
        ################
        # Eval Log probs
        ################
        log_p_train = p_train.log_prob(z)
        log_p_test = p_test.log_prob(z)
        ################
        # Calculate KL
        ################
        # Monte-Carlo estimate: KL(p_test||p_train) = E_{z~p_test}[log p_test - log p_train].
        kl_samples = log_p_test - log_p_train
        kl_loss = kl_samples.mean(0)
        #######################
        # Calculate Reward term
        #######################
        # Score-function (REINFORCE) surrogate for E_{z~p_train}[R(z)].
        log_probs_context = p_train.log_prob(contexts)
        reward_loss = (calculate_reward(contexts) * log_probs_context).mean(0)
        if objective_opt == Objectives.REWARDS:
            # For this to converge to the reward function,
            # need to change `z` sampling to be from train
            # distribution.
            total_loss = - reward_loss
        elif objective_opt == Objectives.KL_OPT:
            total_loss = kl_loss
        elif objective_opt == Objectives.REW_AND_KL:
            total_loss = (-(reward_loss) + (alpha*kl_loss))
        else:
            raise ValueError('Invalid op')
        total_loss.mean().backward()
        opt.step()
    train_mean = p_train.mean.detach()
    train_std = (p_train._unbroadcasted_scale_tril).diag().detach()
    test_mean = p_test.mean.detach()
    test_std = (p_test._unbroadcasted_scale_tril).diag().detach()
    print('Updated Distributions')
    print('######################')
    print('Train Distribution Mean ', train_mean)
    print('Train Distribution STD ', train_std)
    print('Test Distribution Mean ', test_mean)
    print('Test Distribution STD ', test_std)
    plot_distrs(train_mean, train_std,
                test_mean, test_std,
                plot_name='final_distributions')
if __name__ == '__main__':
    sampler, p_train, p_test = setup_distributions()
    # Pick the objective to analyze (see the Objectives enum above).
    # objective_opt = Objectives.REWARDS
    # objective_opt = Objectives.KL_OPT
    objective_opt = Objectives.REW_AND_KL
    optimize_distribution(sampler,
                          p_train,
                          p_test,
                          objective_opt)
| 28.28 | 78 | 0.573157 | import numpy as np
import torch
import matplotlib.pyplot as plt
import os
import math
import scipy.stats as stats
import lsdr.envs.environment_sampler as env_sampler
from enum import IntEnum
objective_opt)
| true | true |
f72b09030b2c9ba7bc22260ba632e1a45e870da9 | 1,020 | py | Python | examples/pitz_daily/pitz_daily_runner.py | ImperialCollegeLondon/al_cfd_benchmark | 03b51d7e7d4def804e2ac18084deee8401636851 | [
"MIT"
] | 6 | 2020-09-27T00:14:48.000Z | 2021-11-23T03:35:09.000Z | examples/pitz_daily/pitz_daily_runner.py | ImperialCollegeLondon/al_cfd_benchmark | 03b51d7e7d4def804e2ac18084deee8401636851 | [
"MIT"
] | null | null | null | examples/pitz_daily/pitz_daily_runner.py | ImperialCollegeLondon/al_cfd_benchmark | 03b51d7e7d4def804e2ac18084deee8401636851 | [
"MIT"
] | 2 | 2020-09-27T17:40:33.000Z | 2021-12-13T02:31:49.000Z | # -*- coding: utf-8 -*-
"""Pitz Daily
This case uses the pitzDaily example from the OpenFOAM tutorials
and varies two parameters: Reynolds number and height of the inlet.
It returns the pressure difference between inlet and outlet.
"""
import numpy as np
from active_learning_cfd.cfd_case import CFDCase
import os
class PitzDaily(CFDCase):
    """OpenFOAM pitzDaily tutorial case with two free parameters.

    Varies the Reynolds number (supplied as log10) and the inlet entry
    height, and reports the pressure difference between inlet and outlet
    parsed from the solver output.
    """

    mesher = "blockMesh"
    solver = "simpleFoam"
    template = "pitzDaily"
    parameter_names = ("reynolds", "entryHeight")
    # Raw string: the pattern contains regex escapes such as \( which are
    # invalid escape sequences inside a plain string literal.
    output_list = (("deltaP", r"subtract\(p\) = (.+)"),)

    def __call__(self, parameters):
        """Run the case for ``parameters`` and return the pressure drop.

        :param parameters: sequence ``(log10(reynolds), entryHeight)``
            matching ``parameter_names`` in order.
        :return: the ``deltaP`` value extracted from the solver output.
        """
        assert len(parameters) == len(self.parameter_names)
        parameter_dict = dict(zip(self.parameter_names, parameters))
        # The Reynolds number is passed in log10; convert to linear scale.
        parameter_dict["reynolds"] = np.power(10, parameter_dict["reynolds"])
        self.solve(parameter_dict)
        return self.results["deltaP"]
if __name__ == "__main__":
    # Smoke-run the case with the reference values from the OpenFOAM
    # pitzDaily tutorial (Re = 50800, entry height 25.4 mm).
    case = PitzDaily()
    reynolds = 50800.0
    entryHeight = 25.4
    print("deltaP = {}".format(case([np.log10(reynolds), entryHeight])))
| 28.333333 | 77 | 0.683333 |
import numpy as np
from active_learning_cfd.cfd_case import CFDCase
import os
class PitzDaily(CFDCase):
mesher = "blockMesh"
solver = "simpleFoam"
template = "pitzDaily"
parameter_names = ("reynolds", "entryHeight")
output_list = (("deltaP", "subtract\(p\) = (.+)"),)
def __call__(self, parameters):
assert len(parameters) == len(self.parameter_names)
parameter_dict = dict(zip(self.parameter_names, parameters))
parameter_dict["reynolds"] = np.power(10, parameter_dict["reynolds"])
self.solve(parameter_dict)
return self.results["deltaP"]
if __name__ == "__main__":
case = PitzDaily()
reynolds = 50800.0
entryHeight = 25.4
print("deltaP = {}".format(case([np.log10(reynolds), entryHeight])))
| true | true |
f72b091c4068f3540061214d903965fad918e1a4 | 5,557 | py | Python | cogdl/oag/dual_position_bert_model.py | li-ziang/cogdl | 60022d3334e3abae2d2a505e6e049a26acf10f39 | [
"MIT"
] | 6 | 2020-07-09T02:48:41.000Z | 2021-06-16T09:04:14.000Z | cogdl/oag/dual_position_bert_model.py | li-ziang/cogdl | 60022d3334e3abae2d2a505e6e049a26acf10f39 | [
"MIT"
] | null | null | null | cogdl/oag/dual_position_bert_model.py | li-ziang/cogdl | 60022d3334e3abae2d2a505e6e049a26acf10f39 | [
"MIT"
] | 1 | 2020-05-19T11:45:45.000Z | 2020-05-19T11:45:45.000Z | import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import logging
from .bert_model import BertPreTrainedModel, BertPreTrainingHeads, BertModel, BertEncoder, BertPooler, BertLayerNorm
logger = logging.getLogger(__name__)
class DualPositionBertEmbeddings(nn.Module):
    """Construct the embeddings from word, two position and token_type embeddings."""

    def __init__(self, config):
        super(DualPositionBertEmbeddings, self).__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.position_embeddings_second = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids, position_ids, position_ids_second):
        """Sum word, both position and token-type embeddings, then LayerNorm + dropout."""
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        # BUG FIX: the original looked up position_ids_second in the *first*
        # position table, leaving position_embeddings_second unused.  If the
        # original weight-sharing was intentional, revert this line.
        position_embeddings_second = self.position_embeddings_second(position_ids_second)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = words_embeddings + position_embeddings + position_embeddings_second + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class DualPositionBertModel(BertModel):
    """BERT model whose embedding layer consumes two position-id sequences."""

    def __init__(self, config):
        super(DualPositionBertModel, self).__init__(config)
        self.embeddings = DualPositionBertEmbeddings(config)
        self.encoder = BertEncoder(config)
        self.pooler = BertPooler(config)
        self.apply(self.init_bert_weights)
        logger.info("Init BERT pretrain model")

    def forward(
        self,
        input_ids,
        token_type_ids=None,
        attention_mask=None,
        output_all_encoded_layers=True,
        checkpoint_activations=False,
        position_ids=None,
        position_ids_second=None,
    ):
        """Run the encoder stack; returns ``(encoded_layers, pooled_output)``.

        ``encoded_layers`` is a list of per-layer hidden states when
        ``output_all_encoded_layers`` is True, otherwise the last layer only.
        """
        if attention_mask is None:
            attention_mask = torch.ones_like(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        # Broadcast the mask so it can be added to the attention scores:
        # 2-D [batch, seq] -> [batch, 1, 1, seq]; 3-D -> [batch, 1, seq, seq].
        if len(attention_mask.shape) == 2:
            extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        elif len(attention_mask.shape) == 3:
            extended_attention_mask = attention_mask.unsqueeze(1)
        else:
            raise Exception("invalid attention mask shape! shape: %s" % (attention_mask.shape))
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        # Masked positions become a large negative bias before the softmax.
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        embedding_output = self.embeddings(input_ids, token_type_ids, position_ids, position_ids_second)
        encoded_layers = self.encoder(
            embedding_output,
            extended_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers,
            checkpoint_activations=checkpoint_activations,
        )
        sequence_output = encoded_layers[-1]
        pooled_output = self.pooler(sequence_output)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers, pooled_output
class DualPositionBertForPreTrainingPreLN(BertPreTrainedModel):
    """BERT model with pre-training heads and dual position
    Params:
        config: a BertConfig class instance with the configuration to build a new model.
    """

    def __init__(self, config):
        super(DualPositionBertForPreTrainingPreLN, self).__init__(config)
        self.bert = DualPositionBertModel(config)
        # Tie the MLM decoder weights to the input word-embedding matrix.
        self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
        self.apply(self.init_bert_weights)

    def forward(
        self,
        input_ids,
        token_type_ids=None,
        attention_mask=None,
        masked_lm_labels=None,
        position_ids=None,
        position_ids_second=None,
        log=True,
    ):
        """Return the masked-LM loss when labels are given, else prediction scores.

        Positions labelled -1 in ``masked_lm_labels`` are treated as unmasked
        and excluded from the loss.
        """
        sequence_output, pooled_output = self.bert(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            attention_mask=attention_mask,
            output_all_encoded_layers=False,
            checkpoint_activations=False,
            position_ids=position_ids,
            position_ids_second=position_ids_second,
        )
        if masked_lm_labels is not None:
            # filter out all masked labels.
            # Unmasked positions carry label -1; the +1 shift maps them to 0
            # so nonzero() keeps only the indexes of masked tokens.
            masked_token_indexes = torch.nonzero((masked_lm_labels + 1).view(-1)).view(-1)
            prediction_scores, _ = self.cls(sequence_output, pooled_output, masked_token_indexes)
            target = torch.index_select(masked_lm_labels.view(-1), 0, masked_token_indexes)
            loss_fct = CrossEntropyLoss(ignore_index=-1)
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), target)
            return masked_lm_loss
        else:
            prediction_scores, _ = self.cls(sequence_output, pooled_output)
            return prediction_scores
| 41.781955 | 119 | 0.703617 | import torch
from torch import nn
from torch.nn import CrossEntropyLoss
import logging
from .bert_model import BertPreTrainedModel, BertPreTrainingHeads, BertModel, BertEncoder, BertPooler, BertLayerNorm
logger = logging.getLogger(__name__)
class DualPositionBertEmbeddings(nn.Module):
def __init__(self, config):
super(DualPositionBertEmbeddings, self).__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.position_embeddings_second = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, input_ids, token_type_ids, position_ids, position_ids_second):
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
words_embeddings = self.word_embeddings(input_ids)
position_embeddings = self.position_embeddings(position_ids)
position_embeddings_second = self.position_embeddings(position_ids_second)
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = words_embeddings + position_embeddings + position_embeddings_second + token_type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class DualPositionBertModel(BertModel):
def __init__(self, config):
super(DualPositionBertModel, self).__init__(config)
self.embeddings = DualPositionBertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_bert_weights)
logger.info("Init BERT pretrain model")
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
output_all_encoded_layers=True,
checkpoint_activations=False,
position_ids=None,
position_ids_second=None,
):
if attention_mask is None:
attention_mask = torch.ones_like(input_ids)
if token_type_ids is None:
token_type_ids = torch.zeros_like(input_ids)
if len(attention_mask.shape) == 2:
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
elif len(attention_mask.shape) == 3:
extended_attention_mask = attention_mask.unsqueeze(1)
else:
raise Exception("invalid attention mask shape! shape: %s" % (attention_mask.shape))
extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
embedding_output = self.embeddings(input_ids, token_type_ids, position_ids, position_ids_second)
encoded_layers = self.encoder(
embedding_output,
extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers,
checkpoint_activations=checkpoint_activations,
)
sequence_output = encoded_layers[-1]
pooled_output = self.pooler(sequence_output)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers, pooled_output
class DualPositionBertForPreTrainingPreLN(BertPreTrainedModel):
def __init__(self, config):
super(DualPositionBertForPreTrainingPreLN, self).__init__(config)
self.bert = DualPositionBertModel(config)
self.cls = BertPreTrainingHeads(config, self.bert.embeddings.word_embeddings.weight)
self.apply(self.init_bert_weights)
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
masked_lm_labels=None,
position_ids=None,
position_ids_second=None,
log=True,
):
sequence_output, pooled_output = self.bert(
input_ids=input_ids,
token_type_ids=token_type_ids,
attention_mask=attention_mask,
output_all_encoded_layers=False,
checkpoint_activations=False,
position_ids=position_ids,
position_ids_second=position_ids_second,
)
if masked_lm_labels is not None:
masked_token_indexes = torch.nonzero((masked_lm_labels + 1).view(-1)).view(-1)
prediction_scores, _ = self.cls(sequence_output, pooled_output, masked_token_indexes)
target = torch.index_select(masked_lm_labels.view(-1), 0, masked_token_indexes)
loss_fct = CrossEntropyLoss(ignore_index=-1)
masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), target)
return masked_lm_loss
else:
prediction_scores, _ = self.cls(sequence_output, pooled_output)
return prediction_scores
| true | true |
f72b094590d5184ffbaf3cd4a122b4c8a53db388 | 7,097 | py | Python | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/_container_registry_management_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/_container_registry_management_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2020_11_01_preview/_container_registry_management_client.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import Any, Optional, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ConnectedRegistriesOperations, ExportPipelinesOperations, ImportPipelinesOperations, Operations, PipelineRunsOperations, PrivateEndpointConnectionsOperations, RegistriesOperations, ReplicationsOperations, ScopeMapsOperations, TokensOperations, WebhooksOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class ContainerRegistryManagementClient:
    """ContainerRegistryManagementClient.

    :ivar connected_registries: ConnectedRegistriesOperations operations
    :vartype connected_registries:
     azure.mgmt.containerregistry.v2020_11_01_preview.operations.ConnectedRegistriesOperations
    :ivar export_pipelines: ExportPipelinesOperations operations
    :vartype export_pipelines:
     azure.mgmt.containerregistry.v2020_11_01_preview.operations.ExportPipelinesOperations
    :ivar registries: RegistriesOperations operations
    :vartype registries:
     azure.mgmt.containerregistry.v2020_11_01_preview.operations.RegistriesOperations
    :ivar import_pipelines: ImportPipelinesOperations operations
    :vartype import_pipelines:
     azure.mgmt.containerregistry.v2020_11_01_preview.operations.ImportPipelinesOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.containerregistry.v2020_11_01_preview.operations.Operations
    :ivar pipeline_runs: PipelineRunsOperations operations
    :vartype pipeline_runs:
     azure.mgmt.containerregistry.v2020_11_01_preview.operations.PipelineRunsOperations
    :ivar private_endpoint_connections: PrivateEndpointConnectionsOperations operations
    :vartype private_endpoint_connections:
     azure.mgmt.containerregistry.v2020_11_01_preview.operations.PrivateEndpointConnectionsOperations
    :ivar replications: ReplicationsOperations operations
    :vartype replications:
     azure.mgmt.containerregistry.v2020_11_01_preview.operations.ReplicationsOperations
    :ivar scope_maps: ScopeMapsOperations operations
    :vartype scope_maps:
     azure.mgmt.containerregistry.v2020_11_01_preview.operations.ScopeMapsOperations
    :ivar tokens: TokensOperations operations
    :vartype tokens: azure.mgmt.containerregistry.v2020_11_01_preview.operations.TokensOperations
    :ivar webhooks: WebhooksOperations operations
    :vartype webhooks:
     azure.mgmt.containerregistry.v2020_11_01_preview.operations.WebhooksOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials.TokenCredential
    :param subscription_id: The Microsoft Azure subscription ID.
    :type subscription_id: str
    :param base_url: Service URL. Default value is 'https://management.azure.com'.
    :type base_url: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "TokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = ContainerRegistryManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build (de)serializers over every model class exported for this API version.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        self._serialize.client_side_validation = False
        # One operations group per service area, all sharing the same pipeline.
        self.connected_registries = ConnectedRegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.export_pipelines = ExportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.import_pipelines = ImportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.pipeline_runs = PipelineRunsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.private_endpoint_connections = PrivateEndpointConnectionsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.replications = ReplicationsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.scope_maps = ScopeMapsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.tokens = TokensOperations(self._client, self._config, self._serialize, self._deserialize)
        self.webhooks = WebhooksOperations(self._client, self._config, self._serialize, self._deserialize)

    def _send_request(
        self,
        request,  # type: HttpRequest
        **kwargs: Any
    ) -> HttpResponse:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = client._send_request(request)
        <HttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.HttpResponse
        """
        # Copy so the caller's request object is never mutated by URL formatting.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    def close(self):
        # type: () -> None
        self._client.close()

    def __enter__(self):
        # type: () -> ContainerRegistryManagementClient
        self._client.__enter__()
        return self

    def __exit__(self, *exc_details):
        # type: (Any) -> None
        self._client.__exit__(*exc_details)
| 53.360902 | 286 | 0.748908 |
from copy import deepcopy
from typing import Any, Optional, TYPE_CHECKING
from azure.core.rest import HttpRequest, HttpResponse
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
from . import models
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import ConnectedRegistriesOperations, ExportPipelinesOperations, ImportPipelinesOperations, Operations, PipelineRunsOperations, PrivateEndpointConnectionsOperations, RegistriesOperations, ReplicationsOperations, ScopeMapsOperations, TokensOperations, WebhooksOperations
if TYPE_CHECKING:
from azure.core.credentials import TokenCredential
class ContainerRegistryManagementClient:
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
base_url: str = "https://management.azure.com",
**kwargs: Any
) -> None:
self._config = ContainerRegistryManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.connected_registries = ConnectedRegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.export_pipelines = ExportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
self.import_pipelines = ImportPipelinesOperations(self._client, self._config, self._serialize, self._deserialize)
self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
self.pipeline_runs = PipelineRunsOperations(self._client, self._config, self._serialize, self._deserialize)
self.private_endpoint_connections = PrivateEndpointConnectionsOperations(self._client, self._config, self._serialize, self._deserialize)
self.replications = ReplicationsOperations(self._client, self._config, self._serialize, self._deserialize)
self.scope_maps = ScopeMapsOperations(self._client, self._config, self._serialize, self._deserialize)
self.tokens = TokensOperations(self._client, self._config, self._serialize, self._deserialize)
self.webhooks = WebhooksOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request,
**kwargs: Any
) -> HttpResponse:
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
self._client.close()
def __enter__(self):
self._client.__enter__()
return self
def __exit__(self, *exc_details):
self._client.__exit__(*exc_details)
| true | true |
f72b097de1b2982d94f31803515377aa94536b9a | 1,869 | py | Python | authentik/stages/deny/tests.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 15 | 2020-01-05T09:09:57.000Z | 2020-11-28T05:27:39.000Z | authentik/stages/deny/tests.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 302 | 2020-01-21T08:03:59.000Z | 2020-12-04T05:04:57.000Z | authentik/stages/deny/tests.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 3 | 2020-03-04T08:21:59.000Z | 2020-08-01T20:37:18.000Z | """deny tests"""
from django.urls import reverse
from authentik.core.tests.utils import create_test_admin_user, create_test_flow
from authentik.flows.markers import StageMarker
from authentik.flows.models import FlowDesignation, FlowStageBinding
from authentik.flows.planner import FlowPlan
from authentik.flows.tests import FlowTestCase
from authentik.flows.views.executor import SESSION_KEY_PLAN
from authentik.stages.deny.models import DenyStage
class TestUserDenyStage(FlowTestCase):
    """Deny-stage tests: a flow that reaches a DenyStage must render access denied."""

    def setUp(self):
        super().setUp()
        self.user = create_test_admin_user()
        self.flow = create_test_flow(FlowDesignation.AUTHENTICATION)
        self.stage = DenyStage.objects.create(name="logout")
        self.binding = FlowStageBinding.objects.create(target=self.flow, stage=self.stage, order=2)

    def _request_executor(self, method):
        """Plan the flow into the session and hit the executor with ``method``.

        ``method`` is a bound test-client verb such as ``self.client.get``.
        """
        plan = FlowPlan(flow_pk=self.flow.pk.hex, bindings=[self.binding], markers=[StageMarker()])
        session = self.client.session
        session[SESSION_KEY_PLAN] = plan
        session.save()
        return method(
            reverse("authentik_api:flow-executor", kwargs={"flow_slug": self.flow.slug})
        )

    def test_valid_get(self):
        """GET on the executor must return the access-denied stage."""
        response = self._request_executor(self.client.get)
        self.assertStageResponse(response, self.flow, component="ak-stage-access-denied")

    def test_valid_post(self):
        """POST on the executor must return the access-denied stage."""
        response = self._request_executor(self.client.post)
        self.assertStageResponse(response, self.flow, component="ak-stage-access-denied")
| 38.9375 | 99 | 0.70626 | from django.urls import reverse
from authentik.core.tests.utils import create_test_admin_user, create_test_flow
from authentik.flows.markers import StageMarker
from authentik.flows.models import FlowDesignation, FlowStageBinding
from authentik.flows.planner import FlowPlan
from authentik.flows.tests import FlowTestCase
from authentik.flows.views.executor import SESSION_KEY_PLAN
from authentik.stages.deny.models import DenyStage
class TestUserDenyStage(FlowTestCase):
def setUp(self):
super().setUp()
self.user = create_test_admin_user()
self.flow = create_test_flow(FlowDesignation.AUTHENTICATION)
self.stage = DenyStage.objects.create(name="logout")
self.binding = FlowStageBinding.objects.create(target=self.flow, stage=self.stage, order=2)
def test_valid_get(self):
plan = FlowPlan(flow_pk=self.flow.pk.hex, bindings=[self.binding], markers=[StageMarker()])
session = self.client.session
session[SESSION_KEY_PLAN] = plan
session.save()
response = self.client.get(
reverse("authentik_api:flow-executor", kwargs={"flow_slug": self.flow.slug})
)
self.assertStageResponse(response, self.flow, component="ak-stage-access-denied")
def test_valid_post(self):
plan = FlowPlan(flow_pk=self.flow.pk.hex, bindings=[self.binding], markers=[StageMarker()])
session = self.client.session
session[SESSION_KEY_PLAN] = plan
session.save()
response = self.client.post(
reverse("authentik_api:flow-executor", kwargs={"flow_slug": self.flow.slug})
)
self.assertStageResponse(response, self.flow, component="ak-stage-access-denied")
| true | true |
f72b09d34e7b78c00c0b504b76cded6aa3b45a39 | 1,425 | py | Python | models/vasilyev2020/src/score.py | leoribeiro/repro | 7dc2ad611925542b4deb62fd1e30761ba56a7f60 | [
"Apache-2.0"
] | 15 | 2021-07-28T19:52:03.000Z | 2022-03-28T15:55:17.000Z | models/vasilyev2020/src/score.py | leoribeiro/repro | 7dc2ad611925542b4deb62fd1e30761ba56a7f60 | [
"Apache-2.0"
] | 3 | 2021-11-19T17:09:34.000Z | 2022-02-14T19:40:48.000Z | models/vasilyev2020/src/score.py | leoribeiro/repro | 7dc2ad611925542b4deb62fd1e30761ba56a7f60 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import os
from blanc import BlancHelp, BlancTune
def main(args):
    """Score each document's summaries with BLANC and write the scores as JSON.

    Reads a JSONL input file where every line has ``document`` and
    ``summaries`` keys, evaluates with BlancTune or BlancHelp depending on
    ``args.type``, and dumps the resulting score lists to ``args.output_file``.
    """
    extra_kwargs = json.loads(args.kwargs)
    # Convention: device == -1 means CPU, anything else means CUDA.
    device = "cuda" if args.device != -1 else "cpu"

    if args.type == "tune":
        scorer = BlancTune(device=device, random_seed=args.random_seed, **extra_kwargs)
    elif args.type == "help":
        scorer = BlancHelp(device=device, **extra_kwargs)
    else:
        raise Exception(f"Unknown BLANC type: {args.type}")

    documents, summaries_list = [], []
    with open(args.input_file, "r") as f:
        for line in f:
            record = json.loads(line)
            documents.append(record["document"])
            summaries_list.append(record["summaries"])

    scores_list = scorer.eval_summaries_for_docs(documents, summaries_list)

    out_dir = os.path.dirname(args.output_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    with open(args.output_file, "w") as out:
        out.write(json.dumps(scores_list))
if __name__ == "__main__":
    # CLI entry point: read JSONL of {document, summaries}, score with BLANC,
    # write a JSON list of score lists.
    argp = argparse.ArgumentParser()
    argp.add_argument("--input-file", required=True)
    argp.add_argument("--type", required=True, choices=["help", "tune"])
    # -1 selects CPU; any other value selects CUDA (see main()).
    argp.add_argument("--device", required=True, type=int)
    argp.add_argument("--random-seed", required=True, type=int)
    # JSON-encoded dict of extra keyword args forwarded to the BLANC scorer.
    argp.add_argument("--kwargs", required=True)
    argp.add_argument("--output-file", required=True)
    args = argp.parse_args()
    main(args)
| 31.666667 | 80 | 0.655439 | import argparse
import json
import os
from blanc import BlancHelp, BlancTune
def main(args):
kwargs = json.loads(args.kwargs)
device = "cpu" if args.device == -1 else "cuda"
if args.type == "tune":
blanc = BlancTune(device=device, random_seed=args.random_seed, **kwargs)
elif args.type == "help":
blanc = BlancHelp(device=device, **kwargs)
else:
raise Exception(f"Unknown BLANC type: {args.type}")
documents = []
summaries_list = []
with open(args.input_file, "r") as f:
for line in f:
data = json.loads(line)
documents.append(data["document"])
summaries_list.append(data["summaries"])
scores_list = blanc.eval_summaries_for_docs(documents, summaries_list)
dirname = os.path.dirname(args.output_file)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(args.output_file, "w") as out:
out.write(json.dumps(scores_list))
if __name__ == "__main__":
argp = argparse.ArgumentParser()
argp.add_argument("--input-file", required=True)
argp.add_argument("--type", required=True, choices=["help", "tune"])
argp.add_argument("--device", required=True, type=int)
argp.add_argument("--random-seed", required=True, type=int)
argp.add_argument("--kwargs", required=True)
argp.add_argument("--output-file", required=True)
args = argp.parse_args()
main(args)
| true | true |
f72b0a2e2db8a201933a779f2d9eaf3fc70eda33 | 9,937 | py | Python | python/tvm/tensor_graph/testing/relay_examples/lenet.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | 22 | 2022-03-18T07:29:31.000Z | 2022-03-23T14:54:32.000Z | python/tvm/tensor_graph/testing/relay_examples/lenet.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | null | null | null | python/tvm/tensor_graph/testing/relay_examples/lenet.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | 2 | 2022-03-18T08:26:34.000Z | 2022-03-20T06:02:48.000Z | import tvm
import numpy as np
from tvm import relay
from tvm.relay.testing import run_infer_type, gradient
def get_lenet(batch_size,
              num_classes=10,
              image_shape=(1, 28, 28),
              dtype="float32"):
    """Build a LeNet-style classifier as a Relay function.

    Parameters
    ----------
    batch_size : int
        The batch size used in the model.
    num_classes : int, optional
        Number of classes.
    image_shape : tuple, optional
        The input image shape (channels, height, width).
    dtype : str, optional
        The data type.

    Returns
    -------
    (net, argu_list) : (relay.Function, list of relay.Var)
        The dataflow function and the list of trainable parameter vars.
    """
    data_shape = (batch_size,) + image_shape
    data = relay.var("data", relay.TensorType(data_shape, dtype=dtype))

    # conv1: 5x5 convolution; padding 2 keeps the spatial size.
    conv_w1 = relay.var('c1.weight', dtype=dtype)  # fix: dtype was missing here
    c1 = relay.nn.conv2d(data=data, weight=conv_w1, channels=6, kernel_size=(5, 5),
                         strides=(1, 1), padding=(2, 2))
    conv_b1 = relay.var('c1.bias', dtype=dtype)
    c1 = relay.nn.bias_add(c1, conv_b1, axis=-1)
    act_c1 = relay.nn.relu(data=c1)

    # "Pooling" stage implemented as a strided 2x2 convolution (a learned
    # downsampling rather than true max-pooling).
    conv_w2 = relay.var('c2.weight', dtype=dtype)
    conv_b2 = relay.var('c2.bias', dtype=dtype)
    p1 = relay.nn.conv2d(data=act_c1, weight=conv_w2, channels=6, kernel_size=(2, 2),
                         strides=(2, 2), padding=(0, 0))
    p1 = relay.nn.bias_add(p1, conv_b2, axis=-1)

    # conv2: 5x5 convolution, no padding.
    conv_w3 = relay.var('c3.weight', dtype=dtype)
    conv_b3 = relay.var('c3.bias', dtype=dtype)
    c2 = relay.nn.conv2d(data=p1, weight=conv_w3, channels=6, kernel_size=(5, 5),
                         strides=(1, 1), padding=(0, 0))
    c2 = relay.nn.bias_add(c2, conv_b3, axis=-1)
    act_c2 = relay.nn.relu(data=c2)

    # Second learned-downsampling stage.
    conv_w4 = relay.var('c4.weight', dtype=dtype)
    conv_b4 = relay.var('c4.bias', dtype=dtype)
    p2 = relay.nn.conv2d(data=act_c2, weight=conv_w4, channels=6, kernel_size=(2, 2),
                         strides=(2, 2), padding=(0, 0))
    p2 = relay.nn.bias_add(p2, conv_b4, axis=-1)

    # Flatten, then three dense layers: 128 -> 64 -> num_classes.
    r1 = relay.nn.batch_flatten(data=p2)
    w1 = relay.var('fc1.weight', dtype=dtype)
    b1 = relay.var('fc1.bias', dtype=dtype)
    fc1 = relay.nn.dense(data=r1, weight=w1, units=128)
    fc1 = relay.nn.bias_add(fc1, b1, axis=-1)
    act1 = relay.nn.relu(data=fc1)
    w2 = relay.var('fc2.weight', dtype=dtype)
    b2 = relay.var('fc2.bias', dtype=dtype)
    fc2 = relay.nn.dense(data=act1, weight=w2, units=64)
    fc2 = relay.nn.bias_add(fc2, b2, axis=-1)
    act2 = relay.nn.relu(data=fc2)
    w3 = relay.var('fc3.weight', dtype=dtype)
    b3 = relay.var('fc3.bias', dtype=dtype)
    fc3 = relay.nn.dense(data=act2, weight=w3, units=num_classes)
    fc3 = relay.nn.bias_add(fc3, b3, axis=-1)
    lenet = relay.nn.softmax(data=fc3)

    # fix: conv_w3/conv_b3/conv_w4/conv_b4 were missing from the parameter
    # list, so the optimizer builders (make_*_update_net) would never update
    # the c3/c4 layers.
    argu_list = [conv_w1, conv_b1, conv_w2, conv_b2, conv_w3, conv_b3,
                 conv_w4, conv_b4, w1, b1, w2, b2, w3, b3]
    # NOTE(review): bias_add with axis=-1 on an NCHW conv output adds the bias
    # along the width axis; axis=1 (channel) is the conventional choice —
    # confirm which is intended before relying on trained results.
    return relay.Function(relay.analysis.free_vars(lenet), lenet), argu_list
def make_sgd_update_net(loss_function, var, lr=0.002, scale=1.0, wd=0.0, clip=None):
    """Build a Relay function that computes one SGD step for ``var``.

    Parameters
    ----------
    loss_function : relay.Function
        The scalar loss whose gradients drive the update.
    var : [relay.var]
        Vars (matched by name against the loss function's params) to update.
    lr, scale, wd : float
        Learning rate, gradient scale, and weight decay.
    clip : float, optional
        If given, clip each scaled gradient to [-clip, clip].

    Returns
    -------
    relay.Function
        A function returning a tuple of the updated parameter values.
    """
    type_loss_function = run_infer_type(loss_function)
    grad_func = run_infer_type(gradient(type_loss_function))
    grads = relay.TupleWrapper(relay.TupleGetItem(grad_func.body, 1), len(loss_function.params))
    useful_grad = []
    type_var = []
    # Pick out, by name, the gradients for exactly the requested vars.
    for var_item in var:
        for index, value_item in enumerate(type_loss_function.params):
            if var_item.name_hint == value_item.name_hint:
                useful_grad.append(grads[index])
                type_var.append(value_item)
                break
        else:
            # fix: the original `raise("<msg>")` raises a TypeError in
            # Python 3 and hides the message; raise a real exception.
            raise RuntimeError("can't get required params from loss function, internal error")
    updates = []
    for i, v in enumerate(type_var):
        g = useful_grad[i]
        g = relay.multiply(g, relay.const(scale, "float32"))
        if clip is not None:
            g = relay.clip(g, a_min=-1 * clip, a_max=clip)
        # v' = v - lr * (g + wd * v)
        g = relay.subtract(v,
                           relay.multiply(relay.const(lr, "float32"),
                                          relay.add(g,
                                                    relay.multiply(relay.const(wd, "float32"),
                                                                   v))))
        updates.append(g)
    sgd_body = relay.Tuple(updates)
    return relay.Function(relay.analysis.free_vars(sgd_body), sgd_body)
def make_adam_update_net(loss_function, var, lr=0.001, beta1=0.9, beta2=0.99, scale=1.0, wd=0.0, clip=None, name="adam", dtype='float32'):
    """Build a Relay function that computes one Adam step for ``var``.

    Parameters
    ----------
    loss_function : relay.Function
        The scalar loss whose gradients drive the update.
    var : [relay.var]
        Vars (matched by name against the loss function's params) to update.
    lr, beta1, beta2 : float
        Learning rate and Adam moment decay rates.
    scale, wd, clip, name : unused
        Accepted for signature compatibility with make_sgd_update_net; the
        original implementation never applied them and this one keeps that.
    dtype : str
        Data type for the optimizer constants.

    Returns
    -------
    relay.Function
        A function returning a tuple of the updated parameter values.
    """
    type_loss_function = run_infer_type(loss_function)
    grad_func = run_infer_type(gradient(type_loss_function))
    grads = relay.TupleWrapper(relay.TupleGetItem(grad_func.body, 1), len(loss_function.params))
    useful_grad = []
    type_var = []
    for var_item in var:
        for index, value_item in enumerate(type_loss_function.params):
            if var_item.name_hint == value_item.name_hint:
                useful_grad.append(grads[index])
                type_var.append(value_item)
                break
        else:
            # fix: `raise("<msg>")` raises TypeError in Python 3.
            raise RuntimeError("can't get required params from loss function, internal error")
    updates = []
    t = relay.zeros(shape=[1], dtype=dtype)
    epsilon = 1e-04
    const_1 = relay.const(1, dtype=dtype)
    const_beta1 = relay.const(beta1, dtype=dtype)
    const_beta2 = relay.const(beta2, dtype=dtype)
    # fix: the original kept a single list `m` and used it for BOTH the first
    # and second moments (and then shadowed it as a loop variable), so the
    # second-moment estimate was wrong.  Keep separate m/v accumulators.
    first_moments = [relay.zeros_like(p) for p in type_var]
    second_moments = [relay.zeros_like(p) for p in type_var]
    update_t = relay.add(t, const_1)
    # Bias-correction factor: sqrt(1 - beta2^t) / (1 - beta1^t).
    rate = relay.divide(relay.sqrt(relay.subtract(const_1, relay.power(const_beta2, update_t))),
                        relay.subtract(const_1, relay.power(const_beta1, update_t)))
    lr_t = relay.multiply(relay.const(lr, dtype=dtype), rate)
    for param, g, m, v in zip(type_var, useful_grad, first_moments, second_moments):
        # m' = beta1*m + (1-beta1)*g
        update_m = relay.add(relay.multiply(const_beta1, m),
                             relay.multiply(relay.subtract(const_1, const_beta1), g))
        # v' = beta2*v + (1-beta2)*g^2
        update_v = relay.add(relay.multiply(const_beta2, v),
                             relay.multiply(relay.subtract(const_1, const_beta2),
                                            relay.multiply(g, g)))
        # param' = param - lr_t * m' / (sqrt(v') + eps)
        update_var = relay.subtract(param,
                                    relay.divide(relay.multiply(lr_t, update_m),
                                                 relay.add(relay.sqrt(update_v),
                                                           relay.const(epsilon, dtype="float32"))))
        updates.append(update_var)
    adam_body = relay.Tuple(updates)
    return relay.Function(relay.analysis.free_vars(adam_body), adam_body)
def mse_loss(lenet_function, target):
    """Sum-of-squared-error loss between the network output and ``target``.

    NOTE(review): a trailing comment in the original claimed division by 2.0,
    but the code never divides — confirm which form is intended.
    """
    diff = relay.subtract(lenet_function.body, target)
    loss_body = relay.sum(relay.multiply(diff, diff))
    return relay.Function(relay.analysis.free_vars(loss_body), loss_body)
def cross_entropy_loss(lenet_function, target):
    """Cross-entropy loss: -sum(target * log(pred + 1e-5)).

    The 1e-5 shift keeps log() finite when a predicted probability is zero.
    """
    shifted = relay.add(lenet_function.body, relay.const(1e-5, dtype="float32"))
    loss_body = relay.negative(relay.sum(relay.multiply(relay.log(shifted), target)))
    return relay.Function(relay.analysis.free_vars(loss_body), loss_body)
def make_loss_net(lenet_function, target, optim="CROSS"):
    """Get the loss function for lenet.

    Parameters
    ----------
    lenet_function : relay.Function
    target : relay.Expr
    optim : str, optional
        Loss strategy, "CROSS" (cross-entropy) or "MSE".

    Returns
    -------
    net : relay.Function
        The dataflow.

    Raises
    ------
    ValueError
        If ``optim`` is neither "CROSS" nor "MSE".
    """
    if optim == "CROSS":
        return cross_entropy_loss(lenet_function, target)
    if optim == "MSE":
        return mse_loss(lenet_function, target)
    # fix: the original `raise("<msg>")` raises a TypeError in Python 3,
    # hiding the message; raise a proper exception instead.
    raise ValueError("unknown optim, use 'CROSS' or 'MSE'.")
def make_grad_net(loss_function):
    """Get the gradient function for lenet.

    Parameters
    ----------
    loss_function : relay.Function

    Returns
    -------
    net : relay.Function
        The type-inferred gradient function of ``loss_function``.
    """
    typed_loss = run_infer_type(loss_function)
    return run_infer_type(gradient(typed_loss))
def make_update_net(loss_function, weights, optim="SGD"):
    """Get the parameter-update function for lenet.

    Parameters
    ----------
    loss_function : relay.Function
    weights : [relay.var]
        Vars to compute gradients for.
    optim : str, optional
        Update strategy, "ADAM" or "SGD".

    Returns
    -------
    net : relay.Function
        The dataflow.

    Raises
    ------
    ValueError
        If ``optim`` is neither "ADAM" nor "SGD".
    """
    if optim == "ADAM":
        return make_adam_update_net(loss_function, weights)
    if optim == "SGD":
        return make_sgd_update_net(loss_function, weights)
    # fix: the original `raise("<msg>")` raises a TypeError in Python 3,
    # hiding the message; raise a proper exception instead.
    raise ValueError("unknown optim, use 'ADAM' or 'SGD'.")
def create_workload(net, initializer=None, seed=0):
    """Helper function to create a benchmark image classification workload.

    Parameters
    ----------
    net : tvm.relay.Function
        The selected function of the network.
    initializer : Initializer
        The initializer used for the parameter values.
    seed : int
        The seed used in initialization.

    Returns
    -------
    mod : tvm.IRModule
        The created relay module.
    params : dict of str to NDArray
        The initialized parameters.
    """
    mod = tvm.IRModule.from_expr(net)
    mod = relay.transform.InferType()(mod)
    # Map each parameter var name to its inferred type (shape + dtype).
    shape_dict = {
        v.name_hint : v.checked_type for v in mod["main"].params}
    np.random.seed(seed)
    # NOTE(review): `Xavier` is never imported in this module, so calling with
    # initializer=None raises NameError — confirm the intended import
    # (presumably tvm.relay.testing.init.Xavier).
    initializer = initializer if initializer else Xavier()
    params = {}
    for k, v in shape_dict.items():
        # modify here, skip "label" as well
        # Inputs ("data") and targets ("label") are not trainable parameters.
        if k == "data" or k == "label":
            continue
        init_value = np.zeros(v.concrete_shape).astype(v.dtype)
        initializer(k, init_value)
        params[k] = tvm.nd.array(init_value, ctx=tvm.cpu(0))
    return mod, params
| 36.399267 | 138 | 0.600986 | import tvm
import numpy as np
from tvm import relay
from tvm.relay.testing import run_infer_type, gradient
def get_lenet(batch_size,
num_classes=10,
image_shape=(1, 28, 28),
dtype="float32"):
data_shape = (batch_size,) + image_shape
data = relay.TensorType(data_shape, dtype=dtype)
data = relay.var("data", data)
conv_w1 = relay.var('c1.weight')
c1 = relay.nn.conv2d(data=data, weight=conv_w1, channels=6, kernel_size=(5, 5),
strides=(1, 1), padding=(2, 2))
conv_b1 = relay.var('c1.bias', dtype=dtype)
c1 = relay.nn.bias_add(c1, conv_b1, axis=-1)
act_c1 = relay.nn.relu(data=c1)
conv_w2 = relay.var('c2.weight', dtype=dtype)
conv_b2 = relay.var('c2.bias', dtype=dtype)
p1 = relay.nn.conv2d(data=act_c1, weight=conv_w2, channels=6, kernel_size=(2, 2),
strides=(2, 2), padding=(0, 0))
p1 = relay.nn.bias_add(p1, conv_b2, axis=-1)
conv_w3 = relay.var('c3.weight', dtype=dtype)
conv_b3 = relay.var('c3.bias', dtype=dtype)
c2 = relay.nn.conv2d(data=p1, weight=conv_w3, channels=6, kernel_size=(5, 5),
strides=(1, 1), padding=(0, 0))
c2 = relay.nn.bias_add(c2, conv_b3, axis=-1)
y.nn.relu(data=c2)
conv_w4 = relay.var('c4.weight', dtype=dtype)
conv_b4 = relay.var('c4.bias', dtype=dtype)
p2 = relay.nn.conv2d(data=act_c2, weight=conv_w4, channels=6, kernel_size=(2, 2),
strides=(2, 2), padding=(0, 0))
p2 = relay.nn.bias_add(p2, conv_b4, axis=-1)
r1 = relay.nn.batch_flatten(data=p2)
w1 = relay.var('fc1.weight', dtype=dtype)
b1 = relay.var('fc1.bias', dtype=dtype)
fc1 = relay.nn.dense(data=r1, weight=w1, units=128)
fc1 = relay.nn.bias_add(fc1, b1, axis=-1)
act1 = relay.nn.relu(data=fc1)
w2 = relay.var('fc2.weight', dtype=dtype)
b2 = relay.var('fc2.bias', dtype=dtype)
fc2 = relay.nn.dense(data=act1, weight=w2, units=64)
fc2 = relay.nn.bias_add(fc2, b2, axis=-1)
act2 = relay.nn.relu(data=fc2)
w3 = relay.var('fc3.weight', dtype=dtype)
b3 = relay.var('fc3.bias', dtype=dtype)
fc3 = relay.nn.dense(data=act2, weight=w3, units=num_classes)
fc3 = relay.nn.bias_add(fc3, b3, axis=-1)
lenet = relay.nn.softmax(data=fc3)
argu_list = [conv_w1, conv_b1, conv_w2, conv_b2, w1, b1, w2, b2, w3, b3]
return relay.Function(relay.analysis.free_vars(lenet), lenet), argu_list
def make_sgd_update_net(loss_function, var, lr=0.002, scale=1.0, wd=0.0, clip=None):
type_loss_function = run_infer_type(loss_function)
grad_func = run_infer_type(gradient(type_loss_function))
grads = relay.TupleWrapper(relay.TupleGetItem(grad_func.body, 1), len(loss_function.params))
useful_grad = []
type_var = []
for var_item in var:
for index, value_item in enumerate(type_loss_function.params):
if var_item.name_hint == value_item.name_hint:
useful_grad.append(grads[index])
type_var.append(value_item)
break
else:
raise("can't get required params from loss function, internal error")
updates = []
for i, v in enumerate(type_var):
g = useful_grad[i]
g = relay.multiply(g, relay.const(scale, "float32"))
if clip is not None:
g = relay.clip(g, a_min=-1 * clip, a_max=clip)
g = relay.subtract(v,
relay.multiply(relay.const(lr, "float32"),
relay.add(g,
relay.multiply(relay.const(wd, "float32"),
v))))
updates.append(g)
sgd_body = relay.Tuple(updates)
return relay.Function(relay.analysis.free_vars(sgd_body), sgd_body)
def make_adam_update_net(loss_function, var, lr=0.001, beta1=0.9, beta2=0.99, scale=1.0, wd=0.0, clip=None, name="adam", dtype='float32'):
type_loss_function = run_infer_type(loss_function)
grad_func = run_infer_type(gradient(type_loss_function))
grads = relay.TupleWrapper(relay.TupleGetItem(grad_func.body, 1), len(loss_function.params))
useful_grad = []
type_var = []
for var_item in var:
for index, value_item in enumerate(type_loss_function.params):
if var_item.name_hint == value_item.name_hint:
useful_grad.append(grads[index])
type_var.append(value_item)
break
else:
raise("can't get required params from loss function, internal error")
print(type_var)
updates = []
m = []
t = relay.zeros(shape=[1], dtype=dtype)
epsilon = 1e-04
const_1 = relay.const(1, dtype=dtype)
const_beta1 = relay.const(beta1, dtype=dtype)
const_beta2 = relay.const(beta2, dtype=dtype)
for i, va in enumerate(type_var):
m.append(relay.zeros_like(va))
update_t = relay.add(t, const_1)
rate = relay.divide(relay.sqrt(relay.subtract(const_1, relay.power(const_beta2, update_t))),
relay.subtract(const_1, relay.power(const_beta1, update_t)))
lr_t = relay.multiply(relay.const(lr, dtype=dtype), rate)
for var, g, m in zip(type_var, useful_grad, m):
update_m = relay.add(relay.multiply(const_beta1, m),
relay.multiply(relay.subtract(const_1, const_beta1), g))
update_v = relay.add(relay.multiply(const_beta2, m),
relay.multiply(relay.subtract(const_1, const_beta2),
relay.multiply(g, g)))
update_var = relay.subtract(var,
relay.divide(relay.multiply(lr_t, update_m),
relay.add(relay.sqrt(update_v),
relay.const(epsilon, dtype="float32"))))
updates.append(update_var)
adam_body = relay.Tuple(updates)
return relay.Function(relay.analysis.free_vars(adam_body), adam_body)
def mse_loss(lenet_function, target):
sub = relay.subtract(lenet_function.body, target)
loss_body = relay.sum(relay.multiply(sub, sub))
return relay.Function(relay.analysis.free_vars(loss_body), loss_body)
def cross_entropy_loss(lenet_function, target):
loss_body = relay.negative(relay.sum(relay.multiply(relay.log(relay.add(lenet_function.body,
relay.const(1e-5, dtype="float32"))),
target)))
return relay.Function(relay.analysis.free_vars(loss_body), loss_body)
def make_loss_net(lenet_function, target, optim="CROSS"):
if optim == "CROSS":
return cross_entropy_loss(lenet_function, target)
if optim == "MSE":
return mse_loss(lenet_function, target)
raise("unknown optim, use 'CROSS' or 'MSE'.")
def make_grad_net(loss_function):
type_loss_function = run_infer_type(loss_function)
grad_func = run_infer_type(gradient(type_loss_function))
return grad_func
def make_update_net(loss_function, weights, optim="SGD"):
if optim == "ADAM":
return make_adam_update_net(loss_function, weights)
if optim == "SGD":
return make_sgd_update_net(loss_function, weights)
raise("unknown optim, use 'ADAM' or 'SGD'.")
def create_workload(net, initializer=None, seed=0):
mod = tvm.IRModule.from_expr(net)
mod = relay.transform.InferType()(mod)
shape_dict = {
v.name_hint : v.checked_type for v in mod["main"].params}
np.random.seed(seed)
initializer = initializer if initializer else Xavier()
params = {}
for k, v in shape_dict.items():
if k == "data" or k == "label":
continue
init_value = np.zeros(v.concrete_shape).astype(v.dtype)
initializer(k, init_value)
params[k] = tvm.nd.array(init_value, ctx=tvm.cpu(0))
return mod, params
| true | true |
f72b0a4f41647e949ba4e6202d2c7f3980d53dab | 575 | py | Python | M5_assgmnt.py | AVNEETK99/FANTASY-CRICKET-LEAGUE | 17fc188e48a51c6f3937a9965f1edcead2a8d0b8 | [
"CC0-1.0"
] | 23 | 2018-07-18T10:47:12.000Z | 2021-07-31T21:53:17.000Z | M5_assgmnt.py | RupinSamria/Summer-Training-Python-development | 4fa38344d6aa71581b004c16eddeec22f9f739f4 | [
"CC0-1.0"
] | 3 | 2018-11-18T07:11:05.000Z | 2020-04-30T20:16:51.000Z | M5_assgmnt.py | RupinSamria/Summer-Training-Python-development | 4fa38344d6aa71581b004c16eddeec22f9f739f4 | [
"CC0-1.0"
] | 53 | 2018-10-04T05:49:30.000Z | 2021-12-12T15:52:17.000Z | import sqlite3
mystore=sqlite3.connect('bookstores.db')
mycursor=mystore.cursor()
sql=''' create table book (id integer primary key not null,title text(20),
author text(20),price real);'''
mycursor.execute(sql)
sql='''insert into book
values(1,'think java','rhooney',550.0);'''
mycursor.execute(sql)
mystore.commit()
sql='''insert into book
values(2,'think python','allen',450.0);'''
mycursor.execute(sql)
mystore.commit()
sql='''insert into book
values(3,'think c++','booty',375.0);'''
mycursor.execute(sql)
mystore.commit()
mystore.close()
| 21.296296 | 75 | 0.683478 | import sqlite3
mystore=sqlite3.connect('bookstores.db')
mycursor=mystore.cursor()
sql=''' create table book (id integer primary key not null,title text(20),
author text(20),price real);'''
mycursor.execute(sql)
sql='''insert into book
values(1,'think java','rhooney',550.0);'''
mycursor.execute(sql)
mystore.commit()
sql='''insert into book
values(2,'think python','allen',450.0);'''
mycursor.execute(sql)
mystore.commit()
sql='''insert into book
values(3,'think c++','booty',375.0);'''
mycursor.execute(sql)
mystore.commit()
mystore.close()
| true | true |
f72b0a5531db17b2a97a3179af5c86bd986dd358 | 12,137 | py | Python | test/data_join/test_data_block_dumper.py | chen1i/fedlearner | 981514dadbd0aa49ae87d185dd247d310e35605c | [
"Apache-2.0"
] | null | null | null | test/data_join/test_data_block_dumper.py | chen1i/fedlearner | 981514dadbd0aa49ae87d185dd247d310e35605c | [
"Apache-2.0"
] | null | null | null | test/data_join/test_data_block_dumper.py | chen1i/fedlearner | 981514dadbd0aa49ae87d185dd247d310e35605c | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import unittest
import os
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
from google.protobuf import text_format, timestamp_pb2
import tensorflow_io
from tensorflow.compat.v1 import gfile
from fedlearner.common import db_client
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import data_join_service_pb2 as dj_pb
from fedlearner.data_join import (
data_block_manager, common, data_block_dumper,
raw_data_manifest_manager, raw_data_visitor, visitor
)
from fedlearner.data_join.data_block_manager import DataBlockBuilder
from fedlearner.data_join.raw_data_iter_impl.tf_record_iter import TfExampleItem
class TestDataBlockDumper(unittest.TestCase):
    """End-to-end test of DataBlockDumperManager: dump follower data blocks,
    generate matching leader raw data, then verify the dumped leader blocks
    mirror the follower blocks byte-for-byte (metas and example records)."""

    def setUp(self):
        """Create fresh follower/leader data sources and wipe any leftover
        output or raw-data directories and kvstore state from earlier runs."""
        data_source_f = common_pb.DataSource()
        data_source_f.data_source_meta.name = "milestone"
        data_source_f.data_source_meta.partition_num = 1
        data_source_f.output_base_dir = "./output-f"
        self.data_source_f = data_source_f
        if gfile.Exists(self.data_source_f.output_base_dir):
            gfile.DeleteRecursively(self.data_source_f.output_base_dir)
        data_source_l = common_pb.DataSource()
        data_source_l.data_source_meta.name = "milestone"
        data_source_l.data_source_meta.partition_num = 1
        data_source_l.output_base_dir = "./output-l"
        self.raw_data_dir_l = "./raw_data-l"
        self.data_source_l = data_source_l
        if gfile.Exists(self.data_source_l.output_base_dir):
            gfile.DeleteRecursively(self.data_source_l.output_base_dir)
        if gfile.Exists(self.raw_data_dir_l):
            gfile.DeleteRecursively(self.raw_data_dir_l)
        self.kvstore = db_client.DBClient('etcd', True)
        self.kvstore.delete_prefix(common.data_source_kvstore_base_dir(self.data_source_l.data_source_meta.name))
        self.manifest_manager = raw_data_manifest_manager.RawDataManifestManager(
            self.kvstore, self.data_source_l)

    def generate_follower_data_block(self):
        """Dump 5 follower data blocks of 1024 examples each.

        leader_index advances by 3 per example (so leader raw data must
        contain 3x as many rows) while follower_index advances by 1 starting
        at 65536.  The resulting metas are kept in self.dumped_metas.
        """
        dbm = data_block_manager.DataBlockManager(self.data_source_f, 0)
        self.assertEqual(dbm.get_dumped_data_block_count(), 0)
        self.assertEqual(dbm.get_lastest_data_block_meta(), None)
        leader_index = 0
        follower_index = 65536
        self.dumped_metas = []
        for i in range(5):
            builder = DataBlockBuilder(
                common.data_source_data_block_dir(self.data_source_f),
                self.data_source_f.data_source_meta.name,
                0, i, dj_pb.WriterOptions(output_writer='TF_RECORD'), None
            )
            builder.set_data_block_manager(dbm)
            for j in range(1024):
                feat = {}
                # Example ids are globally sequential: 0 .. 5*1024-1.
                example_id = '{}'.format(i * 1024 + j).encode()
                feat['example_id'] = tf.train.Feature(
                    bytes_list=tf.train.BytesList(value=[example_id]))
                event_time = 150000000 + i * 1024 + j
                feat['event_time'] = tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[event_time]))
                feat['leader_index'] = tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[leader_index]))
                feat['follower_index'] = tf.train.Feature(
                    int64_list=tf.train.Int64List(value=[follower_index]))
                example = tf.train.Example(features=tf.train.Features(feature=feat))
                builder.append_item(TfExampleItem(example.SerializeToString()),
                                    leader_index, follower_index)
                leader_index += 3
                follower_index += 1
            meta = builder.finish_data_block()
            self.dumped_metas.append(meta)
        self.leader_start_index = 0
        self.leader_end_index = leader_index
        self.assertEqual(dbm.get_dumped_data_block_count(), 5)
        for (idx, meta) in enumerate(self.dumped_metas):
            self.assertEqual(dbm.get_data_block_meta_by_index(idx), meta)

    def generate_leader_raw_data(self):
        """Write leader-side raw data covering leader indices 0..end+2 in
        2048-row chunks, registering each finished chunk with the manifest
        manager.  Every third row carries a joinable example id (i // 3,
        matching the follower ids); the rest get out-of-range ids."""
        dbm = data_block_manager.DataBlockManager(self.data_source_l, 0)
        raw_data_dir = os.path.join(self.raw_data_dir_l, common.partition_repr(0))
        if gfile.Exists(raw_data_dir):
            gfile.DeleteRecursively(raw_data_dir)
        gfile.MakeDirs(raw_data_dir)
        rdm = raw_data_visitor.RawDataManager(self.kvstore, self.data_source_l, 0)
        block_index = 0
        builder = DataBlockBuilder(
            self.raw_data_dir_l,
            self.data_source_l.data_source_meta.name,
            0, block_index, dj_pb.WriterOptions(output_writer='TF_RECORD'), None
        )
        process_index = 0
        start_index = 0
        for i in range(0, self.leader_end_index + 3):
            # Finish the current chunk every 2048 rows, and once more just
            # before the final row so the tail gets flushed too.
            if (i > 0 and i % 2048 == 0) or (i == self.leader_end_index + 2):
                meta = builder.finish_data_block()
                if meta is not None:
                    ofname = common.encode_data_block_fname(
                        self.data_source_l.data_source_meta.name,
                        meta
                    )
                    fpath = os.path.join(raw_data_dir, ofname)
                    self.manifest_manager.add_raw_data(
                        0,
                        [dj_pb.RawDataMeta(file_path=fpath,
                                           timestamp=timestamp_pb2.Timestamp(seconds=3))],
                        False)
                    process_index += 1
                    start_index += len(meta.example_ids)
                block_index += 1
                builder = DataBlockBuilder(
                    self.raw_data_dir_l,
                    self.data_source_l.data_source_meta.name,
                    0, block_index, dj_pb.WriterOptions(output_writer='TF_RECORD'), None
                )
            feat = {}
            # NOTE(review): `i + 1 << 30` parses as `(i + 1) << 30` because +
            # binds tighter than <<; if `i + (1 << 30)` was intended, add
            # parentheses.  Either way these ids fall far outside the joined
            # range, which appears to be the point.
            pt = i + 1 << 30
            if i % 3 == 0:
                pt = i // 3
            example_id = '{}'.format(pt).encode()
            feat['example_id'] = tf.train.Feature(
                bytes_list=tf.train.BytesList(value=[example_id]))
            event_time = 150000000 + pt
            feat['event_time'] = tf.train.Feature(
                int64_list=tf.train.Int64List(value=[event_time]))
            example = tf.train.Example(features=tf.train.Features(feature=feat))
            builder.append_item(TfExampleItem(example.SerializeToString()), i, i)
        # Keep only the data-block files; drop meta/other artifacts so the
        # directory looks like plain raw data.
        fpaths = [os.path.join(raw_data_dir, f)
                  for f in gfile.ListDirectory(raw_data_dir)
                  if not gfile.IsDirectory(os.path.join(raw_data_dir, f))]
        for fpath in fpaths:
            if not fpath.endswith(common.DataBlockSuffix):
                gfile.Remove(fpath)

    def test_data_block_dumper(self):
        """Sync follower metas into the dumper, dump leader data blocks, then
        verify leader metas/records match the follower ones exactly."""
        self.generate_follower_data_block()
        self.generate_leader_raw_data()
        dbd = data_block_dumper.DataBlockDumperManager(
            self.kvstore, self.data_source_l, 0,
            dj_pb.RawDataOptions(raw_data_iter='TF_RECORD', read_ahead_size=1<<20, read_batch_size=128),
            dj_pb.WriterOptions(output_writer='TF_RECORD')
        )
        self.assertEqual(dbd.get_next_data_block_index(), 0)
        # Feed the follower metas in order; each sync advances the index.
        for (idx, meta) in enumerate(self.dumped_metas):
            success, next_index = dbd.add_synced_data_block_meta(meta)
            self.assertTrue(success)
            self.assertEqual(next_index, idx + 1)
        self.assertTrue(dbd.need_dump())
        self.assertEqual(dbd.get_next_data_block_index(), len(self.dumped_metas))
        with dbd.make_data_block_dumper() as dumper:
            dumper()
        dbm_f = data_block_manager.DataBlockManager(self.data_source_f, 0)
        dbm_l = data_block_manager.DataBlockManager(self.data_source_l, 0)
        self.assertEqual(dbm_f.get_dumped_data_block_count(), len(self.dumped_metas))
        self.assertEqual(dbm_f.get_dumped_data_block_count(),
                         dbm_l.get_dumped_data_block_count())
        for (idx, meta) in enumerate(self.dumped_metas):
            self.assertEqual(meta.data_block_index, idx)
            self.assertEqual(dbm_l.get_data_block_meta_by_index(idx), meta)
            self.assertEqual(dbm_f.get_data_block_meta_by_index(idx), meta)
            # Leader-side meta file on disk must parse back to the same meta.
            meta_fpth_l = os.path.join(
                common.data_source_data_block_dir(self.data_source_l),
                common.partition_repr(0),
                common.encode_data_block_meta_fname(
                    self.data_source_l.data_source_meta.name,
                    0, meta.data_block_index
                )
            )
            mitr = tf.io.tf_record_iterator(meta_fpth_l)
            meta_l = text_format.Parse(next(mitr), dj_pb.DataBlockMeta())
            self.assertEqual(meta_l, meta)
            # Same check for the follower-side meta file.
            meta_fpth_f = os.path.join(
                common.data_source_data_block_dir(self.data_source_f),
                common.partition_repr(0),
                common.encode_data_block_meta_fname(
                    self.data_source_f.data_source_meta.name,
                    0, meta.data_block_index
                )
            )
            mitr = tf.io.tf_record_iterator(meta_fpth_f)
            meta_f = text_format.Parse(next(mitr), dj_pb.DataBlockMeta())
            self.assertEqual(meta_f, meta)
            # Every record in the leader data block must carry the example id
            # recorded in the meta, in order, with no extras.
            data_fpth_l = os.path.join(
                common.data_source_data_block_dir(self.data_source_l),
                common.partition_repr(0),
                common.encode_data_block_fname(
                    self.data_source_l.data_source_meta.name,
                    meta_l
                )
            )
            for (iidx, record) in enumerate(tf.io.tf_record_iterator(data_fpth_l)):
                example = tf.train.Example()
                example.ParseFromString(record)
                feat = example.features.feature
                self.assertEqual(feat['example_id'].bytes_list.value[0],
                                 meta.example_ids[iidx])
            self.assertEqual(len(meta.example_ids), iidx + 1)
            # And the same for the follower data block.
            data_fpth_f = os.path.join(
                common.data_source_data_block_dir(self.data_source_f),
                common.partition_repr(0),
                common.encode_data_block_fname(
                    self.data_source_l.data_source_meta.name,
                    meta_f
                )
            )
            for (iidx, record) in enumerate(tf.io.tf_record_iterator(data_fpth_f)):
                example = tf.train.Example()
                example.ParseFromString(record)
                feat = example.features.feature
                self.assertEqual(feat['example_id'].bytes_list.value[0],
                                 meta.example_ids[iidx])
            self.assertEqual(len(meta.example_ids), iidx + 1)

    def tearDown(self):
        """Remove all on-disk output and kvstore state created by the test."""
        if gfile.Exists(self.data_source_f.output_base_dir):
            gfile.DeleteRecursively(self.data_source_f.output_base_dir)
        if gfile.Exists(self.data_source_l.output_base_dir):
            gfile.DeleteRecursively(self.data_source_l.output_base_dir)
        if gfile.Exists(self.raw_data_dir_l):
            gfile.DeleteRecursively(self.raw_data_dir_l)
        self.kvstore.delete_prefix(common.data_source_kvstore_base_dir(self.data_source_l.data_source_meta.name))
if __name__ == '__main__':
    # Run the data-block-dumper test suite when executed directly.
    unittest.main()
| 49.538776 | 113 | 0.616215 |
import unittest
import os
import tensorflow.compat.v1 as tf
tf.enable_eager_execution()
from google.protobuf import text_format, timestamp_pb2
import tensorflow_io
from tensorflow.compat.v1 import gfile
from fedlearner.common import db_client
from fedlearner.common import common_pb2 as common_pb
from fedlearner.common import data_join_service_pb2 as dj_pb
from fedlearner.data_join import (
data_block_manager, common, data_block_dumper,
raw_data_manifest_manager, raw_data_visitor, visitor
)
from fedlearner.data_join.data_block_manager import DataBlockBuilder
from fedlearner.data_join.raw_data_iter_impl.tf_record_iter import TfExampleItem
class TestDataBlockDumper(unittest.TestCase):
def setUp(self):
data_source_f = common_pb.DataSource()
data_source_f.data_source_meta.name = "milestone"
data_source_f.data_source_meta.partition_num = 1
data_source_f.output_base_dir = "./output-f"
self.data_source_f = data_source_f
if gfile.Exists(self.data_source_f.output_base_dir):
gfile.DeleteRecursively(self.data_source_f.output_base_dir)
data_source_l = common_pb.DataSource()
data_source_l.data_source_meta.name = "milestone"
data_source_l.data_source_meta.partition_num = 1
data_source_l.output_base_dir = "./output-l"
self.raw_data_dir_l = "./raw_data-l"
self.data_source_l = data_source_l
if gfile.Exists(self.data_source_l.output_base_dir):
gfile.DeleteRecursively(self.data_source_l.output_base_dir)
if gfile.Exists(self.raw_data_dir_l):
gfile.DeleteRecursively(self.raw_data_dir_l)
self.kvstore = db_client.DBClient('etcd', True)
self.kvstore.delete_prefix(common.data_source_kvstore_base_dir(self.data_source_l.data_source_meta.name))
self.manifest_manager = raw_data_manifest_manager.RawDataManifestManager(
self.kvstore, self.data_source_l)
def generate_follower_data_block(self):
dbm = data_block_manager.DataBlockManager(self.data_source_f, 0)
self.assertEqual(dbm.get_dumped_data_block_count(), 0)
self.assertEqual(dbm.get_lastest_data_block_meta(), None)
leader_index = 0
follower_index = 65536
self.dumped_metas = []
for i in range(5):
builder = DataBlockBuilder(
common.data_source_data_block_dir(self.data_source_f),
self.data_source_f.data_source_meta.name,
0, i, dj_pb.WriterOptions(output_writer='TF_RECORD'), None
)
builder.set_data_block_manager(dbm)
for j in range(1024):
feat = {}
example_id = '{}'.format(i * 1024 + j).encode()
feat['example_id'] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[example_id]))
event_time = 150000000 + i * 1024 + j
feat['event_time'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[event_time]))
feat['leader_index'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[leader_index]))
feat['follower_index'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[follower_index]))
example = tf.train.Example(features=tf.train.Features(feature=feat))
builder.append_item(TfExampleItem(example.SerializeToString()),
leader_index, follower_index)
leader_index += 3
follower_index += 1
meta = builder.finish_data_block()
self.dumped_metas.append(meta)
self.leader_start_index = 0
self.leader_end_index = leader_index
self.assertEqual(dbm.get_dumped_data_block_count(), 5)
for (idx, meta) in enumerate(self.dumped_metas):
self.assertEqual(dbm.get_data_block_meta_by_index(idx), meta)
def generate_leader_raw_data(self):
dbm = data_block_manager.DataBlockManager(self.data_source_l, 0)
raw_data_dir = os.path.join(self.raw_data_dir_l, common.partition_repr(0))
if gfile.Exists(raw_data_dir):
gfile.DeleteRecursively(raw_data_dir)
gfile.MakeDirs(raw_data_dir)
rdm = raw_data_visitor.RawDataManager(self.kvstore, self.data_source_l, 0)
block_index = 0
builder = DataBlockBuilder(
self.raw_data_dir_l,
self.data_source_l.data_source_meta.name,
0, block_index, dj_pb.WriterOptions(output_writer='TF_RECORD'), None
)
process_index = 0
start_index = 0
for i in range(0, self.leader_end_index + 3):
if (i > 0 and i % 2048 == 0) or (i == self.leader_end_index + 2):
meta = builder.finish_data_block()
if meta is not None:
ofname = common.encode_data_block_fname(
self.data_source_l.data_source_meta.name,
meta
)
fpath = os.path.join(raw_data_dir, ofname)
self.manifest_manager.add_raw_data(
0,
[dj_pb.RawDataMeta(file_path=fpath,
timestamp=timestamp_pb2.Timestamp(seconds=3))],
False)
process_index += 1
start_index += len(meta.example_ids)
block_index += 1
builder = DataBlockBuilder(
self.raw_data_dir_l,
self.data_source_l.data_source_meta.name,
0, block_index, dj_pb.WriterOptions(output_writer='TF_RECORD'), None
)
feat = {}
pt = i + 1 << 30
if i % 3 == 0:
pt = i // 3
example_id = '{}'.format(pt).encode()
feat['example_id'] = tf.train.Feature(
bytes_list=tf.train.BytesList(value=[example_id]))
event_time = 150000000 + pt
feat['event_time'] = tf.train.Feature(
int64_list=tf.train.Int64List(value=[event_time]))
example = tf.train.Example(features=tf.train.Features(feature=feat))
builder.append_item(TfExampleItem(example.SerializeToString()), i, i)
fpaths = [os.path.join(raw_data_dir, f)
for f in gfile.ListDirectory(raw_data_dir)
if not gfile.IsDirectory(os.path.join(raw_data_dir, f))]
for fpath in fpaths:
if not fpath.endswith(common.DataBlockSuffix):
gfile.Remove(fpath)
def test_data_block_dumper(self):
self.generate_follower_data_block()
self.generate_leader_raw_data()
dbd = data_block_dumper.DataBlockDumperManager(
self.kvstore, self.data_source_l, 0,
dj_pb.RawDataOptions(raw_data_iter='TF_RECORD', read_ahead_size=1<<20, read_batch_size=128),
dj_pb.WriterOptions(output_writer='TF_RECORD')
)
self.assertEqual(dbd.get_next_data_block_index(), 0)
for (idx, meta) in enumerate(self.dumped_metas):
success, next_index = dbd.add_synced_data_block_meta(meta)
self.assertTrue(success)
self.assertEqual(next_index, idx + 1)
self.assertTrue(dbd.need_dump())
self.assertEqual(dbd.get_next_data_block_index(), len(self.dumped_metas))
with dbd.make_data_block_dumper() as dumper:
dumper()
dbm_f = data_block_manager.DataBlockManager(self.data_source_f, 0)
dbm_l = data_block_manager.DataBlockManager(self.data_source_l, 0)
self.assertEqual(dbm_f.get_dumped_data_block_count(), len(self.dumped_metas))
self.assertEqual(dbm_f.get_dumped_data_block_count(),
dbm_l.get_dumped_data_block_count())
for (idx, meta) in enumerate(self.dumped_metas):
self.assertEqual(meta.data_block_index, idx)
self.assertEqual(dbm_l.get_data_block_meta_by_index(idx), meta)
self.assertEqual(dbm_f.get_data_block_meta_by_index(idx), meta)
meta_fpth_l = os.path.join(
common.data_source_data_block_dir(self.data_source_l),
common.partition_repr(0),
common.encode_data_block_meta_fname(
self.data_source_l.data_source_meta.name,
0, meta.data_block_index
)
)
mitr = tf.io.tf_record_iterator(meta_fpth_l)
meta_l = text_format.Parse(next(mitr), dj_pb.DataBlockMeta())
self.assertEqual(meta_l, meta)
meta_fpth_f = os.path.join(
common.data_source_data_block_dir(self.data_source_f),
common.partition_repr(0),
common.encode_data_block_meta_fname(
self.data_source_f.data_source_meta.name,
0, meta.data_block_index
)
)
mitr = tf.io.tf_record_iterator(meta_fpth_f)
meta_f = text_format.Parse(next(mitr), dj_pb.DataBlockMeta())
self.assertEqual(meta_f, meta)
data_fpth_l = os.path.join(
common.data_source_data_block_dir(self.data_source_l),
common.partition_repr(0),
common.encode_data_block_fname(
self.data_source_l.data_source_meta.name,
meta_l
)
)
for (iidx, record) in enumerate(tf.io.tf_record_iterator(data_fpth_l)):
example = tf.train.Example()
example.ParseFromString(record)
feat = example.features.feature
self.assertEqual(feat['example_id'].bytes_list.value[0],
meta.example_ids[iidx])
self.assertEqual(len(meta.example_ids), iidx + 1)
data_fpth_f = os.path.join(
common.data_source_data_block_dir(self.data_source_f),
common.partition_repr(0),
common.encode_data_block_fname(
self.data_source_l.data_source_meta.name,
meta_f
)
)
for (iidx, record) in enumerate(tf.io.tf_record_iterator(data_fpth_f)):
example = tf.train.Example()
example.ParseFromString(record)
feat = example.features.feature
self.assertEqual(feat['example_id'].bytes_list.value[0],
meta.example_ids[iidx])
self.assertEqual(len(meta.example_ids), iidx +1)
def tearDown(self):
if gfile.Exists(self.data_source_f.output_base_dir):
gfile.DeleteRecursively(self.data_source_f.output_base_dir)
if gfile.Exists(self.data_source_l.output_base_dir):
gfile.DeleteRecursively(self.data_source_l.output_base_dir)
if gfile.Exists(self.raw_data_dir_l):
gfile.DeleteRecursively(self.raw_data_dir_l)
self.kvstore.delete_prefix(common.data_source_kvstore_base_dir(self.data_source_l.data_source_meta.name))
if __name__ == '__main__':
unittest.main()
| true | true |
f72b0ab4b78ec9b7eb7deec2b8193a86ca41b48e | 938 | py | Python | year_2020/day13/test_day13.py | mjalkio/advent-of-code | 54dbfcba3850e72d7b736ef1e7d2a3cb91e65d42 | [
"MIT"
] | null | null | null | year_2020/day13/test_day13.py | mjalkio/advent-of-code | 54dbfcba3850e72d7b736ef1e7d2a3cb91e65d42 | [
"MIT"
] | null | null | null | year_2020/day13/test_day13.py | mjalkio/advent-of-code | 54dbfcba3850e72d7b736ef1e7d2a3cb91e65d42 | [
"MIT"
] | null | null | null | import pytest
from year_2020.day13.shuttle_search import (
get_bus_id_times_wait_time,
get_earliest_bus_and_wait_time_for_airport,
get_shuttle_company_solution,
)
TEST_INPUT = """
939
7,13,x,x,59,x,31,19
"""
TEST_INPUT_2 = """
0
17,x,13,19
"""
TEST_INPUT_3 = """
0
67,7,59,61
"""
TEST_INPUT_4 = """
0
67,x,7,59,61
"""
TEST_INPUT_5 = """
0
67,7,x,59,61
"""
TEST_INPUT_6 = """
0
1789,37,47,1889
"""
def test_part_1():
assert get_bus_id_times_wait_time(TEST_INPUT) == 295
assert get_earliest_bus_and_wait_time_for_airport(TEST_INPUT) == (59, 5)
@pytest.mark.parametrize(
"test_input,expected",
[
(TEST_INPUT, 1068781),
(TEST_INPUT_2, 3417),
(TEST_INPUT_3, 754018),
(TEST_INPUT_4, 779210),
(TEST_INPUT_5, 1261476),
(TEST_INPUT_6, 1202161486),
],
)
def test_part_2(test_input, expected):
assert get_shuttle_company_solution(test_input) == expected
| 16.172414 | 76 | 0.672708 | import pytest
from year_2020.day13.shuttle_search import (
get_bus_id_times_wait_time,
get_earliest_bus_and_wait_time_for_airport,
get_shuttle_company_solution,
)
TEST_INPUT = """
939
7,13,x,x,59,x,31,19
"""
TEST_INPUT_2 = """
0
17,x,13,19
"""
TEST_INPUT_3 = """
0
67,7,59,61
"""
TEST_INPUT_4 = """
0
67,x,7,59,61
"""
TEST_INPUT_5 = """
0
67,7,x,59,61
"""
TEST_INPUT_6 = """
0
1789,37,47,1889
"""
def test_part_1():
assert get_bus_id_times_wait_time(TEST_INPUT) == 295
assert get_earliest_bus_and_wait_time_for_airport(TEST_INPUT) == (59, 5)
@pytest.mark.parametrize(
"test_input,expected",
[
(TEST_INPUT, 1068781),
(TEST_INPUT_2, 3417),
(TEST_INPUT_3, 754018),
(TEST_INPUT_4, 779210),
(TEST_INPUT_5, 1261476),
(TEST_INPUT_6, 1202161486),
],
)
def test_part_2(test_input, expected):
assert get_shuttle_company_solution(test_input) == expected
| true | true |
f72b0ad54d6dd35fc8e313c9014957d5d7c84c64 | 2,327 | py | Python | TheoryValidation/CirculantGraphs.py | ctralie/GeometricBeatTracking | 2c35183f638c4afb51808c09e46da0f74384cba6 | [
"Apache-2.0"
] | 2 | 2019-11-03T16:59:34.000Z | 2021-04-17T05:41:01.000Z | TheoryValidation/CirculantGraphs.py | ctralie/GeometricBeatTracking | 2c35183f638c4afb51808c09e46da0f74384cba6 | [
"Apache-2.0"
] | null | null | null | TheoryValidation/CirculantGraphs.py | ctralie/GeometricBeatTracking | 2c35183f638c4afb51808c09e46da0f74384cba6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sparse
import sys
sys.path.append("..")
from Laplacian import *
def getCirculantAdj(N, lags):
#Setup circular parts
I = range(N)*(len(lags)+2)
J = range(1, N+1) + range(-1, N-1)
J[N-1] = 0
J[N] = N-1
for lag in lags:
J = J + (np.mod(np.arange(N) + lag, N)).tolist()
V = np.ones(len(I))
return sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
def getOneOnK(N, k):
lags = [i*N/k for i in range(1, k)]
return getCirculantAdj(N, lags)
def getCircleEigs(N):
lambdas = np.zeros(N)
for i in range(1, N/2+1):
val = 2 - 2*np.cos(2*np.pi*i/N)
i1 = i*2-1
i2 = i*2
lambdas[i1] = val
if i2 < N:
lambdas[i2] = val
return lambdas
def getMoebiusEigs(N):
lambdas = np.zeros(N)
for i in range(1, N/2+1):
val = 3 - 2*np.cos(2*np.pi*i/N) - (-1)**i
i1 = i*2-1
i2 = i*2
lambdas[i1] = val
if i2 < N:
lambdas[i2] = val
return (lambdas, np.sort(lambdas))
def get3WayEigs(N):
lambdas = np.zeros(N)
for i in range(1, N/2+1):
val = 4 - 2*np.cos(2*np.pi*i/N) - 2*np.cos(2*np.pi*i/3)
i1 = i*2-1
i2 = i*2
lambdas[i1] = val
if i2 < N:
lambdas[i2] = val
return (lambdas, np.sort(lambdas))
if __name__ == '__main__':
N = 100
A = getOneOnK(N, 2)
#A = getCirculantAdj(N, [30, 60, 80])
A = A.toarray()
(w, v, L) = getLaplacianEigsDense(A, A.shape[0])
(lambdas, lambdassorted) = get3WayEigs(N)
plt.figure(figsize=(15, 4))
plt.subplot(132)
plt.plot(lambdas)
plt.title("Eigenvalues")
plt.xlabel("Eigenvalue Number")
plt.ylabel("Eigenvalue")
# plt.subplot(224)
# plt.scatter(w, lambdassorted)
# plt.xlabel("Numerically Computed")
# plt.ylabel("Analytic")
# plt.axis('equal')
# plt.title("Checking accuracy")
plt.subplot(131)
plt.imshow(A, interpolation = 'nearest', cmap = 'gray')
plt.title("Adjacency Matrix")
plt.subplot(133)
plt.imshow(v, cmap = 'afmhot', aspect = 'auto', interpolation = 'nearest')
plt.xlabel("k-th Smallest Eigenvector")
plt.title("Eigenvectors")
plt.savefig("Eigs.svg", bbox_inches = 'tight')
| 26.146067 | 78 | 0.5578 | import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sparse
import sys
sys.path.append("..")
from Laplacian import *
def getCirculantAdj(N, lags):
I = range(N)*(len(lags)+2)
J = range(1, N+1) + range(-1, N-1)
J[N-1] = 0
J[N] = N-1
for lag in lags:
J = J + (np.mod(np.arange(N) + lag, N)).tolist()
V = np.ones(len(I))
return sparse.coo_matrix((V, (I, J)), shape=(N, N)).tocsr()
def getOneOnK(N, k):
lags = [i*N/k for i in range(1, k)]
return getCirculantAdj(N, lags)
def getCircleEigs(N):
lambdas = np.zeros(N)
for i in range(1, N/2+1):
val = 2 - 2*np.cos(2*np.pi*i/N)
i1 = i*2-1
i2 = i*2
lambdas[i1] = val
if i2 < N:
lambdas[i2] = val
return lambdas
def getMoebiusEigs(N):
lambdas = np.zeros(N)
for i in range(1, N/2+1):
val = 3 - 2*np.cos(2*np.pi*i/N) - (-1)**i
i1 = i*2-1
i2 = i*2
lambdas[i1] = val
if i2 < N:
lambdas[i2] = val
return (lambdas, np.sort(lambdas))
def get3WayEigs(N):
lambdas = np.zeros(N)
for i in range(1, N/2+1):
val = 4 - 2*np.cos(2*np.pi*i/N) - 2*np.cos(2*np.pi*i/3)
i1 = i*2-1
i2 = i*2
lambdas[i1] = val
if i2 < N:
lambdas[i2] = val
return (lambdas, np.sort(lambdas))
if __name__ == '__main__':
N = 100
A = getOneOnK(N, 2)
A = A.toarray()
(w, v, L) = getLaplacianEigsDense(A, A.shape[0])
(lambdas, lambdassorted) = get3WayEigs(N)
plt.figure(figsize=(15, 4))
plt.subplot(132)
plt.plot(lambdas)
plt.title("Eigenvalues")
plt.xlabel("Eigenvalue Number")
plt.ylabel("Eigenvalue")
plt.subplot(131)
plt.imshow(A, interpolation = 'nearest', cmap = 'gray')
plt.title("Adjacency Matrix")
plt.subplot(133)
plt.imshow(v, cmap = 'afmhot', aspect = 'auto', interpolation = 'nearest')
plt.xlabel("k-th Smallest Eigenvector")
plt.title("Eigenvectors")
plt.savefig("Eigs.svg", bbox_inches = 'tight')
| true | true |
f72b0b19c49d94d5feee3fd0a9c9902892c5cb86 | 28,656 | py | Python | Lib/test/test_tempfile.py | deadsnakes/python3.1 | 88d77610a7873c5161bfc15cd69557fc7697b1a3 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_tempfile.py | deadsnakes/python3.1 | 88d77610a7873c5161bfc15cd69557fc7697b1a3 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_tempfile.py | deadsnakes/python3.1 | 88d77610a7873c5161bfc15cd69557fc7697b1a3 | [
"PSF-2.0"
] | null | null | null | # tempfile.py unit tests.
import tempfile
import os
import sys
import re
import errno
import warnings
import unittest
from test import support
# This suite exercises tempfile.mktemp() on purpose; silence the
# RuntimeWarning it emits so it does not pollute the test output.
warnings.filterwarnings("ignore",
                        category=RuntimeWarning,
                        message="mktemp", module=__name__)

# Feature probes: some platforms lack os.stat / os.spawnl, and on some
# there is no difference between the text and binary open flags.
has_stat = 1 if hasattr(os, 'stat') else 0
if has_stat:
    import stat

has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')

# TEST_FILES may need to be tweaked for systems depending on the maximum
# number of files that can be opened at one time (see ulimit -n)
_platform_test_files = {'mac': 32, 'openbsd3': 48, 'openbsd4': 48}
TEST_FILES = _platform_test_files.get(sys.platform, 100)
# This is organized as one test for each chunk of code in tempfile.py,
# in order of their appearance in the file. Testing which requires
# threads is not done here.
# Common functionality.
class TC(unittest.TestCase):
    """Shared helpers for the tempfile test cases."""

    # Exactly six characters drawn from tempfile's random-name alphabet.
    str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")

    def failOnException(self, what, ei=None):
        """Fail the test, reporting the exception currently being handled."""
        exc_info = sys.exc_info() if ei is None else ei
        self.fail("%s raised %s: %s" % (what, exc_info[0], exc_info[1]))

    def nameCheck(self, name, dir, pre, suf):
        """Assert that *name* is dir / (pre + 6 random chars + suf)."""
        ndir, nbase = os.path.split(name)
        npre = nbase[:len(pre)]
        nsuf = nbase[len(nbase) - len(suf):]

        # Compare absolute paths so relative/absolute spellings agree.
        self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
                         "file '%s' not in directory '%s'" % (name, dir))
        self.assertEqual(npre, pre,
                         "file '%s' does not begin with '%s'" % (nbase, pre))
        self.assertEqual(nsuf, suf,
                         "file '%s' does not end with '%s'" % (nbase, suf))

        middle = nbase[len(pre):len(nbase) - len(suf)]
        self.assertTrue(self.str_check.match(middle),
                        "random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
                        % middle)
# Accumulates every TestCase class defined below; presumably consumed by a
# test_main()-style runner later in the file (not visible in this chunk).
test_classes = []
class test_exports(TC):
    """Check that tempfile's public namespace contains no surprises."""

    def test_exports(self):
        # There are no surprising symbols in the tempfile module.
        # (Fixed: the local was previously named 'dict', shadowing the
        # builtin of the same name.)
        namespace = tempfile.__dict__
        # The names tempfile is expected to expose; only the keys matter,
        # the values are dummies.
        expected = {
            "NamedTemporaryFile" : 1,
            "TemporaryFile" : 1,
            "mkstemp" : 1,
            "mkdtemp" : 1,
            "mktemp" : 1,
            "TMP_MAX" : 1,
            "gettempprefix" : 1,
            "gettempdir" : 1,
            "tempdir" : 1,
            "template" : 1,
            "SpooledTemporaryFile" : 1
        }

        # Any public name (no leading underscore) not in 'expected' is a bug.
        unexp = [key for key in namespace
                 if key[0] != '_' and key not in expected]
        self.assertTrue(len(unexp) == 0,
                        "unexpected keys: %s" % unexp)

test_classes.append(test_exports)
class test__RandomNameSequence(TC):
    """Test the internal iterator object _RandomNameSequence."""

    def setUp(self):
        self.r = tempfile._RandomNameSequence()

    def test_get_six_char_str(self):
        # _RandomNameSequence returns a six-character string
        s = next(self.r)
        self.nameCheck(s, '', '', '')

    def test_many(self):
        # _RandomNameSequence returns no duplicate strings (stochastic).
        # (Fixed: the local was previously named 'dict', shadowing the
        # builtin of the same name.)
        seen = {}
        r = self.r
        for i in range(TEST_FILES):
            s = next(r)
            self.nameCheck(s, '', '', '')
            self.assertFalse(s in seen)
            seen[s] = 1

    # NOTE(review): no 'test_' prefix, so unittest never collects this
    # method; renaming it would change the class interface, so it is left
    # as-is -- confirm whether it should be promoted to a real test.
    def supports_iter(self):
        # _RandomNameSequence supports the iterator protocol
        i = 0
        r = self.r
        try:
            for s in r:
                i += 1
                if i == 20:
                    break
        except:
            # Bug fix: this previously called the bare name
            # failOnException, which is undefined at module level and
            # would have raised NameError instead of failing the test.
            self.failOnException("iteration")

test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
    """Test the internal function _candidate_tempdir_list."""

    def test_nonempty_list(self):
        # _candidate_tempdir_list returns a nonempty list of strings
        candidates = tempfile._candidate_tempdir_list()

        self.assertFalse(len(candidates) == 0)
        for entry in candidates:
            self.assertTrue(isinstance(entry, str),
                            "%s is not a string" % entry)

    def test_wanted_dirs(self):
        # _candidate_tempdir_list contains the expected directories.
        # First make sure each interesting environment variable has a value.
        with support.EnvironmentVarGuard() as env:
            for envname in ('TMPDIR', 'TEMP', 'TMP'):
                if not os.getenv(envname):
                    env[envname] = os.path.abspath(envname)

            candidates = tempfile._candidate_tempdir_list()

            # Each environment-derived directory must appear.
            for envname in ('TMPDIR', 'TEMP', 'TMP'):
                dirname = os.getenv(envname)
                if not dirname:
                    raise ValueError
                self.assertTrue(dirname in candidates)

            # The current directory (or os.curdir if it cannot be
            # determined) must also be a candidate.
            try:
                cwd = os.getcwd()
            except (AttributeError, os.error):
                cwd = os.curdir
            self.assertTrue(cwd in candidates)

            # Not practical to try to verify the presence of OS-specific
            # paths in this list.

test_classes.append(test__candidate_tempdir_list)
# We test _get_default_tempdir by testing gettempdir.
class test__get_candidate_names(TC):
    """Test the internal function _get_candidate_names."""

    def test_retval(self):
        # _get_candidate_names returns a _RandomNameSequence object
        result = tempfile._get_candidate_names()
        self.assertTrue(isinstance(result, tempfile._RandomNameSequence))

    def test_same_thing(self):
        # _get_candidate_names always returns the same object
        first = tempfile._get_candidate_names()
        second = tempfile._get_candidate_names()
        self.assertTrue(first is second)

test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
    """Test the internal function _mkstemp_inner."""

    class mkstemped:
        # Wrapper around one file produced by _mkstemp_inner: it records the
        # (fd, name) pair and removes both when the wrapper is collected.
        # os.close/os.unlink are bound as class attributes so that __del__
        # can still reach them at interpreter shutdown, when module globals
        # may already have been cleared.
        _bflags = tempfile._bin_openflags
        _tflags = tempfile._text_openflags
        _close = os.close
        _unlink = os.unlink

        def __init__(self, dir, pre, suf, bin):
            # 'bin' selects binary vs. text open flags.
            if bin: flags = self._bflags
            else: flags = self._tflags
            (self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)

        def write(self, str):
            os.write(self.fd, str)

        def __del__(self):
            self._close(self.fd)
            self._unlink(self.name)

    def do_create(self, dir=None, pre="", suf="", bin=1):
        # Create one temp file through the wrapper and validate its name;
        # any exception from _mkstemp_inner is reported as a test failure.
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = self.mkstemped(dir, pre, suf, bin)
        except:
            self.failOnException("_mkstemp_inner")

        self.nameCheck(file.name, dir, pre, suf)
        return file

    def test_basic(self):
        # _mkstemp_inner can create files
        self.do_create().write(b"blat")
        self.do_create(pre="a").write(b"blat")
        self.do_create(suf="b").write(b"blat")
        self.do_create(pre="a", suf="b").write(b"blat")
        self.do_create(pre="aa", suf=".txt").write(b"blat")

    def test_basic_many(self):
        # _mkstemp_inner can create many files (stochastic).
        # Keeping every wrapper alive in 'extant' makes all TEST_FILES
        # files exist simultaneously before cleanup.
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")

    def test_choose_directory(self):
        # _mkstemp_inner can create files in a user-selected directory.
        # NOTE(review): the rmdir relies on the unreferenced wrapper being
        # finalized (and its file unlinked) right away -- true under
        # CPython refcounting; confirm if ever run on another interpreter.
        dir = tempfile.mkdtemp()
        try:
            self.do_create(dir=dir).write(b"blat")
        finally:
            os.rmdir(dir)

    def test_file_mode(self):
        # _mkstemp_inner creates files with the proper mode
        if not has_stat:
            return # ugh, can't use SkipTest.

        file = self.do_create()
        mode = stat.S_IMODE(os.stat(file.name).st_mode)
        expected = 0o600
        if sys.platform in ('win32', 'os2emx', 'mac'):
            # There's no distinction among 'user', 'group' and 'world';
            # replicate the 'user' bits.
            user = expected >> 6
            expected = user * (1 + 8 + 64)
        self.assertEqual(mode, expected)

    def test_noinherit(self):
        # _mkstemp_inner file handles are not inherited by child processes
        if not has_spawnl:
            return # ugh, can't use SkipTest.

        if support.verbose:
            v="v"
        else:
            v="q"

        file = self.do_create()
        fd = "%d" % file.fd

        try:
            me = __file__
        except NameError:
            me = sys.argv[0]

        # We have to exec something, so that FD_CLOEXEC will take
        # effect.  The core of this test is therefore in
        # tf_inherit_check.py, which see.
        tester = os.path.join(os.path.dirname(os.path.abspath(me)),
                              "tf_inherit_check.py")

        # On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
        # but an arg with embedded spaces should be decorated with double
        # quotes on each end
        if sys.platform in ('win32',):
            decorated = '"%s"' % sys.executable
            tester = '"%s"' % tester
        else:
            decorated = sys.executable

        retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
        self.assertFalse(retval < 0,
                    "child process caught fatal signal %d" % -retval)
        self.assertFalse(retval > 0, "child process reports failure %d"%retval)

    def test_textmode(self):
        # _mkstemp_inner can create files in text mode
        if not has_textmode:
            return # ugh, can't use SkipTest.

        # A text file is truncated at the first Ctrl+Z byte
        f = self.do_create(bin=0)
        f.write(b"blat\x1a")
        f.write(b"extra\n")
        os.lseek(f.fd, 0, os.SEEK_SET)
        self.assertEqual(os.read(f.fd, 20), b"blat")

test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
    """Test gettempprefix()."""

    def test_sane_template(self):
        # gettempprefix returns a nonempty prefix string
        prefix = tempfile.gettempprefix()
        self.assertTrue(isinstance(prefix, str))
        self.assertTrue(len(prefix) > 0)

    def test_usable_template(self):
        # gettempprefix returns a usable prefix string:
        # make a temp directory (created without the prefix), then
        # create prefix + 'xxxxxx.xxx' inside it by hand.
        basename = tempfile.gettempprefix() + "xxxxxx.xxx"
        workdir = tempfile.mkdtemp(prefix="")
        try:
            target = os.path.join(workdir, basename)
            try:
                fd = os.open(target, os.O_RDWR | os.O_CREAT)
            except:
                self.failOnException("os.open")
            os.close(fd)
            os.unlink(target)
        finally:
            os.rmdir(workdir)

test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
    """Test gettempdir()."""

    def test_directory_exists(self):
        # gettempdir returns a directory which exists
        tmpdir = tempfile.gettempdir()
        self.assertTrue(os.path.isabs(tmpdir) or tmpdir == os.curdir,
                        "%s is not an absolute path" % tmpdir)
        self.assertTrue(os.path.isdir(tmpdir),
                        "%s is not a directory" % tmpdir)

    def test_directory_writable(self):
        # gettempdir returns a directory writable by the user.
        # Sneaky: a NamedTemporaryFile with default arguments lands in the
        # directory returned by gettempdir, so creating one proves it is
        # writable.
        try:
            handle = tempfile.NamedTemporaryFile()
            handle.write(b"blat")
            handle.close()
        except:
            self.failOnException("create file in %s" % tempfile.gettempdir())

    def test_same_thing(self):
        # gettempdir always returns the same object
        first = tempfile.gettempdir()
        second = tempfile.gettempdir()
        self.assertTrue(first is second)

test_classes.append(test_gettempdir)
class test_mkstemp(TC):
    """Test mkstemp()."""

    def do_create(self, dir=None, pre="", suf=""):
        """Create one file via mkstemp, check its location/name, clean up."""
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            (fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
            (ndir, nbase) = os.path.split(name)
            adir = os.path.abspath(dir)
            self.assertEqual(adir, ndir,
                "Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
        except:
            self.failOnException("mkstemp")

        try:
            self.nameCheck(name, dir, pre, suf)
        finally:
            os.close(fd)
            os.unlink(name)

    def test_basic(self):
        # mkstemp can create files
        for kwargs in ({}, {'pre': "a"}, {'suf': "b"},
                       {'pre': "a", 'suf': "b"},
                       {'pre': "aa", 'suf': ".txt"},
                       {'dir': "."}):
            self.do_create(**kwargs)

    def test_choose_directory(self):
        # mkstemp can create directories in a user-selected directory
        tmpdir = tempfile.mkdtemp()
        try:
            self.do_create(dir=tmpdir)
        finally:
            os.rmdir(tmpdir)

test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
    """Test mkdtemp()."""

    def do_create(self, dir=None, pre="", suf=""):
        """Create a directory via mkdtemp, verify its name, and return it."""
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
        except:
            self.failOnException("mkdtemp")

        try:
            self.nameCheck(name, dir, pre, suf)
        except:
            # The name was bad, so remove the directory before re-raising.
            os.rmdir(name)
            raise
        return name

    def test_basic(self):
        # mkdtemp can create directories
        for kwargs in ({}, {'pre': "a"}, {'suf': "b"},
                       {'pre': "a", 'suf': "b"},
                       {'pre': "aa", 'suf': ".txt"}):
            os.rmdir(self.do_create(**kwargs))

    def test_basic_many(self):
        # mkdtemp can create many directories (stochastic)
        created = list(range(TEST_FILES))
        try:
            for idx in created:
                created[idx] = self.do_create(pre="aa")
        finally:
            # Entries that are still ints were never created; only remove
            # the ones that became directory names.
            for entry in created:
                if isinstance(entry, str):
                    os.rmdir(entry)

    def test_choose_directory(self):
        # mkdtemp can create directories in a user-selected directory
        parent = tempfile.mkdtemp()
        try:
            os.rmdir(self.do_create(dir=parent))
        finally:
            os.rmdir(parent)

    def test_mode(self):
        # mkdtemp creates directories with the proper mode
        if not has_stat:
            return # ugh, can't use SkipTest.

        dirname = self.do_create()
        try:
            mode = stat.S_IMODE(os.stat(dirname).st_mode)
            mode &= 0o777 # Mask off sticky bits inherited from /tmp
            expected = 0o700
            if sys.platform in ('win32', 'os2emx', 'mac'):
                # There's no distinction among 'user', 'group' and 'world';
                # replicate the 'user' bits.
                user = expected >> 6
                expected = user * (1 + 8 + 64)
            self.assertEqual(mode, expected)
        finally:
            os.rmdir(dirname)

test_classes.append(test_mkdtemp)
class test_mktemp(TC):
    """Test mktemp()."""

    # For safety, all use of mktemp must occur in a private directory.
    # We must also suppress the RuntimeWarning it generates.
    def setUp(self):
        # Private playground for the race-prone mktemp names.
        self.dir = tempfile.mkdtemp()

    def tearDown(self):
        if self.dir:
            os.rmdir(self.dir)
            self.dir = None

    class mktemped:
        # Wrapper that turns a mktemp-chosen name into an actual file and
        # unlinks it when the wrapper is collected.  os.unlink is bound as
        # a class attribute so __del__ can still reach it at interpreter
        # shutdown, when module globals may already have been cleared.
        _unlink = os.unlink
        _bflags = tempfile._bin_openflags

        def __init__(self, dir, pre, suf):
            self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
            # Create the file.  This will raise an exception if it's
            # mysteriously appeared in the meanwhile.
            os.close(os.open(self.name, self._bflags, 0o600))

        def __del__(self):
            self._unlink(self.name)

    def do_create(self, pre="", suf=""):
        # Pick a name via mktemp, materialize it, and validate the name.
        try:
            file = self.mktemped(self.dir, pre, suf)
        except:
            self.failOnException("mktemp")

        self.nameCheck(file.name, self.dir, pre, suf)
        return file

    def test_basic(self):
        # mktemp can choose usable file names
        self.do_create()
        self.do_create(pre="a")
        self.do_create(suf="b")
        self.do_create(pre="a", suf="b")
        self.do_create(pre="aa", suf=".txt")

    def test_many(self):
        # mktemp can choose many usable file names (stochastic)
        extant = list(range(TEST_FILES))
        for i in extant:
            extant[i] = self.do_create(pre="aa")

##     def test_warning(self):
##         # mktemp issues a warning when used
##         warnings.filterwarnings("error",
##                                 category=RuntimeWarning,
##                                 message="mktemp")
##         self.assertRaises(RuntimeWarning,
##                           tempfile.mktemp, dir=self.dir)

test_classes.append(test_mktemp)
# We test _TemporaryFileWrapper by testing NamedTemporaryFile.
class test_NamedTemporaryFile(TC):
    """Test NamedTemporaryFile()."""

    def do_create(self, dir=None, pre="", suf="", delete=True):
        """Create a NamedTemporaryFile, validate its name, and return it."""
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            result = tempfile.NamedTemporaryFile(
                dir=dir, prefix=pre, suffix=suf, delete=delete)
        except:
            self.failOnException("NamedTemporaryFile")

        self.nameCheck(result.name, dir, pre, suf)
        return result

    def test_basic(self):
        # NamedTemporaryFile can create files
        for kwargs in ({}, {'pre': "a"}, {'suf': "b"},
                       {'pre': "a", 'suf': "b"},
                       {'pre': "aa", 'suf': ".txt"}):
            self.do_create(**kwargs)

    def test_creates_named(self):
        # NamedTemporaryFile creates files with names
        tmp = tempfile.NamedTemporaryFile()
        self.assertTrue(os.path.exists(tmp.name),
                        "NamedTemporaryFile %s does not exist" % tmp.name)

    def test_del_on_close(self):
        # A NamedTemporaryFile is deleted when closed
        workdir = tempfile.mkdtemp()
        try:
            tmp = tempfile.NamedTemporaryFile(dir=workdir)
            tmp.write(b'blat')
            tmp.close()
            self.assertFalse(os.path.exists(tmp.name),
                        "NamedTemporaryFile %s exists after close" % tmp.name)
        finally:
            os.rmdir(workdir)

    def test_dis_del_on_close(self):
        # Tests that delete-on-close can be disabled
        workdir = tempfile.mkdtemp()
        kept_name = None
        try:
            tmp = tempfile.NamedTemporaryFile(dir=workdir, delete=False)
            kept_name = tmp.name
            tmp.write(b'blat')
            tmp.close()
            self.assertTrue(os.path.exists(tmp.name),
                        "NamedTemporaryFile %s missing after close" % tmp.name)
        finally:
            # With delete=False the file survives close(); remove it by hand.
            if kept_name is not None:
                os.unlink(kept_name)
            os.rmdir(workdir)

    def test_multiple_close(self):
        # A NamedTemporaryFile can be closed many times without error
        tmp = tempfile.NamedTemporaryFile()
        tmp.write(b'abc\n')
        tmp.close()
        try:
            tmp.close()
            tmp.close()
        except:
            self.failOnException("close")

    def test_context_manager(self):
        # A NamedTemporaryFile can be used as a context manager
        with tempfile.NamedTemporaryFile() as tmp:
            self.assertTrue(os.path.exists(tmp.name))
        self.assertFalse(os.path.exists(tmp.name))

        def use_closed():
            with tmp:
                pass
        self.assertRaises(ValueError, use_closed)

    # How to test the mode and bufsize parameters?

test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
    """Test SpooledTemporaryFile()."""

    def do_create(self, max_size=0, dir=None, pre="", suf=""):
        """Create a SpooledTemporaryFile, failing the test on any error."""
        if dir is None:
            dir = tempfile.gettempdir()
        try:
            file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
        except Exception:
            # except Exception (not a bare except) so KeyboardInterrupt and
            # SystemExit still abort the run instead of being reported as an
            # ordinary test failure.
            self.failOnException("SpooledTemporaryFile")
        return file

    def test_basic(self):
        # SpooledTemporaryFile can create files
        f = self.do_create()
        self.assertFalse(f._rolled)
        f = self.do_create(max_size=100, pre="a", suf=".txt")
        self.assertFalse(f._rolled)

    def test_del_on_close(self):
        # A SpooledTemporaryFile is deleted when closed
        dir = tempfile.mkdtemp()
        try:
            f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
            self.assertFalse(f._rolled)
            f.write(b'blat ' * 5)
            self.assertTrue(f._rolled)
            filename = f.name
            f.close()
            self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
                        "SpooledTemporaryFile %s exists after close" % filename)
        finally:
            os.rmdir(dir)

    def test_rewrite_small(self):
        # A SpooledTemporaryFile can be written to multiple within the max_size
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        for i in range(5):
            f.seek(0, 0)
            f.write(b'x' * 20)
        self.assertFalse(f._rolled)

    def test_write_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.write(b'x' * 20)
        self.assertFalse(f._rolled)
        f.write(b'x' * 10)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)

    def test_writelines(self):
        # Verify writelines with a SpooledTemporaryFile
        f = self.do_create()
        f.writelines((b'x', b'y', b'z'))
        f.seek(0)
        buf = f.read()
        self.assertEqual(buf, b'xyz')

    def test_writelines_sequential(self):
        # A SpooledTemporaryFile should hold exactly max_size bytes, and roll
        # over afterward
        f = self.do_create(max_size=35)
        f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)

    def test_sparse(self):
        # A SpooledTemporaryFile that is written late in the file will extend
        # when that occurs
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        f.seek(100, 0)
        self.assertFalse(f._rolled)
        f.write(b'x')
        self.assertTrue(f._rolled)

    def test_fileno(self):
        # A SpooledTemporaryFile should roll over to a real file on fileno()
        f = self.do_create(max_size=30)
        self.assertFalse(f._rolled)
        # assertGreater gives a more informative failure message than
        # assertTrue(f.fileno() > 0).
        self.assertGreater(f.fileno(), 0)
        self.assertTrue(f._rolled)

    def test_multiple_close_before_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile()
        f.write(b'abc\n')
        self.assertFalse(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except Exception:
            self.failOnException("close")

    def test_multiple_close_after_rollover(self):
        # A SpooledTemporaryFile can be closed many times without error
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        self.assertTrue(f._rolled)
        f.close()
        try:
            f.close()
            f.close()
        except Exception:
            self.failOnException("close")

    def test_bound_methods(self):
        # It should be OK to steal a bound method from a SpooledTemporaryFile
        # and use it independently; when the file rolls over, those bound
        # methods should continue to function
        f = self.do_create(max_size=30)
        read = f.read
        write = f.write
        seek = f.seek
        write(b"a" * 35)
        write(b"b" * 35)
        seek(0, 0)
        self.assertEqual(read(70), b'a'*35 + b'b'*35)

    def test_text_mode(self):
        # Creating a SpooledTemporaryFile with a text mode should produce
        # a file object reading and writing (Unicode) text strings.
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
        f.write("abc\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\n")
        f.write("def\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\n")
        f.write("xyzzy\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
        # Check that Ctrl+Z doesn't truncate the file
        f.write("foo\x1abar\n")
        f.seek(0)
        self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")

    def test_text_newline_and_encoding(self):
        f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
                                          newline='', encoding='utf-8')
        f.write("\u039B\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n")
        self.assertFalse(f._rolled)
        f.write("\u039B" * 20 + "\r\n")
        f.seek(0)
        self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
        self.assertTrue(f._rolled)

    def test_context_manager_before_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)

    def test_context_manager_during_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        with tempfile.SpooledTemporaryFile(max_size=1) as f:
            self.assertFalse(f._rolled)
            f.write(b'abc\n')
            f.flush()
            self.assertTrue(f._rolled)
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)

    def test_context_manager_after_rollover(self):
        # A SpooledTemporaryFile can be used as a context manager
        f = tempfile.SpooledTemporaryFile(max_size=1)
        f.write(b'abc\n')
        f.flush()
        self.assertTrue(f._rolled)
        with f:
            self.assertFalse(f.closed)
        self.assertTrue(f.closed)
        def use_closed():
            with f:
                pass
        self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
    """Test TemporaryFile()."""

    def test_basic(self):
        # TemporaryFile can create files
        # No point in testing the name params - the file has no name.
        try:
            tempfile.TemporaryFile()
        except Exception:
            # except Exception (not a bare except) so KeyboardInterrupt and
            # SystemExit still propagate instead of being swallowed.
            self.failOnException("TemporaryFile")

    def test_has_no_name(self):
        # TemporaryFile creates files with no names (on this system)
        dir = tempfile.mkdtemp()
        f = tempfile.TemporaryFile(dir=dir)
        f.write(b'blat')
        # Sneaky: because this file has no name, it should not prevent
        # us from removing the directory it was created in.
        try:
            os.rmdir(dir)
        except Exception:
            ei = sys.exc_info()
            # cleanup
            f.close()
            os.rmdir(dir)
            self.failOnException("rmdir", ei)

    def test_multiple_close(self):
        # A TemporaryFile can be closed many times without error
        f = tempfile.TemporaryFile()
        f.write(b'abc\n')
        f.close()
        try:
            f.close()
            f.close()
        except Exception:
            self.failOnException("close")

    # How to test the mode and bufsize parameters?

    def test_mode_and_encoding(self):
        def roundtrip(input, *args, **kwargs):
            with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
                fileobj.write(input)
                fileobj.seek(0)
                self.assertEqual(input, fileobj.read())
        roundtrip(b"1234", "w+b")
        roundtrip("abdc\n", "w+")
        roundtrip("\u039B", "w+", encoding="utf-16")
        roundtrip("foo\r\n", "w+", newline="")
def test_main():
    """Run every collected tempfile test class via the shared test driver."""
    support.run_unittest(*test_classes)
# Allow running this test file directly (outside of regrtest).
if __name__ == "__main__":
    test_main()
| 31.559471 | 100 | 0.576947 |
import tempfile
import os
import sys
import re
import errno
import warnings
import unittest
from test import support
warnings.filterwarnings("ignore",
category=RuntimeWarning,
message="mktemp", module=__name__)
if hasattr(os, 'stat'):
import stat
has_stat = 1
else:
has_stat = 0
has_textmode = (tempfile._text_openflags != tempfile._bin_openflags)
has_spawnl = hasattr(os, 'spawnl')
if sys.platform == 'mac':
TEST_FILES = 32
elif sys.platform in ('openbsd3', 'openbsd4'):
TEST_FILES = 48
else:
TEST_FILES = 100
class TC(unittest.TestCase):
str_check = re.compile(r"[a-zA-Z0-9_-]{6}$")
def failOnException(self, what, ei=None):
if ei is None:
ei = sys.exc_info()
self.fail("%s raised %s: %s" % (what, ei[0], ei[1]))
def nameCheck(self, name, dir, pre, suf):
(ndir, nbase) = os.path.split(name)
npre = nbase[:len(pre)]
nsuf = nbase[len(nbase)-len(suf):]
self.assertEqual(os.path.abspath(ndir), os.path.abspath(dir),
"file '%s' not in directory '%s'" % (name, dir))
self.assertEqual(npre, pre,
"file '%s' does not begin with '%s'" % (nbase, pre))
self.assertEqual(nsuf, suf,
"file '%s' does not end with '%s'" % (nbase, suf))
nbase = nbase[len(pre):len(nbase)-len(suf)]
self.assertTrue(self.str_check.match(nbase),
"random string '%s' does not match /^[a-zA-Z0-9_-]{6}$/"
% nbase)
test_classes = []
class test_exports(TC):
def test_exports(self):
dict = tempfile.__dict__
expected = {
"NamedTemporaryFile" : 1,
"TemporaryFile" : 1,
"mkstemp" : 1,
"mkdtemp" : 1,
"mktemp" : 1,
"TMP_MAX" : 1,
"gettempprefix" : 1,
"gettempdir" : 1,
"tempdir" : 1,
"template" : 1,
"SpooledTemporaryFile" : 1
}
unexp = []
for key in dict:
if key[0] != '_' and key not in expected:
unexp.append(key)
self.assertTrue(len(unexp) == 0,
"unexpected keys: %s" % unexp)
test_classes.append(test_exports)
class test__RandomNameSequence(TC):
    """Test the internal _RandomNameSequence iterator."""

    def setUp(self):
        self.r = tempfile._RandomNameSequence()

    def test_get_six_char_str(self):
        # Each item drawn from the sequence is a valid six-character name.
        s = next(self.r)
        self.nameCheck(s, '', '', '')

    def test_many(self):
        # Successive names are unique (stochastic check over TEST_FILES draws).
        dict = {}
        r = self.r
        for i in range(TEST_FILES):
            s = next(r)
            self.nameCheck(s, '', '', '')
            self.assertFalse(s in dict)
            dict[s] = 1

    def supports_iter(self):
        # The sequence supports the iterator protocol (bounded sample of 20).
        i = 0
        r = self.r
        try:
            for s in r:
                i += 1
                if i == 20:
                    break
        except Exception:
            # Bug fix: the original called the bare name failOnException(),
            # which would raise NameError — it is a method on TC and must be
            # invoked as self.failOnException.  Also narrowed the bare except
            # so KeyboardInterrupt/SystemExit still abort the run.
            self.failOnException("iteration")
test_classes.append(test__RandomNameSequence)
class test__candidate_tempdir_list(TC):
def test_nonempty_list(self):
cand = tempfile._candidate_tempdir_list()
self.assertFalse(len(cand) == 0)
for c in cand:
self.assertTrue(isinstance(c, str),
"%s is not a string" % c)
def test_wanted_dirs(self):
with support.EnvironmentVarGuard() as env:
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname:
env[envname] = os.path.abspath(envname)
cand = tempfile._candidate_tempdir_list()
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = os.getenv(envname)
if not dirname: raise ValueError
self.assertTrue(dirname in cand)
try:
dirname = os.getcwd()
except (AttributeError, os.error):
dirname = os.curdir
self.assertTrue(dirname in cand)
test_classes.append(test__candidate_tempdir_list)
class test__get_candidate_names(TC):
def test_retval(self):
obj = tempfile._get_candidate_names()
self.assertTrue(isinstance(obj, tempfile._RandomNameSequence))
def test_same_thing(self):
a = tempfile._get_candidate_names()
b = tempfile._get_candidate_names()
self.assertTrue(a is b)
test_classes.append(test__get_candidate_names)
class test__mkstemp_inner(TC):
class mkstemped:
_bflags = tempfile._bin_openflags
_tflags = tempfile._text_openflags
_close = os.close
_unlink = os.unlink
def __init__(self, dir, pre, suf, bin):
if bin: flags = self._bflags
else: flags = self._tflags
(self.fd, self.name) = tempfile._mkstemp_inner(dir, pre, suf, flags)
def write(self, str):
os.write(self.fd, str)
def __del__(self):
self._close(self.fd)
self._unlink(self.name)
def do_create(self, dir=None, pre="", suf="", bin=1):
if dir is None:
dir = tempfile.gettempdir()
try:
file = self.mkstemped(dir, pre, suf, bin)
except:
self.failOnException("_mkstemp_inner")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
self.do_create().write(b"blat")
self.do_create(pre="a").write(b"blat")
self.do_create(suf="b").write(b"blat")
self.do_create(pre="a", suf="b").write(b"blat")
self.do_create(pre="aa", suf=".txt").write(b"blat")
def test_basic_many(self):
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
def test_choose_directory(self):
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir).write(b"blat")
finally:
os.rmdir(dir)
def test_file_mode(self):
if not has_stat:
return
file = self.do_create()
mode = stat.S_IMODE(os.stat(file.name).st_mode)
expected = 0o600
if sys.platform in ('win32', 'os2emx', 'mac'):
# There's no distinction among 'user', 'group' and 'world';
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
def test_noinherit(self):
if not has_spawnl:
return
if support.verbose:
v="v"
else:
v="q"
file = self.do_create()
fd = "%d" % file.fd
try:
me = __file__
except NameError:
me = sys.argv[0]
# We have to exec something, so that FD_CLOEXEC will take
# effect. The core of this test is therefore in
# tf_inherit_check.py, which see.
tester = os.path.join(os.path.dirname(os.path.abspath(me)),
"tf_inherit_check.py")
# On Windows a spawn* /path/ with embedded spaces shouldn't be quoted,
if sys.platform in ('win32',):
decorated = '"%s"' % sys.executable
tester = '"%s"' % tester
else:
decorated = sys.executable
retval = os.spawnl(os.P_WAIT, sys.executable, decorated, tester, v, fd)
self.assertFalse(retval < 0,
"child process caught fatal signal %d" % -retval)
self.assertFalse(retval > 0, "child process reports failure %d"%retval)
def test_textmode(self):
if not has_textmode:
return
# A text file is truncated at the first Ctrl+Z byte
f = self.do_create(bin=0)
f.write(b"blat\x1a")
f.write(b"extra\n")
os.lseek(f.fd, 0, os.SEEK_SET)
self.assertEqual(os.read(f.fd, 20), b"blat")
test_classes.append(test__mkstemp_inner)
class test_gettempprefix(TC):
def test_sane_template(self):
# gettempprefix returns a nonempty prefix string
p = tempfile.gettempprefix()
self.assertTrue(isinstance(p, str))
self.assertTrue(len(p) > 0)
def test_usable_template(self):
# gettempprefix returns a usable prefix string
# Create a temp directory, avoiding use of the prefix.
# Then attempt to create a file whose name is
# prefix + 'xxxxxx.xxx' in that directory.
p = tempfile.gettempprefix() + "xxxxxx.xxx"
d = tempfile.mkdtemp(prefix="")
try:
p = os.path.join(d, p)
try:
fd = os.open(p, os.O_RDWR | os.O_CREAT)
except:
self.failOnException("os.open")
os.close(fd)
os.unlink(p)
finally:
os.rmdir(d)
test_classes.append(test_gettempprefix)
class test_gettempdir(TC):
def test_directory_exists(self):
# gettempdir returns a directory which exists
dir = tempfile.gettempdir()
self.assertTrue(os.path.isabs(dir) or dir == os.curdir,
"%s is not an absolute path" % dir)
self.assertTrue(os.path.isdir(dir),
"%s is not a directory" % dir)
def test_directory_writable(self):
# gettempdir returns a directory writable by the user
# sneaky: just instantiate a NamedTemporaryFile, which
# defaults to writing into the directory returned by
# gettempdir.
try:
file = tempfile.NamedTemporaryFile()
file.write(b"blat")
file.close()
except:
self.failOnException("create file in %s" % tempfile.gettempdir())
def test_same_thing(self):
# gettempdir always returns the same object
a = tempfile.gettempdir()
b = tempfile.gettempdir()
self.assertTrue(a is b)
test_classes.append(test_gettempdir)
class test_mkstemp(TC):
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
(fd, name) = tempfile.mkstemp(dir=dir, prefix=pre, suffix=suf)
(ndir, nbase) = os.path.split(name)
adir = os.path.abspath(dir)
self.assertEqual(adir, ndir,
"Directory '%s' incorrectly returned as '%s'" % (adir, ndir))
except:
self.failOnException("mkstemp")
try:
self.nameCheck(name, dir, pre, suf)
finally:
os.close(fd)
os.unlink(name)
def test_basic(self):
# mkstemp can create files
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
self.do_create(dir=".")
def test_choose_directory(self):
# mkstemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
self.do_create(dir=dir)
finally:
os.rmdir(dir)
test_classes.append(test_mkstemp)
class test_mkdtemp(TC):
def do_create(self, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
name = tempfile.mkdtemp(dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("mkdtemp")
try:
self.nameCheck(name, dir, pre, suf)
return name
except:
os.rmdir(name)
raise
def test_basic(self):
# mkdtemp can create directories
os.rmdir(self.do_create())
os.rmdir(self.do_create(pre="a"))
os.rmdir(self.do_create(suf="b"))
os.rmdir(self.do_create(pre="a", suf="b"))
os.rmdir(self.do_create(pre="aa", suf=".txt"))
def test_basic_many(self):
# mkdtemp can create many directories (stochastic)
extant = list(range(TEST_FILES))
try:
for i in extant:
extant[i] = self.do_create(pre="aa")
finally:
for i in extant:
if(isinstance(i, str)):
os.rmdir(i)
def test_choose_directory(self):
# mkdtemp can create directories in a user-selected directory
dir = tempfile.mkdtemp()
try:
os.rmdir(self.do_create(dir=dir))
finally:
os.rmdir(dir)
def test_mode(self):
# mkdtemp creates directories with the proper mode
if not has_stat:
return # ugh, can't use SkipTest.
dir = self.do_create()
try:
mode = stat.S_IMODE(os.stat(dir).st_mode)
mode &= 0o777
expected = 0o700
if sys.platform in ('win32', 'os2emx', 'mac'):
# replicate the 'user' bits.
user = expected >> 6
expected = user * (1 + 8 + 64)
self.assertEqual(mode, expected)
finally:
os.rmdir(dir)
test_classes.append(test_mkdtemp)
class test_mktemp(TC):
# For safety, all use of mktemp must occur in a private directory.
# We must also suppress the RuntimeWarning it generates.
def setUp(self):
self.dir = tempfile.mkdtemp()
def tearDown(self):
if self.dir:
os.rmdir(self.dir)
self.dir = None
class mktemped:
_unlink = os.unlink
_bflags = tempfile._bin_openflags
def __init__(self, dir, pre, suf):
self.name = tempfile.mktemp(dir=dir, prefix=pre, suffix=suf)
# Create the file. This will raise an exception if it's
os.close(os.open(self.name, self._bflags, 0o600))
def __del__(self):
self._unlink(self.name)
def do_create(self, pre="", suf=""):
try:
file = self.mktemped(self.dir, pre, suf)
except:
self.failOnException("mktemp")
self.nameCheck(file.name, self.dir, pre, suf)
return file
def test_basic(self):
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_many(self):
extant = list(range(TEST_FILES))
for i in extant:
extant[i] = self.do_create(pre="aa")
te)
except:
self.failOnException("NamedTemporaryFile")
self.nameCheck(file.name, dir, pre, suf)
return file
def test_basic(self):
self.do_create()
self.do_create(pre="a")
self.do_create(suf="b")
self.do_create(pre="a", suf="b")
self.do_create(pre="aa", suf=".txt")
def test_creates_named(self):
f = tempfile.NamedTemporaryFile()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s does not exist" % f.name)
def test_del_on_close(self):
dir = tempfile.mkdtemp()
try:
f = tempfile.NamedTemporaryFile(dir=dir)
f.write(b'blat')
f.close()
self.assertFalse(os.path.exists(f.name),
"NamedTemporaryFile %s exists after close" % f.name)
finally:
os.rmdir(dir)
def test_dis_del_on_close(self):
dir = tempfile.mkdtemp()
tmp = None
try:
f = tempfile.NamedTemporaryFile(dir=dir, delete=False)
tmp = f.name
f.write(b'blat')
f.close()
self.assertTrue(os.path.exists(f.name),
"NamedTemporaryFile %s missing after close" % f.name)
finally:
if tmp is not None:
os.unlink(tmp)
os.rmdir(dir)
def test_multiple_close(self):
f = tempfile.NamedTemporaryFile()
f.write(b'abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_context_manager(self):
with tempfile.NamedTemporaryFile() as f:
self.assertTrue(os.path.exists(f.name))
self.assertFalse(os.path.exists(f.name))
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
test_classes.append(test_NamedTemporaryFile)
class test_SpooledTemporaryFile(TC):
def do_create(self, max_size=0, dir=None, pre="", suf=""):
if dir is None:
dir = tempfile.gettempdir()
try:
file = tempfile.SpooledTemporaryFile(max_size=max_size, dir=dir, prefix=pre, suffix=suf)
except:
self.failOnException("SpooledTemporaryFile")
return file
def test_basic(self):
f = self.do_create()
self.assertFalse(f._rolled)
f = self.do_create(max_size=100, pre="a", suf=".txt")
self.assertFalse(f._rolled)
def test_del_on_close(self):
dir = tempfile.mkdtemp()
try:
f = tempfile.SpooledTemporaryFile(max_size=10, dir=dir)
self.assertFalse(f._rolled)
f.write(b'blat ' * 5)
self.assertTrue(f._rolled)
filename = f.name
f.close()
self.assertFalse(isinstance(filename, str) and os.path.exists(filename),
"SpooledTemporaryFile %s exists after close" % filename)
finally:
os.rmdir(dir)
def test_rewrite_small(self):
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
for i in range(5):
f.seek(0, 0)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
def test_write_sequential(self):
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.write(b'x' * 20)
self.assertFalse(f._rolled)
f.write(b'x' * 10)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_writelines(self):
f = self.do_create()
f.writelines((b'x', b'y', b'z'))
f.seek(0)
buf = f.read()
self.assertEqual(buf, b'xyz')
def test_writelines_sequential(self):
f = self.do_create(max_size=35)
f.writelines((b'x' * 20, b'x' * 10, b'x' * 5))
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_sparse(self):
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
f.seek(100, 0)
self.assertFalse(f._rolled)
f.write(b'x')
self.assertTrue(f._rolled)
def test_fileno(self):
f = self.do_create(max_size=30)
self.assertFalse(f._rolled)
self.assertTrue(f.fileno() > 0)
self.assertTrue(f._rolled)
def test_multiple_close_before_rollover(self):
f = tempfile.SpooledTemporaryFile()
f.write(b'abc\n')
self.assertFalse(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_multiple_close_after_rollover(self):
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
self.assertTrue(f._rolled)
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
def test_bound_methods(self):
f = self.do_create(max_size=30)
read = f.read
write = f.write
seek = f.seek
write(b"a" * 35)
write(b"b" * 35)
seek(0, 0)
self.assertEqual(read(70), b'a'*35 + b'b'*35)
def test_text_mode(self):
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10)
f.write("abc\n")
f.seek(0)
self.assertEqual(f.read(), "abc\n")
f.write("def\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\n")
f.write("xyzzy\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\n")
f.write("foo\x1abar\n")
f.seek(0)
self.assertEqual(f.read(), "abc\ndef\nxyzzy\nfoo\x1abar\n")
def test_text_newline_and_encoding(self):
f = tempfile.SpooledTemporaryFile(mode='w+', max_size=10,
newline='', encoding='utf-8')
f.write("\u039B\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n")
self.assertFalse(f._rolled)
f.write("\u039B" * 20 + "\r\n")
f.seek(0)
self.assertEqual(f.read(), "\u039B\r\n" + ("\u039B" * 20) + "\r\n")
self.assertTrue(f._rolled)
def test_context_manager_before_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_during_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
with tempfile.SpooledTemporaryFile(max_size=1) as f:
self.assertFalse(f._rolled)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
def test_context_manager_after_rollover(self):
# A SpooledTemporaryFile can be used as a context manager
f = tempfile.SpooledTemporaryFile(max_size=1)
f.write(b'abc\n')
f.flush()
self.assertTrue(f._rolled)
with f:
self.assertFalse(f.closed)
self.assertTrue(f.closed)
def use_closed():
with f:
pass
self.assertRaises(ValueError, use_closed)
test_classes.append(test_SpooledTemporaryFile)
class test_TemporaryFile(TC):
def test_basic(self):
# TemporaryFile can create files
# No point in testing the name params - the file has no name.
try:
tempfile.TemporaryFile()
except:
self.failOnException("TemporaryFile")
def test_has_no_name(self):
# TemporaryFile creates files with no names (on this system)
dir = tempfile.mkdtemp()
f = tempfile.TemporaryFile(dir=dir)
f.write(b'blat')
# Sneaky: because this file has no name, it should not prevent
# us from removing the directory it was created in.
try:
os.rmdir(dir)
except:
ei = sys.exc_info()
# cleanup
f.close()
os.rmdir(dir)
self.failOnException("rmdir", ei)
def test_multiple_close(self):
# A TemporaryFile can be closed many times without error
f = tempfile.TemporaryFile()
f.write(b'abc\n')
f.close()
try:
f.close()
f.close()
except:
self.failOnException("close")
# How to test the mode and bufsize parameters?
def test_mode_and_encoding(self):
def roundtrip(input, *args, **kwargs):
with tempfile.TemporaryFile(*args, **kwargs) as fileobj:
fileobj.write(input)
fileobj.seek(0)
self.assertEqual(input, fileobj.read())
roundtrip(b"1234", "w+b")
roundtrip("abdc\n", "w+")
roundtrip("\u039B", "w+", encoding="utf-16")
roundtrip("foo\r\n", "w+", newline="")
if tempfile.NamedTemporaryFile is not tempfile.TemporaryFile:
test_classes.append(test_TemporaryFile)
def test_main():
support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
| true | true |
f72b0b29ec60b1e3fa0dcfba14c0246d70315797 | 1,173 | py | Python | peamt/features/polyphony.py | adrienycart/PEAMT | d3ae41e86dedeb64fcf54e2454c9feee993574f9 | [
"MIT"
] | 5 | 2020-05-28T18:03:58.000Z | 2021-11-01T13:14:26.000Z | peamt/features/polyphony.py | adrienycart/PEAMT | d3ae41e86dedeb64fcf54e2454c9feee993574f9 | [
"MIT"
] | 5 | 2020-09-26T01:12:41.000Z | 2022-02-10T02:01:25.000Z | peamt/features/polyphony.py | adrienycart/PEAMT | d3ae41e86dedeb64fcf54e2454c9feee993574f9 | [
"MIT"
] | null | null | null | import numpy as np
########################################
### Polyphony --- discarded
########################################
def polyphony_level_diff(roll_output, roll_target):
    """Compare framewise polyphony levels of two piano-rolls.

    Both rolls are (pitch, frame) arrays; the polyphony level of a frame is
    its column sum.  Returns (mean, std, min, max) of the absolute per-frame
    difference between the two polyphony curves.
    """
    diff = np.abs(roll_output.sum(axis=0) - roll_target.sum(axis=0))
    return diff.mean(), diff.std(), diff.min(), diff.max()
# discarded
def false_negative_polyphony_level(roll_target, intervals_target, match):
    """Average polyphony level of the target roll around each missed note.

    Args:
        roll_target: (pitch, frame) piano-roll sampled at 100 frames/second.
        intervals_target: (n_notes, 2) array of [onset, offset] times in seconds.
        match: list of (target_index, output_index) pairs for matched notes.

    Returns:
        List containing, for each unmatched (false-negative) target note, the
        mean polyphony level over that note's time span.
    """
    fs = 100
    if len(match) == 0:
        # Bug fix: iterate over note indices, not over the interval array
        # itself — range() of a 2-D ndarray raised TypeError whenever no
        # notes were matched.
        unmatched_targets = list(range(len(intervals_target)))
    else:
        matched_targets, matched_outputs = zip(*match)
        unmatched_targets = list(set(range(len(intervals_target))) - set(matched_targets))

    unmatched_intervals = intervals_target[unmatched_targets, :]

    all_avg_poly = []
    for [start, end] in unmatched_intervals:
        start_idx = int(round(start * fs))
        end_idx = int(round(end * fs))
        # Polyphony per frame is the column sum; average it over the note span.
        avg_poly = np.mean(np.sum(roll_target[:, start_idx:end_idx], axis=0))
        all_avg_poly += [avg_poly]
    return all_avg_poly
| 30.076923 | 87 | 0.656436 | import numpy as np
| true | true |
f72b0bcdbe61d8b42e2ce9462ada3ba434fd8b03 | 2,078 | py | Python | tests/common/test_run/round_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_run/round_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | tests/common/test_run/round_run.py | KnowingNothing/akg-test | 114d8626b824b9a31af50a482afc07ab7121862b | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import secrets
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import round
from tests.common.gen_random import random_gaussian
secretsGenerator = secrets.SystemRandom()
def round_run(shape, dtype, attrs):
    """Build the `round` kernel and, unless tuning, launch and verify it.

    In tuning mode (a 'tuning' key is present in attrs) only the compiled
    module is returned — together with generated data when the tuning flag is
    truthy.  In normal mode the kernel is executed and its output compared
    against the numpy reference via compare_tensor.
    """
    shapes = [shape]
    dtypes = [dtype]
    if 'tuning' in attrs:
        tuning = attrs.get("tuning", False)
        kernel_name = attrs.get("kernel_name", False)
        mod = utils.op_build_test(round.round_value, shapes, dtypes,
                                  kernel_name=kernel_name, attrs=attrs, tuning=tuning)
        if not tuning:
            return mod
        expect, input, output = gen_data(dtype, shape)
        return mod, expect, (input, output)
    mod = utils.op_build_test(round.round_value, shapes, dtypes,
                              kernel_name='round', attrs=attrs)
    expect, input, output = gen_data(dtype, shape)
    output = utils.mod_launch(mod, (input, output), expect=expect)
    return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, shape):
    """Generate random input, the expected rounded result, and an output buffer.

    Roughly half of the time the input values are snapped onto exact x.5
    half-way points to exercise round's tie-breaking.  The expectation mirrors
    the kernel: cast to float16 first, then round to int32.
    """
    input = random_gaussian(shape, miu=1, sigma=10).astype(dtype)
    # With probability ~1/2, force every value onto an x.5 boundary.
    if secretsGenerator.randint(0, 9) % 2 == 0:
        input = (input.astype('int32') + 0.5).astype(dtype)
    expect = np.round(input.astype(np.float16)).astype("int32")
    # Placeholder buffer for the kernel launch; its contents are overwritten.
    output = np.full(shape, np.nan, "int32")
    return expect, input, output
| 39.961538 | 120 | 0.702117 |
import numpy as np
import secrets
from tests.common.tensorio import compare_tensor
from akg.utils import kernel_exec as utils
from tests.common.test_op import round
from tests.common.gen_random import random_gaussian
secretsGenerator = secrets.SystemRandom()
def round_run(shape, dtype, attrs):
in_shape = [shape]
in_dtype = [dtype]
if 'tuning' in attrs.keys():
t = attrs.get("tuning", False)
kernel_name = attrs.get("kernel_name", False)
mod = utils.op_build_test(round.round_value, in_shape, in_dtype, kernel_name=kernel_name, attrs=attrs, tuning=t)
if t:
expect, input, output = gen_data(dtype, shape)
return mod, expect, (input, output)
else:
return mod
else:
mod = utils.op_build_test(round.round_value, in_shape, in_dtype, kernel_name='round', attrs=attrs)
expect, input, output = gen_data(dtype, shape)
output = utils.mod_launch(mod, (input, output), expect=expect)
return input, output, expect, compare_tensor(output, expect, rtol=5e-03, equal_nan=True)
def gen_data(dtype, shape):
input = random_gaussian(shape, miu=1, sigma=10).astype(dtype)
a = secretsGenerator.randint(0, 9)
if a % 2 == 0:
input = input.astype('int32') + 0.5
input = input.astype(dtype)
input_f16 = input.astype(np.float16)
expect = np.round(input_f16).astype("int32")
output = np.full(shape, np.nan, "int32")
return expect, input, output
| true | true |
f72b0bdf689c564a67a58c7ea477390e6e8c6c23 | 24,215 | py | Python | homeassistant/components/mqtt/fan.py | wlcrs/core | cf27b82d2fdce406fda3b1b9cd52d42d7f7d00d6 | [
"Apache-2.0"
] | null | null | null | homeassistant/components/mqtt/fan.py | wlcrs/core | cf27b82d2fdce406fda3b1b9cd52d42d7f7d00d6 | [
"Apache-2.0"
] | 7 | 2022-03-01T06:32:08.000Z | 2022-03-31T07:20:49.000Z | homeassistant/components/mqtt/fan.py | fblondeau/home-assistant | a8da0eedd32ac8198f06d4e32622d0f8b40b4a41 | [
"Apache-2.0"
] | null | null | null | """Support for MQTT fans."""
from __future__ import annotations
import asyncio
import functools
import logging
import math
import voluptuous as vol
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_OSCILLATING,
ATTR_PERCENTAGE,
ATTR_PRESET_MODE,
FanEntity,
FanEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_STATE,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from . import subscription
from .config import MQTT_RW_SCHEMA
from .const import (
CONF_COMMAND_TEMPLATE,
CONF_COMMAND_TOPIC,
CONF_ENCODING,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_STATE_VALUE_TEMPLATE,
PAYLOAD_NONE,
)
from .debug_info import log_messages
from .mixins import (
MQTT_ENTITY_COMMON_SCHEMA,
MqttEntity,
async_get_platform_config_from_yaml,
async_setup_entry_helper,
async_setup_platform_helper,
warn_for_legacy_schema,
)
from .models import MqttCommandTemplate, MqttValueTemplate
from .util import valid_publish_topic, valid_subscribe_topic
CONF_PERCENTAGE_STATE_TOPIC = "percentage_state_topic"
CONF_PERCENTAGE_COMMAND_TOPIC = "percentage_command_topic"
CONF_PERCENTAGE_VALUE_TEMPLATE = "percentage_value_template"
CONF_PERCENTAGE_COMMAND_TEMPLATE = "percentage_command_template"
CONF_PAYLOAD_RESET_PERCENTAGE = "payload_reset_percentage"
CONF_SPEED_RANGE_MIN = "speed_range_min"
CONF_SPEED_RANGE_MAX = "speed_range_max"
CONF_PRESET_MODE_STATE_TOPIC = "preset_mode_state_topic"
CONF_PRESET_MODE_COMMAND_TOPIC = "preset_mode_command_topic"
CONF_PRESET_MODE_VALUE_TEMPLATE = "preset_mode_value_template"
CONF_PRESET_MODE_COMMAND_TEMPLATE = "preset_mode_command_template"
CONF_PRESET_MODES_LIST = "preset_modes"
CONF_PAYLOAD_RESET_PRESET_MODE = "payload_reset_preset_mode"
CONF_SPEED_STATE_TOPIC = "speed_state_topic"
CONF_SPEED_COMMAND_TOPIC = "speed_command_topic"
CONF_SPEED_VALUE_TEMPLATE = "speed_value_template"
CONF_OSCILLATION_STATE_TOPIC = "oscillation_state_topic"
CONF_OSCILLATION_COMMAND_TOPIC = "oscillation_command_topic"
CONF_OSCILLATION_VALUE_TEMPLATE = "oscillation_value_template"
CONF_OSCILLATION_COMMAND_TEMPLATE = "oscillation_command_template"
CONF_PAYLOAD_OSCILLATION_ON = "payload_oscillation_on"
CONF_PAYLOAD_OSCILLATION_OFF = "payload_oscillation_off"
CONF_PAYLOAD_OFF_SPEED = "payload_off_speed"
CONF_PAYLOAD_LOW_SPEED = "payload_low_speed"
CONF_PAYLOAD_MEDIUM_SPEED = "payload_medium_speed"
CONF_PAYLOAD_HIGH_SPEED = "payload_high_speed"
CONF_SPEED_LIST = "speeds"
DEFAULT_NAME = "MQTT Fan"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_PAYLOAD_RESET = "None"
DEFAULT_OPTIMISTIC = False
DEFAULT_SPEED_RANGE_MIN = 1
DEFAULT_SPEED_RANGE_MAX = 100
OSCILLATE_ON_PAYLOAD = "oscillate_on"
OSCILLATE_OFF_PAYLOAD = "oscillate_off"
MQTT_FAN_ATTRIBUTES_BLOCKED = frozenset(
{
fan.ATTR_DIRECTION,
fan.ATTR_OSCILLATING,
fan.ATTR_PERCENTAGE_STEP,
fan.ATTR_PERCENTAGE,
fan.ATTR_PRESET_MODE,
fan.ATTR_PRESET_MODES,
}
)
_LOGGER = logging.getLogger(__name__)
def valid_speed_range_configuration(config):
    """Validate the fan speed_range settings; raise ValueError on a bad range."""
    range_min = config.get(CONF_SPEED_RANGE_MIN)
    range_max = config.get(CONF_SPEED_RANGE_MAX)
    # A minimum of 0 is reserved for "off" and therefore not allowed.
    if range_min == 0:
        raise ValueError("speed_range_min must be > 0")
    # The range must be non-empty and ordered.
    if range_min >= range_max:
        raise ValueError("speed_range_max must be > speed_range_min")
    return config
def valid_preset_mode_configuration(config):
    """Ensure the preset-mode reset payload is not also a configured preset mode."""
    reset_payload = config.get(CONF_PAYLOAD_RESET_PRESET_MODE)
    preset_modes = config.get(CONF_PRESET_MODES_LIST)
    if reset_payload in preset_modes:
        raise ValueError("preset_modes must not contain payload_reset_preset_mode")
    return config
# Option schema shared by the legacy platform schema, the modern YAML schema
# and the discovery schema below.
_PLATFORM_SCHEMA_BASE = MQTT_RW_SCHEMA.extend(
    {
        vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
        vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
        vol.Optional(CONF_COMMAND_TEMPLATE): cv.template,
        vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): valid_publish_topic,
        vol.Optional(CONF_OSCILLATION_COMMAND_TEMPLATE): cv.template,
        vol.Optional(CONF_OSCILLATION_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_OSCILLATION_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_PERCENTAGE_COMMAND_TOPIC): valid_publish_topic,
        vol.Optional(CONF_PERCENTAGE_COMMAND_TEMPLATE): cv.template,
        vol.Optional(CONF_PERCENTAGE_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_PERCENTAGE_VALUE_TEMPLATE): cv.template,
        # CONF_PRESET_MODE_COMMAND_TOPIC and CONF_PRESET_MODES_LIST must be used together
        vol.Inclusive(
            CONF_PRESET_MODE_COMMAND_TOPIC, "preset_modes"
        ): valid_publish_topic,
        vol.Inclusive(
            CONF_PRESET_MODES_LIST, "preset_modes", default=[]
        ): cv.ensure_list,
        vol.Optional(CONF_PRESET_MODE_COMMAND_TEMPLATE): cv.template,
        vol.Optional(CONF_PRESET_MODE_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_PRESET_MODE_VALUE_TEMPLATE): cv.template,
        vol.Optional(
            CONF_SPEED_RANGE_MIN, default=DEFAULT_SPEED_RANGE_MIN
        ): cv.positive_int,
        vol.Optional(
            CONF_SPEED_RANGE_MAX, default=DEFAULT_SPEED_RANGE_MAX
        ): cv.positive_int,
        vol.Optional(
            CONF_PAYLOAD_RESET_PERCENTAGE, default=DEFAULT_PAYLOAD_RESET
        ): cv.string,
        vol.Optional(
            CONF_PAYLOAD_RESET_PRESET_MODE, default=DEFAULT_PAYLOAD_RESET
        ): cv.string,
        vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
        vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
        vol.Optional(
            CONF_PAYLOAD_OSCILLATION_OFF, default=OSCILLATE_OFF_PAYLOAD
        ): cv.string,
        vol.Optional(
            CONF_PAYLOAD_OSCILLATION_ON, default=OSCILLATE_ON_PAYLOAD
        ): cv.string,
        vol.Optional(CONF_SPEED_COMMAND_TOPIC): valid_publish_topic,
        vol.Optional(CONF_SPEED_STATE_TOPIC): valid_subscribe_topic,
        vol.Optional(CONF_SPEED_VALUE_TEMPLATE): cv.template,
        vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
    }
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
# Configuring MQTT Fans under the fan platform key is deprecated in HA Core 2022.6
PLATFORM_SCHEMA = vol.All(
    cv.PLATFORM_SCHEMA.extend(_PLATFORM_SCHEMA_BASE.schema),
    valid_speed_range_configuration,
    valid_preset_mode_configuration,
    warn_for_legacy_schema(fan.DOMAIN),
)
# Schema for fans configured under the modern `mqtt:` integration key.
PLATFORM_SCHEMA_MODERN = vol.All(
    _PLATFORM_SCHEMA_BASE,
    valid_speed_range_configuration,
    valid_preset_mode_configuration,
)
# Schema applied to configs received via MQTT discovery; unknown keys are
# stripped (REMOVE_EXTRA) and legacy speed options are rejected explicitly.
DISCOVERY_SCHEMA = vol.All(
    # CONF_SPEED_COMMAND_TOPIC, CONF_SPEED_LIST, CONF_SPEED_STATE_TOPIC, CONF_SPEED_VALUE_TEMPLATE and
    # Speeds SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH SPEED_OFF,
    # are no longer supported, support was removed in release 2021.12
    cv.removed(CONF_PAYLOAD_HIGH_SPEED),
    cv.removed(CONF_PAYLOAD_LOW_SPEED),
    cv.removed(CONF_PAYLOAD_MEDIUM_SPEED),
    cv.removed(CONF_SPEED_COMMAND_TOPIC),
    cv.removed(CONF_SPEED_LIST),
    cv.removed(CONF_SPEED_STATE_TOPIC),
    cv.removed(CONF_SPEED_VALUE_TEMPLATE),
    _PLATFORM_SCHEMA_BASE.extend({}, extra=vol.REMOVE_EXTRA),
    valid_speed_range_configuration,
    valid_preset_mode_configuration,
)
async def async_setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    async_add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up MQTT fans configured under the fan platform key (deprecated).

    Delegates entity creation to the shared platform-setup helper.
    """
    # Deprecated in HA Core 2022.6
    await async_setup_platform_helper(
        hass, fan.DOMAIN, config, async_add_entities, _async_setup_entity
    )
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up MQTT fan through configuration.yaml and dynamically through MQTT discovery."""
    # load and initialize platform config from configuration.yaml
    # (all YAML-declared fans are created concurrently)
    await asyncio.gather(
        *(
            _async_setup_entity(hass, async_add_entities, config, config_entry)
            for config in await async_get_platform_config_from_yaml(
                hass, fan.DOMAIN, PLATFORM_SCHEMA_MODERN
            )
        )
    )
    # setup for discovery: register a factory that creates an entity
    # for every discovered fan config (validated against DISCOVERY_SCHEMA)
    setup = functools.partial(
        _async_setup_entity, hass, async_add_entities, config_entry=config_entry
    )
    await async_setup_entry_helper(hass, fan.DOMAIN, setup, DISCOVERY_SCHEMA)
async def _async_setup_entity(
    hass, async_add_entities, config, config_entry=None, discovery_data=None
):
    """Set up the MQTT fan.

    Shared factory used by YAML setup, legacy platform setup and discovery;
    ``discovery_data`` is only set for discovered entities.
    """
    async_add_entities([MqttFan(hass, config, config_entry, discovery_data)])
class MqttFan(MqttEntity, FanEntity):
    """A MQTT fan component."""
    # entity_id template for the fan platform
    _entity_id_format = fan.ENTITY_ID_FORMAT
    # attributes that may not be set via the MQTT JSON attributes topic
    _attributes_extra_blocked = MQTT_FAN_ATTRIBUTES_BLOCKED
    def __init__(self, hass, config, config_entry, discovery_data):
        """Initialize the MQTT fan."""
        # Current state; None means unknown until a state message arrives.
        self._state = None
        self._percentage = None
        self._preset_mode = None
        self._oscillation = None
        self._supported_features = 0
        self._topic = None
        self._payload = None
        self._value_templates = None
        self._command_templates = None
        # Per-feature optimistic flags, resolved in _setup_from_config.
        self._optimistic = None
        self._optimistic_oscillation = None
        self._optimistic_percentage = None
        self._optimistic_preset_mode = None
        # NOTE(review): MqttEntity.__init__ is expected to invoke
        # _setup_from_config with the validated config — confirm in mixins.
        MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
    @staticmethod
    def config_schema():
        """Return the config schema."""
        return DISCOVERY_SCHEMA
    def _setup_from_config(self, config):
        """(Re)Setup the entity."""
        # (min, max) device speed range used for percentage conversion.
        self._speed_range = (
            config.get(CONF_SPEED_RANGE_MIN),
            config.get(CONF_SPEED_RANGE_MAX),
        )
        # MQTT topics keyed by their config option name; unset topics are None.
        self._topic = {
            key: config.get(key)
            for key in (
                CONF_STATE_TOPIC,
                CONF_COMMAND_TOPIC,
                CONF_PERCENTAGE_STATE_TOPIC,
                CONF_PERCENTAGE_COMMAND_TOPIC,
                CONF_PRESET_MODE_STATE_TOPIC,
                CONF_PRESET_MODE_COMMAND_TOPIC,
                CONF_OSCILLATION_STATE_TOPIC,
                CONF_OSCILLATION_COMMAND_TOPIC,
            )
        }
        # Raw templates; wrapped into render callables at the end of this method.
        self._value_templates = {
            CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
            ATTR_PERCENTAGE: config.get(CONF_PERCENTAGE_VALUE_TEMPLATE),
            ATTR_PRESET_MODE: config.get(CONF_PRESET_MODE_VALUE_TEMPLATE),
            ATTR_OSCILLATING: config.get(CONF_OSCILLATION_VALUE_TEMPLATE),
        }
        self._command_templates = {
            CONF_STATE: config.get(CONF_COMMAND_TEMPLATE),
            ATTR_PERCENTAGE: config.get(CONF_PERCENTAGE_COMMAND_TEMPLATE),
            ATTR_PRESET_MODE: config.get(CONF_PRESET_MODE_COMMAND_TEMPLATE),
            ATTR_OSCILLATING: config.get(CONF_OSCILLATION_COMMAND_TEMPLATE),
        }
        # Payload strings used when publishing commands and parsing state.
        self._payload = {
            "STATE_ON": config[CONF_PAYLOAD_ON],
            "STATE_OFF": config[CONF_PAYLOAD_OFF],
            "OSCILLATE_ON_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_ON],
            "OSCILLATE_OFF_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_OFF],
            "PERCENTAGE_RESET": config[CONF_PAYLOAD_RESET_PERCENTAGE],
            "PRESET_MODE_RESET": config[CONF_PAYLOAD_RESET_PRESET_MODE],
        }
        # Features are enabled by the presence of their command topic.
        self._feature_percentage = CONF_PERCENTAGE_COMMAND_TOPIC in config
        self._feature_preset_mode = CONF_PRESET_MODE_COMMAND_TOPIC in config
        if self._feature_preset_mode:
            self._preset_modes = config[CONF_PRESET_MODES_LIST]
        else:
            self._preset_modes = []
        # Number of distinct speeds, capped at 100 (percentage resolution).
        self._speed_count = (
            min(int_states_in_range(self._speed_range), 100)
            if self._feature_percentage
            else 100
        )
        # Optimistic mode per feature: assume commands succeeded when either
        # globally configured or when the corresponding state topic is unset.
        optimistic = config[CONF_OPTIMISTIC]
        self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
        self._optimistic_oscillation = (
            optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None
        )
        self._optimistic_percentage = (
            optimistic or self._topic[CONF_PERCENTAGE_STATE_TOPIC] is None
        )
        self._optimistic_preset_mode = (
            optimistic or self._topic[CONF_PRESET_MODE_STATE_TOPIC] is None
        )
        self._supported_features = 0
        # `bool and Flag` yields the flag when the topic is set, else False (0).
        self._supported_features |= (
            self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is not None
            and FanEntityFeature.OSCILLATE
        )
        if self._feature_percentage:
            self._supported_features |= FanEntityFeature.SET_SPEED
        if self._feature_preset_mode:
            self._supported_features |= FanEntityFeature.PRESET_MODE
        # Replace raw templates with render callables bound to this entity.
        for key, tpl in self._command_templates.items():
            self._command_templates[key] = MqttCommandTemplate(
                tpl, entity=self
            ).async_render
        for key, tpl in self._value_templates.items():
            self._value_templates[key] = MqttValueTemplate(
                tpl,
                entity=self,
            ).async_render_with_possible_json_value
    def _prepare_subscribe_topics(self):
        """(Re)Subscribe to topics."""
        topics = {}
        @callback
        @log_messages(self.hass, self.entity_id)
        def state_received(msg):
            """Handle new received MQTT message."""
            payload = self._value_templates[CONF_STATE](msg.payload)
            if not payload:
                _LOGGER.debug("Ignoring empty state from '%s'", msg.topic)
                return
            # Unrecognized payloads (not ON/OFF/None) leave the state unchanged.
            if payload == self._payload["STATE_ON"]:
                self._state = True
            elif payload == self._payload["STATE_OFF"]:
                self._state = False
            elif payload == PAYLOAD_NONE:
                self._state = None
            self.async_write_ha_state()
        if self._topic[CONF_STATE_TOPIC] is not None:
            topics[CONF_STATE_TOPIC] = {
                "topic": self._topic[CONF_STATE_TOPIC],
                "msg_callback": state_received,
                "qos": self._config[CONF_QOS],
                "encoding": self._config[CONF_ENCODING] or None,
            }
        @callback
        @log_messages(self.hass, self.entity_id)
        def percentage_received(msg):
            """Handle new received MQTT message for the percentage."""
            rendered_percentage_payload = self._value_templates[ATTR_PERCENTAGE](
                msg.payload
            )
            if not rendered_percentage_payload:
                _LOGGER.debug("Ignoring empty speed from '%s'", msg.topic)
                return
            # Reset payload clears the known percentage.
            if rendered_percentage_payload == self._payload["PERCENTAGE_RESET"]:
                self._percentage = None
                self.async_write_ha_state()
                return
            try:
                # Convert the device-native speed value into a 0-100 percentage.
                percentage = ranged_value_to_percentage(
                    self._speed_range, int(rendered_percentage_payload)
                )
            except ValueError:
                _LOGGER.warning(
                    "'%s' received on topic %s. '%s' is not a valid speed within the speed range",
                    msg.payload,
                    msg.topic,
                    rendered_percentage_payload,
                )
                return
            if percentage < 0 or percentage > 100:
                _LOGGER.warning(
                    "'%s' received on topic %s. '%s' is not a valid speed within the speed range",
                    msg.payload,
                    msg.topic,
                    rendered_percentage_payload,
                )
                return
            self._percentage = percentage
            self.async_write_ha_state()
        if self._topic[CONF_PERCENTAGE_STATE_TOPIC] is not None:
            topics[CONF_PERCENTAGE_STATE_TOPIC] = {
                "topic": self._topic[CONF_PERCENTAGE_STATE_TOPIC],
                "msg_callback": percentage_received,
                "qos": self._config[CONF_QOS],
                "encoding": self._config[CONF_ENCODING] or None,
            }
            # Percentage is unknown until the first state message arrives.
            self._percentage = None
        @callback
        @log_messages(self.hass, self.entity_id)
        def preset_mode_received(msg):
            """Handle new received MQTT message for preset mode."""
            preset_mode = self._value_templates[ATTR_PRESET_MODE](msg.payload)
            if preset_mode == self._payload["PRESET_MODE_RESET"]:
                self._preset_mode = None
                self.async_write_ha_state()
                return
            if not preset_mode:
                _LOGGER.debug("Ignoring empty preset_mode from '%s'", msg.topic)
                return
            if preset_mode not in self.preset_modes:
                _LOGGER.warning(
                    "'%s' received on topic %s. '%s' is not a valid preset mode",
                    msg.payload,
                    msg.topic,
                    preset_mode,
                )
                return
            self._preset_mode = preset_mode
            self.async_write_ha_state()
        if self._topic[CONF_PRESET_MODE_STATE_TOPIC] is not None:
            topics[CONF_PRESET_MODE_STATE_TOPIC] = {
                "topic": self._topic[CONF_PRESET_MODE_STATE_TOPIC],
                "msg_callback": preset_mode_received,
                "qos": self._config[CONF_QOS],
                "encoding": self._config[CONF_ENCODING] or None,
            }
            # Preset mode is unknown until the first state message arrives.
            self._preset_mode = None
        @callback
        @log_messages(self.hass, self.entity_id)
        def oscillation_received(msg):
            """Handle new received MQTT message for the oscillation."""
            payload = self._value_templates[ATTR_OSCILLATING](msg.payload)
            if not payload:
                _LOGGER.debug("Ignoring empty oscillation from '%s'", msg.topic)
                return
            if payload == self._payload["OSCILLATE_ON_PAYLOAD"]:
                self._oscillation = True
            elif payload == self._payload["OSCILLATE_OFF_PAYLOAD"]:
                self._oscillation = False
            self.async_write_ha_state()
        if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
            topics[CONF_OSCILLATION_STATE_TOPIC] = {
                "topic": self._topic[CONF_OSCILLATION_STATE_TOPIC],
                "msg_callback": oscillation_received,
                "qos": self._config[CONF_QOS],
                "encoding": self._config[CONF_ENCODING] or None,
            }
            self._oscillation = False
        self._sub_state = subscription.async_prepare_subscribe_topics(
            self.hass, self._sub_state, topics
        )
    async def _subscribe_topics(self):
        """(Re)Subscribe to topics."""
        await subscription.async_subscribe_topics(self.hass, self._sub_state)
    @property
    def assumed_state(self):
        """Return true if we do optimistic updates."""
        return self._optimistic
    @property
    def is_on(self) -> bool | None:
        """Return true if device is on."""
        return self._state
    @property
    def percentage(self):
        """Return the current percentage."""
        return self._percentage
    @property
    def preset_mode(self):
        """Return the current preset _mode."""
        return self._preset_mode
    @property
    def preset_modes(self) -> list:
        """Get the list of available preset modes."""
        return self._preset_modes
    @property
    def supported_features(self) -> int:
        """Flag supported features."""
        return self._supported_features
    @property
    def speed_count(self) -> int:
        """Return the number of speeds the fan supports."""
        return self._speed_count
    @property
    def oscillating(self):
        """Return the oscillation state."""
        return self._oscillation
    # The speed attribute deprecated in the schema, support will be removed after a quarter (2021.7)
    async def async_turn_on(
        self,
        percentage: int = None,
        preset_mode: str = None,
        **kwargs,
    ) -> None:
        """Turn on the entity.
        This method is a coroutine.
        """
        mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_ON"])
        await self.async_publish(
            self._topic[CONF_COMMAND_TOPIC],
            mqtt_payload,
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
            self._config[CONF_ENCODING],
        )
        # Optional percentage/preset are sent as separate commands after ON.
        if percentage:
            await self.async_set_percentage(percentage)
        if preset_mode:
            await self.async_set_preset_mode(preset_mode)
        if self._optimistic:
            self._state = True
            self.async_write_ha_state()
    async def async_turn_off(self, **kwargs) -> None:
        """Turn off the entity.
        This method is a coroutine.
        """
        mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_OFF"])
        await self.async_publish(
            self._topic[CONF_COMMAND_TOPIC],
            mqtt_payload,
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
            self._config[CONF_ENCODING],
        )
        if self._optimistic:
            self._state = False
            self.async_write_ha_state()
    async def async_set_percentage(self, percentage: int) -> None:
        """Set the percentage of the fan.
        This method is a coroutine.
        """
        # Map the 0-100 percentage back onto the device-native speed range.
        percentage_payload = math.ceil(
            percentage_to_ranged_value(self._speed_range, percentage)
        )
        mqtt_payload = self._command_templates[ATTR_PERCENTAGE](percentage_payload)
        await self.async_publish(
            self._topic[CONF_PERCENTAGE_COMMAND_TOPIC],
            mqtt_payload,
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
            self._config[CONF_ENCODING],
        )
        if self._optimistic_percentage:
            self._percentage = percentage
            self.async_write_ha_state()
    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set the preset mode of the fan.
        This method is a coroutine.
        """
        # Raises if preset_mode is not in self.preset_modes.
        self._valid_preset_mode_or_raise(preset_mode)
        mqtt_payload = self._command_templates[ATTR_PRESET_MODE](preset_mode)
        await self.async_publish(
            self._topic[CONF_PRESET_MODE_COMMAND_TOPIC],
            mqtt_payload,
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
            self._config[CONF_ENCODING],
        )
        if self._optimistic_preset_mode:
            self._preset_mode = preset_mode
            self.async_write_ha_state()
    async def async_oscillate(self, oscillating: bool) -> None:
        """Set oscillation.
        This method is a coroutine.
        """
        if oscillating:
            mqtt_payload = self._command_templates[ATTR_OSCILLATING](
                self._payload["OSCILLATE_ON_PAYLOAD"]
            )
        else:
            mqtt_payload = self._command_templates[ATTR_OSCILLATING](
                self._payload["OSCILLATE_OFF_PAYLOAD"]
            )
        await self.async_publish(
            self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
            mqtt_payload,
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
            self._config[CONF_ENCODING],
        )
        if self._optimistic_oscillation:
            self._oscillation = oscillating
            self.async_write_ha_state()
| 36.800912 | 102 | 0.663556 | from __future__ import annotations
import asyncio
import functools
import logging
import math
import voluptuous as vol
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_OSCILLATING,
ATTR_PERCENTAGE,
ATTR_PRESET_MODE,
FanEntity,
FanEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_STATE,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util.percentage import (
int_states_in_range,
percentage_to_ranged_value,
ranged_value_to_percentage,
)
from . import subscription
from .config import MQTT_RW_SCHEMA
from .const import (
CONF_COMMAND_TEMPLATE,
CONF_COMMAND_TOPIC,
CONF_ENCODING,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
CONF_STATE_VALUE_TEMPLATE,
PAYLOAD_NONE,
)
from .debug_info import log_messages
from .mixins import (
MQTT_ENTITY_COMMON_SCHEMA,
MqttEntity,
async_get_platform_config_from_yaml,
async_setup_entry_helper,
async_setup_platform_helper,
warn_for_legacy_schema,
)
from .models import MqttCommandTemplate, MqttValueTemplate
from .util import valid_publish_topic, valid_subscribe_topic
CONF_PERCENTAGE_STATE_TOPIC = "percentage_state_topic"
CONF_PERCENTAGE_COMMAND_TOPIC = "percentage_command_topic"
CONF_PERCENTAGE_VALUE_TEMPLATE = "percentage_value_template"
CONF_PERCENTAGE_COMMAND_TEMPLATE = "percentage_command_template"
CONF_PAYLOAD_RESET_PERCENTAGE = "payload_reset_percentage"
CONF_SPEED_RANGE_MIN = "speed_range_min"
CONF_SPEED_RANGE_MAX = "speed_range_max"
CONF_PRESET_MODE_STATE_TOPIC = "preset_mode_state_topic"
CONF_PRESET_MODE_COMMAND_TOPIC = "preset_mode_command_topic"
CONF_PRESET_MODE_VALUE_TEMPLATE = "preset_mode_value_template"
CONF_PRESET_MODE_COMMAND_TEMPLATE = "preset_mode_command_template"
CONF_PRESET_MODES_LIST = "preset_modes"
CONF_PAYLOAD_RESET_PRESET_MODE = "payload_reset_preset_mode"
CONF_SPEED_STATE_TOPIC = "speed_state_topic"
CONF_SPEED_COMMAND_TOPIC = "speed_command_topic"
CONF_SPEED_VALUE_TEMPLATE = "speed_value_template"
CONF_OSCILLATION_STATE_TOPIC = "oscillation_state_topic"
CONF_OSCILLATION_COMMAND_TOPIC = "oscillation_command_topic"
CONF_OSCILLATION_VALUE_TEMPLATE = "oscillation_value_template"
CONF_OSCILLATION_COMMAND_TEMPLATE = "oscillation_command_template"
CONF_PAYLOAD_OSCILLATION_ON = "payload_oscillation_on"
CONF_PAYLOAD_OSCILLATION_OFF = "payload_oscillation_off"
CONF_PAYLOAD_OFF_SPEED = "payload_off_speed"
CONF_PAYLOAD_LOW_SPEED = "payload_low_speed"
CONF_PAYLOAD_MEDIUM_SPEED = "payload_medium_speed"
CONF_PAYLOAD_HIGH_SPEED = "payload_high_speed"
CONF_SPEED_LIST = "speeds"
DEFAULT_NAME = "MQTT Fan"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_PAYLOAD_RESET = "None"
DEFAULT_OPTIMISTIC = False
DEFAULT_SPEED_RANGE_MIN = 1
DEFAULT_SPEED_RANGE_MAX = 100
OSCILLATE_ON_PAYLOAD = "oscillate_on"
OSCILLATE_OFF_PAYLOAD = "oscillate_off"
MQTT_FAN_ATTRIBUTES_BLOCKED = frozenset(
{
fan.ATTR_DIRECTION,
fan.ATTR_OSCILLATING,
fan.ATTR_PERCENTAGE_STEP,
fan.ATTR_PERCENTAGE,
fan.ATTR_PRESET_MODE,
fan.ATTR_PRESET_MODES,
}
)
_LOGGER = logging.getLogger(__name__)
def valid_speed_range_configuration(config):
if config.get(CONF_SPEED_RANGE_MIN) == 0:
raise ValueError("speed_range_min must be > 0")
if config.get(CONF_SPEED_RANGE_MIN) >= config.get(CONF_SPEED_RANGE_MAX):
raise ValueError("speed_range_max must be > speed_range_min")
return config
def valid_preset_mode_configuration(config):
if config.get(CONF_PAYLOAD_RESET_PRESET_MODE) in config.get(CONF_PRESET_MODES_LIST):
raise ValueError("preset_modes must not contain payload_reset_preset_mode")
return config
_PLATFORM_SCHEMA_BASE = MQTT_RW_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_OSCILLATION_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_OSCILLATION_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_OSCILLATION_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_PERCENTAGE_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_PERCENTAGE_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_PERCENTAGE_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_PERCENTAGE_VALUE_TEMPLATE): cv.template,
vol.Inclusive(
CONF_PRESET_MODE_COMMAND_TOPIC, "preset_modes"
): valid_publish_topic,
vol.Inclusive(
CONF_PRESET_MODES_LIST, "preset_modes", default=[]
): cv.ensure_list,
vol.Optional(CONF_PRESET_MODE_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_PRESET_MODE_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_PRESET_MODE_VALUE_TEMPLATE): cv.template,
vol.Optional(
CONF_SPEED_RANGE_MIN, default=DEFAULT_SPEED_RANGE_MIN
): cv.positive_int,
vol.Optional(
CONF_SPEED_RANGE_MAX, default=DEFAULT_SPEED_RANGE_MAX
): cv.positive_int,
vol.Optional(
CONF_PAYLOAD_RESET_PERCENTAGE, default=DEFAULT_PAYLOAD_RESET
): cv.string,
vol.Optional(
CONF_PAYLOAD_RESET_PRESET_MODE, default=DEFAULT_PAYLOAD_RESET
): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(
CONF_PAYLOAD_OSCILLATION_OFF, default=OSCILLATE_OFF_PAYLOAD
): cv.string,
vol.Optional(
CONF_PAYLOAD_OSCILLATION_ON, default=OSCILLATE_ON_PAYLOAD
): cv.string,
vol.Optional(CONF_SPEED_COMMAND_TOPIC): valid_publish_topic,
vol.Optional(CONF_SPEED_STATE_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_SPEED_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
}
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema)
PLATFORM_SCHEMA = vol.All(
cv.PLATFORM_SCHEMA.extend(_PLATFORM_SCHEMA_BASE.schema),
valid_speed_range_configuration,
valid_preset_mode_configuration,
warn_for_legacy_schema(fan.DOMAIN),
)
PLATFORM_SCHEMA_MODERN = vol.All(
_PLATFORM_SCHEMA_BASE,
valid_speed_range_configuration,
valid_preset_mode_configuration,
)
DISCOVERY_SCHEMA = vol.All(
cv.removed(CONF_PAYLOAD_HIGH_SPEED),
cv.removed(CONF_PAYLOAD_LOW_SPEED),
cv.removed(CONF_PAYLOAD_MEDIUM_SPEED),
cv.removed(CONF_SPEED_COMMAND_TOPIC),
cv.removed(CONF_SPEED_LIST),
cv.removed(CONF_SPEED_STATE_TOPIC),
cv.removed(CONF_SPEED_VALUE_TEMPLATE),
_PLATFORM_SCHEMA_BASE.extend({}, extra=vol.REMOVE_EXTRA),
valid_speed_range_configuration,
valid_preset_mode_configuration,
)
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
await async_setup_platform_helper(
hass, fan.DOMAIN, config, async_add_entities, _async_setup_entity
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
await asyncio.gather(
*(
_async_setup_entity(hass, async_add_entities, config, config_entry)
for config in await async_get_platform_config_from_yaml(
hass, fan.DOMAIN, PLATFORM_SCHEMA_MODERN
)
)
)
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, fan.DOMAIN, setup, DISCOVERY_SCHEMA)
async def _async_setup_entity(
hass, async_add_entities, config, config_entry=None, discovery_data=None
):
async_add_entities([MqttFan(hass, config, config_entry, discovery_data)])
class MqttFan(MqttEntity, FanEntity):
_entity_id_format = fan.ENTITY_ID_FORMAT
_attributes_extra_blocked = MQTT_FAN_ATTRIBUTES_BLOCKED
def __init__(self, hass, config, config_entry, discovery_data):
self._state = None
self._percentage = None
self._preset_mode = None
self._oscillation = None
self._supported_features = 0
self._topic = None
self._payload = None
self._value_templates = None
self._command_templates = None
self._optimistic = None
self._optimistic_oscillation = None
self._optimistic_percentage = None
self._optimistic_preset_mode = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
return DISCOVERY_SCHEMA
def _setup_from_config(self, config):
self._speed_range = (
config.get(CONF_SPEED_RANGE_MIN),
config.get(CONF_SPEED_RANGE_MAX),
)
self._topic = {
key: config.get(key)
for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_PERCENTAGE_STATE_TOPIC,
CONF_PERCENTAGE_COMMAND_TOPIC,
CONF_PRESET_MODE_STATE_TOPIC,
CONF_PRESET_MODE_COMMAND_TOPIC,
CONF_OSCILLATION_STATE_TOPIC,
CONF_OSCILLATION_COMMAND_TOPIC,
)
}
self._value_templates = {
CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
ATTR_PERCENTAGE: config.get(CONF_PERCENTAGE_VALUE_TEMPLATE),
ATTR_PRESET_MODE: config.get(CONF_PRESET_MODE_VALUE_TEMPLATE),
ATTR_OSCILLATING: config.get(CONF_OSCILLATION_VALUE_TEMPLATE),
}
self._command_templates = {
CONF_STATE: config.get(CONF_COMMAND_TEMPLATE),
ATTR_PERCENTAGE: config.get(CONF_PERCENTAGE_COMMAND_TEMPLATE),
ATTR_PRESET_MODE: config.get(CONF_PRESET_MODE_COMMAND_TEMPLATE),
ATTR_OSCILLATING: config.get(CONF_OSCILLATION_COMMAND_TEMPLATE),
}
self._payload = {
"STATE_ON": config[CONF_PAYLOAD_ON],
"STATE_OFF": config[CONF_PAYLOAD_OFF],
"OSCILLATE_ON_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_ON],
"OSCILLATE_OFF_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_OFF],
"PERCENTAGE_RESET": config[CONF_PAYLOAD_RESET_PERCENTAGE],
"PRESET_MODE_RESET": config[CONF_PAYLOAD_RESET_PRESET_MODE],
}
self._feature_percentage = CONF_PERCENTAGE_COMMAND_TOPIC in config
self._feature_preset_mode = CONF_PRESET_MODE_COMMAND_TOPIC in config
if self._feature_preset_mode:
self._preset_modes = config[CONF_PRESET_MODES_LIST]
else:
self._preset_modes = []
self._speed_count = (
min(int_states_in_range(self._speed_range), 100)
if self._feature_percentage
else 100
)
optimistic = config[CONF_OPTIMISTIC]
self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
self._optimistic_oscillation = (
optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None
)
self._optimistic_percentage = (
optimistic or self._topic[CONF_PERCENTAGE_STATE_TOPIC] is None
)
self._optimistic_preset_mode = (
optimistic or self._topic[CONF_PRESET_MODE_STATE_TOPIC] is None
)
self._supported_features = 0
self._supported_features |= (
self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is not None
and FanEntityFeature.OSCILLATE
)
if self._feature_percentage:
self._supported_features |= FanEntityFeature.SET_SPEED
if self._feature_preset_mode:
self._supported_features |= FanEntityFeature.PRESET_MODE
for key, tpl in self._command_templates.items():
self._command_templates[key] = MqttCommandTemplate(
tpl, entity=self
).async_render
for key, tpl in self._value_templates.items():
self._value_templates[key] = MqttValueTemplate(
tpl,
entity=self,
).async_render_with_possible_json_value
def _prepare_subscribe_topics(self):
topics = {}
@callback
@log_messages(self.hass, self.entity_id)
def state_received(msg):
payload = self._value_templates[CONF_STATE](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty state from '%s'", msg.topic)
return
if payload == self._payload["STATE_ON"]:
self._state = True
elif payload == self._payload["STATE_OFF"]:
self._state = False
elif payload == PAYLOAD_NONE:
self._state = None
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
"topic": self._topic[CONF_STATE_TOPIC],
"msg_callback": state_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
@callback
@log_messages(self.hass, self.entity_id)
def percentage_received(msg):
rendered_percentage_payload = self._value_templates[ATTR_PERCENTAGE](
msg.payload
)
if not rendered_percentage_payload:
_LOGGER.debug("Ignoring empty speed from '%s'", msg.topic)
return
if rendered_percentage_payload == self._payload["PERCENTAGE_RESET"]:
self._percentage = None
self.async_write_ha_state()
return
try:
percentage = ranged_value_to_percentage(
self._speed_range, int(rendered_percentage_payload)
)
except ValueError:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid speed within the speed range",
msg.payload,
msg.topic,
rendered_percentage_payload,
)
return
if percentage < 0 or percentage > 100:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid speed within the speed range",
msg.payload,
msg.topic,
rendered_percentage_payload,
)
return
self._percentage = percentage
self.async_write_ha_state()
if self._topic[CONF_PERCENTAGE_STATE_TOPIC] is not None:
topics[CONF_PERCENTAGE_STATE_TOPIC] = {
"topic": self._topic[CONF_PERCENTAGE_STATE_TOPIC],
"msg_callback": percentage_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
self._percentage = None
@callback
@log_messages(self.hass, self.entity_id)
def preset_mode_received(msg):
preset_mode = self._value_templates[ATTR_PRESET_MODE](msg.payload)
if preset_mode == self._payload["PRESET_MODE_RESET"]:
self._preset_mode = None
self.async_write_ha_state()
return
if not preset_mode:
_LOGGER.debug("Ignoring empty preset_mode from '%s'", msg.topic)
return
if preset_mode not in self.preset_modes:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid preset mode",
msg.payload,
msg.topic,
preset_mode,
)
return
self._preset_mode = preset_mode
self.async_write_ha_state()
if self._topic[CONF_PRESET_MODE_STATE_TOPIC] is not None:
topics[CONF_PRESET_MODE_STATE_TOPIC] = {
"topic": self._topic[CONF_PRESET_MODE_STATE_TOPIC],
"msg_callback": preset_mode_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
self._preset_mode = None
@callback
@log_messages(self.hass, self.entity_id)
def oscillation_received(msg):
payload = self._value_templates[ATTR_OSCILLATING](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty oscillation from '%s'", msg.topic)
return
if payload == self._payload["OSCILLATE_ON_PAYLOAD"]:
self._oscillation = True
elif payload == self._payload["OSCILLATE_OFF_PAYLOAD"]:
self._oscillation = False
self.async_write_ha_state()
if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
topics[CONF_OSCILLATION_STATE_TOPIC] = {
"topic": self._topic[CONF_OSCILLATION_STATE_TOPIC],
"msg_callback": oscillation_received,
"qos": self._config[CONF_QOS],
"encoding": self._config[CONF_ENCODING] or None,
}
self._oscillation = False
self._sub_state = subscription.async_prepare_subscribe_topics(
self.hass, self._sub_state, topics
)
async def _subscribe_topics(self):
await subscription.async_subscribe_topics(self.hass, self._sub_state)
@property
def assumed_state(self):
return self._optimistic
@property
def is_on(self) -> bool | None:
return self._state
@property
def percentage(self):
return self._percentage
@property
def preset_mode(self):
return self._preset_mode
@property
def preset_modes(self) -> list:
return self._preset_modes
@property
def supported_features(self) -> int:
return self._supported_features
@property
def speed_count(self) -> int:
return self._speed_count
@property
def oscillating(self):
return self._oscillation
async def async_turn_on(
self,
percentage: int = None,
preset_mode: str = None,
**kwargs,
) -> None:
mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_ON"])
await self.async_publish(
self._topic[CONF_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if percentage:
await self.async_set_percentage(percentage)
if preset_mode:
await self.async_set_preset_mode(preset_mode)
if self._optimistic:
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_OFF"])
await self.async_publish(
self._topic[CONF_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
self._state = False
self.async_write_ha_state()
    async def async_set_percentage(self, percentage: int) -> None:
        """Set the speed of the fan, as a percentage.

        The percentage is mapped onto the configured speed range (rounded
        up) and rendered through the percentage command template before
        publishing.
        """
        percentage_payload = math.ceil(
            percentage_to_ranged_value(self._speed_range, percentage)
        )
        mqtt_payload = self._command_templates[ATTR_PERCENTAGE](percentage_payload)
        await self.async_publish(
            self._topic[CONF_PERCENTAGE_COMMAND_TOPIC],
            mqtt_payload,
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
            self._config[CONF_ENCODING],
        )
        if self._optimistic_percentage:
            self._percentage = percentage
            self.async_write_ha_state()
    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set a preset mode.

        Raises if ``preset_mode`` is not one of the configured modes, then
        publishes the templated payload to the preset-mode command topic.
        """
        self._valid_preset_mode_or_raise(preset_mode)
        mqtt_payload = self._command_templates[ATTR_PRESET_MODE](preset_mode)
        await self.async_publish(
            self._topic[CONF_PRESET_MODE_COMMAND_TOPIC],
            mqtt_payload,
            self._config[CONF_QOS],
            self._config[CONF_RETAIN],
            self._config[CONF_ENCODING],
        )
        if self._optimistic_preset_mode:
            self._preset_mode = preset_mode
            self.async_write_ha_state()
async def async_oscillate(self, oscillating: bool) -> None:
if oscillating:
mqtt_payload = self._command_templates[ATTR_OSCILLATING](
self._payload["OSCILLATE_ON_PAYLOAD"]
)
else:
mqtt_payload = self._command_templates[ATTR_OSCILLATING](
self._payload["OSCILLATE_OFF_PAYLOAD"]
)
await self.async_publish(
self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic_oscillation:
self._oscillation = oscillating
self.async_write_ha_state()
| true | true |
f72b0d77ba36f92d632793f510174fe192e614ec | 390 | py | Python | src/utils.py | CarlSchader/poker-api | 446c036367fdb75f5b0fd7f93f347d839bbf71b6 | [
"MIT"
] | null | null | null | src/utils.py | CarlSchader/poker-api | 446c036367fdb75f5b0fd7f93f347d839bbf71b6 | [
"MIT"
] | null | null | null | src/utils.py | CarlSchader/poker-api | 446c036367fdb75f5b0fd7f93f347d839bbf71b6 | [
"MIT"
] | null | null | null | import functools
def dict_cmp(x, y, key):
    """Three-way compare two mappings by the string form of ``key``'s value.

    Returns 1, -1 or 0 when x's value is greater than, less than or equal
    to y's value (compared as strings).
    """
    left, right = str(x[key]), str(y[key])
    return (left > right) - (left < right)
def sort_dict(dictionary, cmp_func):
    """Return the dict's (key, value) pairs sorted by value.

    ``cmp_func`` is an old-style comparator taking two values and returning
    a negative, zero or positive number.
    """
    return sorted(
        dictionary.items(),
        key=functools.cmp_to_key(lambda a, b: cmp_func(a[1], b[1])),
    )
def dict_cmp(x, y, key):
if str(x[key]) > str(y[key]):
return 1
elif str(x[key]) < str(y[key]):
return -1
else:
return 0
def sort_dict(dictionary, cmp_func):
arr = []
for key in dictionary:
arr.append((key, dictionary[key]))
arr.sort(key=functools.cmp_to_key(lambda x, y : cmp_func(x[1], y[1])))
return arr | true | true |
f72b0e39b7ac8a2190ca5bc480dd257ebdc5b8a6 | 290 | py | Python | generate/partial-header/dataclass/annotation.py | kurusugawa-computer/annofab-api-python-client | 9920e0745f1ee8ea79c26e26a61013b415351982 | [
"MIT"
] | 17 | 2019-05-04T04:24:28.000Z | 2021-12-14T02:43:24.000Z | generate/partial-header/dataclass/annotation.py | kurusugawa-computer/annofab-api-python-client | 9920e0745f1ee8ea79c26e26a61013b415351982 | [
"MIT"
] | 214 | 2019-05-13T01:07:28.000Z | 2022-03-28T20:02:34.000Z | generate/partial-header/dataclass/annotation.py | kurusugawa-computer/annofab-api-python-client | 9920e0745f1ee8ea79c26e26a61013b415351982 | [
"MIT"
] | 2 | 2019-06-15T05:01:50.000Z | 2019-07-04T02:29:55.000Z | from annofabapi.models import (
AdditionalDataDefinitionType,
AnnotationDataHoldingType,
AnnotationType,
InternationalizationMessage,
TaskPhase,
TaskStatus,
)
# Annotation payload: either a raw string or a structured mapping.
# (Union/Dict/Any come from the generated partial header prepended to this
# file — not visible here.)
AnnotationData = Union[str, Dict[str, Any]]
# Full annotation data has no fixed schema at this layer.
FullAnnotationData = Any
# Value of an additional-data field, keyed by field name.
AdditionalDataValue = Dict[str, Any]
| 22.307692 | 43 | 0.762069 | from annofabapi.models import (
AdditionalDataDefinitionType,
AnnotationDataHoldingType,
AnnotationType,
InternationalizationMessage,
TaskPhase,
TaskStatus,
)
AnnotationData = Union[str, Dict[str, Any]]
FullAnnotationData = Any
AdditionalDataValue = Dict[str, Any]
| true | true |
f72b0f69d6927ac9a2071aaa3c495a33948a8289 | 7,677 | py | Python | homeassistant/components/epson/media_player.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 30,023 | 2016-04-13T10:17:53.000Z | 2020-03-02T12:56:31.000Z | homeassistant/components/epson/media_player.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 24,710 | 2016-04-13T08:27:26.000Z | 2020-03-02T12:59:13.000Z | homeassistant/components/epson/media_player.py | mtarjoianu/core | 44e9146463ac505eb3d1c0651ad126cb25c28a54 | [
"Apache-2.0"
] | 11,956 | 2016-04-13T18:42:31.000Z | 2020-03-02T09:32:12.000Z | """Support for Epson projector."""
from __future__ import annotations
import logging
from epson_projector.const import (
BACK,
BUSY,
CMODE,
CMODE_LIST,
CMODE_LIST_SET,
DEFAULT_SOURCES,
EPSON_CODES,
FAST,
INV_SOURCES,
MUTE,
PAUSE,
PLAY,
POWER,
SOURCE,
SOURCE_LIST,
STATE_UNAVAILABLE as EPSON_STATE_UNAVAILABLE,
TURN_OFF,
TURN_ON,
VOL_DOWN,
VOL_UP,
VOLUME,
)
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerEntity,
MediaPlayerEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from .const import ATTR_CMODE, DOMAIN, SERVICE_SELECT_CMODE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up the Epson projector from a config entry."""
    projector = hass.data[DOMAIN][config_entry.entry_id]
    entity = EpsonProjectorMediaPlayer(
        projector=projector,
        name=config_entry.title,
        unique_id=config_entry.unique_id,
        entry=config_entry,
    )
    async_add_entities([entity], True)
    # Expose the "select color mode" entity service on this platform.
    platform = entity_platform.async_get_current_platform()
    platform.async_register_entity_service(
        SERVICE_SELECT_CMODE,
        {vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))},
        SERVICE_SELECT_CMODE,
    )
class EpsonProjectorMediaPlayer(MediaPlayerEntity):
    """Representation of Epson Projector Device."""
    _attr_supported_features = (
        MediaPlayerEntityFeature.TURN_ON
        | MediaPlayerEntityFeature.TURN_OFF
        | MediaPlayerEntityFeature.SELECT_SOURCE
        | MediaPlayerEntityFeature.VOLUME_MUTE
        | MediaPlayerEntityFeature.VOLUME_STEP
        | MediaPlayerEntityFeature.NEXT_TRACK
        | MediaPlayerEntityFeature.PREVIOUS_TRACK
    )
    def __init__(self, projector, name, unique_id, entry):
        """Initialize entity to control Epson projector."""
        self._projector = projector
        self._entry = entry
        self._name = name
        # Flipped to True once the projector answers a power query.
        self._available = False
        # Human-readable color mode, resolved through CMODE_LIST.
        self._cmode = None
        self._source_list = list(DEFAULT_SOURCES.values())
        self._source = None
        self._volume = None
        self._state = None
        self._unique_id = unique_id
    async def set_unique_id(self):
        """Set unique id for projector config entry.

        Returns True when a serial number was fetched and a config-entry
        reload was scheduled (callers should stop using this instance);
        False when a unique id already exists.
        """
        _LOGGER.debug("Setting unique_id for projector")
        if self._unique_id:
            return False
        if uid := await self._projector.get_serial_number():
            self.hass.config_entries.async_update_entry(self._entry, unique_id=uid)
            registry = async_get_entity_registry(self.hass)
            old_entity_id = registry.async_get_entity_id(
                "media_player", DOMAIN, self._entry.entry_id
            )
            if old_entity_id is not None:
                registry.async_update_entity(old_entity_id, new_unique_id=uid)
            self.hass.async_create_task(
                self.hass.config_entries.async_reload(self._entry.entry_id)
            )
            return True
    async def async_update(self):
        """Update state of device."""
        power_state = await self._projector.get_power()
        _LOGGER.debug("Projector status: %s", power_state)
        if not power_state or power_state == EPSON_STATE_UNAVAILABLE:
            self._available = False
            return
        self._available = True
        if power_state == EPSON_CODES[POWER]:
            self._state = STATE_ON
            # A scheduled reload makes further polling of this instance moot.
            if await self.set_unique_id():
                return
            self._source_list = list(DEFAULT_SOURCES.values())
            cmode = await self._projector.get_property(CMODE)
            self._cmode = CMODE_LIST.get(cmode, self._cmode)
            source = await self._projector.get_property(SOURCE)
            self._source = SOURCE_LIST.get(source, self._source)
            volume = await self._projector.get_property(VOLUME)
            if volume:
                self._volume = volume
        elif power_state == BUSY:
            # Warming up / cooling down still reports as "on".
            self._state = STATE_ON
        else:
            self._state = STATE_OFF
    @property
    def device_info(self) -> DeviceInfo | None:
        """Get attributes about the device."""
        if not self._unique_id:
            return None
        return DeviceInfo(
            identifiers={(DOMAIN, self._unique_id)},
            manufacturer="Epson",
            model="Epson",
            name="Epson projector",
            # NOTE(review): via_device points at this device's own
            # identifier — looks self-referential; confirm intent.
            via_device=(DOMAIN, self._unique_id),
        )
    @property
    def name(self):
        """Return the name of the device."""
        return self._name
    @property
    def unique_id(self):
        """Return unique ID."""
        return self._unique_id
    @property
    def state(self):
        """Return the state of the device."""
        return self._state
    @property
    def available(self):
        """Return if projector is available."""
        return self._available
    async def async_turn_on(self):
        """Turn on epson (only sent when currently off)."""
        if self._state == STATE_OFF:
            await self._projector.send_command(TURN_ON)
    async def async_turn_off(self):
        """Turn off epson (only sent when currently on)."""
        if self._state == STATE_ON:
            await self._projector.send_command(TURN_OFF)
    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_list
    @property
    def source(self):
        """Get current input sources."""
        return self._source
    @property
    def volume_level(self):
        """Return the volume level of the media player (0..1)."""
        return self._volume
    async def select_cmode(self, cmode):
        """Set color mode in Epson."""
        await self._projector.send_command(CMODE_LIST_SET[cmode])
    async def async_select_source(self, source):
        """Select input source."""
        selected_source = INV_SOURCES[source]
        await self._projector.send_command(selected_source)
    async def async_mute_volume(self, mute):
        """Mute (true) or unmute (false) sound.

        NOTE(review): the ``mute`` argument is ignored — MUTE is sent
        unconditionally, so this presumably toggles; confirm.
        """
        await self._projector.send_command(MUTE)
    async def async_volume_up(self):
        """Increase volume."""
        await self._projector.send_command(VOL_UP)
    async def async_volume_down(self):
        """Decrease volume."""
        await self._projector.send_command(VOL_DOWN)
    async def async_media_play(self):
        """Play media via Epson."""
        await self._projector.send_command(PLAY)
    async def async_media_pause(self):
        """Pause media via Epson."""
        await self._projector.send_command(PAUSE)
    async def async_media_next_track(self):
        """Skip to next."""
        await self._projector.send_command(FAST)
    async def async_media_previous_track(self):
        """Skip to previous."""
        await self._projector.send_command(BACK)
    @property
    def extra_state_attributes(self):
        """Return device specific state attributes."""
        if self._cmode is None:
            return {}
        return {ATTR_CMODE: self._cmode}
| 31.592593 | 88 | 0.655464 | from __future__ import annotations
import logging
from epson_projector.const import (
BACK,
BUSY,
CMODE,
CMODE_LIST,
CMODE_LIST_SET,
DEFAULT_SOURCES,
EPSON_CODES,
FAST,
INV_SOURCES,
MUTE,
PAUSE,
PLAY,
POWER,
SOURCE,
SOURCE_LIST,
STATE_UNAVAILABLE as EPSON_STATE_UNAVAILABLE,
TURN_OFF,
TURN_ON,
VOL_DOWN,
VOL_UP,
VOLUME,
)
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerEntity,
MediaPlayerEntityFeature,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.entity_registry import async_get as async_get_entity_registry
from .const import ATTR_CMODE, DOMAIN, SERVICE_SELECT_CMODE
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
entry_id = config_entry.entry_id
unique_id = config_entry.unique_id
projector = hass.data[DOMAIN][entry_id]
projector_entity = EpsonProjectorMediaPlayer(
projector=projector,
name=config_entry.title,
unique_id=unique_id,
entry=config_entry,
)
async_add_entities([projector_entity], True)
platform = entity_platform.async_get_current_platform()
platform.async_register_entity_service(
SERVICE_SELECT_CMODE,
{vol.Required(ATTR_CMODE): vol.All(cv.string, vol.Any(*CMODE_LIST_SET))},
SERVICE_SELECT_CMODE,
)
class EpsonProjectorMediaPlayer(MediaPlayerEntity):
_attr_supported_features = (
MediaPlayerEntityFeature.TURN_ON
| MediaPlayerEntityFeature.TURN_OFF
| MediaPlayerEntityFeature.SELECT_SOURCE
| MediaPlayerEntityFeature.VOLUME_MUTE
| MediaPlayerEntityFeature.VOLUME_STEP
| MediaPlayerEntityFeature.NEXT_TRACK
| MediaPlayerEntityFeature.PREVIOUS_TRACK
)
def __init__(self, projector, name, unique_id, entry):
self._projector = projector
self._entry = entry
self._name = name
self._available = False
self._cmode = None
self._source_list = list(DEFAULT_SOURCES.values())
self._source = None
self._volume = None
self._state = None
self._unique_id = unique_id
async def set_unique_id(self):
_LOGGER.debug("Setting unique_id for projector")
if self._unique_id:
return False
if uid := await self._projector.get_serial_number():
self.hass.config_entries.async_update_entry(self._entry, unique_id=uid)
registry = async_get_entity_registry(self.hass)
old_entity_id = registry.async_get_entity_id(
"media_player", DOMAIN, self._entry.entry_id
)
if old_entity_id is not None:
registry.async_update_entity(old_entity_id, new_unique_id=uid)
self.hass.async_create_task(
self.hass.config_entries.async_reload(self._entry.entry_id)
)
return True
async def async_update(self):
power_state = await self._projector.get_power()
_LOGGER.debug("Projector status: %s", power_state)
if not power_state or power_state == EPSON_STATE_UNAVAILABLE:
self._available = False
return
self._available = True
if power_state == EPSON_CODES[POWER]:
self._state = STATE_ON
if await self.set_unique_id():
return
self._source_list = list(DEFAULT_SOURCES.values())
cmode = await self._projector.get_property(CMODE)
self._cmode = CMODE_LIST.get(cmode, self._cmode)
source = await self._projector.get_property(SOURCE)
self._source = SOURCE_LIST.get(source, self._source)
volume = await self._projector.get_property(VOLUME)
if volume:
self._volume = volume
elif power_state == BUSY:
self._state = STATE_ON
else:
self._state = STATE_OFF
@property
def device_info(self) -> DeviceInfo | None:
if not self._unique_id:
return None
return DeviceInfo(
identifiers={(DOMAIN, self._unique_id)},
manufacturer="Epson",
model="Epson",
name="Epson projector",
via_device=(DOMAIN, self._unique_id),
)
@property
def name(self):
return self._name
@property
def unique_id(self):
return self._unique_id
@property
def state(self):
return self._state
@property
def available(self):
return self._available
async def async_turn_on(self):
if self._state == STATE_OFF:
await self._projector.send_command(TURN_ON)
async def async_turn_off(self):
if self._state == STATE_ON:
await self._projector.send_command(TURN_OFF)
@property
def source_list(self):
return self._source_list
@property
def source(self):
return self._source
@property
def volume_level(self):
return self._volume
async def select_cmode(self, cmode):
await self._projector.send_command(CMODE_LIST_SET[cmode])
async def async_select_source(self, source):
selected_source = INV_SOURCES[source]
await self._projector.send_command(selected_source)
async def async_mute_volume(self, mute):
await self._projector.send_command(MUTE)
async def async_volume_up(self):
await self._projector.send_command(VOL_UP)
async def async_volume_down(self):
await self._projector.send_command(VOL_DOWN)
async def async_media_play(self):
await self._projector.send_command(PLAY)
async def async_media_pause(self):
await self._projector.send_command(PAUSE)
async def async_media_next_track(self):
await self._projector.send_command(FAST)
async def async_media_previous_track(self):
await self._projector.send_command(BACK)
@property
def extra_state_attributes(self):
if self._cmode is None:
return {}
return {ATTR_CMODE: self._cmode}
| true | true |
f72b112a2a1fc41633e4d17514fd8efbba957fc5 | 299 | py | Python | World 02/Class 13/ex050.py | DanielRios549/PythonExcercises | acb44a7cc383e8534f47bc59235d9cc04fd83880 | [
"MIT"
] | 6 | 2021-05-04T22:09:16.000Z | 2022-01-08T20:27:39.000Z | World 02/Class 13/ex050.py | DanielRios549/PythonExercises | acb44a7cc383e8534f47bc59235d9cc04fd83880 | [
"MIT"
] | null | null | null | World 02/Class 13/ex050.py | DanielRios549/PythonExercises | acb44a7cc383e8534f47bc59235d9cc04fd83880 | [
"MIT"
] | null | null | null | '''
Get 6 integer numbers and show the sum of the even ones. Do not consider the odd ones.
'''
sum_number = 0
for count in range(0, 6):
number = int(input('Choose a number: '))
if number % 2 == 0:
sum_number += number
print(f'The sum of all even numbers equals {sum_number}')
| 23 | 90 | 0.64214 | sum_number = 0
for count in range(0, 6):
number = int(input('Choose a number: '))
if number % 2 == 0:
sum_number += number
print(f'The sum of all even numbers equals {sum_number}')
| true | true |
f72b11dd0aed4940421d5a68bccc46f47f43bad2 | 6,464 | py | Python | integrations/tensorflow/e2e/conv_test.py | rise-lang/iree | 46ad3fe392d38ce3df6eff7826cc1ab331a40b72 | [
"Apache-2.0"
] | null | null | null | integrations/tensorflow/e2e/conv_test.py | rise-lang/iree | 46ad3fe392d38ce3df6eff7826cc1ab331a40b72 | [
"Apache-2.0"
] | null | null | null | integrations/tensorflow/e2e/conv_test.py | rise-lang/iree | 46ad3fe392d38ce3df6eff7826cc1ab331a40b72 | [
"Apache-2.0"
] | null | null | null | # Lint as: python3
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyiree.tf.support import tf_test_utils
import tensorflow.compat.v2 as tf
class Conv2dModule(tf.Module):
  """tf.Module exposing tf.nn.conv2d under fixed input signatures.

  Method names encode the shapes and padding:
  conv2d_<img NHWC digits>x<kernel HWIO digits>_<valid|same>, e.g.
  conv2d_1451x2311_valid takes a [1, 4, 5, 1] image and a [2, 3, 1, 1]
  kernel with VALID padding.  All use stride [1, 1, 1, 1].
  """

  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 1], tf.float32),
      tf.TensorSpec([1, 1, 1, 1], tf.float32),
  ])
  def conv2d_1451x1111_valid(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([2, 4, 5, 1], tf.float32),
      tf.TensorSpec([1, 1, 1, 1], tf.float32),
  ])
  def conv2d_2451x1111_valid(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 1], tf.float32),
      tf.TensorSpec([2, 3, 1, 1], tf.float32),
  ])
  def conv2d_1451x2311_valid(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 1], tf.float32),
      tf.TensorSpec([2, 3, 1, 1], tf.float32),
  ])
  def conv2d_1451x2311_same(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([2, 4, 5, 1], tf.float32),
      tf.TensorSpec([2, 3, 1, 1], tf.float32),
  ])
  def conv2d_2451x2311_same(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 2], tf.float32),
      tf.TensorSpec([3, 2, 2, 1], tf.float32),
  ])
  def conv2d_1452x3221_same(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 1], tf.float32),
      tf.TensorSpec([1, 1, 1, 2], tf.float32),
  ])
  def conv2d_1451x1112_same(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 2], tf.float32),
      tf.TensorSpec([1, 1, 2, 2], tf.float32),
  ])
  def conv2d_1452x1122_same(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 2], tf.float32),
      tf.TensorSpec([2, 2, 2, 3], tf.float32),
  ])
  def conv2d_1452x2223_same(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([1, 4, 5, 2], tf.float32),
      tf.TensorSpec([2, 2, 2, 3], tf.float32),
  ])
  def conv2d_1452x2223_valid(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
  @tf.function(input_signature=[
      tf.TensorSpec([2, 4, 5, 2], tf.float32),
      tf.TensorSpec([2, 2, 2, 3], tf.float32),
  ])
  def conv2d_2452x2223_valid(self, img, kernel):
    return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf_test_utils.compile_module(Conv2dModule)
class ConvTest(tf_test_utils.SavedModelTestCase):
  """End-to-end tests comparing compiled conv2d results against TF.

  Each test builds deterministic inputs with np.arange/np.ones and asserts
  the compiled module matches the reference via assert_all_close().
  """

  def test_id_batch_size_1(self):
    i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
    k = np.ones([1, 1, 1, 1], dtype=np.float32)
    r = self.get_module().conv2d_1451x1111_valid(i, k)
    r.print().assert_all_close()
  def test_id_batch_size_2(self):
    i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
    k = np.ones([1, 1, 1, 1], dtype=np.float32)
    r = self.get_module().conv2d_2451x1111_valid(i, k)
    r.print().assert_all_close()
  def test_asym_kernel(self):
    i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
    k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
    r = self.get_module().conv2d_1451x2311_valid(i, k)
    r.print().assert_all_close()
  def test_padding(self):
    i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
    k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
    r = self.get_module().conv2d_1451x2311_same(i, k)
    r.print().assert_all_close()
  def test_batched_padding(self):
    i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
    k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
    r = self.get_module().conv2d_2451x2311_same(i, k)
    r.print().assert_all_close()
  def test_feature_reduce(self):
    i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
    k = np.ones([3, 2, 2, 1], dtype=np.float32)
    r = self.get_module().conv2d_1452x3221_same(i, k)
    r.print().assert_all_close()
  def test_feature_inflate(self):
    i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
    k = np.arange(2, dtype=np.float32).reshape([1, 1, 1, 2])
    r = self.get_module().conv2d_1451x1112_same(i, k)
    r.print().assert_all_close()
  def test_feature_mix(self):
    i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
    k = np.arange(4, dtype=np.float32).reshape([1, 1, 2, 2])
    r = self.get_module().conv2d_1452x1122_same(i, k)
    r.print().assert_all_close()
  def test_feature_padded(self):
    i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
    k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
    r = self.get_module().conv2d_1452x2223_same(i, k)
    r.print().assert_all_close()
  def test_feature_unpadded(self):
    i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
    k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
    r = self.get_module().conv2d_1452x2223_valid(i, k)
    r.print().assert_all_close()
  def test_batched_feature_unpadded(self):
    i = np.arange(80, dtype=np.float32).reshape([2, 4, 5, 2])
    k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
    r = self.get_module().conv2d_2452x2223_valid(i, k)
    r.print().assert_all_close()
if __name__ == "__main__":
if hasattr(tf, "enable_v2_behavior"):
tf.enable_v2_behavior()
tf.test.main()
| 36.937143 | 79 | 0.642481 |
import numpy as np
from pyiree.tf.support import tf_test_utils
import tensorflow.compat.v2 as tf
class Conv2dModule(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 1], tf.float32),
])
def conv2d_1451x1111_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 1], tf.float32),
])
def conv2d_2451x1111_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_1451x2311_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_1451x2311_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 1], tf.float32),
tf.TensorSpec([2, 3, 1, 1], tf.float32),
])
def conv2d_2451x2311_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([3, 2, 2, 1], tf.float32),
])
def conv2d_1452x3221_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 1], tf.float32),
tf.TensorSpec([1, 1, 1, 2], tf.float32),
])
def conv2d_1451x1112_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([1, 1, 2, 2], tf.float32),
])
def conv2d_1452x1122_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_1452x2223_same(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "SAME", name="result")
@tf.function(input_signature=[
tf.TensorSpec([1, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_1452x2223_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf.function(input_signature=[
tf.TensorSpec([2, 4, 5, 2], tf.float32),
tf.TensorSpec([2, 2, 2, 3], tf.float32),
])
def conv2d_2452x2223_valid(self, img, kernel):
return tf.nn.conv2d(img, kernel, [1, 1, 1, 1], "VALID", name="result")
@tf_test_utils.compile_module(Conv2dModule)
class ConvTest(tf_test_utils.SavedModelTestCase):
def test_id_batch_size_1(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.ones([1, 1, 1, 1], dtype=np.float32)
r = self.get_module().conv2d_1451x1111_valid(i, k)
r.print().assert_all_close()
def test_id_batch_size_2(self):
i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
k = np.ones([1, 1, 1, 1], dtype=np.float32)
r = self.get_module().conv2d_2451x1111_valid(i, k)
r.print().assert_all_close()
def test_asym_kernel(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_1451x2311_valid(i, k)
r.print().assert_all_close()
def test_padding(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_1451x2311_same(i, k)
r.print().assert_all_close()
def test_batched_padding(self):
i = np.arange(40, dtype=np.float32).reshape([2, 4, 5, 1])
k = np.array([[1, 4, 2], [-2, 0, 1]], dtype=np.float32).reshape(2, 3, 1, 1)
r = self.get_module().conv2d_2451x2311_same(i, k)
r.print().assert_all_close()
def test_feature_reduce(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.ones([3, 2, 2, 1], dtype=np.float32)
r = self.get_module().conv2d_1452x3221_same(i, k)
r.print().assert_all_close()
def test_feature_inflate(self):
i = np.arange(20, dtype=np.float32).reshape([1, 4, 5, 1])
k = np.arange(2, dtype=np.float32).reshape([1, 1, 1, 2])
r = self.get_module().conv2d_1451x1112_same(i, k)
r.print().assert_all_close()
def test_feature_mix(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.arange(4, dtype=np.float32).reshape([1, 1, 2, 2])
r = self.get_module().conv2d_1452x1122_same(i, k)
r.print().assert_all_close()
def test_feature_padded(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
r = self.get_module().conv2d_1452x2223_same(i, k)
r.print().assert_all_close()
def test_feature_unpadded(self):
i = np.arange(40, dtype=np.float32).reshape([1, 4, 5, 2])
k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
r = self.get_module().conv2d_1452x2223_valid(i, k)
r.print().assert_all_close()
def test_batched_feature_unpadded(self):
i = np.arange(80, dtype=np.float32).reshape([2, 4, 5, 2])
k = np.arange(24, dtype=np.float32).reshape([2, 2, 2, 3])
r = self.get_module().conv2d_2452x2223_valid(i, k)
r.print().assert_all_close()
if __name__ == "__main__":
if hasattr(tf, "enable_v2_behavior"):
tf.enable_v2_behavior()
tf.test.main()
| true | true |
f72b11f17c30ee2bf5b08acdb6fe426742382acd | 26,208 | py | Python | lib/ansiblelint/utils.py | gdoucet/ansible-lint | 07b5194b44f6979480f57b96ea3d196fb59c0e7c | [
"MIT"
] | 1 | 2020-01-21T04:30:10.000Z | 2020-01-21T04:30:10.000Z | lib/ansiblelint/utils.py | gdoucet/ansible-lint | 07b5194b44f6979480f57b96ea3d196fb59c0e7c | [
"MIT"
] | null | null | null | lib/ansiblelint/utils.py | gdoucet/ansible-lint | 07b5194b44f6979480f57b96ea3d196fb59c0e7c | [
"MIT"
] | null | null | null | # Copyright (c) 2013-2014 Will Thames <will@thames.id.au>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import glob
import imp
import os
from itertools import product
import six
from ansible import constants
from ansible.errors import AnsibleError
try:
# Try to import the Ansible 2 module first, it's the future-proof one
from ansible.parsing.splitter import split_args
except ImportError:
# Fallback on the Ansible 1.9 module
from ansible.module_utils.splitter import split_args
import yaml
from yaml.composer import Composer
from yaml.constructor import Constructor
import ruamel.yaml
try:
from ansible.utils import parse_yaml_from_file
from ansible.utils import path_dwim
from ansible.utils.template import template as ansible_template
from ansible.utils import module_finder
module_loader = module_finder
ANSIBLE_VERSION = 1
except ImportError:
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.constructor import AnsibleConstructor
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.errors import AnsibleParserError
ANSIBLE_VERSION = 2
# ansible-lint doesn't need/want to know about encrypted secrets, but it needs
# Ansible 2.3+ allows encrypted secrets within yaml files, so we pass a string
# as the password to enable such yaml files to be opened and parsed successfully.
DEFAULT_VAULT_PASSWORD = 'x'
def parse_yaml_from_file(filepath):
    """Load *filepath* as YAML through Ansible's vault-aware DataLoader."""
    loader = DataLoader()
    # Older DataLoaders have no vault support; set a dummy password when we
    # can so vault-encrypted content parses instead of raising.
    if hasattr(loader, 'set_vault_password'):
        loader.set_vault_password(DEFAULT_VAULT_PASSWORD)
    return loader.load_from_file(filepath)
def path_dwim(basedir, given):
    """Resolve *given* relative to *basedir* the way Ansible would."""
    loader = DataLoader()
    loader.set_basedir(basedir)
    return loader.path_dwim(given)
def ansible_template(basedir, varname, templatevars, **kwargs):
    """Render *varname* as a Jinja2 template using *templatevars*."""
    loader = DataLoader()
    loader.set_basedir(basedir)
    templar = Templar(loader, variables=templatevars)
    return templar.template(varname, **kwargs)
try:
from ansible.plugins import module_loader
except ImportError:
from ansible.plugins.loader import module_loader
# Keys injected into parsed YAML mappings to track their source location
# (see parse_yaml_linenumbers()).
LINE_NUMBER_KEY = '__line__'
FILENAME_KEY = '__file__'
# Task-level keywords recognized by normalize_task_v1() (Ansible 1.x model).
VALID_KEYS = [
    'name', 'action', 'when', 'async', 'poll', 'notify',
    'first_available_file', 'include', 'include_tasks', 'import_tasks', 'import_playbook',
    'tags', 'register', 'ignore_errors', 'delegate_to',
    'local_action', 'transport', 'remote_user', 'sudo',
    'sudo_user', 'sudo_pass', 'when', 'connection', 'environment', 'args', 'always_run',
    'any_errors_fatal', 'changed_when', 'failed_when', 'check_mode', 'delay',
    'retries', 'until', 'su', 'su_user', 'su_pass', 'no_log', 'run_once',
    'become', 'become_user', 'become_method', FILENAME_KEY,
]
# Maps a playbook/file section name to the action type recorded on each task
# under '__ansible_action_type__' (see add_action_type()).
BLOCK_NAME_TO_ACTION_TYPE_MAP = {
    'tasks': 'task',
    'handlers': 'handler',
    'pre_tasks': 'task',
    'post_tasks': 'task',
    'block': 'meta',
    'rescue': 'meta',
    'always': 'meta',
}
def load_plugins(directory):
    """Import every rule plugin found in *directory* and return instances.

    Each ``[A-Za-z]*.py`` file is expected to define a class named after the
    file; one instance of that class is appended to the result list.
    """
    result = []
    fh = None
    for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')):
        pluginname = os.path.basename(pluginfile.replace('.py', ''))
        try:
            # NOTE(review): ``imp`` is deprecated and removed in Python 3.12;
            # importlib would be the modern replacement -- confirm supported
            # interpreter versions before migrating.
            fh, filename, desc = imp.find_module(pluginname, [directory])
            mod = imp.load_module(pluginname, fh, filename, desc)
            obj = getattr(mod, pluginname)()
            result.append(obj)
        finally:
            # find_module returns an open file handle; always close it.
            if fh:
                fh.close()
    return result
def tokenize(line):
    """Split a task line into ``(module_name, args, kwargs)``.

    A leading ``-`` and an ``action:``/``local_action:`` prefix are dropped.
    ``key=value`` words become kwargs until the first non-kv word is seen,
    after which every remaining word is a positional argument.
    """
    words = line.lstrip().split(" ")
    if words[0] == '-':
        words = words[1:]
    if words[0] in ('action:', 'local_action:'):
        words = words[1:]
    command = words[0].replace(":", "")
    args = []
    kwargs = {}
    seen_positional = False
    for word in words[1:]:
        if "=" in word and not seen_positional:
            key, _, value = word.partition("=")
            kwargs[key] = value
        else:
            seen_positional = True
            args.append(word)
    return (command, args, kwargs)
def _playbook_items(pb_data):
if isinstance(pb_data, dict):
return pb_data.items()
elif not pb_data:
return []
else:
return [item for play in pb_data for item in play.items()]
def find_children(playbook, playbook_dir):
    """Return the child files referenced by a playbook or role.

    :param playbook: (path, type) tuple, type being e.g. 'playbook' or 'role'.
    :param playbook_dir: top-level playbook directory (forwarded to
        play_children).
    :returns: list of {'path': ..., 'type': ...} dicts, one per child file.
    """
    if not os.path.exists(playbook[0]):
        return []
    if playbook[1] == 'role':
        # Synthesize a minimal play so a bare role goes through the same path.
        playbook_ds = {'roles': [{'role': playbook[0]}]}
    else:
        try:
            playbook_ds = parse_yaml_from_file(playbook[0])
        except AnsibleError as e:
            raise SystemExit(str(e))
    results = []
    basedir = os.path.dirname(playbook[0])
    items = _playbook_items(playbook_ds)
    for item in items:
        for child in play_children(basedir, item, playbook[1], playbook_dir):
            # Skip paths that still contain unresolved template expressions.
            if "$" in child['path'] or "{{" in child['path']:
                continue
            valid_tokens = list()
            for token in split_args(child['path']):
                # Stop at the first key=value token (e.g. 'tags=foo').
                if '=' in token:
                    break
                valid_tokens.append(token)
            path = ' '.join(valid_tokens)
            results.append({
                'path': path_dwim(basedir, path),
                'type': child['type']
            })
    return results
def template(basedir, value, vars, fail_on_undefined=False, **kwargs):
    """Best-effort Jinja2 templating; returns *value* unchanged on failure."""
    merged_kwargs = dict(kwargs, fail_on_undefined=fail_on_undefined)
    try:
        return ansible_template(os.path.abspath(basedir), value, vars,
                                **merged_kwargs)
    except (AnsibleError, ValueError):
        # Templating failed (undefined variable, or e.g. the to_json filter
        # on an empty var raising ValueError): keep the raw value.
        return value
def play_children(basedir, item, parent_type, playbook_dir):
    """Dispatch one play (key, value) pair to the matching child-file finder.

    Returns a list of {'path', 'type'} dicts; keys with no file references
    yield [].  Also registers the play-local library/ dir as a side effect.
    """
    # Which helper resolves the children for each play keyword.
    delegate_map = {
        'tasks': _taskshandlers_children,
        'pre_tasks': _taskshandlers_children,
        'post_tasks': _taskshandlers_children,
        'block': _taskshandlers_children,
        'include': _include_children,
        'import_playbook': _include_children,
        'roles': _roles_children,
        'dependencies': _roles_children,
        'handlers': _taskshandlers_children,
        'include_tasks': _include_children,
        'import_tasks': _include_children,
    }
    (k, v) = item
    play_library = os.path.join(os.path.abspath(basedir), 'library')
    _load_library_if_exists(play_library)
    if k in delegate_map:
        if v:
            # Render templated values (e.g. "{{ playbook_dir }}/x") before
            # treating them as paths; undefined variables are tolerated.
            v = template(os.path.abspath(basedir),
                         v,
                         dict(playbook_dir=os.path.abspath(basedir)),
                         fail_on_undefined=False)
        return delegate_map[k](basedir, k, v, parent_type)
    return []
def _include_children(basedir, k, v, parent_type):
    """Resolve an include/import directive to the file it pulls in."""
    # "include: filename.yml tags=blah" -> first positional arg is the path.
    command, args, kwargs = tokenize("{0}: {1}".format(k, v))
    result = path_dwim(basedir, args[0])
    # Role task includes may live in ../tasks relative to the including file.
    if not os.path.exists(result) and not basedir.endswith('tasks'):
        result = path_dwim(os.path.join(basedir, '..', 'tasks'), v)
    return [{'path': result, 'type': parent_type}]
def _taskshandlers_children(basedir, k, v, parent_type):
    """Resolve child files referenced from a list of tasks or handlers.

    Handles include/import directives, include_role/import_role (via role
    file lookup), and recurses into block/rescue/always sections.
    """
    results = []
    for th in v:
        if 'include' in th:
            append_children(th['include'], basedir, k, parent_type, results)
        elif 'include_tasks' in th:
            append_children(th['include_tasks'], basedir, k, parent_type, results)
        elif 'import_playbook' in th:
            append_children(th['import_playbook'], basedir, k, parent_type, results)
        elif 'import_tasks' in th:
            append_children(th['import_tasks'], basedir, k, parent_type, results)
        elif 'import_role' in th:
            # Normalize first so the role name / tasks_from land under 'action'.
            th = normalize_task_v2(th)
            results.extend(_roles_children(basedir, k, [th['action'].get('name')], parent_type,
                                           main=th['action'].get('tasks_from', 'main')))
        elif 'include_role' in th:
            th = normalize_task_v2(th)
            results.extend(_roles_children(basedir, k, [th['action'].get('name')],
                                           parent_type,
                                           main=th['action'].get('tasks_from', 'main')))
        elif 'block' in th:
            # Recurse into nested blocks and their rescue/always branches.
            results.extend(_taskshandlers_children(basedir, k, th['block'], parent_type))
            if 'rescue' in th:
                results.extend(_taskshandlers_children(basedir, k, th['rescue'], parent_type))
            if 'always' in th:
                results.extend(_taskshandlers_children(basedir, k, th['always'], parent_type))
    return results
def append_children(taskhandler, basedir, k, parent_type, results):
    """Append the resolved path of an included task/handler file to *results*."""
    # When called for a playbook, the effective type of the included tasks is
    # the section holding the include (tasks, pre_tasks, handlers, ...);
    # otherwise the parent's type is inherited.
    section = k if parent_type == 'playbook' else parent_type
    results.append({
        'path': path_dwim(basedir, taskhandler),
        'type': section
    })
def _roles_children(basedir, k, v, parent_type, main='main'):
    """Resolve a roles list (or meta dependencies) to role YAML files.

    Roles tagged 'skip_ansible_lint' are skipped; *main* names the entry
    tasks file (Ansible's tasks_from).
    """
    results = []
    for role in v:
        if isinstance(role, dict):
            if 'role' in role or 'name' in role:
                if 'tags' not in role or 'skip_ansible_lint' not in role['tags']:
                    results.extend(_look_for_role_files(basedir,
                                                        role.get('role', role.get('name')),
                                                        main=main))
            elif k != 'dependencies':
                # Dicts without role/name are only tolerated for meta
                # dependencies; anywhere else this is a user error.
                raise SystemExit('role dict {0} does not contain a "role" '
                                 'or "name" key'.format(role))
        else:
            # Plain string role name.
            results.extend(_look_for_role_files(basedir, role, main=main))
    return results
def _load_library_if_exists(path):
    """Register *path* as a module directory when it exists on disk."""
    if not os.path.exists(path):
        return
    module_loader.add_directory(path)
def _rolepath(basedir, role):
    """Locate the directory for *role*, or return None when not found.

    Searches playbook-relative locations first, then DEFAULT_ROLES_PATH.
    Side effect: registers the found role's library/ dir with module_loader.
    """
    role_path = None
    possible_paths = [
        # if included from a playbook
        path_dwim(basedir, os.path.join('roles', role)),
        path_dwim(basedir, role),
        # if included from roles/[role]/meta/main.yml
        path_dwim(
            basedir, os.path.join('..', '..', '..', 'roles', role)
        ),
        path_dwim(basedir, os.path.join('..', '..', role)),
    ]
    if constants.DEFAULT_ROLES_PATH:
        search_locations = constants.DEFAULT_ROLES_PATH
        if isinstance(search_locations, six.string_types):
            # Older Ansible exposes the roles path as a pathsep-joined string.
            search_locations = search_locations.split(os.pathsep)
        for loc in search_locations:
            loc = os.path.expanduser(loc)
            possible_paths.append(path_dwim(loc, role))
    # Last resort: the basedir itself.
    possible_paths.append(path_dwim(basedir, ''))
    for path_option in possible_paths:
        if os.path.isdir(path_option):
            role_path = path_option
            break
    if role_path:
        _load_library_if_exists(os.path.join(role_path, 'library'))
    return role_path
def _look_for_role_files(basedir, role, main='main'):
    """Collect every YAML file under the role's tasks/handlers/meta dirs."""
    role_path = _rolepath(basedir, role)
    if not role_path:
        return []
    results = []
    for section in ['tasks', 'handlers', 'meta']:
        section_dir = os.path.join(role_path, section)
        for dirpath, _subdirs, filenames in os.walk(section_dir):
            for filename in filenames:
                if filename.lower().endswith(('.yml', '.yaml')):
                    results.append({'path': os.path.join(dirpath, filename),
                                    'type': section})
    return results
def rolename(filepath):
    """Return the role name embedded in *filepath*, or '' when none.

    Extracts the path component following the first ``roles/`` segment,
    e.g. ``project/roles/myrole/tasks/main.yml`` -> ``myrole``.
    """
    idx = filepath.find('roles/')
    if idx < 0:
        return ''
    role = filepath[idx + 6:]
    # Bug fix: str.find returns -1 when there is no trailing '/', and the
    # old ``role[:role.find('/')]`` slice then dropped the last character
    # of the role name.  split handles both cases correctly.
    return role.split('/', 1)[0]
def _kv_to_dict(v):
    """Convert a 'module key=value ...' string to a normalized action dict."""
    command, args, kwargs = tokenize(v)
    action = dict(__ansible_module__=command, __ansible_arguments__=args)
    action.update(kwargs)
    return action
def normalize_task_v2(task):
    '''Ensures tasks have an action key and strings are converted to python objects

    The module invocation is stored under ``result['action']`` as a dict
    with ``__ansible_module__`` / ``__ansible_arguments__`` keys plus the
    module's own keyword arguments.
    '''
    result = dict()
    mod_arg_parser = ModuleArgsParser(task)
    try:
        action, arguments, result['delegate_to'] = mod_arg_parser.parse()
    except AnsibleParserError as e:
        # Build a best-effort "file:line" label, stripping the bookkeeping
        # keys so they do not pollute the pretty-printed task dump.
        try:
            task_info = "%s:%s" % (task[FILENAME_KEY], task[LINE_NUMBER_KEY])
            del task[FILENAME_KEY]
            del task[LINE_NUMBER_KEY]
        except KeyError:
            task_info = "Unknown"
        try:
            import pprint
            pp = pprint.PrettyPrinter(indent=2)
            task_pprint = pp.pformat(task)
        except ImportError:
            task_pprint = task
        # NOTE(review): ``e.message`` relies on AnsibleError exposing a
        # .message attribute; ``str(e)`` would be the safer spelling --
        # confirm against the supported Ansible versions before changing.
        raise SystemExit("Couldn't parse task at %s (%s)\n%s" % (task_info, e.message, task_pprint))
    # denormalize shell -> command conversion
    if '_uses_shell' in arguments:
        action = 'shell'
        del(arguments['_uses_shell'])
    for (k, v) in list(task.items()):
        if k in ('action', 'local_action', 'args', 'delegate_to') or k == action:
            # we don't want to re-assign these values, which were
            # determined by the ModuleArgsParser() above
            continue
        else:
            result[k] = v
    result['action'] = dict(__ansible_module__=action)
    if '_raw_params' in arguments:
        # Free-form module arguments (shell/command lines) become a word list.
        result['action']['__ansible_arguments__'] = arguments['_raw_params'].split(' ')
        del(arguments['_raw_params'])
    else:
        result['action']['__ansible_arguments__'] = list()
    if 'argv' in arguments and not result['action']['__ansible_arguments__']:
        # argv form of command: treat the list as the positional arguments.
        result['action']['__ansible_arguments__'] = arguments['argv']
        del(arguments['argv'])
    result['action'].update(arguments)
    return result
def normalize_task_v1(task):
    """Normalize an Ansible 1.x task dict to the internal shape.

    The module invocation ends up under ``result['action']`` as
    ``{'__ansible_module__': name, '__ansible_arguments__': [...], **kwargs}``.
    """
    result = dict()
    for (k, v) in task.items():
        if k in VALID_KEYS or k.startswith('with_'):
            if k == 'local_action' or k == 'action':
                if not isinstance(v, dict):
                    # String form, e.g. "shell echo hi creates=/tmp/x".
                    v = _kv_to_dict(v)
                v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
                result['action'] = v
            else:
                result[k] = v
        else:
            # Any other key is taken to be the module name itself.
            if isinstance(v, six.string_types):
                v = _kv_to_dict(k + ' ' + v)
            elif not v:
                v = dict(__ansible_module__=k)
            else:
                if isinstance(v, dict):
                    v.update(dict(__ansible_module__=k))
                else:
                    if k == '__line__':
                        # Keep the line number stored
                        result[k] = v
                        continue
                    else:
                        # Tasks that include playbooks (rather than task files)
                        # can get here
                        # https://github.com/ansible/ansible-lint/issues/138
                        raise RuntimeError("Was not expecting value %s of type %s for key %s\n"
                                           "Task: %s. Check the syntax of your playbook using "
                                           "ansible-playbook --syntax-check" %
                                           (str(v), type(v), k, str(task)))
            v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
            result['action'] = v
    if 'module' in result['action']:
        # this happens when a task uses
        # local_action:
        #   module: ec2
        #   etc...
        result['action']['__ansible_module__'] = result['action']['module']
        del(result['action']['module'])
    if 'args' in result:
        # Fold an explicit args: block into the action dict.
        result['action'].update(result.get('args'))
        del(result['args'])
    return result
def normalize_task(task, filename):
    """Normalize *task* for the running Ansible major version.

    Re-tags the result with its source filename and original action type.
    """
    action_type = task.get('__ansible_action_type__', 'task')
    task.pop('__ansible_action_type__', None)
    normalizer = normalize_task_v1 if ANSIBLE_VERSION < 2 else normalize_task_v2
    task = normalizer(task)
    task[FILENAME_KEY] = filename
    task['__ansible_action_type__'] = action_type
    return task
def task_to_str(task):
    """Render a task as a one-line human-readable string.

    Prefers the task's ``name``; otherwise builds "<module> k=v ... args"
    from the normalized action dict.
    """
    name = task.get("name")
    if name:
        return name
    action = task.get("action")
    # Bug fix: ``__ansible_arguments__`` may be absent (or None) on
    # hand-built action dicts; default to [] instead of raising TypeError
    # when concatenating below.
    positional = action.get("__ansible_arguments__") or []
    keyword_parts = [u"{0}={1}".format(k, v) for (k, v) in action.items()
                     if k not in ["__ansible_module__", "__ansible_arguments__"]]
    args = " ".join(keyword_parts + list(positional))
    return u"{0} {1}".format(action["__ansible_module__"], args)
def extract_from_list(blocks, candidates):
    """Pull the tasks stored under any *candidates* key out of *blocks*."""
    results = []
    for block in blocks:
        if not isinstance(block, dict):
            continue
        for candidate in candidates:
            if candidate not in block:
                continue
            value = block[candidate]
            if isinstance(value, list):
                results.extend(add_action_type(value, candidate))
            elif value is not None:
                # A scalar under e.g. 'tasks' is malformed input.
                raise RuntimeError(
                    "Key '%s' defined, but bad value: '%s'" %
                    (candidate, str(value)))
    return results
def add_action_type(actions, action_type):
    """Tag each action dict with the task type for its enclosing section."""
    tagged = []
    for action in actions:
        action['__ansible_action_type__'] = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type]
        tagged.append(action)
    return tagged
def get_action_tasks(yaml, file):
    """Flatten a parsed file into its list of actionable tasks.

    ``yaml`` is the parsed data; ``file`` is a dict with 'type' and 'path'
    keys.  (Both parameter names shadow a module/builtin; kept as-is for
    interface compatibility.)
    """
    tasks = list()
    if file['type'] in ['tasks', 'handlers']:
        tasks = add_action_type(yaml, file['type'])
    else:
        tasks.extend(extract_from_list(yaml, ['tasks', 'handlers', 'pre_tasks', 'post_tasks']))
    # Add sub-elements of block/rescue/always to tasks list
    tasks.extend(extract_from_list(tasks, ['block', 'rescue', 'always']))
    # Remove block/rescue/always elements from tasks list
    block_rescue_always = ('block', 'rescue', 'always')
    tasks[:] = [task for task in tasks if all(k not in task for k in block_rescue_always)]
    # Include/import directives are resolved elsewhere; drop them here.
    return [task for task in tasks if
            set(['include', 'include_tasks',
                 'import_playbook', 'import_tasks']).isdisjoint(task.keys())]
def get_normalized_tasks(yaml, file):
    """Return normalized tasks from *yaml*, skipping 'skip_ansible_lint' ones."""
    normalized = []
    for task in get_action_tasks(yaml, file):
        # An empty `tags` block parses as None, so the `or []` guard is
        # required -- task.get('tags', []) alone does not suffice.
        tags = task.get('tags') or []
        if 'skip_ansible_lint' in tags:
            # No need to normalize a task we are skipping.
            continue
        normalized.append(normalize_task(task, file['path']))
    return normalized
def parse_yaml_linenumbers(data, filename):
    """Parses yaml as ansible.utils.parse_yaml but with linenumbers.

    The line numbers are stored in each node's LINE_NUMBER_KEY key.
    """
    def compose_node(parent, index):
        # the line number where the previous token has ended (plus empty lines)
        line = loader.line
        node = Composer.compose_node(loader, parent, index)
        node.__line__ = line + 1
        return node

    def construct_mapping(node, deep=False):
        if ANSIBLE_VERSION < 2:
            mapping = Constructor.construct_mapping(loader, node, deep=deep)
        else:
            mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep)
        if hasattr(node, '__line__'):
            mapping[LINE_NUMBER_KEY] = node.__line__
        else:
            mapping[LINE_NUMBER_KEY] = mapping._line_number
        mapping[FILENAME_KEY] = filename
        return mapping

    try:
        if ANSIBLE_VERSION < 2:
            loader = yaml.Loader(data)
        else:
            import inspect
            kwargs = {}
            # Bug fix: inspect.getargspec was removed in Python 3.11; use
            # getfullargspec (same .args field) when available and fall back
            # to getargspec only on interpreters that still provide it.
            getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
            if 'vault_password' in getargspec(AnsibleLoader.__init__).args:
                kwargs['vault_password'] = DEFAULT_VAULT_PASSWORD
            loader = AnsibleLoader(data, **kwargs)
        loader.compose_node = compose_node
        loader.construct_mapping = construct_mapping
        data = loader.get_single_data()
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
        raise SystemExit("Failed to parse YAML in %s: %s" % (filename, str(e)))
    return data
def get_first_cmd_arg(task):
    """Return the first word of a command task, or None when there is none."""
    action = task['action']
    try:
        if 'cmd' in action:
            return action['cmd'].split()[0]
        return action['__ansible_arguments__'][0]
    except IndexError:
        # Empty command line / no positional arguments.
        return None
def append_skipped_rules(pyyaml_data, file_text, file_type):
    """Append 'skipped_rules' to individual tasks or single metadata block.

    For a file, uses 2nd parser (ruamel.yaml) to pull comments out of
    yaml subsets, check for '# noqa' skipped rules, and append any skips to the
    original parser (pyyaml) data relied on by remainder of ansible-lint.

    :param pyyaml_data: file text parsed via ansible and pyyaml.
    :param file_text: raw file text.
    :param file_type: type of file: tasks, handlers or meta.

    :returns: original pyyaml_data altered with a 'skipped_rules' list added
              to individual tasks, or added to the single metadata block.
    """
    try:
        return _append_skipped_rules(pyyaml_data, file_text, file_type)
    except RuntimeError as exc:
        # Notify user of skip error, do not stop, do not change exit code
        print('Error trying to append skipped rules: {!r}'.format(exc))
        return pyyaml_data
def _append_skipped_rules(pyyaml_data, file_text, file_type):
    """Implementation behind append_skipped_rules().

    Raises RuntimeError on task-matching failures or unexpected file types;
    the public wrapper converts that into a printed warning.
    """
    # parse file text using 2nd parser library
    yaml = ruamel.yaml.YAML()
    ruamel_data = yaml.load(file_text)
    if file_type == 'meta':
        # Meta files carry one block; attach the skips to it directly.
        pyyaml_data[0]['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_data)
        return pyyaml_data
    # create list of blocks of tasks or nested tasks
    if file_type in ('tasks', 'handlers'):
        ruamel_task_blocks = ruamel_data
        pyyaml_task_blocks = pyyaml_data
    elif file_type == 'playbook':
        try:
            pyyaml_task_blocks = _get_task_blocks_from_playbook(pyyaml_data)
            ruamel_task_blocks = _get_task_blocks_from_playbook(ruamel_data)
        except (AttributeError, TypeError):
            # TODO(awcrosby): running ansible-lint on any .yml file will
            # assume it is a playbook, check needs to be added higher in the
            # call stack, and can remove this except
            return pyyaml_data
    else:
        raise RuntimeError('Unexpected file type: {}'.format(file_type))
    # get tasks from blocks of tasks
    pyyaml_tasks = _get_tasks_from_blocks(pyyaml_task_blocks)
    ruamel_tasks = _get_tasks_from_blocks(ruamel_task_blocks)
    # append skipped_rules for each task
    for ruamel_task, pyyaml_task in zip(ruamel_tasks, pyyaml_tasks):
        # Both parsers walked the same file, so tasks must line up by name.
        if pyyaml_task.get('name') != ruamel_task.get('name'):
            raise RuntimeError('Error in matching skip comment to a task')
        pyyaml_task['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_task)
    return pyyaml_data
def _get_task_blocks_from_playbook(playbook):
"""Return parts of playbook that contains tasks, and nested tasks.
:param playbook: playbook yaml from yaml parser.
:returns: list of task dictionaries.
"""
PLAYBOOK_TASK_KEYWORDS = [
'tasks',
'pre_tasks',
'post_tasks',
'handlers',
]
task_blocks = []
for play, key in product(playbook, PLAYBOOK_TASK_KEYWORDS):
task_blocks.extend(play.get(key, []))
return task_blocks
def _get_tasks_from_blocks(task_blocks):
"""Get list of tasks from list made of tasks and nested tasks."""
NESTED_TASK_KEYS = [
'block',
'always',
'rescue',
]
def get_nested_tasks(task):
return (
subtask
for k in NESTED_TASK_KEYS if k in task
for subtask in task[k]
)
for task in task_blocks:
for sub_task in get_nested_tasks(task):
yield sub_task
yield task
def _get_rule_skips_from_yaml(yaml_input):
    """Traverse yaml for comments with rule skips and return list of rules."""
    def traverse_yaml(obj):
        # ruamel attaches comments to containers via the .ca accessor;
        # stringify the comment map so get_rule_skips_from_line() can scan
        # its text line by line.
        yaml_comment_obj_strs.append(str(obj.ca.items))
        if isinstance(obj, dict):
            for key, val in obj.items():
                if isinstance(val, (dict, list)):
                    traverse_yaml(val)
        elif isinstance(obj, list):
            for e in obj:
                if isinstance(e, (dict, list)):
                    traverse_yaml(e)
        else:
            return
    yaml_comment_obj_strs = []
    traverse_yaml(yaml_input)
    rule_id_list = []
    for comment_obj_str in yaml_comment_obj_strs:
        # The stringified comment map embeds literal backslash-n sequences,
        # hence splitting on '\\n' rather than a real newline.
        for line in comment_obj_str.split('\\n'):
            rule_id_list.extend(get_rule_skips_from_line(line))
    return rule_id_list
def get_rule_skips_from_line(line):
    """Return the rule IDs named after a '# noqa' marker on *line*."""
    if '# noqa' not in line:
        return []
    # Text between the first '# noqa' marker and any subsequent marker is a
    # whitespace-separated list of rule IDs.
    return line.split('# noqa')[1].split()
| 35.657143 | 100 | 0.618781 |
import glob
import imp
import os
from itertools import product
import six
from ansible import constants
from ansible.errors import AnsibleError
try:
from ansible.parsing.splitter import split_args
except ImportError:
# Fallback on the Ansible 1.9 module
from ansible.module_utils.splitter import split_args
import yaml
from yaml.composer import Composer
from yaml.constructor import Constructor
import ruamel.yaml
try:
from ansible.utils import parse_yaml_from_file
from ansible.utils import path_dwim
from ansible.utils.template import template as ansible_template
from ansible.utils import module_finder
module_loader = module_finder
ANSIBLE_VERSION = 1
except ImportError:
from ansible.parsing.dataloader import DataLoader
from ansible.template import Templar
from ansible.parsing.mod_args import ModuleArgsParser
from ansible.parsing.yaml.constructor import AnsibleConstructor
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.errors import AnsibleParserError
ANSIBLE_VERSION = 2
# ansible-lint doesn't need/want to know about encrypted secrets, but it needs
DEFAULT_VAULT_PASSWORD = 'x'
def parse_yaml_from_file(filepath):
dl = DataLoader()
if hasattr(dl, 'set_vault_password'):
dl.set_vault_password(DEFAULT_VAULT_PASSWORD)
return dl.load_from_file(filepath)
def path_dwim(basedir, given):
dl = DataLoader()
dl.set_basedir(basedir)
return dl.path_dwim(given)
def ansible_template(basedir, varname, templatevars, **kwargs):
dl = DataLoader()
dl.set_basedir(basedir)
templar = Templar(dl, variables=templatevars)
return templar.template(varname, **kwargs)
try:
from ansible.plugins import module_loader
except ImportError:
from ansible.plugins.loader import module_loader
LINE_NUMBER_KEY = '__line__'
FILENAME_KEY = '__file__'
VALID_KEYS = [
'name', 'action', 'when', 'async', 'poll', 'notify',
'first_available_file', 'include', 'include_tasks', 'import_tasks', 'import_playbook',
'tags', 'register', 'ignore_errors', 'delegate_to',
'local_action', 'transport', 'remote_user', 'sudo',
'sudo_user', 'sudo_pass', 'when', 'connection', 'environment', 'args', 'always_run',
'any_errors_fatal', 'changed_when', 'failed_when', 'check_mode', 'delay',
'retries', 'until', 'su', 'su_user', 'su_pass', 'no_log', 'run_once',
'become', 'become_user', 'become_method', FILENAME_KEY,
]
BLOCK_NAME_TO_ACTION_TYPE_MAP = {
'tasks': 'task',
'handlers': 'handler',
'pre_tasks': 'task',
'post_tasks': 'task',
'block': 'meta',
'rescue': 'meta',
'always': 'meta',
}
def load_plugins(directory):
result = []
fh = None
for pluginfile in glob.glob(os.path.join(directory, '[A-Za-z]*.py')):
pluginname = os.path.basename(pluginfile.replace('.py', ''))
try:
fh, filename, desc = imp.find_module(pluginname, [directory])
mod = imp.load_module(pluginname, fh, filename, desc)
obj = getattr(mod, pluginname)()
result.append(obj)
finally:
if fh:
fh.close()
return result
def tokenize(line):
    """Split a task line into ``(module_name, args, kwargs)``.

    Drops a leading ``-`` and an ``action:``/``local_action:`` prefix;
    ``key=value`` words become kwargs until the first non-kv word, after
    which all remaining words are positional arguments.
    """
    words = line.lstrip().split(" ")
    if words[0] == '-':
        words = words[1:]
    if words[0] in ('action:', 'local_action:'):
        words = words[1:]
    command = words[0].replace(":", "")
    args = []
    kwargs = {}
    seen_positional = False
    for word in words[1:]:
        if "=" in word and not seen_positional:
            key, _, value = word.partition("=")
            kwargs[key] = value
        else:
            seen_positional = True
            args.append(word)
    return (command, args, kwargs)
def _playbook_items(pb_data):
if isinstance(pb_data, dict):
return pb_data.items()
elif not pb_data:
return []
else:
return [item for play in pb_data for item in play.items()]
def find_children(playbook, playbook_dir):
if not os.path.exists(playbook[0]):
return []
if playbook[1] == 'role':
playbook_ds = {'roles': [{'role': playbook[0]}]}
else:
try:
playbook_ds = parse_yaml_from_file(playbook[0])
except AnsibleError as e:
raise SystemExit(str(e))
results = []
basedir = os.path.dirname(playbook[0])
items = _playbook_items(playbook_ds)
for item in items:
for child in play_children(basedir, item, playbook[1], playbook_dir):
if "$" in child['path'] or "{{" in child['path']:
continue
valid_tokens = list()
for token in split_args(child['path']):
if '=' in token:
break
valid_tokens.append(token)
path = ' '.join(valid_tokens)
results.append({
'path': path_dwim(basedir, path),
'type': child['type']
})
return results
def template(basedir, value, vars, fail_on_undefined=False, **kwargs):
try:
value = ansible_template(os.path.abspath(basedir), value, vars,
**dict(kwargs, fail_on_undefined=fail_on_undefined))
except (AnsibleError, ValueError):
# templating failed, so just keep value as is.
pass
return value
def play_children(basedir, item, parent_type, playbook_dir):
delegate_map = {
'tasks': _taskshandlers_children,
'pre_tasks': _taskshandlers_children,
'post_tasks': _taskshandlers_children,
'block': _taskshandlers_children,
'include': _include_children,
'import_playbook': _include_children,
'roles': _roles_children,
'dependencies': _roles_children,
'handlers': _taskshandlers_children,
'include_tasks': _include_children,
'import_tasks': _include_children,
}
(k, v) = item
play_library = os.path.join(os.path.abspath(basedir), 'library')
_load_library_if_exists(play_library)
if k in delegate_map:
if v:
v = template(os.path.abspath(basedir),
v,
dict(playbook_dir=os.path.abspath(basedir)),
fail_on_undefined=False)
return delegate_map[k](basedir, k, v, parent_type)
return []
def _include_children(basedir, k, v, parent_type):
# handle include: filename.yml tags=blah
(command, args, kwargs) = tokenize("{0}: {1}".format(k, v))
result = path_dwim(basedir, args[0])
if not os.path.exists(result) and not basedir.endswith('tasks'):
result = path_dwim(os.path.join(basedir, '..', 'tasks'), v)
return [{'path': result, 'type': parent_type}]
def _taskshandlers_children(basedir, k, v, parent_type):
results = []
for th in v:
if 'include' in th:
append_children(th['include'], basedir, k, parent_type, results)
elif 'include_tasks' in th:
append_children(th['include_tasks'], basedir, k, parent_type, results)
elif 'import_playbook' in th:
append_children(th['import_playbook'], basedir, k, parent_type, results)
elif 'import_tasks' in th:
append_children(th['import_tasks'], basedir, k, parent_type, results)
elif 'import_role' in th:
th = normalize_task_v2(th)
results.extend(_roles_children(basedir, k, [th['action'].get('name')], parent_type,
main=th['action'].get('tasks_from', 'main')))
elif 'include_role' in th:
th = normalize_task_v2(th)
results.extend(_roles_children(basedir, k, [th['action'].get('name')],
parent_type,
main=th['action'].get('tasks_from', 'main')))
elif 'block' in th:
results.extend(_taskshandlers_children(basedir, k, th['block'], parent_type))
if 'rescue' in th:
results.extend(_taskshandlers_children(basedir, k, th['rescue'], parent_type))
if 'always' in th:
results.extend(_taskshandlers_children(basedir, k, th['always'], parent_type))
return results
def append_children(taskhandler, basedir, k, parent_type, results):
# when taskshandlers_children is called for playbooks, the
# actual type of the included tasks is the section containing the
# include, e.g. tasks, pre_tasks, or handlers.
if parent_type == 'playbook':
playbook_section = k
else:
playbook_section = parent_type
results.append({
'path': path_dwim(basedir, taskhandler),
'type': playbook_section
})
def _roles_children(basedir, k, v, parent_type, main='main'):
results = []
for role in v:
if isinstance(role, dict):
if 'role' in role or 'name' in role:
if 'tags' not in role or 'skip_ansible_lint' not in role['tags']:
results.extend(_look_for_role_files(basedir,
role.get('role', role.get('name')),
main=main))
elif k != 'dependencies':
raise SystemExit('role dict {0} does not contain a "role" '
'or "name" key'.format(role))
else:
results.extend(_look_for_role_files(basedir, role, main=main))
return results
def _load_library_if_exists(path):
if os.path.exists(path):
module_loader.add_directory(path)
def _rolepath(basedir, role):
role_path = None
possible_paths = [
# if included from a playbook
path_dwim(basedir, os.path.join('roles', role)),
path_dwim(basedir, role),
# if included from roles/[role]/meta/main.yml
path_dwim(
basedir, os.path.join('..', '..', '..', 'roles', role)
),
path_dwim(basedir, os.path.join('..', '..', role)),
]
if constants.DEFAULT_ROLES_PATH:
search_locations = constants.DEFAULT_ROLES_PATH
if isinstance(search_locations, six.string_types):
search_locations = search_locations.split(os.pathsep)
for loc in search_locations:
loc = os.path.expanduser(loc)
possible_paths.append(path_dwim(loc, role))
possible_paths.append(path_dwim(basedir, ''))
for path_option in possible_paths:
if os.path.isdir(path_option):
role_path = path_option
break
if role_path:
_load_library_if_exists(os.path.join(role_path, 'library'))
return role_path
def _look_for_role_files(basedir, role, main='main'):
role_path = _rolepath(basedir, role)
if not role_path:
return []
results = []
for th in ['tasks', 'handlers', 'meta']:
current_path = os.path.join(role_path, th)
for dir, subdirs, files in os.walk(current_path):
for file in files:
file_ignorecase = file.lower()
if file_ignorecase.endswith(('.yml', '.yaml')):
thpath = os.path.join(dir, file)
results.append({'path': thpath, 'type': th})
return results
def rolename(filepath):
    """Return the role name embedded in *filepath*, or '' when none.

    Extracts the path component following the first ``roles/`` segment,
    e.g. ``project/roles/myrole/tasks/main.yml`` -> ``myrole``.
    """
    idx = filepath.find('roles/')
    if idx < 0:
        return ''
    role = filepath[idx + 6:]
    # Bug fix: str.find returns -1 when there is no trailing '/', and the
    # old ``role[:role.find('/')]`` slice then dropped the last character
    # of the role name.  split handles both cases correctly.
    return role.split('/', 1)[0]
def _kv_to_dict(v):
(command, args, kwargs) = tokenize(v)
return (dict(__ansible_module__=command, __ansible_arguments__=args, **kwargs))
def normalize_task_v2(task):
result = dict()
mod_arg_parser = ModuleArgsParser(task)
try:
action, arguments, result['delegate_to'] = mod_arg_parser.parse()
except AnsibleParserError as e:
try:
task_info = "%s:%s" % (task[FILENAME_KEY], task[LINE_NUMBER_KEY])
del task[FILENAME_KEY]
del task[LINE_NUMBER_KEY]
except KeyError:
task_info = "Unknown"
try:
import pprint
pp = pprint.PrettyPrinter(indent=2)
task_pprint = pp.pformat(task)
except ImportError:
task_pprint = task
raise SystemExit("Couldn't parse task at %s (%s)\n%s" % (task_info, e.message, task_pprint))
if '_uses_shell' in arguments:
action = 'shell'
del(arguments['_uses_shell'])
for (k, v) in list(task.items()):
if k in ('action', 'local_action', 'args', 'delegate_to') or k == action:
# determined by the ModuleArgsParser() above
continue
else:
result[k] = v
result['action'] = dict(__ansible_module__=action)
if '_raw_params' in arguments:
result['action']['__ansible_arguments__'] = arguments['_raw_params'].split(' ')
del(arguments['_raw_params'])
else:
result['action']['__ansible_arguments__'] = list()
if 'argv' in arguments and not result['action']['__ansible_arguments__']:
result['action']['__ansible_arguments__'] = arguments['argv']
del(arguments['argv'])
result['action'].update(arguments)
return result
def normalize_task_v1(task):
result = dict()
for (k, v) in task.items():
if k in VALID_KEYS or k.startswith('with_'):
if k == 'local_action' or k == 'action':
if not isinstance(v, dict):
v = _kv_to_dict(v)
v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
result['action'] = v
else:
result[k] = v
else:
if isinstance(v, six.string_types):
v = _kv_to_dict(k + ' ' + v)
elif not v:
v = dict(__ansible_module__=k)
else:
if isinstance(v, dict):
v.update(dict(__ansible_module__=k))
else:
if k == '__line__':
# Keep the line number stored
result[k] = v
continue
else:
# Tasks that include playbooks (rather than task files)
# can get here
# https://github.com/ansible/ansible-lint/issues/138
raise RuntimeError("Was not expecting value %s of type %s for key %s\n"
"Task: %s. Check the syntax of your playbook using "
"ansible-playbook --syntax-check" %
(str(v), type(v), k, str(task)))
v['__ansible_arguments__'] = v.get('__ansible_arguments__', list())
result['action'] = v
if 'module' in result['action']:
# this happens when a task uses
# local_action:
# module: ec2
# etc...
result['action']['__ansible_module__'] = result['action']['module']
del(result['action']['module'])
if 'args' in result:
result['action'].update(result.get('args'))
del(result['args'])
return result
def normalize_task(task, filename):
    """Normalize *task* for the running Ansible major version.

    Re-tags the result with its source filename and original action type.
    """
    action_type = task.get('__ansible_action_type__', 'task')
    task.pop('__ansible_action_type__', None)
    normalizer = normalize_task_v1 if ANSIBLE_VERSION < 2 else normalize_task_v2
    task = normalizer(task)
    task[FILENAME_KEY] = filename
    task['__ansible_action_type__'] = action_type
    return task
def task_to_str(task):
    """Render a task as a one-line human-readable string.

    Prefers the task's ``name``; otherwise builds "<module> k=v ... args"
    from the normalized action dict.
    """
    name = task.get("name")
    if name:
        return name
    action = task.get("action")
    # Bug fix: ``__ansible_arguments__`` may be absent (or None) on
    # hand-built action dicts; default to [] instead of raising TypeError
    # when concatenating below.
    positional = action.get("__ansible_arguments__") or []
    keyword_parts = [u"{0}={1}".format(k, v) for (k, v) in action.items()
                     if k not in ["__ansible_module__", "__ansible_arguments__"]]
    args = " ".join(keyword_parts + list(positional))
    return u"{0} {1}".format(action["__ansible_module__"], args)
def extract_from_list(blocks, candidates):
results = list()
for block in blocks:
for candidate in candidates:
if isinstance(block, dict) and candidate in block:
if isinstance(block[candidate], list):
results.extend(add_action_type(block[candidate], candidate))
elif block[candidate] is not None:
raise RuntimeError(
"Key '%s' defined, but bad value: '%s'" %
(candidate, str(block[candidate])))
return results
def add_action_type(actions, action_type):
    """Tag each action dict with the normalized type for ``action_type``."""
    results = []
    for entry in actions:
        # The map lookup stays inside the loop so an unknown action_type
        # with an empty actions list does not raise, same as before.
        entry['__ansible_action_type__'] = BLOCK_NAME_TO_ACTION_TYPE_MAP[action_type]
        results.append(entry)
    return results
def get_action_tasks(yaml, file):
    """Flatten the parsed YAML of a file into a list of plain action tasks."""
    file_type = file['type']
    if file_type in ('tasks', 'handlers'):
        tasks = add_action_type(yaml, file_type)
    else:
        tasks = extract_from_list(yaml, ['tasks', 'handlers', 'pre_tasks', 'post_tasks'])

    # Pull the tasks nested under block/rescue/always up into the flat list...
    tasks.extend(extract_from_list(tasks, ['block', 'rescue', 'always']))

    # ...then drop the wrapper elements themselves.
    wrapper_keys = ('block', 'rescue', 'always')
    tasks = [t for t in tasks if not any(k in t for k in wrapper_keys)]

    # Include/import statements are not action tasks.
    inclusion_keys = {'include', 'include_tasks', 'import_playbook', 'import_tasks'}
    return [t for t in tasks if inclusion_keys.isdisjoint(t.keys())]
def get_normalized_tasks(yaml, file):
    """Return the normalized form of every non-skipped task in the file."""
    normalized = []
    for task in get_action_tasks(yaml, file):
        # An empty `tags` block parses to None, so `task.get('tags', [])`
        # alone would not suffice - fall back to an empty list explicitly.
        tags = task.get('tags') or []
        if 'skip_ansible_lint' in tags:
            # No need to normalize a task we are going to skip anyway.
            continue
        normalized.append(normalize_task(task, file['path']))
    return normalized
def parse_yaml_linenumbers(data, filename):
    """Parse YAML like ansible does, but record line numbers on each mapping.

    The line number is stored under LINE_NUMBER_KEY and the file name under
    FILENAME_KEY in every constructed mapping. Exits with SystemExit on a
    YAML syntax error.
    """
    def compose_node(parent, index):
        # the line number where the previous token has ended (plus empty lines)
        line = loader.line
        node = Composer.compose_node(loader, parent, index)
        node.__line__ = line + 1
        return node

    def construct_mapping(node, deep=False):
        if ANSIBLE_VERSION < 2:
            mapping = Constructor.construct_mapping(loader, node, deep=deep)
        else:
            mapping = AnsibleConstructor.construct_mapping(loader, node, deep=deep)
        if hasattr(node, '__line__'):
            mapping[LINE_NUMBER_KEY] = node.__line__
        else:
            mapping[LINE_NUMBER_KEY] = mapping._line_number
        mapping[FILENAME_KEY] = filename
        return mapping

    try:
        if ANSIBLE_VERSION < 2:
            loader = yaml.Loader(data)
        else:
            import inspect
            kwargs = {}
            # inspect.getargspec was removed in Python 3.11; prefer
            # getfullargspec and keep getargspec as a Python 2 fallback.
            get_argspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
            if 'vault_password' in get_argspec(AnsibleLoader.__init__).args:
                kwargs['vault_password'] = DEFAULT_VAULT_PASSWORD
            loader = AnsibleLoader(data, **kwargs)
        # Monkey-patch the loader so our line-tracking hooks run.
        loader.compose_node = compose_node
        loader.construct_mapping = construct_mapping
        data = loader.get_single_data()
    except (yaml.parser.ParserError, yaml.scanner.ScannerError) as e:
        raise SystemExit("Failed to parse YAML in %s: %s" % (filename, str(e)))
    return data
def get_first_cmd_arg(task):
    """Return the first word of a task's command, or None when there is none.

    Looks at the free-form ``cmd`` string first, then falls back to the
    parsed ``__ansible_arguments__`` list.
    """
    try:
        if 'cmd' in task['action']:
            first_cmd_arg = task['action']['cmd'].split()[0]
        else:
            first_cmd_arg = task['action']['__ansible_arguments__'][0]
    except (KeyError, IndexError):
        # Either the command is empty, or the action has neither 'cmd'
        # nor '__ansible_arguments__' at all.
        return None
    return first_cmd_arg
def append_skipped_rules(pyyaml_data, file_text, file_type):
    """Attach 'skipped_rules' to the parsed data, best-effort.

    Any RuntimeError from the matching logic is reported to the user and
    the original data is returned unchanged, so linting still proceeds.
    """
    try:
        return _append_skipped_rules(pyyaml_data, file_text, file_type)
    except RuntimeError as exc:
        # Notify user of skip error, do not stop, do not change exit code
        print('Error trying to append skipped rules: {!r}'.format(exc))
        return pyyaml_data
def _append_skipped_rules(pyyaml_data, file_text, file_type):
    """Attach 'skipped_rules' (from '# noqa' comments) to the parsed data.

    Comments are only reachable through a second, comment-preserving parse
    of the file with ruamel.yaml; the two parse trees are then matched up
    task by task. Raises RuntimeError when the trees cannot be matched or
    the file type is unknown.
    """
    # parse file text using 2nd parser library
    yaml = ruamel.yaml.YAML()
    ruamel_data = yaml.load(file_text)
    if file_type == 'meta':
        # Meta files carry a single skip list on their first document.
        pyyaml_data[0]['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_data)
        return pyyaml_data
    # create list of blocks of tasks or nested tasks
    if file_type in ('tasks', 'handlers'):
        ruamel_task_blocks = ruamel_data
        pyyaml_task_blocks = pyyaml_data
    elif file_type == 'playbook':
        try:
            pyyaml_task_blocks = _get_task_blocks_from_playbook(pyyaml_data)
            ruamel_task_blocks = _get_task_blocks_from_playbook(ruamel_data)
        except (AttributeError, TypeError):
            # TODO(awcrosby): running ansible-lint on any .yml file will
            # assume it is a playbook, check needs to be added higher in the
            # call stack, and can remove this except
            return pyyaml_data
    else:
        raise RuntimeError('Unexpected file type: {}'.format(file_type))
    # get tasks from blocks of tasks
    pyyaml_tasks = _get_tasks_from_blocks(pyyaml_task_blocks)
    ruamel_tasks = _get_tasks_from_blocks(ruamel_task_blocks)
    # append skipped_rules for each task
    for ruamel_task, pyyaml_task in zip(ruamel_tasks, pyyaml_tasks):
        # Both parses must yield tasks in the same order; fail loudly if
        # they ever get out of sync rather than mis-attribute skips.
        if pyyaml_task.get('name') != ruamel_task.get('name'):
            raise RuntimeError('Error in matching skip comment to a task')
        pyyaml_task['skipped_rules'] = _get_rule_skips_from_yaml(ruamel_task)
    return pyyaml_data
def _get_task_blocks_from_playbook(playbook):
PLAYBOOK_TASK_KEYWORDS = [
'tasks',
'pre_tasks',
'post_tasks',
'handlers',
]
task_blocks = []
for play, key in product(playbook, PLAYBOOK_TASK_KEYWORDS):
task_blocks.extend(play.get(key, []))
return task_blocks
def _get_tasks_from_blocks(task_blocks):
NESTED_TASK_KEYS = [
'block',
'always',
'rescue',
]
def get_nested_tasks(task):
return (
subtask
for k in NESTED_TASK_KEYS if k in task
for subtask in task[k]
)
for task in task_blocks:
for sub_task in get_nested_tasks(task):
yield sub_task
yield task
def _get_rule_skips_from_yaml(yaml_input):
    """Collect all rule ids skipped via '# noqa' comments in a ruamel tree."""
    def traverse_yaml(obj):
        # ruamel.yaml attaches comments to containers via the `ca`
        # attribute; stringify the whole comment structure here and scan
        # the text later instead of walking its internals.
        yaml_comment_obj_strs.append(str(obj.ca.items))
        if isinstance(obj, dict):
            for key, val in obj.items():
                if isinstance(val, (dict, list)):
                    traverse_yaml(val)
        elif isinstance(obj, list):
            for e in obj:
                if isinstance(e, (dict, list)):
                    traverse_yaml(e)
        else:
            return

    yaml_comment_obj_strs = []
    traverse_yaml(yaml_input)
    rule_id_list = []
    for comment_obj_str in yaml_comment_obj_strs:
        # The stringified comment structure is a repr, so newlines appear
        # as a literal backslash-n - hence the '\\n' split.
        for line in comment_obj_str.split('\\n'):
            rule_id_list.extend(get_rule_skips_from_line(line))
    return rule_id_list
def get_rule_skips_from_line(line):
    """Return the rule ids listed after a '# noqa' marker on the line.

    Returns an empty list when the line carries no marker (or no ids).
    """
    # The '# noqa' string literals below were corrupted in this copy of the
    # file (a comment-stripping pass ate them); restored per upstream.
    rule_id_list = []
    if '# noqa' in line:
        # Everything after the marker is a whitespace-separated id list.
        noqa_text = line.split('# noqa')[1]
        rule_id_list = noqa_text.split()
    return rule_id_list
| true | true |
f72b120e0e4865b2e5c26ca09713f83332de05bd | 43,459 | py | Python | kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | [
"BSD-3-Clause"
] | null | null | null | kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | [
"BSD-3-Clause"
] | null | null | null | kubernetes_state/datadog_checks/kubernetes_state/kubernetes_state.py | tanner-bruce/integrations-core | 36337b84fefb73e94d4f1ee28aaeb669dc12fb59 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2016-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import re
import time
from collections import Counter, defaultdict
from copy import deepcopy
from six import iteritems
from datadog_checks.checks.openmetrics import OpenMetricsBaseCheck
from datadog_checks.config import is_affirmative
from datadog_checks.errors import CheckException
from datadog_checks.utils.common import to_string
try:
    # this module is only available in agent 6
    from datadog_agent import get_clustername
except ImportError:

    def get_clustername():
        # Fallback when running outside agent 6 (tests, older agents):
        # behave as if no cluster name is configured.
        return ""


METRIC_TYPES = ['counter', 'gauge']

# As case can vary depending on Kubernetes versions, we match the lowercase string
WHITELISTED_WAITING_REASONS = ['errimagepull', 'imagepullbackoff', 'crashloopbackoff', 'containercreating']
WHITELISTED_TERMINATED_REASONS = ['oomkilled', 'containercannotrun', 'error']

# Maps kube-state-metrics label names to the Datadog tag names to emit.
kube_labels_mapper = {
    'namespace': 'kube_namespace',
    'job': 'kube_job',
    'cronjob': 'kube_cronjob',
    'pod': 'pod_name',
    'phase': 'pod_phase',
    'daemonset': 'kube_daemon_set',
    'replicationcontroller': 'kube_replication_controller',
    'replicaset': 'kube_replica_set',
    # Fixed: the key used to be 'statefulset ' (trailing space), which can
    # never match the actual KSM label name 'statefulset'.
    'statefulset': 'kube_stateful_set',
    'deployment': 'kube_deployment',
    'container': 'kube_container_name',
    'container_id': 'container_id',
    'image': 'image_name',
}
class KubernetesState(OpenMetricsBaseCheck):
"""
Collect kube-state-metrics metrics in the Prometheus format
See https://github.com/kubernetes/kube-state-metrics
"""
    class CronJobCount:
        """Monotonic counter of finished job runs for a single CronJob.

        Keeps the timestamp of the newest job seen in the previous check
        run so that only jobs created since then increment the count
        (re-scraped jobs must not be double counted).
        """

        def __init__(self):
            self.count = 0  # running total, submitted as a monotonic count
            self.previous_run_max_ts = 0  # newest job ts seen in the previous run
            self.current_run_max_ts = 0  # newest job ts seen so far in this run

        def set_previous_and_reset_current_ts(self):
            # Roll the current-run watermark over at the end of a check run.
            if self.current_run_max_ts > 0:
                self.previous_run_max_ts = self.current_run_max_ts
                self.current_run_max_ts = 0

        def update_current_ts_and_add_count(self, job_ts, count):
            # Only count jobs newer than anything seen in the previous run.
            if job_ts > self.previous_run_max_ts and count > 0:
                self.count += count
            # The watermark advances regardless of whether we counted.
            self.current_run_max_ts = max(self.current_run_max_ts, job_ts)
DEFAULT_METRIC_LIMIT = 0
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Build the generic OpenMetrics instance and the per-check state."""
        # We do not support more than one instance of kube-state-metrics
        instance = instances[0]
        kubernetes_state_instance = self._create_kubernetes_state_prometheus_instance(instance)
        # First deprecation phase: we keep ksm labels by default
        # Next iteration: remove ksm labels by default
        # Last iteration: remove this option
        self.keep_ksm_labels = is_affirmative(kubernetes_state_instance.get('keep_ksm_labels', True))
        generic_instances = [kubernetes_state_instance]
        super(KubernetesState, self).__init__(name, init_config, agentConfig, instances=generic_instances)
        # Mappings from a "condition" label value ("true"/"false"/"unknown")
        # to a service check status: positive = true is healthy (e.g. Ready),
        # negative = true is unhealthy (e.g. DiskPressure).
        self.condition_to_status_positive = {'true': self.OK, 'false': self.CRITICAL, 'unknown': self.UNKNOWN}
        self.condition_to_status_negative = {'true': self.CRITICAL, 'false': self.OK, 'unknown': self.UNKNOWN}
        # Parameters for the count_objects_by_tags method
        self.object_count_params = {
            'kube_persistentvolume_status_phase': {
                'metric_name': 'persistentvolumes.by_phase',
                'allowed_labels': ['storageclass', 'phase'],
            },
            'kube_service_spec_type': {'metric_name': 'service.count', 'allowed_labels': ['namespace', 'type']},
        }
        # Metrics that need custom handling instead of the generic
        # OpenMetrics mapping declared in the instance 'metrics' list.
        self.METRIC_TRANSFORMERS = {
            'kube_pod_status_phase': self.kube_pod_status_phase,
            'kube_pod_container_status_waiting_reason': self.kube_pod_container_status_waiting_reason,
            'kube_pod_container_status_terminated_reason': self.kube_pod_container_status_terminated_reason,
            'kube_cronjob_next_schedule_time': self.kube_cronjob_next_schedule_time,
            'kube_job_complete': self.kube_job_complete,
            'kube_job_failed': self.kube_job_failed,
            'kube_job_status_failed': self.kube_job_status_failed,
            'kube_job_status_succeeded': self.kube_job_status_succeeded,
            'kube_node_status_condition': self.kube_node_status_condition,
            'kube_node_status_ready': self.kube_node_status_ready,
            'kube_node_status_out_of_disk': self.kube_node_status_out_of_disk,
            'kube_node_status_memory_pressure': self.kube_node_status_memory_pressure,
            'kube_node_status_disk_pressure': self.kube_node_status_disk_pressure,
            'kube_node_status_network_unavailable': self.kube_node_status_network_unavailable,
            'kube_node_spec_unschedulable': self.kube_node_spec_unschedulable,
            'kube_resourcequota': self.kube_resourcequota,
            'kube_limitrange': self.kube_limitrange,
            'kube_persistentvolume_status_phase': self.count_objects_by_tags,
            'kube_service_spec_type': self.count_objects_by_tags,
        }
        # Handling cron jobs succeeded/failed counts
        self.failed_cron_job_counts = defaultdict(KubernetesState.CronJobCount)
        self.succeeded_cron_job_counts = defaultdict(KubernetesState.CronJobCount)
        # Logic for Jobs
        self.job_succeeded_count = defaultdict(int)
        self.job_failed_count = defaultdict(int)
    def check(self, instance):
        """Scrape kube-state-metrics and submit the derived job counters."""
        endpoint = instance.get('kube_state_url')
        scraper_config = self.config_map[endpoint]
        # Generic OpenMetrics processing; the transformers accumulate the
        # cron/job counters submitted below.
        self.process(scraper_config, metric_transformers=self.METRIC_TRANSFORMERS)
        # Logic for Cron Jobs: submit the accumulated per-cronjob counters
        # then roll their timestamp watermark over for the next run.
        for job_tags, job in iteritems(self.failed_cron_job_counts):
            self.monotonic_count(scraper_config['namespace'] + '.job.failed', job.count, list(job_tags))
            job.set_previous_and_reset_current_ts()
        for job_tags, job in iteritems(self.succeeded_cron_job_counts):
            self.monotonic_count(scraper_config['namespace'] + '.job.succeeded', job.count, list(job_tags))
            job.set_previous_and_reset_current_ts()
        # Logic for Jobs: plain (non-cron) jobs use simple counters.
        for job_tags, job_count in iteritems(self.job_succeeded_count):
            self.monotonic_count(scraper_config['namespace'] + '.job.succeeded', job_count, list(job_tags))
        for job_tags, job_count in iteritems(self.job_failed_count):
            self.monotonic_count(scraper_config['namespace'] + '.job.failed', job_count, list(job_tags))
def _filter_metric(self, metric, scraper_config):
if scraper_config['telemetry']:
# name is like "kube_pod_execution_duration"
name_part = metric.name.split("_", 3)
if len(name_part) < 2:
return False
family = name_part[1]
tags = ["resource_name:" + family]
for sample in metric.samples:
if "namespace" in sample[self.SAMPLE_LABELS]:
ns = sample[self.SAMPLE_LABELS]["namespace"]
tags.append("resource_namespace:" + ns)
break
self._send_telemetry_counter(
'collector.metrics.count', len(metric.samples), scraper_config, extra_tags=tags
)
# do not filter
return False
    def _create_kubernetes_state_prometheus_instance(self, instance):
        """
        Set up the kubernetes_state instance so it can be used in OpenMetricsBaseCheck.

        Fills in the metric-name mapping, ignored metrics, default label
        joins and the Prometheus endpoint derived from `kube_state_url`.
        Raises CheckException when `kube_state_url` is missing.
        """
        ksm_instance = deepcopy(instance)
        endpoint = instance.get('kube_state_url')
        if endpoint is None:
            raise CheckException("Unable to find kube_state_url in config file.")
        # User-provided label joins are merged on top of the defaults below.
        extra_labels = ksm_instance.get('label_joins', {})
        hostname_override = is_affirmative(ksm_instance.get('hostname_override', True))
        ksm_instance.update(
            {
                'namespace': 'kubernetes_state',
                # KSM metric name -> Datadog metric name (under the namespace).
                'metrics': [
                    {
                        'kube_daemonset_status_current_number_scheduled': 'daemonset.scheduled',
                        'kube_daemonset_status_desired_number_scheduled': 'daemonset.desired',
                        'kube_daemonset_status_number_misscheduled': 'daemonset.misscheduled',
                        'kube_daemonset_status_number_ready': 'daemonset.ready',
                        'kube_daemonset_updated_number_scheduled': 'daemonset.updated',
                        'kube_deployment_spec_paused': 'deployment.paused',
                        'kube_deployment_spec_replicas': 'deployment.replicas_desired',
                        'kube_deployment_spec_strategy_rollingupdate_max_unavailable': 'deployment.rollingupdate.max_unavailable',  # noqa: E501
                        'kube_deployment_status_replicas': 'deployment.replicas',
                        'kube_deployment_status_replicas_available': 'deployment.replicas_available',
                        'kube_deployment_status_replicas_unavailable': 'deployment.replicas_unavailable',
                        'kube_deployment_status_replicas_updated': 'deployment.replicas_updated',
                        'kube_endpoint_address_available': 'endpoint.address_available',
                        'kube_endpoint_address_not_ready': 'endpoint.address_not_ready',
                        'kube_endpoint_created': 'endpoint.created',
                        'kube_hpa_spec_min_replicas': 'hpa.min_replicas',
                        'kube_hpa_spec_max_replicas': 'hpa.max_replicas',
                        'kube_hpa_status_desired_replicas': 'hpa.desired_replicas',
                        'kube_hpa_status_current_replicas': 'hpa.current_replicas',
                        'kube_hpa_status_condition': 'hpa.condition',
                        'kube_node_info': 'node.count',
                        'kube_node_status_allocatable_cpu_cores': 'node.cpu_allocatable',
                        'kube_node_status_allocatable_memory_bytes': 'node.memory_allocatable',
                        'kube_node_status_allocatable_pods': 'node.pods_allocatable',
                        'kube_node_status_capacity_cpu_cores': 'node.cpu_capacity',
                        'kube_node_status_capacity_memory_bytes': 'node.memory_capacity',
                        'kube_node_status_capacity_pods': 'node.pods_capacity',
                        'kube_node_status_allocatable_nvidia_gpu_cards': 'node.gpu.cards_allocatable',
                        'kube_node_status_capacity_nvidia_gpu_cards': 'node.gpu.cards_capacity',
                        'kube_pod_container_status_terminated': 'container.terminated',
                        'kube_pod_container_status_waiting': 'container.waiting',
                        'kube_persistentvolumeclaim_status_phase': 'persistentvolumeclaim.status',
                        'kube_persistentvolumeclaim_resource_requests_storage_bytes': 'persistentvolumeclaim.request_storage',  # noqa: E501
                        'kube_pod_container_resource_limits_cpu_cores': 'container.cpu_limit',
                        'kube_pod_container_resource_limits_memory_bytes': 'container.memory_limit',
                        'kube_pod_container_resource_requests_cpu_cores': 'container.cpu_requested',
                        'kube_pod_container_resource_requests_memory_bytes': 'container.memory_requested',
                        'kube_pod_container_status_ready': 'container.ready',
                        'kube_pod_container_status_restarts': 'container.restarts',  # up to kube-state-metrics 1.1.x
                        'kube_pod_container_status_restarts_total': 'container.restarts',  # noqa: E501, from kube-state-metrics 1.2.0
                        'kube_pod_container_status_running': 'container.running',
                        'kube_pod_container_resource_requests_nvidia_gpu_devices': 'container.gpu.request',
                        'kube_pod_container_resource_limits_nvidia_gpu_devices': 'container.gpu.limit',
                        'kube_pod_status_ready': 'pod.ready',
                        'kube_pod_status_scheduled': 'pod.scheduled',
                        'kube_poddisruptionbudget_status_current_healthy': 'pdb.pods_healthy',
                        'kube_poddisruptionbudget_status_desired_healthy': 'pdb.pods_desired',
                        'kube_poddisruptionbudget_status_pod_disruptions_allowed': 'pdb.disruptions_allowed',
                        'kube_poddisruptionbudget_status_expected_pods': 'pdb.pods_total',
                        'kube_replicaset_spec_replicas': 'replicaset.replicas_desired',
                        'kube_replicaset_status_fully_labeled_replicas': 'replicaset.fully_labeled_replicas',
                        'kube_replicaset_status_ready_replicas': 'replicaset.replicas_ready',
                        'kube_replicaset_status_replicas': 'replicaset.replicas',
                        'kube_replicationcontroller_spec_replicas': 'replicationcontroller.replicas_desired',
                        'kube_replicationcontroller_status_available_replicas': 'replicationcontroller.replicas_available',  # noqa: E501
                        'kube_replicationcontroller_status_fully_labeled_replicas': 'replicationcontroller.fully_labeled_replicas',  # noqa: E501
                        'kube_replicationcontroller_status_ready_replicas': 'replicationcontroller.replicas_ready',
                        'kube_replicationcontroller_status_replicas': 'replicationcontroller.replicas',
                        'kube_statefulset_replicas': 'statefulset.replicas_desired',
                        'kube_statefulset_status_replicas': 'statefulset.replicas',
                        'kube_statefulset_status_replicas_current': 'statefulset.replicas_current',
                        'kube_statefulset_status_replicas_ready': 'statefulset.replicas_ready',
                        'kube_statefulset_status_replicas_updated': 'statefulset.replicas_updated',
                        'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_lowerbound': (
                            'vpa.lower_bound'
                        ),
                        'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_target': (
                            'vpa.target'
                        ),
                        'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_uncappedtarget': (
                            'vpa.uncapped_target'
                        ),
                        'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_upperbound': (
                            'vpa.upperbound'
                        ),
                        'kube_verticalpodautoscaler_spec_updatepolicy_updatemode': 'vpa.update_mode',
                    }
                ],
                'ignore_metrics': [
                    # _info, _labels and _created don't convey any metric
                    'kube_cronjob_info',
                    'kube_cronjob_created',
                    'kube_daemonset_created',
                    'kube_deployment_created',
                    'kube_deployment_labels',
                    'kube_job_created',
                    'kube_job_info',
                    'kube_limitrange_created',
                    'kube_namespace_created',
                    'kube_namespace_labels',
                    'kube_node_created',
                    'kube_node_labels',
                    'kube_pod_created',
                    'kube_pod_container_info',
                    'kube_pod_info',
                    'kube_pod_owner',
                    'kube_pod_start_time',
                    'kube_pod_labels',
                    'kube_poddisruptionbudget_created',
                    'kube_replicaset_created',
                    'kube_replicationcontroller_created',
                    'kube_resourcequota_created',
                    'kube_replicaset_owner',
                    'kube_service_created',
                    'kube_service_info',
                    'kube_service_labels',
                    'kube_service_spec_external_ip',
                    'kube_service_status_load_balancer_ingress',
                    'kube_statefulset_labels',
                    'kube_statefulset_created',
                    'kube_statefulset_status_current_revision',
                    'kube_statefulset_status_update_revision',
                    # Already provided by the kubelet integration
                    'kube_pod_container_status_last_terminated_reason',
                    # _generation metrics are more metadata than metrics, no real use case for now
                    'kube_daemonset_metadata_generation',
                    'kube_deployment_metadata_generation',
                    'kube_deployment_status_observed_generation',
                    'kube_replicaset_metadata_generation',
                    'kube_replicaset_status_observed_generation',
                    'kube_replicationcontroller_metadata_generation',
                    'kube_replicationcontroller_status_observed_generation',
                    'kube_statefulset_metadata_generation',
                    'kube_statefulset_status_observed_generation',
                    'kube_hpa_metadata_generation',
                    # kube_node_status_phase and kube_namespace_status_phase have no use case as a service check
                    'kube_namespace_status_phase',
                    'kube_node_status_phase',
                    # These CronJob and Job metrics need use cases to determine how do implement
                    'kube_cronjob_status_active',
                    'kube_cronjob_status_last_schedule_time',
                    'kube_cronjob_spec_suspend',
                    'kube_cronjob_spec_starting_deadline_seconds',
                    'kube_job_spec_active_dealine_seconds',
                    'kube_job_spec_completions',
                    'kube_job_spec_parallelism',
                    'kube_job_status_active',
                    'kube_job_status_completion_time',  # We could compute the duration=completion-start as a gauge
                    'kube_job_status_start_time',
                    'kube_verticalpodautoscaler_labels',
                ],
                # Default label joins; user-provided joins are merged below.
                'label_joins': {
                    'kube_pod_info': {'label_to_match': 'pod', 'labels_to_get': ['node']},
                    'kube_pod_status_phase': {'label_to_match': 'pod', 'labels_to_get': ['phase']},
                    'kube_persistentvolume_info': {
                        'label_to_match': 'persistentvolume',
                        'labels_to_get': ['storageclass'],
                    },
                    'kube_persistentvolumeclaim_info': {
                        'label_to_match': 'persistentvolumeclaim',
                        'labels_to_get': ['storageclass'],
                    },
                },
                # Defaults that were set when kubernetes_state was based on PrometheusCheck
                'send_monotonic_counter': ksm_instance.get('send_monotonic_counter', False),
                'health_service_check': ksm_instance.get('health_service_check', False),
            }
        )
        ksm_instance['prometheus_url'] = endpoint
        ksm_instance['label_joins'].update(extra_labels)
        if hostname_override:
            # Attribute metrics to the node the pod runs on, optionally
            # suffixed with the cluster name from the agent.
            ksm_instance['label_to_hostname'] = 'node'
            clustername = get_clustername()
            if clustername != "":
                ksm_instance['label_to_hostname_suffix'] = "-" + clustername
        if 'labels_mapper' in ksm_instance and not isinstance(ksm_instance['labels_mapper'], dict):
            self.log.warning("Option labels_mapper should be a dictionary for %s", endpoint)
        return ksm_instance
def _condition_to_service_check(self, sample, sc_name, mapping, tags=None):
"""
Some metrics contains conditions, labels that have "condition" as name and "true", "false", or "unknown"
as value. The metric value is expected to be a gauge equal to 0 or 1 in this case.
For example:
metric {
label { name: "condition", value: "true"
}
# other labels here
gauge { value: 1.0 }
}
This function evaluates metrics containing conditions and sends a service check
based on a provided condition->check mapping dict
"""
if bool(sample[self.SAMPLE_VALUE]) is False:
return # Ignore if gauge is not 1
condition = sample[self.SAMPLE_LABELS].get('condition')
if condition:
if condition in mapping:
self.service_check(sc_name, mapping[condition], tags=tags)
else:
self.log.debug("Unable to handle %s - unknown condition %s", sc_name, condition)
    def _condition_to_tag_check(self, sample, base_sc_name, mapping, scraper_config, tags=None):
        """Send a per-condition service check from a status-labeled sample.

        Newer kube-state-metrics moves the true/false/unknown value from the
        `condition` label into a `status` label, e.g.:
            kube_node_status_condition{condition="Ready",node="...",status="true"} 1
        The condition name selects which service check to send and which
        condition->status mapping to use (via _get_metric_condition_map).

        NOTE(review): the `mapping` parameter is immediately overwritten by
        the mapping coming from _get_metric_condition_map, so the value
        passed by callers is effectively unused - confirm before relying
        on it.
        """
        if bool(sample[self.SAMPLE_VALUE]) is False:
            return  # Ignore if gauge is not 1 and we are not processing the pod phase check
        label_value, condition_map = self._get_metric_condition_map(base_sc_name, sample[self.SAMPLE_LABELS])
        service_check_name = condition_map['service_check_name']
        mapping = condition_map['mapping']
        node = self._label_to_tag('node', sample[self.SAMPLE_LABELS], scraper_config)
        condition = self._label_to_tag('condition', sample[self.SAMPLE_LABELS], scraper_config)
        message = "{} is currently reporting {} = {}".format(node, condition, label_value)
        if condition_map['service_check_name'] is None:
            self.log.debug("Unable to handle %s - unknown condition %s", service_check_name, label_value)
        else:
            self.service_check(service_check_name, mapping[label_value], tags=tags, message=message)
    def _get_metric_condition_map(self, base_sc_name, labels):
        """Map a node condition label to its service check name and mapping.

        Returns (status_label_value, {'service_check_name', 'mapping'});
        the service check name is None for conditions we do not handle.
        Only 'kubernetes_state.node' is supported as a base name - any
        other base falls through and returns None implicitly.
        """
        if base_sc_name == 'kubernetes_state.node':
            switch = {
                # For Ready, "true" is healthy; for the pressure/availability
                # conditions, "true" means trouble.
                'Ready': {'service_check_name': base_sc_name + '.ready', 'mapping': self.condition_to_status_positive},
                'OutOfDisk': {
                    'service_check_name': base_sc_name + '.out_of_disk',
                    'mapping': self.condition_to_status_negative,
                },
                'DiskPressure': {
                    'service_check_name': base_sc_name + '.disk_pressure',
                    'mapping': self.condition_to_status_negative,
                },
                'NetworkUnavailable': {
                    'service_check_name': base_sc_name + '.network_unavailable',
                    'mapping': self.condition_to_status_negative,
                },
                'MemoryPressure': {
                    'service_check_name': base_sc_name + '.memory_pressure',
                    'mapping': self.condition_to_status_negative,
                },
            }
            return (
                labels.get('status'),
                switch.get(labels.get('condition'), {'service_check_name': None, 'mapping': None}),
            )
def _format_tag(self, name, value, scraper_config):
"""
Lookups the labels_mapper table to see if replacing the tag name is
necessary, then returns a "name:value" tag string
"""
return '%s:%s' % (scraper_config['labels_mapper'].get(name, name), to_string(value).lower())
def _label_to_tag(self, name, labels, scraper_config, tag_name=None):
"""
Search for `name` in labels name and returns corresponding tag string.
Tag name is label name if not specified.
Returns None if name was not found.
"""
value = labels.get(name)
if value:
return self._format_tag(tag_name or name, value, scraper_config)
else:
return None
def _label_to_tags(self, name, labels, scraper_config, tag_name=None):
"""
Search for `name` in labels name and returns corresponding tags string.
Tag name is label name if not specified.
Returns an empty list if name was not found.
"""
value = labels.get(name)
tags = []
if value:
tags += self._build_tags(tag_name or name, value, scraper_config)
return tags
def _trim_job_tag(self, name):
"""
Trims suffix of job names if they match -(\\d{4,10}$)
"""
pattern = r"(-\d{4,10}$)"
return re.sub(pattern, '', name)
def _extract_job_timestamp(self, name):
"""
Extract timestamp of job names
"""
ts = name.split('-')[-1]
if ts.isdigit():
return int(ts)
else:
msg = 'Cannot extract ts from job name {}'
self.log.debug(msg, name)
return None
    # Labels attached: namespace, pod
    # As a message the phase=Pending|Running|Succeeded|Failed|Unknown
    # From the phase the check will update its status
    # Also submits as an aggregated count with minimal tags so it is
    # visualisable over time per namespace and phase
    def kube_pod_status_phase(self, metric, scraper_config):
        """ Phase a pod is in. """
        metric_name = scraper_config['namespace'] + '.pod.status_phase'
        # Sum the gauge per (namespace, phase, custom tags) bucket.
        status_phase_counter = Counter()
        for sample in metric.samples:
            # Counts aggregated cluster-wide to avoid no-data issues on pod churn,
            # pod granularity available in the service checks
            tags = (
                self._label_to_tags('namespace', sample[self.SAMPLE_LABELS], scraper_config)
                + self._label_to_tags('phase', sample[self.SAMPLE_LABELS], scraper_config)
                + scraper_config['custom_tags']
            )
            # Sorted tuple so equivalent tag sets land in the same bucket.
            status_phase_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
        for tags, count in iteritems(status_phase_counter):
            self.gauge(metric_name, count, tags=list(tags))
    def _submit_metric_kube_pod_container_status_reason(
        self, metric, metric_suffix, whitelisted_status_reasons, scraper_config
    ):
        """Submit per-container status-reason gauges for whitelisted reasons.

        Samples whose `reason` label (lowercased) is not in
        `whitelisted_status_reasons` are dropped entirely.
        """
        metric_name = scraper_config['namespace'] + metric_suffix
        for sample in metric.samples:
            tags = []
            reason = sample[self.SAMPLE_LABELS].get('reason')
            if reason:
                # Filtering according to the reason here is paramount to limit cardinality
                if reason.lower() in whitelisted_status_reasons:
                    tags += self._build_tags('reason', reason, scraper_config)
                else:
                    continue
            if 'container' in sample[self.SAMPLE_LABELS]:
                tags += self._build_tags('kube_container_name', sample[self.SAMPLE_LABELS]['container'], scraper_config)
            if 'namespace' in sample[self.SAMPLE_LABELS]:
                tags += self._build_tags('namespace', sample[self.SAMPLE_LABELS]['namespace'], scraper_config)
            if 'pod' in sample[self.SAMPLE_LABELS]:
                tags += self._build_tags('pod', sample[self.SAMPLE_LABELS]['pod'], scraper_config)
            self.gauge(
                metric_name,
                sample[self.SAMPLE_VALUE],
                tags + scraper_config['custom_tags'],
                hostname=self.get_hostname_for_sample(sample, scraper_config),
            )
    def kube_pod_container_status_waiting_reason(self, metric, scraper_config):
        """Containers in waiting state, tagged by whitelisted waiting reason."""
        self._submit_metric_kube_pod_container_status_reason(
            metric, '.container.status_report.count.waiting', WHITELISTED_WAITING_REASONS, scraper_config
        )
    def kube_pod_container_status_terminated_reason(self, metric, scraper_config):
        """Terminated containers, tagged by whitelisted termination reason."""
        self._submit_metric_kube_pod_container_status_reason(
            metric, '.container.status_report.count.terminated', WHITELISTED_TERMINATED_REASONS, scraper_config
        )
def kube_cronjob_next_schedule_time(self, metric, scraper_config):
""" Time until the next schedule """
# Used as a service check so that one can be alerted if the cronjob's next schedule is in the past
check_basename = scraper_config['namespace'] + '.cronjob.on_schedule_check'
curr_time = int(time.time())
for sample in metric.samples:
on_schedule = int(sample[self.SAMPLE_VALUE]) - curr_time
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
tags += self._build_tags(label_name, label_value, scraper_config)
tags += scraper_config['custom_tags']
if on_schedule < 0:
message = "The service check scheduled at {} is {} seconds late".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(int(sample[self.SAMPLE_VALUE]))), on_schedule
)
self.service_check(check_basename, self.CRITICAL, tags=tags, message=message)
else:
self.service_check(check_basename, self.OK, tags=tags)
def kube_job_complete(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.job.complete'
for sample in metric.samples:
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name == 'job' or label_name == 'job_name':
trimmed_job = self._trim_job_tag(label_value)
tags += self._build_tags(label_name, trimmed_job, scraper_config)
else:
tags += self._build_tags(label_name, label_value, scraper_config)
self.service_check(service_check_name, self.OK, tags=tags + scraper_config['custom_tags'])
def kube_job_failed(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.job.complete'
for sample in metric.samples:
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name == 'job' or label_name == 'job_name':
trimmed_job = self._trim_job_tag(label_value)
tags += self._build_tags(label_name, trimmed_job, scraper_config)
else:
tags += self._build_tags(label_name, label_value, scraper_config)
self.service_check(service_check_name, self.CRITICAL, tags=tags + scraper_config['custom_tags'])
    def kube_job_status_failed(self, metric, scraper_config):
        """Accumulate failed job counts; submitted later in check().

        Jobs whose name ends in a numeric timestamp are treated as
        CronJob-spawned and tracked via CronJobCount watermarks.
        """
        for sample in metric.samples:
            job_ts = None
            tags = [] + scraper_config['custom_tags']
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                if label_name == 'job' or label_name == 'job_name':
                    # Tag with the trimmed name but keep the timestamp for
                    # the cron-job de-duplication logic.
                    trimmed_job = self._trim_job_tag(label_value)
                    job_ts = self._extract_job_timestamp(label_value)
                    tags += self._build_tags(label_name, trimmed_job, scraper_config)
                else:
                    tags += self._build_tags(label_name, label_value, scraper_config)
            if job_ts is not None:  # if there is a timestamp, this is a Cron Job
                self.failed_cron_job_counts[frozenset(tags)].update_current_ts_and_add_count(
                    job_ts, sample[self.SAMPLE_VALUE]
                )
            else:
                self.job_failed_count[frozenset(tags)] += sample[self.SAMPLE_VALUE]
    def kube_job_status_succeeded(self, metric, scraper_config):
        """Accumulate succeeded job counts; submitted later in check().

        Mirror of kube_job_status_failed: CronJob-spawned jobs (detected by
        their numeric name suffix) go through CronJobCount watermarks.
        """
        for sample in metric.samples:
            job_ts = None
            tags = [] + scraper_config['custom_tags']
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                if label_name == 'job' or label_name == 'job_name':
                    # Tag with the trimmed name but keep the timestamp for
                    # the cron-job de-duplication logic.
                    trimmed_job = self._trim_job_tag(label_value)
                    job_ts = self._extract_job_timestamp(label_value)
                    tags += self._build_tags(label_name, trimmed_job, scraper_config)
                else:
                    tags += self._build_tags(label_name, label_value, scraper_config)
            if job_ts is not None:  # if there is a timestamp, this is a Cron Job
                self.succeeded_cron_job_counts[frozenset(tags)].update_current_ts_and_add_count(
                    job_ts, sample[self.SAMPLE_VALUE]
                )
            else:
                self.job_succeeded_count[frozenset(tags)] += sample[self.SAMPLE_VALUE]
    def kube_node_status_condition(self, metric, scraper_config):
        """ The ready status of a cluster node. v1.0+"""
        # Each sample yields a per-node service check (via _condition_to_tag_check)
        # plus a contribution to a cluster-wide by-condition gauge.
        base_check_name = scraper_config['namespace'] + '.node'
        metric_name = scraper_config['namespace'] + '.nodes.by_condition'
        by_condition_counter = Counter()
        for sample in metric.samples:
            node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
            self._condition_to_tag_check(
                sample,
                base_check_name,
                self.condition_to_status_positive,
                scraper_config,
                tags=node_tags + scraper_config['custom_tags'],
            )
            # Counts aggregated cluster-wide to avoid no-data issues on node churn,
            # node granularity available in the service checks
            tags = (
                self._label_to_tags("condition", sample[self.SAMPLE_LABELS], scraper_config)
                + self._label_to_tags("status", sample[self.SAMPLE_LABELS], scraper_config)
                + scraper_config['custom_tags']
            )
            # sorted() makes the counter key order-insensitive across samples.
            by_condition_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
        for tags, count in iteritems(by_condition_counter):
            self.gauge(metric_name, count, tags=list(tags))
def kube_node_status_ready(self, metric, scraper_config):
""" The ready status of a cluster node (legacy)"""
service_check_name = scraper_config['namespace'] + '.node.ready'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_positive,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_out_of_disk(self, metric, scraper_config):
""" Whether the node is out of disk space (legacy)"""
service_check_name = scraper_config['namespace'] + '.node.out_of_disk'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_memory_pressure(self, metric, scraper_config):
""" Whether the node is in a memory pressure state (legacy)"""
service_check_name = scraper_config['namespace'] + '.node.memory_pressure'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_disk_pressure(self, metric, scraper_config):
""" Whether the node is in a disk pressure state (legacy)"""
service_check_name = scraper_config['namespace'] + '.node.disk_pressure'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_network_unavailable(self, metric, scraper_config):
""" Whether the node is in a network unavailable state (legacy)"""
service_check_name = scraper_config['namespace'] + '.node.network_unavailable'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
    def kube_node_spec_unschedulable(self, metric, scraper_config):
        """ Whether a node can schedule new pods. """
        metric_name = scraper_config['namespace'] + '.node.status'
        # Index 0 -> gauge value 0 (schedulable), index 1 -> gauge value 1 (unschedulable).
        statuses = ('schedulable', 'unschedulable')
        if metric.type in METRIC_TYPES:
            for sample in metric.samples:
                tags = []
                for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                    tags += self._build_tags(label_name, label_value, scraper_config)
                tags += scraper_config['custom_tags']
                status = statuses[int(sample[self.SAMPLE_VALUE])]  # value can be 0 or 1
                tags += self._build_tags('status', status, scraper_config)
                self.gauge(metric_name, 1, tags)  # metric value is always one, value is on the tags
        else:
            self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def kube_resourcequota(self, metric, scraper_config):
""" Quota and current usage by resource type. """
metric_base_name = scraper_config['namespace'] + '.resourcequota.{}.{}'
suffixes = {'used': 'used', 'hard': 'limit'}
if metric.type in METRIC_TYPES:
for sample in metric.samples:
mtype = sample[self.SAMPLE_LABELS].get("type")
resource = sample[self.SAMPLE_LABELS].get("resource")
tags = (
self._label_to_tags("namespace", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("resourcequota", sample[self.SAMPLE_LABELS], scraper_config)
+ scraper_config['custom_tags']
)
self.gauge(metric_base_name.format(resource, suffixes[mtype]), sample[self.SAMPLE_VALUE], tags)
else:
self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def kube_limitrange(self, metric, scraper_config):
""" Resource limits by consumer type. """
# type's cardinality's low: https://github.com/kubernetes/kubernetes/blob/v1.6.1/pkg/api/v1/types.go#L3872-L3879
# idem for resource: https://github.com/kubernetes/kubernetes/blob/v1.6.1/pkg/api/v1/types.go#L3342-L3352
# idem for constraint: https://github.com/kubernetes/kubernetes/blob/v1.6.1/pkg/api/v1/types.go#L3882-L3901
metric_base_name = scraper_config['namespace'] + '.limitrange.{}.{}'
constraints = {
'min': 'min',
'max': 'max',
'default': 'default',
'defaultRequest': 'default_request',
'maxLimitRequestRatio': 'max_limit_request_ratio',
}
if metric.type in METRIC_TYPES:
for sample in metric.samples:
constraint = sample[self.SAMPLE_LABELS].get("constraint")
if constraint in constraints:
constraint = constraints[constraint]
else:
self.log.error("Constraint %s unsupported for metric %s", constraint, metric.name)
continue
resource = sample[self.SAMPLE_LABELS].get("resource")
tags = (
self._label_to_tags("namespace", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("limitrange", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("limitrange", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("type", sample[self.SAMPLE_LABELS], scraper_config, tag_name="consumer_type")
+ scraper_config['custom_tags']
)
self.gauge(metric_base_name.format(resource, constraint), sample[self.SAMPLE_VALUE], tags)
else:
self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def count_objects_by_tags(self, metric, scraper_config):
""" Count objects by whitelisted tags and submit counts as gauges. """
config = self.object_count_params[metric.name]
metric_name = "{}.{}".format(scraper_config['namespace'], config['metric_name'])
object_counter = Counter()
for sample in metric.samples:
tags = [
self._label_to_tag(l, sample[self.SAMPLE_LABELS], scraper_config) for l in config['allowed_labels']
] + scraper_config['custom_tags']
object_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(object_counter):
self.gauge(metric_name, count, tags=list(tags))
    def _build_tags(self, label_name, label_value, scraper_config, hostname=None):
        """
        Build a list of formatted tags from the `label_name` parameter. The result also
        depends on the check configuration: with 'keep_ksm_labels' enabled, the raw
        (pre-kube_labels_mapper) tag name is submitted alongside the remapped one.
        """
        tags = []
        # first use the labels_mapper
        tag_name = scraper_config['labels_mapper'].get(label_name, label_name)
        # then try to use the kube_labels_mapper
        kube_tag_name = kube_labels_mapper.get(tag_name, tag_name)
        label_value = to_string(label_value).lower()
        tags.append('{}:{}'.format(to_string(kube_tag_name), label_value))
        # duplicate the tag under its pre-mapping name for backward compatibility
        if self.keep_ksm_labels and (kube_tag_name != tag_name):
            tags.append('{}:{}'.format(to_string(tag_name), label_value))
        return tags
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
"""
Redefine this method to allow labels duplication, during migration phase
"""
custom_tags = scraper_config['custom_tags']
_tags = list(custom_tags)
_tags += scraper_config['_metric_tags']
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name not in scraper_config['exclude_labels']:
_tags += self._build_tags(label_name, label_value, scraper_config)
return self._finalize_tags_to_submit(
_tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
)
# NOTE: extraction artifact — dataset statistics row ("52.046707 | 145 | 0.625463") from the corpus join; not part of the check's source.
import re
import time
from collections import Counter, defaultdict
from copy import deepcopy
from six import iteritems
from datadog_checks.checks.openmetrics import OpenMetricsBaseCheck
from datadog_checks.config import is_affirmative
from datadog_checks.errors import CheckException
from datadog_checks.utils.common import to_string
# `datadog_agent.get_clustername` is only available when running embedded in the
# Datadog Agent; fall back to an empty cluster name for standalone/test runs.
try:
    from datadog_agent import get_clustername
except ImportError:

    def get_clustername():
        # Fallback: no cluster name, so no hostname suffix is appended.
        return ""
# Prometheus metric types the check knows how to translate into Datadog metrics.
METRIC_TYPES = ['counter', 'gauge']

# Container status reasons kept as tags; filtering on these is paramount to
# limit tag cardinality (see _submit_metric_kube_pod_container_status_reason).
WHITELISTED_WAITING_REASONS = ['errimagepull', 'imagepullbackoff', 'crashloopbackoff', 'containercreating']
WHITELISTED_TERMINATED_REASONS = ['oomkilled', 'containercannotrun', 'error']

# Maps raw kube-state-metrics label names to canonical Datadog tag names
# (applied in _build_tags after the per-instance labels_mapper).
kube_labels_mapper = {
    'namespace': 'kube_namespace',
    'job': 'kube_job',
    'cronjob': 'kube_cronjob',
    'pod': 'pod_name',
    'phase': 'pod_phase',
    'daemonset': 'kube_daemon_set',
    'replicationcontroller': 'kube_replication_controller',
    'replicaset': 'kube_replica_set',
    # Bug fix: this key used to contain a trailing space ('statefulset '), which
    # can never match a Prometheus label name, so the rename silently never applied.
    'statefulset': 'kube_stateful_set',
    'deployment': 'kube_deployment',
    'container': 'kube_container_name',
    'container_id': 'container_id',
    'image': 'image_name',
}
class KubernetesState(OpenMetricsBaseCheck):
class CronJobCount:
def __init__(self):
self.count = 0
self.previous_run_max_ts = 0
self.current_run_max_ts = 0
def set_previous_and_reset_current_ts(self):
if self.current_run_max_ts > 0:
self.previous_run_max_ts = self.current_run_max_ts
self.current_run_max_ts = 0
def update_current_ts_and_add_count(self, job_ts, count):
if job_ts > self.previous_run_max_ts and count > 0:
self.count += count
self.current_run_max_ts = max(self.current_run_max_ts, job_ts)
DEFAULT_METRIC_LIMIT = 0
    def __init__(self, name, init_config, agentConfig, instances=None):
        """Build the OpenMetrics scraper configuration, the metric-transformer
        dispatch table and the per-run job-counter state.

        Only the first configured instance is used to build the scraper config.
        """
        instance = instances[0]
        kubernetes_state_instance = self._create_kubernetes_state_prometheus_instance(instance)
        # Whether _build_tags also submits the raw KSM label name alongside the remapped one.
        self.keep_ksm_labels = is_affirmative(kubernetes_state_instance.get('keep_ksm_labels', True))
        generic_instances = [kubernetes_state_instance]
        super(KubernetesState, self).__init__(name, init_config, agentConfig, instances=generic_instances)
        # 'true'/'false'/'unknown' condition label values -> service-check statuses,
        # for conditions where 'true' is healthy (positive) vs unhealthy (negative).
        self.condition_to_status_positive = {'true': self.OK, 'false': self.CRITICAL, 'unknown': self.UNKNOWN}
        self.condition_to_status_negative = {'true': self.CRITICAL, 'false': self.OK, 'unknown': self.UNKNOWN}
        # Configuration for count_objects_by_tags, keyed by KSM metric name.
        self.object_count_params = {
            'kube_persistentvolume_status_phase': {
                'metric_name': 'persistentvolumes.by_phase',
                'allowed_labels': ['storageclass', 'phase'],
            },
            'kube_service_spec_type': {'metric_name': 'service.count', 'allowed_labels': ['namespace', 'type']},
        }
        # KSM metric name -> handler; everything else goes through the plain
        # 'metrics' rename map built in _create_kubernetes_state_prometheus_instance.
        self.METRIC_TRANSFORMERS = {
            'kube_pod_status_phase': self.kube_pod_status_phase,
            'kube_pod_container_status_waiting_reason': self.kube_pod_container_status_waiting_reason,
            'kube_pod_container_status_terminated_reason': self.kube_pod_container_status_terminated_reason,
            'kube_cronjob_next_schedule_time': self.kube_cronjob_next_schedule_time,
            'kube_job_complete': self.kube_job_complete,
            'kube_job_failed': self.kube_job_failed,
            'kube_job_status_failed': self.kube_job_status_failed,
            'kube_job_status_succeeded': self.kube_job_status_succeeded,
            'kube_node_status_condition': self.kube_node_status_condition,
            'kube_node_status_ready': self.kube_node_status_ready,
            'kube_node_status_out_of_disk': self.kube_node_status_out_of_disk,
            'kube_node_status_memory_pressure': self.kube_node_status_memory_pressure,
            'kube_node_status_disk_pressure': self.kube_node_status_disk_pressure,
            'kube_node_status_network_unavailable': self.kube_node_status_network_unavailable,
            'kube_node_spec_unschedulable': self.kube_node_spec_unschedulable,
            'kube_resourcequota': self.kube_resourcequota,
            'kube_limitrange': self.kube_limitrange,
            'kube_persistentvolume_status_phase': self.count_objects_by_tags,
            'kube_service_spec_type': self.count_objects_by_tags,
        }
        # Job counters accumulated while processing samples, flushed by check().
        self.failed_cron_job_counts = defaultdict(KubernetesState.CronJobCount)
        self.succeeded_cron_job_counts = defaultdict(KubernetesState.CronJobCount)
        self.job_succeeded_count = defaultdict(int)
        self.job_failed_count = defaultdict(int)
def check(self, instance):
endpoint = instance.get('kube_state_url')
scraper_config = self.config_map[endpoint]
self.process(scraper_config, metric_transformers=self.METRIC_TRANSFORMERS)
for job_tags, job in iteritems(self.failed_cron_job_counts):
self.monotonic_count(scraper_config['namespace'] + '.job.failed', job.count, list(job_tags))
job.set_previous_and_reset_current_ts()
for job_tags, job in iteritems(self.succeeded_cron_job_counts):
self.monotonic_count(scraper_config['namespace'] + '.job.succeeded', job.count, list(job_tags))
job.set_previous_and_reset_current_ts()
for job_tags, job_count in iteritems(self.job_succeeded_count):
self.monotonic_count(scraper_config['namespace'] + '.job.succeeded', job_count, list(job_tags))
for job_tags, job_count in iteritems(self.job_failed_count):
self.monotonic_count(scraper_config['namespace'] + '.job.failed', job_count, list(job_tags))
def _filter_metric(self, metric, scraper_config):
if scraper_config['telemetry']:
name_part = metric.name.split("_", 3)
if len(name_part) < 2:
return False
family = name_part[1]
tags = ["resource_name:" + family]
for sample in metric.samples:
if "namespace" in sample[self.SAMPLE_LABELS]:
ns = sample[self.SAMPLE_LABELS]["namespace"]
tags.append("resource_namespace:" + ns)
break
self._send_telemetry_counter(
'collector.metrics.count', len(metric.samples), scraper_config, extra_tags=tags
)
return False
def _create_kubernetes_state_prometheus_instance(self, instance):
ksm_instance = deepcopy(instance)
endpoint = instance.get('kube_state_url')
if endpoint is None:
raise CheckException("Unable to find kube_state_url in config file.")
extra_labels = ksm_instance.get('label_joins', {})
hostname_override = is_affirmative(ksm_instance.get('hostname_override', True))
ksm_instance.update(
{
'namespace': 'kubernetes_state',
'metrics': [
{
'kube_daemonset_status_current_number_scheduled': 'daemonset.scheduled',
'kube_daemonset_status_desired_number_scheduled': 'daemonset.desired',
'kube_daemonset_status_number_misscheduled': 'daemonset.misscheduled',
'kube_daemonset_status_number_ready': 'daemonset.ready',
'kube_daemonset_updated_number_scheduled': 'daemonset.updated',
'kube_deployment_spec_paused': 'deployment.paused',
'kube_deployment_spec_replicas': 'deployment.replicas_desired',
'kube_deployment_spec_strategy_rollingupdate_max_unavailable': 'deployment.rollingupdate.max_unavailable',
'kube_deployment_status_replicas': 'deployment.replicas',
'kube_deployment_status_replicas_available': 'deployment.replicas_available',
'kube_deployment_status_replicas_unavailable': 'deployment.replicas_unavailable',
'kube_deployment_status_replicas_updated': 'deployment.replicas_updated',
'kube_endpoint_address_available': 'endpoint.address_available',
'kube_endpoint_address_not_ready': 'endpoint.address_not_ready',
'kube_endpoint_created': 'endpoint.created',
'kube_hpa_spec_min_replicas': 'hpa.min_replicas',
'kube_hpa_spec_max_replicas': 'hpa.max_replicas',
'kube_hpa_status_desired_replicas': 'hpa.desired_replicas',
'kube_hpa_status_current_replicas': 'hpa.current_replicas',
'kube_hpa_status_condition': 'hpa.condition',
'kube_node_info': 'node.count',
'kube_node_status_allocatable_cpu_cores': 'node.cpu_allocatable',
'kube_node_status_allocatable_memory_bytes': 'node.memory_allocatable',
'kube_node_status_allocatable_pods': 'node.pods_allocatable',
'kube_node_status_capacity_cpu_cores': 'node.cpu_capacity',
'kube_node_status_capacity_memory_bytes': 'node.memory_capacity',
'kube_node_status_capacity_pods': 'node.pods_capacity',
'kube_node_status_allocatable_nvidia_gpu_cards': 'node.gpu.cards_allocatable',
'kube_node_status_capacity_nvidia_gpu_cards': 'node.gpu.cards_capacity',
'kube_pod_container_status_terminated': 'container.terminated',
'kube_pod_container_status_waiting': 'container.waiting',
'kube_persistentvolumeclaim_status_phase': 'persistentvolumeclaim.status',
'kube_persistentvolumeclaim_resource_requests_storage_bytes': 'persistentvolumeclaim.request_storage',
'kube_pod_container_resource_limits_cpu_cores': 'container.cpu_limit',
'kube_pod_container_resource_limits_memory_bytes': 'container.memory_limit',
'kube_pod_container_resource_requests_cpu_cores': 'container.cpu_requested',
'kube_pod_container_resource_requests_memory_bytes': 'container.memory_requested',
'kube_pod_container_status_ready': 'container.ready',
'kube_pod_container_status_restarts': 'container.restarts',
'kube_pod_container_status_restarts_total': 'container.restarts',
'kube_pod_container_status_running': 'container.running',
'kube_pod_container_resource_requests_nvidia_gpu_devices': 'container.gpu.request',
'kube_pod_container_resource_limits_nvidia_gpu_devices': 'container.gpu.limit',
'kube_pod_status_ready': 'pod.ready',
'kube_pod_status_scheduled': 'pod.scheduled',
'kube_poddisruptionbudget_status_current_healthy': 'pdb.pods_healthy',
'kube_poddisruptionbudget_status_desired_healthy': 'pdb.pods_desired',
'kube_poddisruptionbudget_status_pod_disruptions_allowed': 'pdb.disruptions_allowed',
'kube_poddisruptionbudget_status_expected_pods': 'pdb.pods_total',
'kube_replicaset_spec_replicas': 'replicaset.replicas_desired',
'kube_replicaset_status_fully_labeled_replicas': 'replicaset.fully_labeled_replicas',
'kube_replicaset_status_ready_replicas': 'replicaset.replicas_ready',
'kube_replicaset_status_replicas': 'replicaset.replicas',
'kube_replicationcontroller_spec_replicas': 'replicationcontroller.replicas_desired',
'kube_replicationcontroller_status_available_replicas': 'replicationcontroller.replicas_available',
'kube_replicationcontroller_status_fully_labeled_replicas': 'replicationcontroller.fully_labeled_replicas',
'kube_replicationcontroller_status_ready_replicas': 'replicationcontroller.replicas_ready',
'kube_replicationcontroller_status_replicas': 'replicationcontroller.replicas',
'kube_statefulset_replicas': 'statefulset.replicas_desired',
'kube_statefulset_status_replicas': 'statefulset.replicas',
'kube_statefulset_status_replicas_current': 'statefulset.replicas_current',
'kube_statefulset_status_replicas_ready': 'statefulset.replicas_ready',
'kube_statefulset_status_replicas_updated': 'statefulset.replicas_updated',
'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_lowerbound': (
'vpa.lower_bound'
),
'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_target': (
'vpa.target'
),
'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_uncappedtarget': (
'vpa.uncapped_target'
),
'kube_verticalpodautoscaler_status_recommendation_containerrecommendations_upperbound': (
'vpa.upperbound'
),
'kube_verticalpodautoscaler_spec_updatepolicy_updatemode': 'vpa.update_mode',
}
],
'ignore_metrics': [
'kube_cronjob_info',
'kube_cronjob_created',
'kube_daemonset_created',
'kube_deployment_created',
'kube_deployment_labels',
'kube_job_created',
'kube_job_info',
'kube_limitrange_created',
'kube_namespace_created',
'kube_namespace_labels',
'kube_node_created',
'kube_node_labels',
'kube_pod_created',
'kube_pod_container_info',
'kube_pod_info',
'kube_pod_owner',
'kube_pod_start_time',
'kube_pod_labels',
'kube_poddisruptionbudget_created',
'kube_replicaset_created',
'kube_replicationcontroller_created',
'kube_resourcequota_created',
'kube_replicaset_owner',
'kube_service_created',
'kube_service_info',
'kube_service_labels',
'kube_service_spec_external_ip',
'kube_service_status_load_balancer_ingress',
'kube_statefulset_labels',
'kube_statefulset_created',
'kube_statefulset_status_current_revision',
'kube_statefulset_status_update_revision',
# Already provided by the kubelet integration
'kube_pod_container_status_last_terminated_reason',
# _generation metrics are more metadata than metrics, no real use case for now
'kube_daemonset_metadata_generation',
'kube_deployment_metadata_generation',
'kube_deployment_status_observed_generation',
'kube_replicaset_metadata_generation',
'kube_replicaset_status_observed_generation',
'kube_replicationcontroller_metadata_generation',
'kube_replicationcontroller_status_observed_generation',
'kube_statefulset_metadata_generation',
'kube_statefulset_status_observed_generation',
'kube_hpa_metadata_generation',
# kube_node_status_phase and kube_namespace_status_phase have no use case as a service check
'kube_namespace_status_phase',
'kube_node_status_phase',
# These CronJob and Job metrics need use cases to determine how do implement
'kube_cronjob_status_active',
'kube_cronjob_status_last_schedule_time',
'kube_cronjob_spec_suspend',
'kube_cronjob_spec_starting_deadline_seconds',
'kube_job_spec_active_dealine_seconds',
'kube_job_spec_completions',
'kube_job_spec_parallelism',
'kube_job_status_active',
'kube_job_status_completion_time', # We could compute the duration=completion-start as a gauge
'kube_job_status_start_time',
'kube_verticalpodautoscaler_labels',
],
'label_joins': {
'kube_pod_info': {'label_to_match': 'pod', 'labels_to_get': ['node']},
'kube_pod_status_phase': {'label_to_match': 'pod', 'labels_to_get': ['phase']},
'kube_persistentvolume_info': {
'label_to_match': 'persistentvolume',
'labels_to_get': ['storageclass'],
},
'kube_persistentvolumeclaim_info': {
'label_to_match': 'persistentvolumeclaim',
'labels_to_get': ['storageclass'],
},
},
# Defaults that were set when kubernetes_state was based on PrometheusCheck
'send_monotonic_counter': ksm_instance.get('send_monotonic_counter', False),
'health_service_check': ksm_instance.get('health_service_check', False),
}
)
ksm_instance['prometheus_url'] = endpoint
ksm_instance['label_joins'].update(extra_labels)
if hostname_override:
ksm_instance['label_to_hostname'] = 'node'
clustername = get_clustername()
if clustername != "":
ksm_instance['label_to_hostname_suffix'] = "-" + clustername
if 'labels_mapper' in ksm_instance and not isinstance(ksm_instance['labels_mapper'], dict):
self.log.warning("Option labels_mapper should be a dictionary for %s", endpoint)
return ksm_instance
def _condition_to_service_check(self, sample, sc_name, mapping, tags=None):
if bool(sample[self.SAMPLE_VALUE]) is False:
return # Ignore if gauge is not 1
condition = sample[self.SAMPLE_LABELS].get('condition')
if condition:
if condition in mapping:
self.service_check(sc_name, mapping[condition], tags=tags)
else:
self.log.debug("Unable to handle %s - unknown condition %s", sc_name, condition)
    def _condition_to_tag_check(self, sample, base_sc_name, mapping, scraper_config, tags=None):
        """Submit a per-node service check derived from a condition gauge sample.

        The service-check name and status mapping are resolved from the sample's
        'condition' label via _get_metric_condition_map.

        NOTE(review): the `mapping` parameter is immediately overwritten below by
        condition_map['mapping'] and is therefore ignored; it is kept only for
        call-site compatibility.
        """
        if bool(sample[self.SAMPLE_VALUE]) is False:
            return  # Ignore if gauge is not 1 and we are not processing the pod phase check
        label_value, condition_map = self._get_metric_condition_map(base_sc_name, sample[self.SAMPLE_LABELS])
        service_check_name = condition_map['service_check_name']
        mapping = condition_map['mapping']
        node = self._label_to_tag('node', sample[self.SAMPLE_LABELS], scraper_config)
        condition = self._label_to_tag('condition', sample[self.SAMPLE_LABELS], scraper_config)
        message = "{} is currently reporting {} = {}".format(node, condition, label_value)
        if condition_map['service_check_name'] is None:
            self.log.debug("Unable to handle %s - unknown condition %s", service_check_name, label_value)
        else:
            self.service_check(service_check_name, mapping[label_value], tags=tags, message=message)
def _get_metric_condition_map(self, base_sc_name, labels):
if base_sc_name == 'kubernetes_state.node':
switch = {
'Ready': {'service_check_name': base_sc_name + '.ready', 'mapping': self.condition_to_status_positive},
'OutOfDisk': {
'service_check_name': base_sc_name + '.out_of_disk',
'mapping': self.condition_to_status_negative,
},
'DiskPressure': {
'service_check_name': base_sc_name + '.disk_pressure',
'mapping': self.condition_to_status_negative,
},
'NetworkUnavailable': {
'service_check_name': base_sc_name + '.network_unavailable',
'mapping': self.condition_to_status_negative,
},
'MemoryPressure': {
'service_check_name': base_sc_name + '.memory_pressure',
'mapping': self.condition_to_status_negative,
},
}
return (
labels.get('status'),
switch.get(labels.get('condition'), {'service_check_name': None, 'mapping': None}),
)
def _format_tag(self, name, value, scraper_config):
return '%s:%s' % (scraper_config['labels_mapper'].get(name, name), to_string(value).lower())
def _label_to_tag(self, name, labels, scraper_config, tag_name=None):
value = labels.get(name)
if value:
return self._format_tag(tag_name or name, value, scraper_config)
else:
return None
def _label_to_tags(self, name, labels, scraper_config, tag_name=None):
value = labels.get(name)
tags = []
if value:
tags += self._build_tags(tag_name or name, value, scraper_config)
return tags
def _trim_job_tag(self, name):
pattern = r"(-\d{4,10}$)"
return re.sub(pattern, '', name)
def _extract_job_timestamp(self, name):
ts = name.split('-')[-1]
if ts.isdigit():
return int(ts)
else:
msg = 'Cannot extract ts from job name {}'
self.log.debug(msg, name)
return None
# Labels attached: namespace, pod
# As a message the phase=Pending|Running|Succeeded|Failed|Unknown
# From the phase the check will update its status
# Also submits as an aggregated count with minimal tags so it is
# visualisable over time per namespace and phase
def kube_pod_status_phase(self, metric, scraper_config):
metric_name = scraper_config['namespace'] + '.pod.status_phase'
status_phase_counter = Counter()
for sample in metric.samples:
# Counts aggregated cluster-wide to avoid no-data issues on pod churn,
# pod granularity available in the service checks
tags = (
self._label_to_tags('namespace', sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags('phase', sample[self.SAMPLE_LABELS], scraper_config)
+ scraper_config['custom_tags']
)
status_phase_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(status_phase_counter):
self.gauge(metric_name, count, tags=list(tags))
    def _submit_metric_kube_pod_container_status_reason(
        self, metric, metric_suffix, whitelisted_status_reasons, scraper_config
    ):
        """Submit container waiting/terminated gauges tagged by reason.

        Only samples whose 'reason' label is in `whitelisted_status_reasons`
        are submitted; everything else is dropped to keep cardinality bounded.
        Container/namespace/pod labels are carried over as tags when present.
        """
        metric_name = scraper_config['namespace'] + metric_suffix
        for sample in metric.samples:
            tags = []
            reason = sample[self.SAMPLE_LABELS].get('reason')
            if reason:
                # Filtering according to the reason here is paramount to limit cardinality
                if reason.lower() in whitelisted_status_reasons:
                    tags += self._build_tags('reason', reason, scraper_config)
                else:
                    continue
            if 'container' in sample[self.SAMPLE_LABELS]:
                tags += self._build_tags('kube_container_name', sample[self.SAMPLE_LABELS]['container'], scraper_config)
            if 'namespace' in sample[self.SAMPLE_LABELS]:
                tags += self._build_tags('namespace', sample[self.SAMPLE_LABELS]['namespace'], scraper_config)
            if 'pod' in sample[self.SAMPLE_LABELS]:
                tags += self._build_tags('pod', sample[self.SAMPLE_LABELS]['pod'], scraper_config)
            self.gauge(
                metric_name,
                sample[self.SAMPLE_VALUE],
                tags + scraper_config['custom_tags'],
                hostname=self.get_hostname_for_sample(sample, scraper_config),
            )
def kube_pod_container_status_waiting_reason(self, metric, scraper_config):
self._submit_metric_kube_pod_container_status_reason(
metric, '.container.status_report.count.waiting', WHITELISTED_WAITING_REASONS, scraper_config
)
def kube_pod_container_status_terminated_reason(self, metric, scraper_config):
self._submit_metric_kube_pod_container_status_reason(
metric, '.container.status_report.count.terminated', WHITELISTED_TERMINATED_REASONS, scraper_config
)
def kube_cronjob_next_schedule_time(self, metric, scraper_config):
# Used as a service check so that one can be alerted if the cronjob's next schedule is in the past
check_basename = scraper_config['namespace'] + '.cronjob.on_schedule_check'
curr_time = int(time.time())
for sample in metric.samples:
on_schedule = int(sample[self.SAMPLE_VALUE]) - curr_time
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
tags += self._build_tags(label_name, label_value, scraper_config)
tags += scraper_config['custom_tags']
if on_schedule < 0:
message = "The service check scheduled at {} is {} seconds late".format(
time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(int(sample[self.SAMPLE_VALUE]))), on_schedule
)
self.service_check(check_basename, self.CRITICAL, tags=tags, message=message)
else:
self.service_check(check_basename, self.OK, tags=tags)
def kube_job_complete(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.job.complete'
for sample in metric.samples:
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name == 'job' or label_name == 'job_name':
trimmed_job = self._trim_job_tag(label_value)
tags += self._build_tags(label_name, trimmed_job, scraper_config)
else:
tags += self._build_tags(label_name, label_value, scraper_config)
self.service_check(service_check_name, self.OK, tags=tags + scraper_config['custom_tags'])
def kube_job_failed(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.job.complete'
for sample in metric.samples:
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name == 'job' or label_name == 'job_name':
trimmed_job = self._trim_job_tag(label_value)
tags += self._build_tags(label_name, trimmed_job, scraper_config)
else:
tags += self._build_tags(label_name, label_value, scraper_config)
self.service_check(service_check_name, self.CRITICAL, tags=tags + scraper_config['custom_tags'])
    def kube_job_status_failed(self, metric, scraper_config):
        """Accumulate kube_job_status_failed samples into per-tag failure counters.

        Cron-spawned jobs (name ends in a numeric timestamp) are deduplicated per
        run via self.failed_cron_job_counts; plain jobs are summed into
        self.job_failed_count. Both are flushed as monotonic counts by check().
        """
        for sample in metric.samples:
            job_ts = None
            tags = [] + scraper_config['custom_tags']  # fresh copy: custom_tags must not be mutated
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                if label_name == 'job' or label_name == 'job_name':
                    # Tag with the job name minus its run timestamp so all runs
                    # of one CronJob share the same counter key.
                    trimmed_job = self._trim_job_tag(label_value)
                    job_ts = self._extract_job_timestamp(label_value)
                    tags += self._build_tags(label_name, trimmed_job, scraper_config)
                else:
                    tags += self._build_tags(label_name, label_value, scraper_config)
            if job_ts is not None:  # a timestamp means this job was spawned by a CronJob
                self.failed_cron_job_counts[frozenset(tags)].update_current_ts_and_add_count(
                    job_ts, sample[self.SAMPLE_VALUE]
                )
            else:
                self.job_failed_count[frozenset(tags)] += sample[self.SAMPLE_VALUE]
    def kube_job_status_succeeded(self, metric, scraper_config):
        """Accumulate kube_job_status_succeeded samples into per-tag success counters.

        Cron-spawned jobs (name ends in a numeric timestamp) are deduplicated per
        run via self.succeeded_cron_job_counts; plain jobs are summed into
        self.job_succeeded_count. Both are flushed as monotonic counts by check().
        """
        for sample in metric.samples:
            job_ts = None
            tags = [] + scraper_config['custom_tags']  # fresh copy: custom_tags must not be mutated
            for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
                if label_name == 'job' or label_name == 'job_name':
                    # Tag with the job name minus its run timestamp so all runs
                    # of one CronJob share the same counter key.
                    trimmed_job = self._trim_job_tag(label_value)
                    job_ts = self._extract_job_timestamp(label_value)
                    tags += self._build_tags(label_name, trimmed_job, scraper_config)
                else:
                    tags += self._build_tags(label_name, label_value, scraper_config)
            if job_ts is not None:  # a timestamp means this job was spawned by a CronJob
                self.succeeded_cron_job_counts[frozenset(tags)].update_current_ts_and_add_count(
                    job_ts, sample[self.SAMPLE_VALUE]
                )
            else:
                self.job_succeeded_count[frozenset(tags)] += sample[self.SAMPLE_VALUE]
    def kube_node_status_condition(self, metric, scraper_config):
        """The ready status of a cluster node (kube-state-metrics v1.0+).

        Each sample yields a per-node service check via _condition_to_tag_check
        plus a contribution to a cluster-wide by-condition gauge.
        """
        base_check_name = scraper_config['namespace'] + '.node'
        metric_name = scraper_config['namespace'] + '.nodes.by_condition'
        by_condition_counter = Counter()
        for sample in metric.samples:
            node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
            self._condition_to_tag_check(
                sample,
                base_check_name,
                self.condition_to_status_positive,
                scraper_config,
                tags=node_tags + scraper_config['custom_tags'],
            )
            # Counts are aggregated cluster-wide to avoid no-data issues on node
            # churn; node granularity is available in the service checks.
            tags = (
                self._label_to_tags("condition", sample[self.SAMPLE_LABELS], scraper_config)
                + self._label_to_tags("status", sample[self.SAMPLE_LABELS], scraper_config)
                + scraper_config['custom_tags']
            )
            # sorted() makes the counter key order-insensitive across samples.
            by_condition_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
        for tags, count in iteritems(by_condition_counter):
            self.gauge(metric_name, count, tags=list(tags))
def kube_node_status_ready(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.ready'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_positive,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_out_of_disk(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.out_of_disk'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_memory_pressure(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.memory_pressure'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_disk_pressure(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.disk_pressure'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_status_network_unavailable(self, metric, scraper_config):
service_check_name = scraper_config['namespace'] + '.node.network_unavailable'
for sample in metric.samples:
node_tags = self._label_to_tags("node", sample[self.SAMPLE_LABELS], scraper_config)
self._condition_to_service_check(
sample,
service_check_name,
self.condition_to_status_negative,
tags=node_tags + scraper_config['custom_tags'],
)
def kube_node_spec_unschedulable(self, metric, scraper_config):
metric_name = scraper_config['namespace'] + '.node.status'
statuses = ('schedulable', 'unschedulable')
if metric.type in METRIC_TYPES:
for sample in metric.samples:
tags = []
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
tags += self._build_tags(label_name, label_value, scraper_config)
tags += scraper_config['custom_tags']
status = statuses[int(sample[self.SAMPLE_VALUE])]
tags += self._build_tags('status', status, scraper_config)
self.gauge(metric_name, 1, tags)
else:
self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def kube_resourcequota(self, metric, scraper_config):
metric_base_name = scraper_config['namespace'] + '.resourcequota.{}.{}'
suffixes = {'used': 'used', 'hard': 'limit'}
if metric.type in METRIC_TYPES:
for sample in metric.samples:
mtype = sample[self.SAMPLE_LABELS].get("type")
resource = sample[self.SAMPLE_LABELS].get("resource")
tags = (
self._label_to_tags("namespace", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("resourcequota", sample[self.SAMPLE_LABELS], scraper_config)
+ scraper_config['custom_tags']
)
self.gauge(metric_base_name.format(resource, suffixes[mtype]), sample[self.SAMPLE_VALUE], tags)
else:
self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def kube_limitrange(self, metric, scraper_config):
se_name = scraper_config['namespace'] + '.limitrange.{}.{}'
constraints = {
'min': 'min',
'max': 'max',
'default': 'default',
'defaultRequest': 'default_request',
'maxLimitRequestRatio': 'max_limit_request_ratio',
}
if metric.type in METRIC_TYPES:
for sample in metric.samples:
constraint = sample[self.SAMPLE_LABELS].get("constraint")
if constraint in constraints:
constraint = constraints[constraint]
else:
self.log.error("Constraint %s unsupported for metric %s", constraint, metric.name)
continue
resource = sample[self.SAMPLE_LABELS].get("resource")
tags = (
self._label_to_tags("namespace", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("limitrange", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("limitrange", sample[self.SAMPLE_LABELS], scraper_config)
+ self._label_to_tags("type", sample[self.SAMPLE_LABELS], scraper_config, tag_name="consumer_type")
+ scraper_config['custom_tags']
)
self.gauge(metric_base_name.format(resource, constraint), sample[self.SAMPLE_VALUE], tags)
else:
self.log.error("Metric type %s unsupported for metric %s", metric.type, metric.name)
def count_objects_by_tags(self, metric, scraper_config):
config = self.object_count_params[metric.name]
metric_name = "{}.{}".format(scraper_config['namespace'], config['metric_name'])
object_counter = Counter()
for sample in metric.samples:
tags = [
self._label_to_tag(l, sample[self.SAMPLE_LABELS], scraper_config) for l in config['allowed_labels']
] + scraper_config['custom_tags']
object_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]
for tags, count in iteritems(object_counter):
self.gauge(metric_name, count, tags=list(tags))
def _build_tags(self, label_name, label_value, scraper_config, hostname=None):
tags = []
tag_name = scraper_config['labels_mapper'].get(label_name, label_name)
kube_tag_name = kube_labels_mapper.get(tag_name, tag_name)
label_value = to_string(label_value).lower()
tags.append('{}:{}'.format(to_string(kube_tag_name), label_value))
if self.keep_ksm_labels and (kube_tag_name != tag_name):
tags.append('{}:{}'.format(to_string(tag_name), label_value))
return tags
def _metric_tags(self, metric_name, val, sample, scraper_config, hostname=None):
custom_tags = scraper_config['custom_tags']
_tags = list(custom_tags)
_tags += scraper_config['_metric_tags']
for label_name, label_value in iteritems(sample[self.SAMPLE_LABELS]):
if label_name not in scraper_config['exclude_labels']:
_tags += self._build_tags(label_name, label_value, scraper_config)
return self._finalize_tags_to_submit(
_tags, metric_name, val, sample, custom_tags=custom_tags, hostname=hostname
)
| true | true |
f72b12c2828be5260fdd70ad443c19b16f2923f0 | 3,003 | py | Python | bin/stock_price_scraper.py | Samanvay96/asx_scraper | 4b80ff97bc3d1005aef005c82bd0a6c20d8733dc | [
"MIT"
] | null | null | null | bin/stock_price_scraper.py | Samanvay96/asx_scraper | 4b80ff97bc3d1005aef005c82bd0a6c20d8733dc | [
"MIT"
] | null | null | null | bin/stock_price_scraper.py | Samanvay96/asx_scraper | 4b80ff97bc3d1005aef005c82bd0a6c20d8733dc | [
"MIT"
] | null | null | null | import urllib.request
from datetime import datetime
import string
from argparse import ArgumentParser
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from bs4 import BeautifulSoup
from sortedcontainers import SortedDict
class StockPriceScraper:
    """Scrape prices for a set of stock codes and write them into a Google Sheet.

    One worksheet is created per run, named after today's date, with one column
    per stock code (code in row 1, price in row 2).
    """

    def __init__(self, base_url, stock_codes, google_sheet, client_secret, test):
        self.stock_codes = stock_codes
        self.base_url = base_url
        # In test mode no Google connection is made; insert_prices is then unusable.
        if not test:
            self.sheet = client(client_secret).open(google_sheet)

    def insert_prices(self):
        """Create a dated worksheet and write each stock's code and price into it."""
        worksheet = self.sheet.add_worksheet(title=f'{datetime.today().strftime("%Y-%m-%d")}', rows='2', cols=f'{len(self.stock_codes)}')
        for i, (stock_code, stock_price) in enumerate(self.stock_prices().items()):
            self.update_sheet(worksheet, i, [stock_code, stock_price])

    def stock_prices(self):
        """Return a SortedDict mapping stock code -> scraped price string."""
        stock_prices = {}
        for stock_code in self.stock_codes:
            stock_prices[stock_code] = price(url(self.base_url, stock_code))
        return SortedDict(stock_prices)

    def update_sheet(self, worksheet, i, contents):
        """Write ``contents`` down column ``i`` (0-based) of the worksheet.

        Fix: Google Sheets rows are 1-based, so the row index must be ``j + 1``;
        the previous ``j`` produced the invalid cell reference 'A0' for the
        first entry.
        """
        for j, content in enumerate(contents):
            update_cell(worksheet, cell(string.ascii_uppercase[i], j + 1), content)
def cell(letter, number):
    """Build an A1-notation cell reference, e.g. ``cell('A', 1) == 'A1'``."""
    return '{}{}'.format(letter, number)
def update_cell(worksheet, cell, info):
    """Write ``info`` into ``cell`` (A1 notation) of the given gspread worksheet."""
    worksheet.update_acell(cell, info)
def client(client_secret):
    """Return an authorized gspread client from a service-account JSON key file."""
    # Spreadsheets-feed scope grants read/write access to the account's sheets.
    scope = ['https://spreadsheets.google.com/feeds']
    creds = ServiceAccountCredentials.from_json_keyfile_name(client_secret, scope)
    return gspread.authorize(creds)
def price(url):
    """Fetch ``url`` and return the stripped text of the price heading.

    Fix: the HTTP response is now closed deterministically via a context
    manager; previously it was leaked.
    """
    with urllib.request.urlopen(url) as page:
        soup = BeautifulSoup(page, 'html.parser')
    # NOTE(review): assumes the price lives in this <h2>; site-specific — confirm.
    return soup.find('h2', attrs={'class':'page-content entry-content'}).text.strip()
def url(base_url, stock_code):
    """Join the base URL with the upper-cased stock code."""
    return base_url + stock_code.upper()
if __name__ == '__main__':
    parser = ArgumentParser(description='Takes stock codes, scrapes prices from website and inserts into a given google sheet')
    # Fix: this argument previously duplicated '-c'/'--client-secret' (with
    # dest='base_url'), which makes argparse raise ArgumentError at startup.
    parser.add_argument('-b', '--base-url', action='store', help='the base url to scrape prices from', type=str, dest='base_url', required=True)
    parser.add_argument('-c', '--client-secret', action='store', help='the client', type=str, dest='client_secret', required=True)
    parser.add_argument('-g', '--google-sheet', action='store', help='the google sheet to insert prices into', type=str, dest='google_sheet', required=True)
    parser.add_argument('-s', '--stock-codes', action='store', help='the stock codes to get price for', type=str, dest='stock_codes', nargs='+', required=True)
    parser.add_argument('-t', '--test', action='store_true', help='Perform test', dest='test' )
    args = parser.parse_args().__dict__
StockPriceScraper(**args).insert_prices() | 48.435484 | 183 | 0.652681 | import urllib.request
from datetime import datetime
import string
from argparse import ArgumentParser
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from bs4 import BeautifulSoup
from sortedcontainers import SortedDict
class StockPriceScraper:
    """Scrape prices for a set of stock codes and write them into a Google Sheet.

    One worksheet is created per run, named after today's date, with one column
    per stock code (code in row 1, price in row 2).
    """

    def __init__(self, base_url, stock_codes, google_sheet, client_secret, test):
        self.stock_codes = stock_codes
        self.base_url = base_url
        # In test mode no Google connection is made; insert_prices is then unusable.
        if not test:
            self.sheet = client(client_secret).open(google_sheet)

    def insert_prices(self):
        """Create a dated worksheet and write each stock's code and price into it."""
        worksheet = self.sheet.add_worksheet(title=f'{datetime.today().strftime("%Y-%m-%d")}', rows='2', cols=f'{len(self.stock_codes)}')
        for i, (stock_code, stock_price) in enumerate(self.stock_prices().items()):
            self.update_sheet(worksheet, i, [stock_code, stock_price])

    def stock_prices(self):
        """Return a SortedDict mapping stock code -> scraped price string."""
        stock_prices = {}
        for stock_code in self.stock_codes:
            stock_prices[stock_code] = price(url(self.base_url, stock_code))
        return SortedDict(stock_prices)

    def update_sheet(self, worksheet, i, contents):
        """Write ``contents`` down column ``i`` (0-based) of the worksheet.

        Fix: Google Sheets rows are 1-based, so the row index must be ``j + 1``;
        the previous ``j`` produced the invalid cell reference 'A0' for the
        first entry.
        """
        for j, content in enumerate(contents):
            update_cell(worksheet, cell(string.ascii_uppercase[i], j + 1), content)
def cell(letter, number):
    """Return an A1-style cell reference: column letter followed by row number."""
    return '{}{}'.format(letter, number)
def update_cell(worksheet, cell, info):
    """Write ``info`` into ``cell`` (A1 notation) of the given gspread worksheet."""
    worksheet.update_acell(cell, info)
def client(client_secret):
    """Return an authorized gspread client from a service-account JSON key file."""
    # Spreadsheets-feed scope grants read/write access to the account's sheets.
    scope = ['https://spreadsheets.google.com/feeds']
    creds = ServiceAccountCredentials.from_json_keyfile_name(client_secret, scope)
    return gspread.authorize(creds)
def price(url):
    """Fetch ``url`` and return the stripped text of the price heading.

    Fix: the HTTP response is now closed deterministically via a context
    manager; previously it was leaked.
    """
    with urllib.request.urlopen(url) as page:
        soup = BeautifulSoup(page, 'html.parser')
    # NOTE(review): assumes the price lives in this <h2>; site-specific — confirm.
    return soup.find('h2', attrs={'class':'page-content entry-content'}).text.strip()
def url(base_url, stock_code):
    """Concatenate the base URL and the upper-cased stock code."""
    return base_url + stock_code.upper()
if __name__ == '__main__':
    parser = ArgumentParser(description='Takes stock codes, scrapes prices from website and inserts into a given google sheet')
    # Fix: this argument previously duplicated '-c'/'--client-secret' (with
    # dest='base_url'), which makes argparse raise ArgumentError at startup.
    parser.add_argument('-b', '--base-url', action='store', help='the base url to scrape prices from', type=str, dest='base_url', required=True)
    parser.add_argument('-c', '--client-secret', action='store', help='the client', type=str, dest='client_secret', required=True)
    parser.add_argument('-g', '--google-sheet', action='store', help='the google sheet to insert prices into', type=str, dest='google_sheet', required=True)
    parser.add_argument('-s', '--stock-codes', action='store', help='the stock codes to get price for', type=str, dest='stock_codes', nargs='+', required=True)
    parser.add_argument('-t', '--test', action='store_true', help='Perform test', dest='test' )
    args = parser.parse_args().__dict__
StockPriceScraper(**args).insert_prices() | true | true |
f72b1364a37162fb740d304ac9506ad71a4279ec | 17,209 | py | Python | bitshares/asset.py | ianco/python-bitshares | f9fb23bc32f7bf6ebabb295df8f4056d84f0e859 | [
"MIT"
] | null | null | null | bitshares/asset.py | ianco/python-bitshares | f9fb23bc32f7bf6ebabb295df8f4056d84f0e859 | [
"MIT"
] | null | null | null | bitshares/asset.py | ianco/python-bitshares | f9fb23bc32f7bf6ebabb295df8f4056d84f0e859 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from bitsharesbase import operations
from bitsharesbase.asset_permissions import (
asset_permissions,
force_flag,
test_permissions,
todict,
)
from .blockchainobject import BlockchainObject
from .exceptions import AssetDoesNotExistsException
from .instance import BlockchainInstance
from graphenecommon.asset import Asset as GrapheneAsset
@BlockchainInstance.inject
class Asset(GrapheneAsset):
    """ Deals with Assets of the network.

        :param str Asset: Symbol name or object id of an asset
        :param bool lazy: Lazy loading
        :param bool full: Also obtain bitasset-data and dynamic asset data
        :param bitshares.bitshares.BitShares blockchain_instance: BitShares
            instance
        :returns: All data of an asset
        :rtype: dict

        .. note:: This class comes with its own caching function to reduce the
                  load on the API server. Instances of this class can be
                  refreshed with ``Asset.refresh()``.

    """

    def define_classes(self):
        # Graphene object type id 3 denotes assets (object ids of the form 1.3.x).
        self.type_id = 3

    def __init__(self, *args, **kwargs):
        """Load the asset and post-process permissions, flags and description."""
        super().__init__(*args, **kwargs)
        # Permissions and flags arrive as integer bit masks; expose them as dicts.
        self["permissions"] = todict(self["options"].get("issuer_permissions"))
        self["flags"] = todict(self["options"].get("flags"))
        # Descriptions are frequently JSON-encoded; fall back to the raw string.
        try:
            self["description"] = json.loads(self["options"]["description"])
        except Exception:
            self["description"] = self["options"]["description"]

    @property
    def market_fee_percent(self):
        """Market fee as a fraction (on-chain value is in 1/100 of a percent)."""
        return self["options"]["market_fee_percent"] / 100 / 100

    @property
    def max_market_fee(self):
        """Maximum market fee as an :class:`Amount` denominated in this asset."""
        from .amount import Amount

        return Amount(
            {"amount": self["options"]["max_market_fee"], "asset_id": self["id"]}
        )

    @property
    def feeds(self):
        """List of published :class:`PriceFeed` objects (``None`` for non-bitassets)."""
        from .price import PriceFeed

        self.ensure_full()
        if not self.is_bitasset:
            return
        r = []
        for feed in self["bitasset_data"]["feeds"]:
            r.append(PriceFeed(feed, blockchain_instance=self.blockchain))
        return r

    @property
    def feed(self):
        """Current (median) :class:`PriceFeed` of this bitasset."""
        from .price import PriceFeed

        assert self.is_bitasset
        self.ensure_full()
        return PriceFeed(
            self["bitasset_data"]["current_feed"], blockchain_instance=self.blockchain
        )

    @property
    def calls(self):
        """Shortcut for the first 10 call orders (see :meth:`get_call_orders`)."""
        return self.get_call_orders(10)

    def get_call_orders(self, limit=100):
        """Return up to ``limit`` (<= 100) call positions for this bitasset.

        Each entry contains the borrower account, collateral and debt amounts,
        the call price, the settlement price, and the collateral ratio.
        """
        from .price import Price
        from .account import Account
        from .amount import Amount

        assert limit <= 100
        assert self.is_bitasset
        self.ensure_full()
        r = list()
        bitasset = self["bitasset_data"]
        settlement_price = Price(
            bitasset["current_feed"]["settlement_price"],
            blockchain_instance=self.blockchain,
        )
        ret = self.blockchain.rpc.get_call_orders(self["id"], limit)
        for call in ret[:limit]:
            call_price = Price(call["call_price"], blockchain_instance=self.blockchain)
            collateral_amount = Amount(
                {
                    "amount": call["collateral"],
                    "asset_id": call["call_price"]["base"]["asset_id"],
                },
                blockchain_instance=self.blockchain,
            )
            debt_amount = Amount(
                {
                    "amount": call["debt"],
                    "asset_id": call["call_price"]["quote"]["asset_id"],
                },
                blockchain_instance=self.blockchain,
            )
            r.append(
                {
                    "account": Account(
                        call["borrower"], lazy=True, blockchain_instance=self.blockchain
                    ),
                    "collateral": collateral_amount,
                    "debt": debt_amount,
                    "call_price": call_price,
                    "settlement_price": settlement_price,
                    # Collateral ratio: collateral valued at the settlement price
                    # divided by the outstanding debt.
                    "ratio": (
                        float(collateral_amount)
                        / float(debt_amount)
                        * float(settlement_price)
                    ),
                }
            )
        return r

    @property
    def settlements(self):
        """Shortcut for the first 10 settle orders (see :meth:`get_settle_orders`)."""
        return self.get_settle_orders(10)

    def get_settle_orders(self, limit=100):
        """Return up to ``limit`` (<= 100) pending settle orders for this bitasset."""
        from .account import Account
        from .amount import Amount
        from .utils import formatTimeString

        assert limit <= 100
        assert self.is_bitasset
        r = list()
        ret = self.blockchain.rpc.get_settle_orders(self["id"], limit)
        for settle in ret[:limit]:
            r.append(
                {
                    "account": Account(
                        settle["owner"], lazy=True, blockchain_instance=self.blockchain
                    ),
                    "amount": Amount(
                        settle["balance"], blockchain_instance=self.blockchain
                    ),
                    "date": formatTimeString(settle["settlement_date"]),
                }
            )
        return r

    def halt(self):
        """ Halt this asset from being moved or traded

            Enables white-listing with ``null-account`` as the only authority
            and the asset itself as the only allowed market, and restricts
            transfers.
        """
        from .account import Account

        nullaccount = Account(
            "null-account",  # We set the null-account
            blockchain_instance=self.blockchain,
        )
        flags = {"white_list": True, "transfer_restricted": True}
        options = self["options"]
        # Raises if the issuer permissions do not allow these flags.
        test_permissions(options["issuer_permissions"], flags)
        flags_int = force_flag(options["flags"], flags)
        options.update(
            {
                "flags": flags_int,
                "whitelist_authorities": [nullaccount["id"]],
                "blacklist_authorities": [],
                "whitelist_markets": [self["id"]],
                "blacklist_markets": [],
            }
        )
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def release(
        self,
        whitelist_authorities=[],
        blacklist_authorities=[],
        whitelist_markets=[],
        blacklist_markets=[],
    ):
        """ Release this asset and allow unrestricted transfer, trading,
            etc.

            :param list whitelist_authorities: List of accounts that
                serve as whitelist authorities
            :param list blacklist_authorities: List of accounts that
                serve as blacklist authorities
            :param list whitelist_markets: List of assets to allow
                trading with
            :param list blacklist_markets: List of assets to prevent
                trading with
        """
        from .account import Account

        flags = {"white_list": False, "transfer_restricted": False}
        options = self["options"]
        test_permissions(options["issuer_permissions"], flags)
        flags_int = force_flag(options["flags"], flags)
        options.update(
            {
                "flags": flags_int,
                "whitelist_authorities": [
                    Account(a)["id"] for a in whitelist_authorities
                ],
                "blacklist_authorities": [
                    Account(a)["id"] for a in blacklist_authorities
                ],
                "whitelist_markets": [Asset(a)["id"] for a in whitelist_markets],
                "blacklist_markets": [Asset(a)["id"] for a in blacklist_markets],
            }
        )
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def setoptions(self, flags):
        """ Enable a certain flag.

            Flags:

             * charge_market_fee
             * white_list
             * override_authority
             * transfer_restricted
             * disable_force_settle
             * global_settle
             * disable_confidential
             * witness_fed_asset
             * committee_fed_asset

            :param dict flag: dictionary of flags and boolean
        """
        assert set(flags.keys()).issubset(asset_permissions.keys()), "unknown flag"
        options = self["options"]
        test_permissions(options["issuer_permissions"], flags)
        flags_int = force_flag(options["flags"], flags)
        options.update({"flags": flags_int})
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def enableflag(self, flag):
        """ Enable a certain flag.

            :param str flag: Flag name
        """
        return self.setoptions({flag: True})

    def disableflag(self, flag):
        """ Disable a certain flag.

            :param str flag: Flag name
        """
        return self.setoptions({flag: False})

    def seize(self, from_account, to_account, amount):
        """ Seize amount from an account and send to another

            .. note:: This requires the ``override_authority`` to be
                      set for this asset!

            :param bitshares.account.Account from_account: From this account
            :param bitshares.account.Account to_account: To this account
            :param bitshares.amount.Amount amount: Amount to seize
        """
        options = self["options"]
        if not (options["flags"] & asset_permissions["override_authority"]):
            raise Exception("Insufficient Permissions/flags for seizure!")
        op = operations.Override_transfer(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "from": from_account["id"],
                "to": to_account["id"],
                "amount": amount.json(),
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def add_authorities(self, type, authorities=[]):
        """ Add authorities to an assets white/black list

            :param str type: ``blacklist`` or ``whitelist``
            :param list authorities: List of authorities (Accounts)
        """
        assert type in ["blacklist", "whitelist"]
        assert isinstance(authorities, (list, set))
        from .account import Account

        options = self["options"]
        if type == "whitelist":
            options["whitelist_authorities"].extend(
                [Account(a)["id"] for a in authorities]
            )
        if type == "blacklist":
            options["blacklist_authorities"].extend(
                [Account(a)["id"] for a in authorities]
            )
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def remove_authorities(self, type, authorities=[]):
        """ Remove authorities from an assets white/black list

            :param str type: ``blacklist`` or ``whitelist``
            :param list authorities: List of authorities (Accounts)
        """
        assert type in ["blacklist", "whitelist"]
        assert isinstance(authorities, (list, set))
        from .account import Account

        options = self["options"]
        if type == "whitelist":
            for a in authorities:
                options["whitelist_authorities"].remove(Account(a)["id"])
        if type == "blacklist":
            for a in authorities:
                options["blacklist_authorities"].remove(Account(a)["id"])
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def add_markets(self, type, authorities=[], force_enable=True):
        """ Add markets to an assets white/black list

            :param str type: ``blacklist`` or ``whitelist``
            :param list authorities: List of markets (assets)
            :param bool force_enable: Force enable ``white_list`` flag
        """
        assert type in ["blacklist", "whitelist"]
        assert isinstance(authorities, (list, set))

        options = self["options"]
        if force_enable:
            test_permissions(options["issuer_permissions"], {"white_list": True})
            flags_int = force_flag(options["flags"], {"white_list": True})
            options.update({"flags": flags_int})
        else:
            assert test_permissions(
                options["flags"], ["white_list"]
            ), "whitelist feature not enabled"
        if type == "whitelist":
            options["whitelist_markets"].extend([Asset(a)["id"] for a in authorities])
        if type == "blacklist":
            options["blacklist_markets"].extend([Asset(a)["id"] for a in authorities])
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def remove_markets(self, type, authorities=[]):
        """ Remove markets from an assets white/black list

            :param str type: ``blacklist`` or ``whitelist``
            :param list authorities: List of markets (assets)
        """
        assert type in ["blacklist", "whitelist"]
        assert isinstance(authorities, (list, set))

        options = self["options"]
        if type == "whitelist":
            for a in authorities:
                options["whitelist_markets"].remove(Asset(a)["id"])
        if type == "blacklist":
            for a in authorities:
                options["blacklist_markets"].remove(Asset(a)["id"])
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def set_market_fee(self, percentage_fee, max_market_fee):
        """ Set trading percentage fee

            :param float percentage_fee: Percentage of fee
            :param bitshares.amount.Amount max_market_fee: Max Fee
        """
        assert percentage_fee <= 100 and percentage_fee > 0
        flags = {"charge_market_fee": percentage_fee > 0}
        options = self["options"]
        test_permissions(options["issuer_permissions"], flags)
        flags_int = force_flag(options["flags"], flags)
        options.update(
            {
                "flags": flags_int,
                # On-chain unit is 1/100 of a percent.
                "market_fee_percent": percentage_fee * 100,
                "max_market_fee": int(max_market_fee),
            }
        )
        op = operations.Asset_update(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_options": options,
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")

    def update_feed_producers(self, producers):
        """ Update bitasset feed producers

            :param list producers: List of accounts that are allowed to produce
                 a feed
        """
        assert self.is_bitasset, "Asset needs to be a bitasset/market pegged asset"
        from .account import Account

        op = operations.Asset_update_feed_producers(
            **{
                "fee": {"amount": 0, "asset_id": "1.3.0"},
                "issuer": self["issuer"],
                "asset_to_update": self["id"],
                "new_feed_producers": [Account(a)["id"] for a in producers],
                "extensions": [],
            }
        )
        return self.blockchain.finalizeOp(op, self["issuer"], "active")
| 35.192229 | 88 | 0.534895 |
import json
from bitsharesbase import operations
from bitsharesbase.asset_permissions import (
asset_permissions,
force_flag,
test_permissions,
todict,
)
from .blockchainobject import BlockchainObject
from .exceptions import AssetDoesNotExistsException
from .instance import BlockchainInstance
from graphenecommon.asset import Asset as GrapheneAsset
@BlockchainInstance.inject
class Asset(GrapheneAsset):
def define_classes(self):
self.type_id = 3
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self["permissions"] = todict(self["options"].get("issuer_permissions"))
self["flags"] = todict(self["options"].get("flags"))
try:
self["description"] = json.loads(self["options"]["description"])
except Exception:
self["description"] = self["options"]["description"]
@property
def market_fee_percent(self):
return self["options"]["market_fee_percent"] / 100 / 100
@property
def max_market_fee(self):
from .amount import Amount
return Amount(
{"amount": self["options"]["max_market_fee"], "asset_id": self["id"]}
)
@property
def feeds(self):
from .price import PriceFeed
self.ensure_full()
if not self.is_bitasset:
return
r = []
for feed in self["bitasset_data"]["feeds"]:
r.append(PriceFeed(feed, blockchain_instance=self.blockchain))
return r
@property
def feed(self):
from .price import PriceFeed
assert self.is_bitasset
self.ensure_full()
return PriceFeed(
self["bitasset_data"]["current_feed"], blockchain_instance=self.blockchain
)
@property
def calls(self):
return self.get_call_orders(10)
def get_call_orders(self, limit=100):
from .price import Price
from .account import Account
from .amount import Amount
assert limit <= 100
assert self.is_bitasset
self.ensure_full()
r = list()
bitasset = self["bitasset_data"]
settlement_price = Price(
bitasset["current_feed"]["settlement_price"],
blockchain_instance=self.blockchain,
)
ret = self.blockchain.rpc.get_call_orders(self["id"], limit)
for call in ret[:limit]:
call_price = Price(call["call_price"], blockchain_instance=self.blockchain)
collateral_amount = Amount(
{
"amount": call["collateral"],
"asset_id": call["call_price"]["base"]["asset_id"],
},
blockchain_instance=self.blockchain,
)
debt_amount = Amount(
{
"amount": call["debt"],
"asset_id": call["call_price"]["quote"]["asset_id"],
},
blockchain_instance=self.blockchain,
)
r.append(
{
"account": Account(
call["borrower"], lazy=True, blockchain_instance=self.blockchain
),
"collateral": collateral_amount,
"debt": debt_amount,
"call_price": call_price,
"settlement_price": settlement_price,
"ratio": (
float(collateral_amount)
/ float(debt_amount)
* float(settlement_price)
),
}
)
return r
@property
def settlements(self):
return self.get_settle_orders(10)
def get_settle_orders(self, limit=100):
from .account import Account
from .amount import Amount
from .utils import formatTimeString
assert limit <= 100
assert self.is_bitasset
r = list()
ret = self.blockchain.rpc.get_settle_orders(self["id"], limit)
for settle in ret[:limit]:
r.append(
{
"account": Account(
settle["owner"], lazy=True, blockchain_instance=self.blockchain
),
"amount": Amount(
settle["balance"], blockchain_instance=self.blockchain
),
"date": formatTimeString(settle["settlement_date"]),
}
)
return r
def halt(self):
from .account import Account
nullaccount = Account(
"null-account",
blockchain_instance=self.blockchain,
)
flags = {"white_list": True, "transfer_restricted": True}
options = self["options"]
test_permissions(options["issuer_permissions"], flags)
flags_int = force_flag(options["flags"], flags)
options.update(
{
"flags": flags_int,
"whitelist_authorities": [nullaccount["id"]],
"blacklist_authorities": [],
"whitelist_markets": [self["id"]],
"blacklist_markets": [],
}
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def release(
self,
whitelist_authorities=[],
blacklist_authorities=[],
whitelist_markets=[],
blacklist_markets=[],
):
from .account import Account
flags = {"white_list": False, "transfer_restricted": False}
options = self["options"]
test_permissions(options["issuer_permissions"], flags)
flags_int = force_flag(options["flags"], flags)
options.update(
{
"flags": flags_int,
"whitelist_authorities": [
Account(a)["id"] for a in whitelist_authorities
],
"blacklist_authorities": [
Account(a)["id"] for a in blacklist_authorities
],
"whitelist_markets": [Asset(a)["id"] for a in whitelist_markets],
"blacklist_markets": [Asset(a)["id"] for a in blacklist_markets],
}
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def setoptions(self, flags):
assert set(flags.keys()).issubset(asset_permissions.keys()), "unknown flag"
options = self["options"]
test_permissions(options["issuer_permissions"], flags)
flags_int = force_flag(options["flags"], flags)
options.update({"flags": flags_int})
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def enableflag(self, flag):
return self.setoptions({flag: True})
def disableflag(self, flag):
return self.setoptions({flag: False})
def seize(self, from_account, to_account, amount):
options = self["options"]
if not (options["flags"] & asset_permissions["override_authority"]):
raise Exception("Insufficient Permissions/flags for seizure!")
op = operations.Override_transfer(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"from": from_account["id"],
"to": to_account["id"],
"amount": amount.json(),
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def add_authorities(self, type, authorities=[]):
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
from .account import Account
options = self["options"]
if type == "whitelist":
options["whitelist_authorities"].extend(
[Account(a)["id"] for a in authorities]
)
if type == "blacklist":
options["blacklist_authorities"].extend(
[Account(a)["id"] for a in authorities]
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def remove_authorities(self, type, authorities=[]):
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
from .account import Account
options = self["options"]
if type == "whitelist":
for a in authorities:
options["whitelist_authorities"].remove(Account(a)["id"])
if type == "blacklist":
for a in authorities:
options["blacklist_authorities"].remove(Account(a)["id"])
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def add_markets(self, type, authorities=[], force_enable=True):
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
options = self["options"]
if force_enable:
test_permissions(options["issuer_permissions"], {"white_list": True})
flags_int = force_flag(options["flags"], {"white_list": True})
options.update({"flags": flags_int})
else:
assert test_permissions(
options["flags"], ["white_list"]
), "whitelist feature not enabled"
if type == "whitelist":
options["whitelist_markets"].extend([Asset(a)["id"] for a in authorities])
if type == "blacklist":
options["blacklist_markets"].extend([Asset(a)["id"] for a in authorities])
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def remove_markets(self, type, authorities=[]):
assert type in ["blacklist", "whitelist"]
assert isinstance(authorities, (list, set))
options = self["options"]
if type == "whitelist":
for a in authorities:
options["whitelist_markets"].remove(Asset(a)["id"])
if type == "blacklist":
for a in authorities:
options["blacklist_markets"].remove(Asset(a)["id"])
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def set_market_fee(self, percentage_fee, max_market_fee):
assert percentage_fee <= 100 and percentage_fee > 0
flags = {"charge_market_fee": percentage_fee > 0}
options = self["options"]
test_permissions(options["issuer_permissions"], flags)
flags_int = force_flag(options["flags"], flags)
options.update(
{
"flags": flags_int,
"market_fee_percent": percentage_fee * 100,
"max_market_fee": int(max_market_fee),
}
)
op = operations.Asset_update(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_options": options,
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
def update_feed_producers(self, producers):
assert self.is_bitasset, "Asset needs to be a bitasset/market pegged asset"
from .account import Account
op = operations.Asset_update_feed_producers(
**{
"fee": {"amount": 0, "asset_id": "1.3.0"},
"issuer": self["issuer"],
"asset_to_update": self["id"],
"new_feed_producers": [Account(a)["id"] for a in producers],
"extensions": [],
}
)
return self.blockchain.finalizeOp(op, self["issuer"], "active")
| true | true |
f72b1485c5de36b36b7c1db7dbc892f1eac0ef05 | 7,375 | py | Python | recipes/Python/576780_Timeout_for_nearly_any_callable/recipe-576780.py | tdiprima/code | 61a74f5f93da087d27c70b2efe779ac6bd2a3b4f | [
"MIT"
] | 2,023 | 2017-07-29T09:34:46.000Z | 2022-03-24T08:00:45.000Z | recipes/Python/576780_Timeout_for_nearly_any_callable/recipe-576780.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 32 | 2017-09-02T17:20:08.000Z | 2022-02-11T17:49:37.000Z | recipes/Python/576780_Timeout_for_nearly_any_callable/recipe-576780.py | unhacker/code | 73b09edc1b9850c557a79296655f140ce5e853db | [
"MIT"
] | 780 | 2017-07-28T19:23:28.000Z | 2022-03-25T20:39:41.000Z | #!/usr/bin/env python
'''This module exposes function timelimited and two
classes TimeLimited and TimeLimitExpired.
Function timelimited can be used to invoke any
callable object with a time limit.
Class TimeLimited wraps any callable object into a
time limited callable with an equivalent signature.
Beware, any critical resources like locks, memory or
files, etc. acquired or opened by the callable may
not be released respectively closed. Therefore,
time limiting such callables may cause deadlock or
leaks or both.
No signals or timers are affected and any errors are
propagated as usual. Decorators and with statements
are avoided for backward compatibility.
Tested with Python 2.2.3, 2.3.7, 2.4.5, 2.5.2, 2.6.2
or 3.0.1 on CentOS 4.7, MacOS X 10.4.11 Tiger (Intel)
and 10.3.9 Panther (PPC), Solaris 10 and Windows XP.
Note, for Python 3.0 and beyond, replace ', e:' with
' as e:' in the 3 except lines marked #XXX below or
run the Python 2to3 translator on this file, see
<http://docs.python.org/dev/3.1/library/2to3.html>
The core of the function timelimited is copied from
<http://code.activestate.com/recipes/473878/>.
'''
__all__ = ('timelimited', 'TimeLimited', 'TimeLimitExpired')
__version__ = '4 2009-06-08'
from threading import Thread

# The #PYCHOK marks are intended for postprocessing
# by <http://code.activestate.com/recipes/546532/>

try:  # UGLY! Python 2 private method __stop (name mangled)
    _Thread_stop = Thread._Thread__stop  #PYCHOK false
except AttributeError:
    # Python 3 names the hook plain ``_stop``; very recent versions may
    # not expose it at all, in which case force-stopping is a no-op.
    _Thread_stop = getattr(Thread, '_stop', lambda t: None)  #PYCHOK expected


class TimeLimitExpired(Exception):
    '''Exception raised when time limit expires.
    '''
    pass


def timelimited(timeout, function, *args, **kwds):
    '''Invoke the given function with the positional and
       keyword arguments under a time constraint.

       The function result is returned if the function
       finishes within the given time limit, otherwise
       a TimeLimitExpired error is raised.

       The timeout value is in seconds and has the same
       resolution as the standard time.time function.  A
       timeout value of None invokes the given function
       without imposing any time limit.

       A TypeError is raised if function is not callable,
       a ValueError is raised for negative timeout values
       and any errors occurring inside the function are
       passed along as-is.
    '''
    class _Timelimited(Thread):
        _error_ = TimeLimitExpired  # assume timeout
        _result_ = None

        def run(self):
            try:
                self._result_ = function(*args, **kwds)
                self._error_ = None
            except Exception as e:  # 'as' works on Python 2.6+ and 3.x
                self._error_ = e

        def _force_stop(self):
            # UGLY! force the thread to stop by (ab)using the private
            # __stop/_stop method.  Deliberately NOT named ``_stop``:
            # on Python 3, Thread.join() calls self._stop() internally,
            # so overriding that name recurses endlessly.  Newer Pythons
            # may refuse to stop a live thread; that is best effort and
            # any error is ignored.
            if self.is_alive():
                try:
                    _Thread_stop(self)
                except Exception:
                    pass

    if not hasattr(function, '__call__'):
        raise TypeError('function not callable: %s' % repr(function))

    if timeout is None:  # shortcut: no limit, run inline
        return function(*args, **kwds)

    if timeout < 0:
        raise ValueError('timeout invalid: %s' % repr(timeout))

    t = _Timelimited()
    t.start()
    t.join(timeout)

    if t._error_ is None:
        return t._result_

    if t._error_ is TimeLimitExpired:
        t._force_stop()
        raise TimeLimitExpired('timeout %r for %s' % (timeout, repr(function)))
    else:
        raise t._error_
class TimeLimited(object):
    '''Callable wrapper that imposes a time limit on every invocation.

       Build it once from any callable and an optional timeout in
       seconds, then call the wrapper exactly like the original:

         limited = TimeLimited(func, seconds)
         try:
             result = limited(...)
         except TimeLimitExpired:
             ...  # ran out of time

       The ``timeout`` property can be read or re-assigned at any time.
    '''
    def __init__(self, function, timeout=None):
        '''Remember the wrapped callable and the timeout in seconds
           (see function timelimited for the argument semantics).
        '''
        self._function = function
        self._timeout = timeout

    def __call__(self, *args, **kwds):
        '''Invoke the wrapped callable under the current time limit
           (see function timelimited for the behavior).
        '''
        return timelimited(self._timeout, self._function, *args, **kwds)

    def __str__(self):
        return '<{0} of {1!r}, timeout={2}>'.format(
            repr(self)[1:-1], self._function, self._timeout)

    def _get_timeout(self):
        return self._timeout

    def _set_timeout(self, timeout):
        self._timeout = timeout

    timeout = property(_get_timeout, _set_timeout, None,
                       'Property to get and set the timeout value')
if __name__ == '__main__':
    # Self-test harness: 8 checks covering results, timeouts, argument
    # validation, error propagation and thread cleanup.
    import sys, time, threading  #PYCHOK expected

    _format = '%s test %%d/8 %%s in Python %s: %%s' % (
        sys.argv[0], sys.version.split()[0])
    _tests = 0

    def passed(arg='OK'):
        '''Count and report one passing test.'''
        global _tests
        _tests += 1
        print(_format % (_tests, 'passed', arg))

    def failed(fmt, *args):
        '''Count and report one failing test.'''
        global _tests
        _tests += 1
        if args:
            t = fmt % args
        else:
            t = fmt
        print(_format % (_tests, 'failed', t))

    def check(timeout, sleep, result, arg='OK'):
        '''Verify a timelimited time.sleep call against the expectation.'''
        if timeout > sleep:
            x = None  # time.sleep(0) result
        elif isinstance(result, TimeLimitExpired):
            x = result
        else:
            x = TimeLimitExpired
        if result is x:
            passed(arg)
        else:
            failed('expected %r, but got %r', x, result)

    # check timelimited function
    for t, s in ((2.0, 1),
                 (1.0, 20)):  # note, 20!
        try:
            r = timelimited(t, time.sleep, s)
        except Exception as e:  # 'as' works on Python 2.6+ and 3.x
            r = e
        check(t, s, r, timelimited)

    # check TimeLimited class and property
    f = TimeLimited(time.sleep)
    for t, s in ((2.0, 1),
                 (1.0, 20)):  # note, 20!
        f.timeout = t
        try:
            r = f(s)
        except Exception as e:  # 'as' works on Python 2.6+ and 3.x
            r = e
        check(t, s, r, f)

    # check TypeError
    try:
        t = timelimited(0, None)
        failed('no %r', TypeError)
    except TypeError:
        passed(TypeError)
    except:
        failed('expected %r', TypeError)

    # check ValueError
    try:
        t = timelimited(-10, time.time)
        failed('no %r', ValueError)
    except ValueError:
        passed(ValueError)
    except:
        failed('expected %r', ValueError)

    # check error passing from thread
    try:
        r = timelimited(1, lambda x: 1/x, 0)
        failed('no %r', ZeroDivisionError)
    except ZeroDivisionError:
        passed(ZeroDivisionError)
    except:
        failed('expected %r', ZeroDivisionError)

    # check that all created threads stopped
    for t in threading.enumerate():
        # is_alive() exists since Python 2.6; isAlive() was removed in 3.9
        if t.is_alive() and repr(t).startswith('<_Timelimited('):
            failed('thread %r still alive', t)
            break
    else:
        passed('all _Timelimited threads stopped')
| 29.979675 | 91 | 0.603254 |
'''This module exposes function timelimited and two
classes TimeLimited and TimeLimitExpired.
Function timelimited can be used to invoke any
callable object with a time limit.
Class TimeLimited wraps any callable object into a
time limited callable with an equivalent signature.
Beware, any critical resources like locks, memory or
files, etc. acquired or opened by the callable may
not be released respectively closed. Therefore,
time limiting such callables may cause deadlock or
leaks or both.
No signals or timers are affected and any errors are
propagated as usual. Decorators and with statements
are avoided for backward compatibility.
Tested with Python 2.2.3, 2.3.7, 2.4.5, 2.5.2, 2.6.2
or 3.0.1 on CentOS 4.7, MacOS X 10.4.11 Tiger (Intel)
and 10.3.9 Panther (PPC), Solaris 10 and Windows XP.
Note, for Python 3.0 and beyond, replace ', e:' with
' as e:' in the 3 except lines marked #XXX below or
run the Python 2to3 translator on this file, see
<http://docs.python.org/dev/3.1/library/2to3.html>
The core of the function timelimited is copied from
<http://code.activestate.com/recipes/473878/>.
'''
__all__ = ('timelimited', 'TimeLimited', 'TimeLimitExpired')
__version__ = '4 2009-06-08'
from threading import Thread
_stop
except AttributeError:
_Thread_stop = Thread._stop
class TimeLimitExpired(Exception):
'''Exception raised when time limit expires.
'''
pass
def timelimited(timeout, function, *args, **kwds):
'''Invoke the given function with the positional and
keyword arguments under a time constraint.
The function result is returned if the function
finishes within the given time limit, otherwise
a TimeLimitExpired error is raised.
The timeout value is in seconds and has the same
resolution as the standard time.time function. A
timeout value of None invokes the given function
without imposing any time limit.
A TypeError is raised if function is not callable,
a ValueError is raised for negative timeout values
and any errors occurring inside the function are
passed along as-is.
'''
class _Timelimited(Thread):
_error_ = TimeLimitExpired
_result_ = None
def run(self):
try:
self._result_ = function(*args, **kwds)
self._error_ = None
except Exception, e:
self._error_ = e
def _stop(self):
if self.isAlive():
_Thread_stop(self)
if not hasattr(function, '__call__'):
raise TypeError('function not callable: %s' % repr(function))
if timeout is None:
return function(*args, **kwds)
if timeout < 0:
raise ValueError('timeout invalid: %s' % repr(timeout))
t = _Timelimited()
t.start()
t.join(timeout)
if t._error_ is None:
return t._result_
if t._error_ is TimeLimitExpired:
t._stop()
raise TimeLimitExpired('timeout %r for %s' % (timeout, repr(function)))
else:
raise t._error_
class TimeLimited(object):
'''Create a time limited version of any callable.
For example, to limit function f to t seconds,
first create a time limited version of f.
from timelimited import *
f_t = TimeLimited(f, t)
Then, instead of invoking f(...), use f_t like
try:
r = f_t(...)
except TimeLimitExpired:
r = ... # timed out
'''
def __init__(self, function, timeout=None):
'''See function timelimited for a description
of the arguments.
'''
self._function = function
self._timeout = timeout
def __call__(self, *args, **kwds):
'''See function timelimited for a description
of the behavior.
'''
return timelimited(self._timeout, self._function, *args, **kwds)
def __str__(self):
return '<%s of %r, timeout=%s>' % (repr(self)[1:-1], self._function, self._timeout)
def _timeout_get(self):
return self._timeout
def _timeout_set(self, timeout):
self._timeout = timeout
timeout = property(_timeout_get, _timeout_set, None,
'Property to get and set the timeout value')
if __name__ == '__main__':
import sys, time, threading
_format = '%s test %%d/8 %%s in Python %s: %%s' % (
sys.argv[0], sys.version.split()[0])
_tests = 0
def passed(arg='OK'):
global _tests
_tests += 1
print(_format % (_tests, 'passed', arg))
def failed(fmt, *args):
global _tests
_tests += 1
if args:
t = fmt % args
else:
t = fmt
print(_format % (_tests, 'failed', t))
def check(timeout, sleep, result, arg='OK'):
if timeout > sleep:
x = None
elif isinstance(result, TimeLimitExpired):
x = result
else:
x = TimeLimitExpired
if result is x:
passed(arg)
else:
failed('expected %r, but got %r', x, result)
for t, s in ((2.0, 1),
(1.0, 20)):
try:
r = timelimited(t, time.sleep, s)
except Exception, e:
r = e
check(t, s, r, timelimited)
f = TimeLimited(time.sleep)
for t, s in ((2.0, 1),
(1.0, 20)):
f.timeout = t
try:
r = f(s)
except Exception, e:
r = e
check(t, s, r, f)
try:
t = timelimited(0, None)
failed('no %r', TypeError)
except TypeError:
passed(TypeError)
except:
failed('expected %r', TypeError)
try:
t = timelimited(-10, time.time)
failed('no %r', ValueError)
except ValueError:
passed(ValueError)
except:
failed('expected %r', ValueError)
try:
r = timelimited(1, lambda x: 1/x, 0)
failed('no %r', ZeroDivisionError)
except ZeroDivisionError:
passed(ZeroDivisionError)
except:
failed('expected %r', ZeroDivisionError)
for t in threading.enumerate():
if t.isAlive() and repr(t).startswith('<_Timelimited('):
failed('thread %r still alive', t)
break
else:
passed('all _Timelimited threads stopped')
| false | true |
f72b17196b95f01f3e9c02c59d337099f3b510e2 | 18,401 | py | Python | fedlearner/trainer/estimator.py | bruinxiong/fedlearner | 9cdeaf44b279acedd5bc88bbffd4a390697b06aa | [
"Apache-2.0"
] | 1 | 2020-12-02T09:51:29.000Z | 2020-12-02T09:51:29.000Z | fedlearner/trainer/estimator.py | bruinxiong/fedlearner | 9cdeaf44b279acedd5bc88bbffd4a390697b06aa | [
"Apache-2.0"
] | null | null | null | fedlearner/trainer/estimator.py | bruinxiong/fedlearner | 9cdeaf44b279acedd5bc88bbffd4a390697b06aa | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import os
import logging
import time
import tensorflow.compat.v1 as tf
from tensorflow.compat import as_str_any
from tensorflow.compat.v1.train import Optimizer
from tensorflow.compat.v1.estimator import ModeKeys
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from fedlearner.common.mysql_client import DBClient
from fedlearner.common.summary_hook import SummaryHook
from fedlearner.trainer import patch # pylint: disable=unused-import
from fedlearner.common import metrics
from fedlearner.data_join.common import get_kvstore_config
SYNC_PATH = '/sync/'
DATA_CHECKPOINT_INIT_VALUE = "_init_value"
class DataCheckpointSaverListener(tf.estimator.CheckpointSaverListener):
    """Saver listener that stores the trainer master's data-block
    checkpoint inside the TF graph so it is persisted together with
    every model checkpoint."""
    def __init__(self, tm, appid):
        # tm: trainer-master client queried for the data-block checkpoint.
        # appid: application id the checkpoint belongs to.
        self._trainer_master = tm
        self._application_id = appid
    def begin(self):
        # Create a string variable plus an assign op so the list of
        # consumed data blocks can be written into the graph before each
        # checkpoint save.
        ckpt = tf.placeholder(tf.string, name="data_checkpoint_plhd")
        var_tmp = tf.Variable(DATA_CHECKPOINT_INIT_VALUE, \
            name="data_checkpoint")
        self._ckpt_tensor = var_tmp.assign(ckpt)
    def before_save(self, session, global_step_value):
        # Fetch the current data-block checkpoint and, if non-empty,
        # store it (comma-joined) via the assign op built in begin().
        logging.info('About to write a checkpoint at step %d', \
            global_step_value)
        data_checkpoint = self._trainer_master.get_data_block_checkpoint(
            self._application_id)
        #if empty block from checkpoint fetched due to exception or
        # master not ready, no need to save.
        if len(data_checkpoint) == 0:
            return
        res = session.run(self._ckpt_tensor, {"data_checkpoint_plhd:0":
                                              ",".join(data_checkpoint)})
        logging.info("data checkpoint saved result: %s", res)
class FLModel(object):
    """Graph-building helper handed to the user's ``model_fn``.

    Records bridge send/receive ops so tensors (and, on request, their
    gradients) can be exchanged with the peer party, and assembles the
    resulting ``tf.estimator.EstimatorSpec``.
    """
    def __init__(self, role, bridge, example_ids, exporting=False):
        # role: 'leader' or 'follower' (see verify_example_ids).
        # bridge: transport providing send_op()/receive_op().
        # example_ids: per-batch example-id tensor; send/recv ops are
        #   gated on it via tf.control_dependencies.
        # exporting: True when building a serving graph (PREDICT mode).
        self._role = role
        self._bridge = bridge
        self._example_ids = example_ids
        self._exporting = exporting
        self._train_ops = []  # ops that must run with every train step
        self._recvs = []      # (name, tensor, require_grad) received
        self._sends = []      # (name, tensor, require_grad) sent
        self._outputs = []
    @property
    def train_ops(self):
        """Ops recorded by send/recv/verify that must run each step."""
        return self._train_ops
    @property
    def sends(self):
        """(name, tensor) pairs sent to the peer."""
        return [(n, t) for n, t, _ in self._sends]
    @property
    def recvs(self):
        """(name, tensor) pairs received from the peer."""
        return [(n, t) for n, t, _ in self._recvs]
    def verify_example_ids(self):
        """Cross-check that both parties iterate identical example ids.

        The leader sends a hash of its ids; the follower receives the
        peer's hash and appends an assert-equal op to the train ops.
        """
        tensor = tf.strings.to_hash_bucket_fast(self._example_ids, 2**31 - 1)
        if self._role == 'leader':
            self.send('_verify_example_ids', tensor)
        else:
            recv_tensor = self.recv('_verify_example_ids', tensor.dtype)
            op = tf.assert_equal(tensor, recv_tensor)
            self._train_ops.append(op)
    def send(self, name, tensor, require_grad=False):
        """Register a send of ``tensor`` to the peer under ``name``.

        When ``require_grad`` is True, also registers receiving the
        peer-computed gradient (named ``name + '_grad'``) and returns
        that tensor; otherwise returns None.
        """
        with tf.control_dependencies([self._example_ids]):
            op = self._bridge.send_op(name, tensor)
            self._train_ops.append(op)
        self._sends.append((name, tensor, require_grad))
        if require_grad:
            return self.recv(name + '_grad', tensor.dtype)
        return None
    def recv(self, name, dtype=tf.float32, require_grad=False):
        """Register receiving tensor ``name`` from the peer.

        ``require_grad`` marks the tensor so minimize() sends its
        gradient back. Returns the received tensor.
        """
        with tf.control_dependencies([self._example_ids]):
            tensor = self._bridge.receive_op(name, dtype)
        self._recvs.append((name, tensor, require_grad))
        return tensor
    def minimize(self,
                 optimizer,
                 loss,
                 global_step=None,
                 var_list=None,
                 gate_gradients=Optimizer.GATE_OP,
                 aggregation_method=None,
                 colocate_gradients_with_ops=False,
                 name=None,
                 grad_loss=None):
        """Like ``Optimizer.minimize``, but also back-propagates to the peer.

        Gradients of received tensors marked ``require_grad`` are sent
        back as ``<name>_grad``; the remaining gradients are applied
        locally via ``optimizer.apply_gradients``.
        """
        # Received tensors whose gradients must flow back to the peer.
        recv_grads = [i for i in self._recvs if i[2]]
        if var_list is None:
            var_list = \
                tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) + \
                tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)
        # Prepend the recv tensors so their gradients occupy the first
        # len(recv_grads) slots of grads_and_vars.
        var_list = [v for _, v, _ in recv_grads] + var_list
        grads_and_vars = optimizer.compute_gradients(
            loss,
            var_list=var_list,
            gate_gradients=gate_gradients,
            aggregation_method=aggregation_method,
            colocate_gradients_with_ops=colocate_gradients_with_ops,
            grad_loss=grad_loss)
        # Ship each peer gradient back under '<name>_grad'.
        send_grads = grads_and_vars[:len(recv_grads)]
        for (n, _, _), (grad, _) in zip(recv_grads, send_grads):
            if grad is not None:
                self.send(n + '_grad', grad)
        # Apply only the gradients of the local variables.
        if grads_and_vars[len(recv_grads):]:
            train_op = optimizer.apply_gradients(
                grads_and_vars[len(recv_grads):],
                global_step=global_step,
                name=name)
        else:
            train_op = tf.no_op()
        return train_op
    def _append_summary_hook(self, training_hooks):
        """Append the configured SummaryHook (if any) to the hook list."""
        if not training_hooks:
            training_hooks = []
        summary_hook = SummaryHook.get_hook()
        if summary_hook:
            training_hooks.append(summary_hook)
        return training_hooks
    def make_spec(self,
                  mode,
                  predictions=None,
                  loss=None,
                  train_op=None,
                  eval_metric_ops=None,
                  training_chief_hooks=None,
                  training_hooks=None,
                  evaluation_hooks=None,
                  prediction_hooks=None):
        """Build the EstimatorSpec for ``mode``.

        In TRAIN mode the user's train_op is grouped with the recorded
        bridge ops and the summary hook is appended.
        """
        if isinstance(predictions, tf.Tensor):
            predictions = {'output': predictions}
        if mode == ModeKeys.TRAIN:
            train_op = tf.group([train_op] + self._train_ops)
            training_hooks = self._append_summary_hook(training_hooks)
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops,
            training_chief_hooks=training_chief_hooks,
            training_hooks=training_hooks,
            evaluation_hooks=evaluation_hooks,
            prediction_hooks=prediction_hooks)
class FLEstimator(object):
    """Estimator-like driver for federated train/evaluate/export.

    Coordinates the bridge (peer connection), the trainer master
    (data-block source) and standard TF1 monitored sessions.
    """

    def __init__(self,
                 model_fn,
                 bridge,
                 trainer_master,
                 role,
                 worker_rank=0,
                 application_id=None,
                 cluster_spec=None):
        # model_fn: user function (model, features, labels, mode) -> spec.
        # role: 'leader' or 'follower'; worker_rank 0 is the chief.
        self._model_fn = model_fn
        self._bridge = bridge
        self._trainer_master = trainer_master
        self._role = role
        self._worker_rank = worker_rank
        self._cluster_spec = cluster_spec
        self._application_id = application_id

    def _get_features_and_labels_from_input_fn(self, input_fn, mode):
        """Build the dataset and return one (features, labels) tensor pair."""
        dataset = input_fn(self._bridge, self._trainer_master)
        features, labels = dataset.make_one_shot_iterator().get_next()
        return features, labels

    def _get_model_spec(self, features, labels, mode):
        """Run the user model_fn through an FLModel; returns (spec, model)."""
        model = FLModel(self._role, self._bridge,
                        features.get('example_id', None),
                        exporting=(mode == ModeKeys.PREDICT))
        spec = self._model_fn(model, features, labels, mode)
        return spec, model

    def _restore_datablock(self, blk_ids):
        """Restore consumed data-block state on the trainer master.

        Only the chief worker (rank 0) restores; other ranks return
        True immediately.
        """
        # only chief worker restores from checkpoint.
        if self._worker_rank != 0 or blk_ids is None:
            return True
        block_id_str = as_str_any(blk_ids)
        block_ids = []
        if block_id_str != DATA_CHECKPOINT_INIT_VALUE:
            block_ids = block_id_str.split(",")
        logging.info("restore: %s", block_id_str)
        return self._trainer_master.restore_data_block_checkpoint(
            self._application_id, block_ids)

    def _cheif_barriar(self, is_chief=False, sync_times=300):
        """Kvstore-based barrier: every worker writes a sync flag; the
        chief polls until all REPLICA_NUM workers reported or
        ``sync_times`` polls elapsed.

        NOTE: method name typos ('cheif'/'barriar') are kept for caller
        compatibility.
        """
        # Cast to int: os.environ values are strings, and comparing
        # ``len(sync_list) < worker_replicas`` against a str raises
        # TypeError on Python 3.
        worker_replicas = int(os.environ.get('REPLICA_NUM', 0))
        kvstore_type = os.environ.get('KVSTORE_TYPE', 'etcd')
        db_database, db_addr, db_username, db_password, _ = \
            get_kvstore_config(kvstore_type)
        kvstore_client = DBClient(db_database,
                                  db_addr,
                                  db_username,
                                  db_password,
                                  SYNC_PATH)
        sync_path = '%s/%s' % (os.environ['APPLICATION_ID'],
                               os.environ['WORKER_RANK'])
        logging.info('Creating a sync flag at %s', sync_path)
        kvstore_client.set_data(sync_path, "1")
        if is_chief:
            for _ in range(sync_times):
                sync_list = kvstore_client.get_prefix_kvs(
                    os.environ['APPLICATION_ID'])
                logging.info('Sync file pattern is: %s', sync_list)
                if len(sync_list) < worker_replicas:
                    logging.info('Count of ready workers is %d',
                                 len(sync_list))
                    time.sleep(6)
                else:
                    break

    def train(self,
              input_fn,
              checkpoint_path=None,
              save_checkpoint_steps=None,
              save_checkpoint_secs=None):
        """Run the training loop until the input is exhausted.

        Builds a (possibly distributed) graph, restores the data-block
        checkpoint on the chief, and drives one bridge start/commit
        cycle per training step. Returns self.
        """
        if self._cluster_spec is not None:
            device_fn = tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % self._worker_rank,
                merge_devices=True,
                cluster=self._cluster_spec)
            cluster_def = self._cluster_spec.as_cluster_def()
            local_address = self._cluster_spec.job_tasks('worker')[
                self._worker_rank]
            # Local in-process server the monitored session connects to.
            server = tf.train.Server(tf.train.ClusterSpec(
                {'local': {
                    0: local_address
                }}),
                job_name='local',
                task_index=0)
            target = 'grpc://' + local_address
        else:
            device_fn = None
            cluster_def = None
            target = None
        config = tf.ConfigProto(cluster_def=cluster_def)
        config.inter_op_parallelism_threads = 4
        config.intra_op_parallelism_threads = 4
        config.experimental.share_session_state_in_clusterspec_propagation \
            = True
        tf.config.set_soft_device_placement(False)

        with tf.Graph().as_default() as g:
            with tf.device(device_fn):
                features, labels = self._get_features_and_labels_from_input_fn(
                    input_fn, ModeKeys.TRAIN)
                spec, _ = self._get_model_spec(features, labels, ModeKeys.TRAIN)

            # Explicitly add a Saver
            if not tf.get_collection(tf.GraphKeys.SAVERS):
                saver = tf.train.Saver(
                    sharded=True,
                    defer_build=True,
                    save_relative_paths=True)  # Must set for portability
                tf.add_to_collection(tf.GraphKeys.SAVERS, saver)

            # Persist the data-block checkpoint alongside model checkpoints.
            listener = DataCheckpointSaverListener(self._trainer_master,
                                                   self._application_id)
            saver_hook = tf.estimator.CheckpointSaverHook(
                checkpoint_path, save_secs=save_checkpoint_secs,
                save_steps=save_checkpoint_steps, listeners=[listener])
            self._bridge.connect()

            try:
                with tf.train.MonitoredTrainingSession(
                        master=target,
                        config=config,
                        is_chief=(self._worker_rank == 0),
                        chief_only_hooks=[saver_hook],
                        checkpoint_dir=checkpoint_path,
                        save_checkpoint_steps=save_checkpoint_steps,
                        save_checkpoint_secs=save_checkpoint_secs,
                        hooks=spec.training_hooks) as sess:
                    iter_id = 0
                    data_checkpoint_value = None
                    if hasattr(saver_hook, "data_checkpoint"):
                        data_checkpoint_value = saver_hook.data_checkpoint
                    if not self._restore_datablock(data_checkpoint_value):
                        raise ValueError("Restore data checkpoint error")
                    while not sess.should_stop():
                        # One federated step: open a bridge iteration,
                        # run the train op, then commit the iteration.
                        self._bridge.start(iter_id)
                        logging.debug('after bridge start.')
                        start_time = time.time()
                        sess.run(spec.train_op, feed_dict={})
                        end_time = time.time()
                        metrics.emit_timer(
                            name="iter_timer",
                            value=end_time-start_time,
                            tags={})
                        logging.debug('after session run.')
                        self._bridge.commit()
                        logging.debug('after bridge commit.')
                        iter_id += 1
            finally:
                self._bridge.terminate()
        return self

    def evaluate(self,
                 input_fn,
                 checkpoint_path=None):
        """Evaluate the latest checkpoint over the whole eval input.

        Returns the final metric values (including average loss and the
        global step). Raises ValueError when no checkpoint exists.
        """
        if not tf.train.latest_checkpoint(checkpoint_path):
            raise ValueError(
                "Could not find trained model at %s" % checkpoint_path)

        with tf.Graph().as_default():
            features, labels = self._get_features_and_labels_from_input_fn(
                input_fn, ModeKeys.EVAL)
            spec, model = self._get_model_spec(features, labels, ModeKeys.EVAL)

            # Track the average loss in default
            eval_metric_ops = spec.eval_metric_ops or {}
            if model_fn_lib.LOSS_METRIC_KEY not in eval_metric_ops:
                loss_metric = tf.metrics.mean(spec.loss)
                eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric

            # Create the real eval op
            update_ops, eval_dict = _extract_metric_update_ops(eval_metric_ops)
            update_ops.extend(model._train_ops)
            eval_op = tf.group(*update_ops)

            # Also track the global step
            if tf.GraphKeys.GLOBAL_STEP in eval_dict:
                raise ValueError(
                    'Metric with name `global_step` is not allowed, because '
                    'Estimator already defines a default metric with the '
                    'same name.')
            eval_dict[tf.GraphKeys.GLOBAL_STEP] = \
                tf.train.get_or_create_global_step()

            # Prepare the session creator.
            scaffold = tf.train.Scaffold()
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=scaffold,
                checkpoint_dir=checkpoint_path)

            # Prepare hooks
            all_hooks = list(spec.evaluation_hooks) or []
            final_ops_hook = tf.train.FinalOpsHook(eval_dict)
            all_hooks.append(final_ops_hook)

            # Evaluate over dataset
            self._bridge.connect()
            try:
                with tf.train.MonitoredSession(
                        session_creator=session_creator, hooks=all_hooks) as sess:
                    if not self._restore_datablock(DATA_CHECKPOINT_INIT_VALUE):
                        raise ValueError("Restore data checkpoint error")
                    iter_id = 0
                    while not sess.should_stop():
                        self._bridge.start(iter_id)
                        logging.debug('after bridge start.')
                        start_time = time.time()
                        sess.run(eval_op)
                        end_time = time.time()
                        metrics.emit_timer(
                            name="iter_timer",
                            value=end_time-start_time,
                            tags={})
                        logging.debug('after session run.')
                        self._bridge.commit()
                        logging.debug('after bridge commit.')
                        iter_id += 1
            finally:
                self._bridge.terminate()

            # Print result
            logging.info('Metrics for iteration %d: %s',
                         iter_id, _dict_to_str(final_ops_hook.final_ops_values))
            return final_ops_hook.final_ops_values

    def export_saved_model(self,
                           export_dir_base,
                           serving_input_receiver_fn,
                           checkpoint_path=None):
        """Export the latest checkpoint as a SavedModel for serving.

        The exported graph must be local only: a model that still sends
        or receives tensors over the bridge cannot be exported.
        """
        with tf.Graph().as_default():
            receiver = serving_input_receiver_fn()
            spec, model = self._get_model_spec(receiver.features, None,
                                               ModeKeys.PREDICT)
            assert not model.sends, "Exported model cannot send"
            assert not model.recvs, "Exported model cannot receive"

            with tf.Session() as sess:
                saver_for_restore = tf.train.Saver(sharded=True)
                saver_for_restore.restore(
                    sess, tf.train.latest_checkpoint(checkpoint_path))
                tf.saved_model.simple_save(sess, export_dir_base,
                                           receiver.receiver_tensors,
                                           spec.predictions, None)
        return export_dir_base
def _extract_metric_update_ops(eval_dict):
"""Separate update operations from metric value operations."""
update_ops = []
value_ops = {}
# Sort metrics lexicographically so graph is identical every time.
for name in sorted(eval_dict.keys()):
metric_tensor, update_op = eval_dict[name]
value_ops[name] = metric_tensor
update_ops.append(update_op)
return update_ops, value_ops
def _dict_to_str(dictionary):
"""Get a `str` representation of a `dict`.
Args:
dictionary: The `dict` to be represented as `str`.
Returns:
A `str` representing the `dictionary`.
"""
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(dictionary.items())
if not isinstance(v, bytes))
| 39.915401 | 80 | 0.577469 |
import os
import logging
import time
import tensorflow.compat.v1 as tf
from tensorflow.compat import as_str_any
from tensorflow.compat.v1.train import Optimizer
from tensorflow.compat.v1.estimator import ModeKeys
from tensorflow_estimator.python.estimator import model_fn as model_fn_lib
from fedlearner.common.mysql_client import DBClient
from fedlearner.common.summary_hook import SummaryHook
from fedlearner.trainer import patch
from fedlearner.common import metrics
from fedlearner.data_join.common import get_kvstore_config
SYNC_PATH = '/sync/'
DATA_CHECKPOINT_INIT_VALUE = "_init_value"
class DataCheckpointSaverListener(tf.estimator.CheckpointSaverListener):
def __init__(self, tm, appid):
self._trainer_master = tm
self._application_id = appid
def begin(self):
ckpt = tf.placeholder(tf.string, name="data_checkpoint_plhd")
var_tmp = tf.Variable(DATA_CHECKPOINT_INIT_VALUE, \
name="data_checkpoint")
self._ckpt_tensor = var_tmp.assign(ckpt)
def before_save(self, session, global_step_value):
logging.info('About to write a checkpoint at step %d', \
global_step_value)
data_checkpoint = self._trainer_master.get_data_block_checkpoint(
self._application_id)
if len(data_checkpoint) == 0:
return
res = session.run(self._ckpt_tensor, {"data_checkpoint_plhd:0":
",".join(data_checkpoint)})
logging.info("data checkpoint saved result: %s", res)
class FLModel(object):
def __init__(self, role, bridge, example_ids, exporting=False):
self._role = role
self._bridge = bridge
self._example_ids = example_ids
self._exporting = exporting
self._train_ops = []
self._recvs = []
self._sends = []
self._outputs = []
@property
def train_ops(self):
return self._train_ops
@property
def sends(self):
return [(n, t) for n, t, _ in self._sends]
@property
def recvs(self):
return [(n, t) for n, t, _ in self._recvs]
def verify_example_ids(self):
tensor = tf.strings.to_hash_bucket_fast(self._example_ids, 2**31 - 1)
if self._role == 'leader':
self.send('_verify_example_ids', tensor)
else:
recv_tensor = self.recv('_verify_example_ids', tensor.dtype)
op = tf.assert_equal(tensor, recv_tensor)
self._train_ops.append(op)
def send(self, name, tensor, require_grad=False):
with tf.control_dependencies([self._example_ids]):
op = self._bridge.send_op(name, tensor)
self._train_ops.append(op)
self._sends.append((name, tensor, require_grad))
if require_grad:
return self.recv(name + '_grad', tensor.dtype)
return None
def recv(self, name, dtype=tf.float32, require_grad=False):
with tf.control_dependencies([self._example_ids]):
tensor = self._bridge.receive_op(name, dtype)
self._recvs.append((name, tensor, require_grad))
return tensor
def minimize(self,
optimizer,
loss,
global_step=None,
var_list=None,
gate_gradients=Optimizer.GATE_OP,
aggregation_method=None,
colocate_gradients_with_ops=False,
name=None,
grad_loss=None):
recv_grads = [i for i in self._recvs if i[2]]
if var_list is None:
var_list = \
tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES) + \
tf.get_collection(tf.GraphKeys.TRAINABLE_RESOURCE_VARIABLES)
var_list = [v for _, v, _ in recv_grads] + var_list
grads_and_vars = optimizer.compute_gradients(
loss,
var_list=var_list,
gate_gradients=gate_gradients,
aggregation_method=aggregation_method,
colocate_gradients_with_ops=colocate_gradients_with_ops,
grad_loss=grad_loss)
send_grads = grads_and_vars[:len(recv_grads)]
for (n, _, _), (grad, _) in zip(recv_grads, send_grads):
if grad is not None:
self.send(n + '_grad', grad)
if grads_and_vars[len(recv_grads):]:
train_op = optimizer.apply_gradients(
grads_and_vars[len(recv_grads):],
global_step=global_step,
name=name)
else:
train_op = tf.no_op()
return train_op
def _append_summary_hook(self, training_hooks):
if not training_hooks:
training_hooks = []
summary_hook = SummaryHook.get_hook()
if summary_hook:
training_hooks.append(summary_hook)
return training_hooks
    def make_spec(self,
                  mode,
                  predictions=None,
                  loss=None,
                  train_op=None,
                  eval_metric_ops=None,
                  training_chief_hooks=None,
                  training_hooks=None,
                  evaluation_hooks=None,
                  prediction_hooks=None):
        """Assemble a ``tf.estimator.EstimatorSpec``, folding in bridge ops.

        A bare prediction tensor is wrapped into ``{'output': tensor}``.
        In TRAIN mode, the send/recv/assert ops accumulated on this model
        are grouped into ``train_op`` so they run every step, and the
        summary hook is appended to ``training_hooks``.
        """
        if isinstance(predictions, tf.Tensor):
            predictions = {'output': predictions}
        if mode == ModeKeys.TRAIN:
            # Bridge ops must execute together with the optimizer step.
            train_op = tf.group([train_op] + self._train_ops)
            training_hooks = self._append_summary_hook(training_hooks)
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions=predictions,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops,
            training_chief_hooks=training_chief_hooks,
            training_hooks=training_hooks,
            evaluation_hooks=evaluation_hooks,
            prediction_hooks=prediction_hooks)
class FLEstimator(object):
    """Estimator-style driver for one party of a federated-learning job.

    Wraps a user ``model_fn`` and coordinates dataset consumption, the
    cross-party bridge lifecycle, checkpointing (including the data-block
    checkpoint) and multi-worker synchronization.
    """
    def __init__(self,
                 model_fn,
                 bridge,
                 trainer_master,
                 role,
                 worker_rank=0,
                 application_id=None,
                 cluster_spec=None):
        """Store collaborators; no TensorFlow graph is touched here."""
        self._model_fn = model_fn
        self._bridge = bridge
        self._trainer_master = trainer_master
        self._role = role
        self._worker_rank = worker_rank
        self._cluster_spec = cluster_spec
        self._application_id = application_id

    def _get_features_and_labels_from_input_fn(self, input_fn, mode):
        """Materialize one-shot (features, labels) tensors from *input_fn*."""
        dataset = input_fn(self._bridge, self._trainer_master)
        features, labels = dataset.make_one_shot_iterator().get_next()
        return features, labels

    def _get_model_spec(self, features, labels, mode):
        """Run the user ``model_fn`` and return its (spec, FLModel) pair."""
        model = FLModel(self._role, self._bridge,
                        features.get('example_id', None),
                        exporting=(mode == ModeKeys.PREDICT))
        spec = self._model_fn(model, features, labels, mode)
        return spec, model

    def _restore_datablock(self, blk_ids):
        """Restore the data-block checkpoint; returns True on success.

        Only worker 0 owns the data checkpoint; other ranks (or a missing
        checkpoint value) trivially succeed.
        """
        if self._worker_rank != 0 or blk_ids is None:
            return True
        block_id_str = as_str_any(blk_ids)
        block_ids = []
        if block_id_str != DATA_CHECKPOINT_INIT_VALUE:
            block_ids = block_id_str.split(",")
        logging.info("restore: %s", block_id_str)
        return self._trainer_master.restore_data_block_checkpoint(
            self._application_id, block_ids)

    def _cheif_barriar(self, is_chief=False, sync_times=300):
        """Block the chief until every worker has raised its sync flag.

        Each worker writes a flag under APPLICATION_ID/WORKER_RANK in the
        kvstore; the chief polls (up to *sync_times* rounds, 6 s apart)
        until the flag count reaches the replica count.

        NOTE: the method name keeps its historical misspelling
        ("cheif_barriar") for compatibility with existing callers.
        """
        # BUG FIX: os.environ values are strings; the original compared
        # len(sync_list) (int) against a str, raising TypeError on Py3
        # whenever REPLICA_NUM was actually set.
        worker_replicas = int(os.environ.get('REPLICA_NUM', 0))
        kvstore_type = os.environ.get('KVSTORE_TYPE', 'etcd')
        db_database, db_addr, db_username, db_password, _ = \
            get_kvstore_config(kvstore_type)
        kvstore_client = DBClient(db_database,
                                  db_addr,
                                  db_username,
                                  db_password,
                                  SYNC_PATH)
        sync_path = '%s/%s' % (os.environ['APPLICATION_ID'],
                               os.environ['WORKER_RANK'])
        logging.info('Creating a sync flag at %s', sync_path)
        kvstore_client.set_data(sync_path, "1")
        if is_chief:
            for _ in range(sync_times):
                sync_list = kvstore_client.get_prefix_kvs(
                    os.environ['APPLICATION_ID'])
                logging.info('Sync file pattern is: %s', sync_list)
                if len(sync_list) < worker_replicas:
                    logging.info('Count of ready workers is %d',
                                 len(sync_list))
                    time.sleep(6)
                else:
                    break

    def train(self,
              input_fn,
              checkpoint_path=None,
              save_checkpoint_steps=None,
              save_checkpoint_secs=None):
        """Run the training loop until the input is exhausted.

        Builds the (optionally distributed) graph, connects the bridge,
        restores the data-block checkpoint, then drives one bridge
        start/commit cycle per session step. Returns ``self``.
        """
        if self._cluster_spec is not None:
            device_fn = tf.train.replica_device_setter(
                worker_device="/job:worker/task:%d" % self._worker_rank,
                merge_devices=True,
                cluster=self._cluster_spec)
            cluster_def = self._cluster_spec.as_cluster_def()
            local_address = self._cluster_spec.job_tasks('worker')[
                self._worker_rank]
            # Keep a reference so the in-process gRPC server stays alive
            # for the lifetime of this method.
            server = tf.train.Server(tf.train.ClusterSpec(
                {'local': {
                    0: local_address
                }}),
                job_name='local',
                task_index=0)
            target = 'grpc://' + local_address
        else:
            device_fn = None
            cluster_def = None
            target = None
        config = tf.ConfigProto(cluster_def=cluster_def)
        config.inter_op_parallelism_threads = 4
        config.intra_op_parallelism_threads = 4
        config.experimental.share_session_state_in_clusterspec_propagation \
            = True
        tf.config.set_soft_device_placement(False)
        with tf.Graph().as_default():
            with tf.device(device_fn):
                features, labels = self._get_features_and_labels_from_input_fn(
                    input_fn, ModeKeys.TRAIN)
                spec, _ = self._get_model_spec(features, labels, ModeKeys.TRAIN)
            # Register a default sharded saver unless the model added one.
            if not tf.get_collection(tf.GraphKeys.SAVERS):
                saver = tf.train.Saver(
                    sharded=True,
                    defer_build=True,
                    save_relative_paths=True)
                tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
            listener = DataCheckpointSaverListener(self._trainer_master,
                                                   self._application_id)
            saver_hook = tf.estimator.CheckpointSaverHook(
                checkpoint_path, save_secs=save_checkpoint_secs,
                save_steps=save_checkpoint_steps, listeners=[listener])
            self._bridge.connect()
            try:
                with tf.train.MonitoredTrainingSession(
                        master=target,
                        config=config,
                        is_chief=(self._worker_rank == 0),
                        chief_only_hooks=[saver_hook],
                        checkpoint_dir=checkpoint_path,
                        save_checkpoint_steps=save_checkpoint_steps,
                        save_checkpoint_secs=save_checkpoint_secs,
                        hooks=spec.training_hooks) as sess:
                    iter_id = 0
                    data_checkpoint_value = None
                    if hasattr(saver_hook, "data_checkpoint"):
                        data_checkpoint_value = saver_hook.data_checkpoint
                    if not self._restore_datablock(data_checkpoint_value):
                        raise ValueError("Restore data checkpoint error")
                    while not sess.should_stop():
                        # One bridge transaction per training step.
                        self._bridge.start(iter_id)
                        logging.debug('after bridge start.')
                        start_time = time.time()
                        sess.run(spec.train_op, feed_dict={})
                        end_time = time.time()
                        metrics.emit_timer(
                            name="iter_timer",
                            value=end_time-start_time,
                            tags={})
                        logging.debug('after session run.')
                        self._bridge.commit()
                        logging.debug('after bridge commit.')
                        iter_id += 1
            finally:
                # Always tear down the bridge, even on failure.
                self._bridge.terminate()
        return self

    def evaluate(self,
                 input_fn,
                 checkpoint_path=None):
        """Evaluate the latest checkpoint; returns the final metric values."""
        if not tf.train.latest_checkpoint(checkpoint_path):
            raise ValueError(
                "Could not find trained model at %s" % checkpoint_path)
        with tf.Graph().as_default():
            features, labels = self._get_features_and_labels_from_input_fn(
                input_fn, ModeKeys.EVAL)
            spec, model = self._get_model_spec(features, labels, ModeKeys.EVAL)
            # Track average loss alongside any user-provided metrics.
            eval_metric_ops = spec.eval_metric_ops or {}
            if model_fn_lib.LOSS_METRIC_KEY not in eval_metric_ops:
                loss_metric = tf.metrics.mean(spec.loss)
                eval_metric_ops[model_fn_lib.LOSS_METRIC_KEY] = loss_metric
            update_ops, eval_dict = _extract_metric_update_ops(eval_metric_ops)
            # Bridge ops must run every step too (private access is
            # intentional; FLModel queues them on _train_ops).
            update_ops.extend(model._train_ops)
            eval_op = tf.group(*update_ops)
            if tf.GraphKeys.GLOBAL_STEP in eval_dict:
                raise ValueError(
                    'Metric with name `global_step` is not allowed, because '
                    'Estimator already defines a default metric with the '
                    'same name.')
            eval_dict[tf.GraphKeys.GLOBAL_STEP] = \
                tf.train.get_or_create_global_step()
            scaffold = tf.train.Scaffold()
            session_creator = tf.train.ChiefSessionCreator(
                scaffold=scaffold,
                checkpoint_dir=checkpoint_path)
            # BUG FIX: the original `list(spec.evaluation_hooks) or []`
            # raised TypeError when evaluation_hooks was None, because
            # list(None) runs before the `or`.
            all_hooks = list(spec.evaluation_hooks or [])
            final_ops_hook = tf.train.FinalOpsHook(eval_dict)
            all_hooks.append(final_ops_hook)
            self._bridge.connect()
            try:
                with tf.train.MonitoredSession(
                        session_creator=session_creator, hooks=all_hooks) as sess:
                    if not self._restore_datablock(DATA_CHECKPOINT_INIT_VALUE):
                        raise ValueError("Restore data checkpoint error")
                    iter_id = 0
                    while not sess.should_stop():
                        self._bridge.start(iter_id)
                        logging.debug('after bridge start.')
                        start_time = time.time()
                        sess.run(eval_op)
                        end_time = time.time()
                        metrics.emit_timer(
                            name="iter_timer",
                            value=end_time-start_time,
                            tags={})
                        logging.debug('after session run.')
                        self._bridge.commit()
                        logging.debug('after bridge commit.')
                        iter_id += 1
            finally:
                self._bridge.terminate()
        logging.info('Metrics for iteration %d: %s',
                     iter_id, _dict_to_str(final_ops_hook.final_ops_values))
        return final_ops_hook.final_ops_values

    def export_saved_model(self,
                           export_dir_base,
                           serving_input_receiver_fn,
                           checkpoint_path=None):
        """Export the latest checkpoint as a SavedModel for serving.

        Exported graphs must be self-contained: any bridge send/recv in
        the model_fn is rejected up front.
        """
        with tf.Graph().as_default():
            receiver = serving_input_receiver_fn()
            spec, model = self._get_model_spec(receiver.features, None,
                                               ModeKeys.PREDICT)
            assert not model.sends, "Exported model cannot send"
            assert not model.recvs, "Exported model cannot receive"
            with tf.Session() as sess:
                saver_for_restore = tf.train.Saver(sharded=True)
                saver_for_restore.restore(
                    sess, tf.train.latest_checkpoint(checkpoint_path))
                tf.saved_model.simple_save(sess, export_dir_base,
                                           receiver.receiver_tensors,
                                           spec.predictions, None)
        return export_dir_base
def _extract_metric_update_ops(eval_dict):
update_ops = []
value_ops = {}
for name in sorted(eval_dict.keys()):
metric_tensor, update_op = eval_dict[name]
value_ops[name] = metric_tensor
update_ops.append(update_op)
return update_ops, value_ops
def _dict_to_str(dictionary):
return ', '.join('%s = %s' % (k, v)
for k, v in sorted(dictionary.items())
if not isinstance(v, bytes))
| true | true |
f72b173c37bf64ae1456501212bb02ffe852962a | 2,398 | py | Python | sdk/python/pulumi_azure_native/azurestack/v20200601preview/get_registration_activation_key.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/azurestack/v20200601preview/get_registration_activation_key.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/azurestack/v20200601preview/get_registration_activation_key.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetRegistrationActivationKeyResult',
'AwaitableGetRegistrationActivationKeyResult',
'get_registration_activation_key',
]
@pulumi.output_type
class GetRegistrationActivationKeyResult:
    """
    Result of the ``getRegistrationActivationKey`` invoke: carries the
    Azure Stack activation key, when one was returned.
    """
    def __init__(__self__, activation_key=None):
        if not isinstance(activation_key, str) and activation_key:
            raise TypeError("Expected argument 'activation_key' to be a str")
        pulumi.set(__self__, "activation_key", activation_key)

    @property
    @pulumi.getter(name="activationKey")
    def activation_key(self) -> Optional[str]:
        """
        Azure Stack activation key.
        """
        return pulumi.get(self, "activation_key")
class AwaitableGetRegistrationActivationKeyResult(GetRegistrationActivationKeyResult):
    """Awaitable wrapper so callers may ``await`` the invoke result."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead ``yield`` marks __await__ as a generator, which is what
        # satisfies the awaitable protocol; control never suspends here.
        if False:
            yield self
        return GetRegistrationActivationKeyResult(
            activation_key=self.activation_key)
def get_registration_activation_key(registration_name: Optional[str] = None,
                                    resource_group: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistrationActivationKeyResult:
    """
    Fetch the Azure Stack activation key for a registration.

    :param str registration_name: Name of the Azure Stack registration.
    :param str resource_group: Name of the resource group.
    """
    __args__ = {
        'registrationName': registration_name,
        'resourceGroup': resource_group,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's pinned provider version.
        opts.version = _utilities.get_version()
    invoke_result = pulumi.runtime.invoke(
        'azure-native:azurestack/v20200601preview:getRegistrationActivationKey',
        __args__, opts=opts, typ=GetRegistrationActivationKeyResult).value
    return AwaitableGetRegistrationActivationKeyResult(
        activation_key=invoke_result.activation_key)
| 36.333333 | 175 | 0.710592 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetRegistrationActivationKeyResult',
'AwaitableGetRegistrationActivationKeyResult',
'get_registration_activation_key',
]
@pulumi.output_type
class GetRegistrationActivationKeyResult:
    """The resource containing the Azure Stack activation key."""
    def __init__(__self__, activation_key=None):
        # Guard against a non-string value being deserialized into this slot.
        if activation_key and not isinstance(activation_key, str):
            raise TypeError("Expected argument 'activation_key' to be a str")
        pulumi.set(__self__, "activation_key", activation_key)

    @property
    @pulumi.getter(name="activationKey")
    def activation_key(self) -> Optional[str]:
        """Azure Stack activation key."""
        return pulumi.get(self, "activation_key")
class AwaitableGetRegistrationActivationKeyResult(GetRegistrationActivationKeyResult):
    """Awaitable variant of the invoke result, for use with ``await``."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable ``yield`` turns this method into a generator,
        # making the object awaitable without ever actually suspending.
        if False:
            yield self
        return GetRegistrationActivationKeyResult(
            activation_key=self.activation_key)
def get_registration_activation_key(registration_name: Optional[str] = None,
                                    resource_group: Optional[str] = None,
                                    opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRegistrationActivationKeyResult:
    """Fetch the Azure Stack activation key for a registration.

    :param str registration_name: Name of the Azure Stack registration.
    :param str resource_group: Name of the resource group.
    """
    __args__ = dict()
    __args__['registrationName'] = registration_name
    __args__['resourceGroup'] = resource_group
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK's pinned provider version.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:azurestack/v20200601preview:getRegistrationActivationKey', __args__, opts=opts, typ=GetRegistrationActivationKeyResult).value
    return AwaitableGetRegistrationActivationKeyResult(
        activation_key=__ret__.activation_key)
| true | true |
f72b176a16e94f285b596a275b3c38e265d42aba | 11,711 | py | Python | onmt/model_builder.py | Nazukixv/OpenNMT-py | 6265ddbbe9053b018714ac1fb4be9ec8adbaa128 | [
"MIT"
] | 11 | 2019-11-22T16:46:36.000Z | 2021-07-17T04:06:14.000Z | onmt/model_builder.py | Nazukixv/OpenNMT-py | 6265ddbbe9053b018714ac1fb4be9ec8adbaa128 | [
"MIT"
] | 3 | 2019-11-11T05:40:10.000Z | 2020-03-05T14:04:38.000Z | onmt/model_builder.py | Nazukixv/OpenNMT-py | 6265ddbbe9053b018714ac1fb4be9ec8adbaa128 | [
"MIT"
] | 3 | 2020-04-04T12:21:52.000Z | 2022-02-27T13:29:45.000Z | """
This file is for models creation, which consults options
and creates each encoder and decoder accordingly.
"""
import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.transformer import TransformerEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.encoders.image_encoder import ImageEncoder
from onmt.decoders.decoder import InputFeedRNNDecoder, StdRNNDecoder
from onmt.decoders.transformer import TransformerDecoder
from onmt.decoders.cnn_decoder import CNNDecoder
from onmt.modules import Embeddings, CopyGenerator
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
    """
    Build an Embeddings instance for one side of the model.

    Args:
        opt: the option in current environment.
        word_dict(Vocab): words dictionary.
        feature_dicts([Vocab], optional): a list of feature dictionary.
        for_encoder(bool): build Embeddings for encoder or decoder?
    """
    emb_dim = opt.src_word_vec_size if for_encoder else opt.tgt_word_vec_size
    pad_idx = word_dict.stoi[inputters.PAD_WORD]
    feat_pad_indices = [fd.stoi[inputters.PAD_WORD] for fd in feature_dicts]
    feat_vocab_sizes = [len(fd) for fd in feature_dicts]
    return Embeddings(word_vec_size=emb_dim,
                      position_encoding=opt.position_encoding,
                      feat_merge=opt.feat_merge,
                      feat_vec_exponent=opt.feat_vec_exponent,
                      feat_vec_size=opt.feat_vec_size,
                      dropout=opt.dropout,
                      word_padding_idx=pad_idx,
                      feat_padding_idx=feat_pad_indices,
                      word_vocab_size=len(word_dict),
                      feat_vocab_sizes=feat_vocab_sizes,
                      sparse=opt.optim == "sparseadam")
def build_encoder(opt, embeddings):
    """
    Instantiate the encoder selected by ``opt.encoder_type``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this encoder.
    """
    enc_type = opt.encoder_type
    if enc_type == "transformer":
        return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size,
                                  opt.heads, opt.transformer_ff,
                                  opt.dropout, embeddings)
    if enc_type == "cnn":
        return CNNEncoder(opt.enc_layers, opt.enc_rnn_size,
                          opt.cnn_kernel_width, opt.dropout, embeddings)
    if enc_type == "mean":
        return MeanEncoder(opt.enc_layers, embeddings)
    # Fallback covers both "rnn" and "brnn".
    return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
                      opt.enc_rnn_size, opt.dropout, embeddings,
                      opt.bridge)
def build_decoder(opt, embeddings):
    """
    Instantiate the decoder selected by ``opt.decoder_type``.

    Args:
        opt: the option in current environment.
        embeddings (Embeddings): vocab embeddings for this decoder.
    """
    if opt.decoder_type == "transformer":
        return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size,
                                  opt.heads, opt.transformer_ff,
                                  opt.global_attention, opt.copy_attn,
                                  opt.self_attn_type,
                                  opt.dropout, embeddings)
    if opt.decoder_type == "cnn":
        return CNNDecoder(opt.dec_layers, opt.dec_rnn_size,
                          opt.global_attention, opt.copy_attn,
                          opt.cnn_kernel_width, opt.dropout,
                          embeddings)
    # InputFeedRNNDecoder and StdRNNDecoder share the same constructor
    # signature; the original duplicated the 12-argument call verbatim.
    rnn_decoder_cls = InputFeedRNNDecoder if opt.input_feed else StdRNNDecoder
    return rnn_decoder_cls(opt.rnn_type, opt.brnn,
                           opt.dec_layers, opt.dec_rnn_size,
                           opt.global_attention,
                           opt.global_attention_function,
                           opt.coverage_attn,
                           opt.context_gate,
                           opt.copy_attn,
                           opt.dropout,
                           embeddings,
                           opt.reuse_copy_attn)
def load_test_model(opt, dummy_opt, model_path=None):
    """Load a checkpointed model (plus its fields and opts) for inference.

    Missing options in the checkpoint's opt are back-filled from
    *dummy_opt*; the model and its generator are switched to eval mode.
    """
    path = opt.models[0] if model_path is None else model_path
    checkpoint = torch.load(path,
                            map_location=lambda storage, loc: storage)
    fields = inputters.load_fields_from_vocab(
        checkpoint['vocab'], data_type=opt.data_type)
    model_opt = checkpoint['opt']
    # Back-fill defaults for options the checkpoint predates.
    for key in dummy_opt:
        if key not in model_opt:
            model_opt.__dict__[key] = dummy_opt[key]
    model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
    model.eval()
    model.generator.eval()
    return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, checkpoint=None):
    """
    Args:
        model_opt: the option loaded from checkpoint.
        fields: `Field` objects for the model.
        gpu(bool): whether to use gpu.
        checkpoint: the model generated by train phase, or a resumed snapshot
                    model from a stopped training.
    Returns:
        the NMTModel.
    """
    assert model_opt.model_type in ["text", "img", "audio"], \
        ("Unsupported model type %s" % (model_opt.model_type))

    # for backward compatibility: a single rnn_size overrides both sides
    if model_opt.rnn_size != -1:
        model_opt.enc_rnn_size = model_opt.rnn_size
        model_opt.dec_rnn_size = model_opt.rnn_size
        if model_opt.model_type == 'text' and \
                model_opt.enc_rnn_size != model_opt.dec_rnn_size:
            raise AssertionError("""We do not support different encoder and
                                 decoder rnn sizes for translation now.""")

    # Build encoder.
    if model_opt.model_type == "text":
        src_dict = fields["src"].vocab
        feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
        src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
        encoder = build_encoder(model_opt, src_embeddings)
    elif model_opt.model_type == "img":
        # Older checkpoints may predate the image_channel_size option.
        image_channel_size = getattr(model_opt, 'image_channel_size', 3)
        encoder = ImageEncoder(model_opt.enc_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dropout,
                               image_channel_size)
    elif model_opt.model_type == "audio":
        encoder = AudioEncoder(model_opt.rnn_type,
                               model_opt.enc_layers,
                               model_opt.dec_layers,
                               model_opt.brnn,
                               model_opt.enc_rnn_size,
                               model_opt.dec_rnn_size,
                               model_opt.audio_enc_pooling,
                               model_opt.dropout,
                               model_opt.sample_rate,
                               model_opt.window_size)

    # Build decoder.
    tgt_dict = fields["tgt"].vocab
    feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
    tgt_embeddings = build_embeddings(model_opt, tgt_dict,
                                      feature_dicts, for_encoder=False)

    # Share the embedding matrix - preprocess with share_vocab required.
    if model_opt.share_embeddings:
        # Fix: src_dict/src_embeddings only exist for text models; the
        # original raised a confusing NameError here for img/audio input.
        if model_opt.model_type != "text":
            raise AssertionError('--share_embeddings is only supported '
                                 'for text models.')
        # src/tgt vocab should be the same if `-share_vocab` is specified.
        if src_dict != tgt_dict:
            raise AssertionError('The `-share_vocab` should be set during '
                                 'preprocess if you use share_embeddings!')
        tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight

    decoder = build_decoder(model_opt, tgt_embeddings)

    # Build NMTModel(= encoder + decoder).
    device = torch.device("cuda" if gpu else "cpu")
    model = onmt.models.NMTModel(encoder, decoder)

    # Build Generator.
    if not model_opt.copy_attn:
        if model_opt.generator_function == "sparsemax":
            gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
        else:
            gen_func = nn.LogSoftmax(dim=-1)
        generator = nn.Sequential(
            nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
            gen_func
        )
        if model_opt.share_decoder_embeddings:
            generator[0].weight = decoder.embeddings.word_lut.weight
    else:
        generator = CopyGenerator(model_opt.dec_rnn_size,
                                  fields["tgt"].vocab)

    # Load the model states from checkpoint or initialize them.
    if checkpoint is not None:
        # This preserves backward-compat for models using customed layernorm
        def fix_key(s):
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
                       r'\1.layer_norm\2.bias', s)
            s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
                       r'\1.layer_norm\2.weight', s)
            return s

        checkpoint['model'] = \
            {fix_key(k): v for (k, v) in checkpoint['model'].items()}
        # end of patch for backward compatibility
        model.load_state_dict(checkpoint['model'], strict=False)
        generator.load_state_dict(checkpoint['generator'], strict=False)
    else:
        if model_opt.param_init != 0.0:
            for p in model.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
            for p in generator.parameters():
                p.data.uniform_(-model_opt.param_init, model_opt.param_init)
        if model_opt.param_init_glorot:
            for p in model.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)
            for p in generator.parameters():
                if p.dim() > 1:
                    xavier_uniform_(p)

        if hasattr(model.encoder, 'embeddings'):
            model.encoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
        if hasattr(model.decoder, 'embeddings'):
            model.decoder.embeddings.load_pretrained_vectors(
                model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)

    # Add generator to model (this registers it as parameter of model).
    model.generator = generator
    model.to(device)

    return model
def build_model(model_opt, opt, fields, checkpoint):
    """Build the NMTModel described by *model_opt* and log its structure."""
    logger.info('Building model...')
    model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
    logger.info(model)
    return model
| 40.663194 | 79 | 0.59252 | import re
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
import onmt.inputters as inputters
import onmt.modules
from onmt.encoders.rnn_encoder import RNNEncoder
from onmt.encoders.transformer import TransformerEncoder
from onmt.encoders.cnn_encoder import CNNEncoder
from onmt.encoders.mean_encoder import MeanEncoder
from onmt.encoders.audio_encoder import AudioEncoder
from onmt.encoders.image_encoder import ImageEncoder
from onmt.decoders.decoder import InputFeedRNNDecoder, StdRNNDecoder
from onmt.decoders.transformer import TransformerDecoder
from onmt.decoders.cnn_decoder import CNNDecoder
from onmt.modules import Embeddings, CopyGenerator
from onmt.utils.misc import use_gpu
from onmt.utils.logging import logger
def build_embeddings(opt, word_dict, feature_dicts, for_encoder=True):
if for_encoder:
embedding_dim = opt.src_word_vec_size
else:
embedding_dim = opt.tgt_word_vec_size
word_padding_idx = word_dict.stoi[inputters.PAD_WORD]
num_word_embeddings = len(word_dict)
feats_padding_idx = [feat_dict.stoi[inputters.PAD_WORD]
for feat_dict in feature_dicts]
num_feat_embeddings = [len(feat_dict) for feat_dict in
feature_dicts]
return Embeddings(word_vec_size=embedding_dim,
position_encoding=opt.position_encoding,
feat_merge=opt.feat_merge,
feat_vec_exponent=opt.feat_vec_exponent,
feat_vec_size=opt.feat_vec_size,
dropout=opt.dropout,
word_padding_idx=word_padding_idx,
feat_padding_idx=feats_padding_idx,
word_vocab_size=num_word_embeddings,
feat_vocab_sizes=num_feat_embeddings,
sparse=opt.optim == "sparseadam")
def build_encoder(opt, embeddings):
if opt.encoder_type == "transformer":
return TransformerEncoder(opt.enc_layers, opt.enc_rnn_size,
opt.heads, opt.transformer_ff,
opt.dropout, embeddings)
elif opt.encoder_type == "cnn":
return CNNEncoder(opt.enc_layers, opt.enc_rnn_size,
opt.cnn_kernel_width,
opt.dropout, embeddings)
elif opt.encoder_type == "mean":
return MeanEncoder(opt.enc_layers, embeddings)
else:
return RNNEncoder(opt.rnn_type, opt.brnn, opt.enc_layers,
opt.enc_rnn_size, opt.dropout, embeddings,
opt.bridge)
def build_decoder(opt, embeddings):
if opt.decoder_type == "transformer":
return TransformerDecoder(opt.dec_layers, opt.dec_rnn_size,
opt.heads, opt.transformer_ff,
opt.global_attention, opt.copy_attn,
opt.self_attn_type,
opt.dropout, embeddings)
elif opt.decoder_type == "cnn":
return CNNDecoder(opt.dec_layers, opt.dec_rnn_size,
opt.global_attention, opt.copy_attn,
opt.cnn_kernel_width, opt.dropout,
embeddings)
elif opt.input_feed:
return InputFeedRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.dec_rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn)
else:
return StdRNNDecoder(opt.rnn_type, opt.brnn,
opt.dec_layers, opt.dec_rnn_size,
opt.global_attention,
opt.global_attention_function,
opt.coverage_attn,
opt.context_gate,
opt.copy_attn,
opt.dropout,
embeddings,
opt.reuse_copy_attn)
def load_test_model(opt, dummy_opt, model_path=None):
if model_path is None:
model_path = opt.models[0]
checkpoint = torch.load(model_path,
map_location=lambda storage, loc: storage)
fields = inputters.load_fields_from_vocab(
checkpoint['vocab'], data_type=opt.data_type)
model_opt = checkpoint['opt']
for arg in dummy_opt:
if arg not in model_opt:
model_opt.__dict__[arg] = dummy_opt[arg]
model = build_base_model(model_opt, fields, use_gpu(opt), checkpoint)
model.eval()
model.generator.eval()
return fields, model, model_opt
def build_base_model(model_opt, fields, gpu, checkpoint=None):
assert model_opt.model_type in ["text", "img", "audio"], \
("Unsupported model type %s" % (model_opt.model_type))
if model_opt.rnn_size != -1:
model_opt.enc_rnn_size = model_opt.rnn_size
model_opt.dec_rnn_size = model_opt.rnn_size
if model_opt.model_type == 'text' and \
model_opt.enc_rnn_size != model_opt.dec_rnn_size:
raise AssertionError("""We do not support different encoder and
decoder rnn sizes for translation now.""")
if model_opt.model_type == "text":
src_dict = fields["src"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'src')
src_embeddings = build_embeddings(model_opt, src_dict, feature_dicts)
encoder = build_encoder(model_opt, src_embeddings)
elif model_opt.model_type == "img":
if ("image_channel_size" not in model_opt.__dict__):
image_channel_size = 3
else:
image_channel_size = model_opt.image_channel_size
encoder = ImageEncoder(model_opt.enc_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dropout,
image_channel_size)
elif model_opt.model_type == "audio":
encoder = AudioEncoder(model_opt.rnn_type,
model_opt.enc_layers,
model_opt.dec_layers,
model_opt.brnn,
model_opt.enc_rnn_size,
model_opt.dec_rnn_size,
model_opt.audio_enc_pooling,
model_opt.dropout,
model_opt.sample_rate,
model_opt.window_size)
tgt_dict = fields["tgt"].vocab
feature_dicts = inputters.collect_feature_vocabs(fields, 'tgt')
tgt_embeddings = build_embeddings(model_opt, tgt_dict,
feature_dicts, for_encoder=False)
if model_opt.share_embeddings:
if src_dict != tgt_dict:
raise AssertionError('The `-share_vocab` should be set during '
'preprocess if you use share_embeddings!')
tgt_embeddings.word_lut.weight = src_embeddings.word_lut.weight
decoder = build_decoder(model_opt, tgt_embeddings)
device = torch.device("cuda" if gpu else "cpu")
model = onmt.models.NMTModel(encoder, decoder)
if not model_opt.copy_attn:
if model_opt.generator_function == "sparsemax":
gen_func = onmt.modules.sparse_activations.LogSparsemax(dim=-1)
else:
gen_func = nn.LogSoftmax(dim=-1)
generator = nn.Sequential(
nn.Linear(model_opt.dec_rnn_size, len(fields["tgt"].vocab)),
gen_func
)
if model_opt.share_decoder_embeddings:
generator[0].weight = decoder.embeddings.word_lut.weight
else:
generator = CopyGenerator(model_opt.dec_rnn_size,
fields["tgt"].vocab)
if checkpoint is not None:
def fix_key(s):
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.b_2',
r'\1.layer_norm\2.bias', s)
s = re.sub(r'(.*)\.layer_norm((_\d+)?)\.a_2',
r'\1.layer_norm\2.weight', s)
return s
checkpoint['model'] = \
{fix_key(k): v for (k, v) in checkpoint['model'].items()}
model.load_state_dict(checkpoint['model'], strict=False)
generator.load_state_dict(checkpoint['generator'], strict=False)
else:
if model_opt.param_init != 0.0:
for p in model.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
for p in generator.parameters():
p.data.uniform_(-model_opt.param_init, model_opt.param_init)
if model_opt.param_init_glorot:
for p in model.parameters():
if p.dim() > 1:
xavier_uniform_(p)
for p in generator.parameters():
if p.dim() > 1:
xavier_uniform_(p)
if hasattr(model.encoder, 'embeddings'):
model.encoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_enc, model_opt.fix_word_vecs_enc)
if hasattr(model.decoder, 'embeddings'):
model.decoder.embeddings.load_pretrained_vectors(
model_opt.pre_word_vecs_dec, model_opt.fix_word_vecs_dec)
model.generator = generator
model.to(device)
return model
def build_model(model_opt, opt, fields, checkpoint):
logger.info('Building model...')
model = build_base_model(model_opt, fields,
use_gpu(opt), checkpoint)
logger.info(model)
return model
| true | true |
f72b18ac7bf95dbe78dbadf8c1485e348aca0705 | 870 | py | Python | code/extractWAVdata.py | eepsmedia/ping-pong-bounce | 8e06363032da88976f14146704af26d9312d195a | [
"MIT"
] | null | null | null | code/extractWAVdata.py | eepsmedia/ping-pong-bounce | 8e06363032da88976f14146704af26d9312d195a | [
"MIT"
] | null | null | null | code/extractWAVdata.py | eepsmedia/ping-pong-bounce | 8e06363032da88976f14146704af26d9312d195a | [
"MIT"
] | null | null | null | """Convert a .wav file to .csv
Uses the `wave` package to convert a .wav file to a .csv.
Assumes that the file is monoaural (one channel).
Be sure to edit the code to point to correct values of `inFileName` and `outFileName`
"""
import wave

import numpy

# Default paths; kept at module level for backward compatibility.
inFileName = "../data/pingpong.wav"
outFileName = '../data/pingpong raw redux.csv'


def convert_wav_to_csv(in_file=None, out_file=None):
    """Convert a mono 16-bit .wav file into a two-column CSV.

    One row is written per frame: the frame time in milliseconds and the
    signed sample value. Fixes over the original script: the wave reader
    is closed via a context manager, non-mono / non-16-bit input is
    rejected instead of being silently misinterpreted, and the samples
    are decoded explicitly little-endian ('<i2') as the WAV format
    specifies, so the script also works on big-endian hosts.
    """
    in_file = inFileName if in_file is None else in_file
    out_file = outFileName if out_file is None else out_file

    with wave.open(in_file, 'rb') as f:
        params = f.getparams()
        bytesData = f.readframes(params.nframes)
    print("There are {} frames.".format(params.nframes))

    if params.nchannels != 1:
        raise ValueError(
            "Expected mono audio, got {} channels".format(params.nchannels))
    if params.sampwidth != 2:
        raise ValueError(
            "Expected 16-bit samples, got {} bytes".format(params.sampwidth))

    # WAV data is little-endian by spec; '<i2' pins the byte order.
    a = numpy.frombuffer(bytesData, dtype=numpy.dtype('<i2'))

    with open(out_file, 'w') as out:
        out.write('time, sound\n')
        for i, val in enumerate(a):
            time = 1000 * i / params.framerate  # milliseconds
            out.write('{:g}, {:g}\n'.format(time, val))
    print("Wrote {} frames.".format(len(a)))


if __name__ == '__main__':
    convert_wav_to_csv()
| 22.894737 | 85 | 0.658621 |
import wave
import numpy

# Input .wav (assumed mono, 16-bit) and output .csv paths.
inFileName = "../data/pingpong.wav"
outFileName = '../data/pingpong raw redux.csv'

# Read every frame of the source file as raw bytes.
f = wave.open(inFileName, 'rb')
params = f.getparams()
print("There are {} frames.".format(params.nframes))
bytesData = f.readframes(params.nframes)
f.close()

# Interpret the byte stream as 16-bit signed integer samples.
a = numpy.frombuffer(bytesData, dtype=numpy.dtype('i2'))

# Write one CSV row per frame: time in milliseconds, sample value.
i = 0
with open(outFileName, 'w') as out:
    out.write('time, sound\n')
    for val in a:
        time = 1000 * i / params.framerate  # frame time in milliseconds
        theLine = '{:g}, {:g}\n'.format(time, val)
        out.write(theLine)
        i += 1
print("Wrote {} frames.".format(i))
| true | true |
f72b18b4de5b0fdf2cba2aac9ddd50531ba9f7c0 | 2,145 | py | Python | setup.py | hindman/short-con | 45242757ab50a3b8b8b9826704a58006f918955d | [
"MIT"
] | null | null | null | setup.py | hindman/short-con | 45242757ab50a3b8b8b9826704a58006f918955d | [
"MIT"
] | null | null | null | setup.py | hindman/short-con | 45242757ab50a3b8b8b9826704a58006f918955d | [
"MIT"
] | null | null | null | #! /usr/bin/env python
from os.path import dirname, realpath, join
from setuptools import setup, find_packages
import sys

####
# Basic project info.
####

project_name = 'short-con'
package_name = project_name.replace('-', '_')
repo_name = project_name
description = 'Constants collections without boilerplate'
url = 'https://github.com/hindman/' + repo_name
author = 'Monty Hindman'
author_email = 'mhindman@gmail.com'
license = 'MIT'
src_subdir = 'src'
project_dir = dirname(realpath(__file__))

####
# Requirements.
####

reqs = [
    'attrs',
    'six',
]

extras = {
    'test' : [
        'pytest',
        'pytest-cov',
        'tox',
    ],
    'dev' : [
        'invoke',
        'ipython' if sys.version_info.major > 2 else 'ipython<6.0',
        'pycodestyle',
        'twine',
        'virtualenv',
        'virtualenvwrapper',
    ],
}

####
# Set __version__, long description, and classifiers.
####

# Execute version.py to define __version__ without importing the package.
# Fix: both reads below previously leaked their file handles
# (exec(open(...).read()) / open(...).read() without close()).
version_file = join(project_dir, src_subdir, package_name, 'version.py')
with open(version_file) as fh:
    exec(fh.read())
readme_file = join(project_dir, 'README.md')
with open(readme_file) as fh:
    long_desc = fh.read()
long_desc_type = 'text/markdown'
classifiers = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: OS Independent',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Topic :: Software Development',
]

####
# Packages and scripts.
####

packages = find_packages(where = src_subdir)
package_data = {
    package_name: [],
}

####
# Install.
####

setup(
    name = project_name,
    version = __version__,
    author = author,
    author_email = author_email,
    url = url,
    description = description,
    zip_safe = False,
    packages = packages,
    package_dir = {'': src_subdir},
    package_data = package_data,
    install_requires = reqs,
    tests_require = extras['test'],
    extras_require = extras,
    license = license,
    long_description = long_desc,
    long_description_content_type = long_desc_type,
    classifiers = classifiers,
)
from os.path import dirname, realpath, join
from setuptools import setup, find_packages
import sys
name = 'short-con'
package_name = project_name.replace('-', '_')
repo_name = project_name
description = 'Constants collections without boilerplate'
url = 'https://github.com/hindman/' + repo_name
author = 'Monty Hindman'
author_email = 'mhindman@gmail.com'
license = 'MIT'
src_subdir = 'src'
project_dir = dirname(realpath(__file__))
'attrs',
'six',
]
extras = {
'test' : [
'pytest',
'pytest-cov',
'tox',
],
'dev' : [
'invoke',
'ipython' if sys.version_info.major > 2 else 'ipython<6.0',
'pycodestyle',
'twine',
'virtualenv',
'virtualenvwrapper',
],
}
file = join(project_dir, src_subdir, package_name, 'version.py')
exec(open(version_file).read())
readme_file = join(project_dir, 'README.md')
long_desc = open(readme_file).read()
long_desc_type = 'text/markdown'
classifiers = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development',
]
= find_packages(where = src_subdir)
package_data = {
package_name: [],
}
name = project_name,
version = __version__,
author = author,
author_email = author_email,
url = url,
description = description,
zip_safe = False,
packages = packages,
package_dir = {'': src_subdir},
package_data = package_data,
install_requires = reqs,
tests_require = extras['test'],
extras_require = extras,
license = license,
long_description = long_desc,
long_description_content_type = long_desc_type,
classifiers = classifiers,
)
| true | true |
f72b19bcddea7c052af0ab512ac1b3f2f93a86bf | 112,844 | py | Python | tensorflow/python/ops/variables.py | m4rkl1u/tensorflow | 90a8825c7ae9719e8969d45040b4155b0e7de130 | [
"Apache-2.0"
] | 1 | 2019-01-14T07:11:06.000Z | 2019-01-14T07:11:06.000Z | tensorflow/python/ops/variables.py | m4rkl1u/tensorflow | 90a8825c7ae9719e8969d45040b4155b0e7de130 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/variables.py | m4rkl1u/tensorflow | 90a8825c7ae9719e8969d45040b4155b0e7de130 | [
"Apache-2.0"
] | 2 | 2019-02-26T16:21:15.000Z | 2020-12-04T17:48:17.000Z | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Variable class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import functools
import os
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import compat
from tensorflow.python.util import tf_should_use
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
def default_variable_creator(_, **kwds):
del kwds
raise NotImplementedError("variable_scope needs to be imported")
def default_variable_creator_v2(_, **kwds):
del kwds
raise NotImplementedError("variable_scope needs to be imported")
def _make_getter(captured_getter, captured_previous):
"""To avoid capturing loop variables."""
def getter(**kwargs):
return captured_getter(captured_previous, **kwargs)
return getter
def _has_cycle(op, path):
"""Detect cycles in the dependencies of `initial_value`."""
if op.name in path:
return True
path.add(op.name)
for op_input in op.inputs:
if _has_cycle(op_input.op, path):
return True
for op_control_input in op.control_inputs:
if _has_cycle(op_control_input, path):
return True
path.remove(op.name)
return False
@tf_export("VariableSynchronization")
class VariableSynchronization(enum.Enum):
"""Indicates when a distributed variable will be synced.
* `AUTO`: Indicates that the synchronization will be determined by the current
`DistributionStrategy` (eg. With `MirroredStrategy` this would be
`ON_WRITE`).
* `NONE`: Indicates that there will only be one copy of the variable, so
there is no need to sync.
* `ON_WRITE`: Indicates that the variable will be updated across devices
every time it is written.
* `ON_READ`: Indicates that the variable will be aggregated across devices
when it is read (eg. when checkpointing or when evaluating an op that uses
the variable).
"""
AUTO = 0
NONE = 1
ON_WRITE = 2
ON_READ = 3
@tf_export("VariableAggregation", v1=[])
class VariableAggregationV2(enum.Enum):
"""Indicates how a distributed variable will be aggregated.
`tf.contrib.distribute.DistributionStrategy` distributes a model by making
multiple copies (called "replicas") acting data-parallel on different elements
of the input batch. When performing some variable-update operation, say
`var.assign_add(x)`, in a model, we need to resolve how to combine the
different values for `x` computed in the different replicas.
* `NONE`: This is the default, giving an error if you use a
variable-update operation with multiple replicas.
* `SUM`: Add the updates across replicas.
* `MEAN`: Take the arithmetic mean ("average") of the updates across replicas.
* `ONLY_FIRST_REPLICA`: This is for when every replica is performing the same
update, but we only want to perform the update once. Used, e.g., for the
global step counter.
"""
NONE = 0
SUM = 1
MEAN = 2
ONLY_FIRST_REPLICA = 3
@tf_export(v1=["VariableAggregation"])
class VariableAggregation(enum.Enum):
NONE = 0
SUM = 1
MEAN = 2
ONLY_FIRST_REPLICA = 3
ONLY_FIRST_TOWER = 3 # DEPRECATED
VariableAggregation.__doc__ = (
VariableAggregationV2.__doc__ +
"* `ONLY_FIRST_TOWER`: Deprecated alias for `ONLY_FIRST_REPLICA`.\n ")
class VariableMetaclass(type):
"""Metaclass to allow construction of tf.Variable to be overridden."""
def _variable_v1_call(cls,
initial_value=None,
trainable=None,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
expected_shape=None,
import_scope=None,
constraint=None,
use_resource=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Call on Variable class. Useful to force the signature."""
previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
previous_getter = _make_getter(getter, previous_getter)
# Reset `aggregation` that is explicitly set as `None` to the enum NONE.
if aggregation is None:
aggregation = VariableAggregation.NONE
return previous_getter(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
variable_def=variable_def,
dtype=dtype,
expected_shape=expected_shape,
import_scope=import_scope,
constraint=constraint,
use_resource=use_resource,
synchronization=synchronization,
aggregation=aggregation)
def _variable_v2_call(cls,
initial_value=None,
trainable=None,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
import_scope=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Call on Variable class. Useful to force the signature."""
previous_getter = lambda **kws: default_variable_creator_v2(None, **kws)
for getter in ops.get_default_graph()._variable_creator_stack: # pylint: disable=protected-access
previous_getter = _make_getter(getter, previous_getter)
# Reset `aggregation` that is explicitly set as `None` to the enum NONE.
if aggregation is None:
aggregation = VariableAggregation.NONE
return previous_getter(
initial_value=initial_value,
trainable=trainable,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
variable_def=variable_def,
dtype=dtype,
import_scope=import_scope,
constraint=constraint,
synchronization=synchronization,
aggregation=aggregation)
def __call__(cls, *args, **kwargs):
if cls is VariableV1:
return cls._variable_v1_call(*args, **kwargs)
elif cls is Variable:
return cls._variable_v2_call(*args, **kwargs)
else:
return super(VariableMetaclass, cls).__call__(*args, **kwargs)
@tf_export("Variable", v1=[])
class Variable(six.with_metaclass(VariableMetaclass,
checkpointable.CheckpointableBase)):
"""See the [Variables Guide](https://tensorflow.org/guide/variables).
A variable maintains state in the graph across calls to `run()`. You add a
variable to the graph by constructing an instance of the class `Variable`.
The `Variable()` constructor requires an initial value for the variable,
which can be a `Tensor` of any type and shape. The initial value defines the
type and shape of the variable. After construction, the type and shape of
the variable are fixed. The value can be changed using one of the assign
methods.
If you want to change the shape of a variable later you have to use an
`assign` Op with `validate_shape=False`.
Just like any `Tensor`, variables created with `Variable()` can be used as
inputs for other Ops in the graph. Additionally, all the operators
overloaded for the `Tensor` class are carried over to variables, so you can
also add nodes to the graph by just doing arithmetic on variables.
```python
import tensorflow as tf
# Create a variable.
w = tf.Variable(<initial-value>, name=<optional-name>)
# Use the variable in the graph like any Tensor.
y = tf.matmul(w, ...another variable or tensor...)
# The overloaded operators are available too.
z = tf.sigmoid(w + y)
# Assign a new value to the variable with `assign()` or a related method.
w.assign(w + 1.0)
w.assign_add(1.0)
```
When you launch the graph, variables have to be explicitly initialized before
you can run Ops that use their value. You can initialize a variable by
running its *initializer op*, restoring the variable from a save file, or
simply running an `assign` Op that assigns a value to the variable. In fact,
the variable *initializer op* is just an `assign` Op that assigns the
variable's initial value to the variable itself.
```python
# Launch the graph in a session.
with tf.Session() as sess:
# Run the variable initializer.
sess.run(w.initializer)
# ...you now can run ops that use the value of 'w'...
```
The most common initialization pattern is to use the convenience function
`global_variables_initializer()` to add an Op to the graph that initializes
all the variables. You then run that Op after launching the graph.
```python
# Add an Op to initialize global variables.
init_op = tf.global_variables_initializer()
# Launch the graph in a session.
with tf.Session() as sess:
# Run the Op that initializes global variables.
sess.run(init_op)
# ...you can now run any Op that uses variable values...
```
If you need to create a variable with an initial value dependent on another
variable, use the other variable's `initialized_value()`. This ensures that
variables are initialized in the right order.
All variables are automatically collected in the graph where they are
created. By default, the constructor adds the new variable to the graph
collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function
`global_variables()` returns the contents of that collection.
When building a machine learning model it is often convenient to distinguish
between variables holding the trainable model parameters and other variables
such as a `global step` variable used to count training steps. To make this
easier, the variable constructor supports a `trainable=<bool>` parameter. If
`True`, the new variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`. The convenience function
`trainable_variables()` returns the contents of this collection. The
various `Optimizer` classes use this collection as the default list of
variables to optimize.
WARNING: tf.Variable objects by default have a non-intuitive memory model. A
Variable is represented internally as a mutable Tensor which can
non-deterministically alias other Tensors in a graph. The set of operations
which consume a Variable and can lead to aliasing is undetermined and can
change across TensorFlow versions. Avoid writing code which relies on the
value of a Variable either changing or not changing as other operations
happen. For example, using Variable objects or simple functions thereof as
predicates in a `tf.cond` is dangerous and error-prone:
```
v = tf.Variable(True)
tf.cond(v, lambda: v.assign(False), my_false_fn) # Note: this is broken.
```
Here replacing adding `use_resource=True` when constructing the variable will
fix any nondeterminism issues:
```
v = tf.Variable(True, use_resource=True)
tf.cond(v, lambda: v.assign(False), my_false_fn)
```
To use the replacement for variables which does
not have these issues:
* Add `use_resource=True` when constructing `tf.Variable`;
* Call `tf.get_variable_scope().set_use_resource(True)` inside a
`tf.variable_scope` before the `tf.get_variable()` call.
"""
def __init__(self,
initial_value=None,
trainable=True,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
import_scope=None,
constraint=None,
synchronization=VariableSynchronization.AUTO,
aggregation=VariableAggregation.NONE):
"""Creates a new variable with value `initial_value`.
The new variable is added to the graph collections listed in `collections`,
which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
If `trainable` is `True` the variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
This constructor creates both a `variable` Op and an `assign` Op to set the
variable to its initial value.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, GradientTapes automatically watch uses
of this variable.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
variable_def: `VariableDef` protocol buffer. If not `None`, recreates
the Variable object with its contents, referencing the variable's nodes
in the graph, which must already exist. The graph is not changed.
`variable_def` and the other arguments are mutually exclusive.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
import_scope: Optional `string`. Name scope to add to the
`Variable.` Only used when initializing from protocol buffer.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
synchronization: Indicates when a distributed a variable will be
aggregated. Accepted values are constants defined in the class
`tf.VariableSynchronization`. By default the synchronization is set to
`AUTO` and the current `DistributionStrategy` chooses
when to synchronize. If `synchronization` is set to `ON_READ`,
`trainable` must not be set to `True`.
aggregation: Indicates how a distributed variable will be aggregated.
Accepted values are constants defined in the class
`tf.VariableAggregation`.
Raises:
ValueError: If both `variable_def` and initial_value are specified.
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
RuntimeError: If eager execution is enabled.
"""
raise NotImplementedError
def __repr__(self):
raise NotImplementedError
def value(self):
"""Returns the last snapshot of this variable.
You usually do not need to call this method as all ops that need the value
of the variable call it automatically through a `convert_to_tensor()` call.
Returns a `Tensor` which holds the value of the variable. You can not
assign a new value to this tensor as it is not a reference to the variable.
To avoid copies, if the consumer of the returned value is on the same device
as the variable, this actually returns the live value of the variable, not
a copy. Updates to the variable are seen by the consumer. If the consumer
is on a different device it will get a copy of the variable.
Returns:
A `Tensor` containing the value of the variable.
"""
raise NotImplementedError
def read_value(self):
"""Returns the value of this variable, read in the current context.
Can be different from value() if it's on another device, with control
dependencies, etc.
Returns:
A `Tensor` containing the value of the variable.
"""
raise NotImplementedError
def set_shape(self, shape):
"""Overrides the shape for this variable.
Args:
shape: the `TensorShape` representing the overridden shape.
"""
raise NotImplementedError
@property
def trainable(self):
raise NotImplementedError
def eval(self, session=None):
"""In a session, computes and returns the value of this variable.
This is not a graph construction method, it does not add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used. See `tf.Session` for more
information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
print(v.eval(sess))
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
print(v.eval())
```
Args:
session: The session to use to evaluate this variable. If
none, the default session is used.
Returns:
A numpy `ndarray` with a copy of the value of this variable.
"""
raise NotImplementedError
def initialized_value(self):
"""Returns the value of the initialized variable.
You should use this instead of the variable itself to initialize another
variable with a value that depends on the value of this variable.
```python
# Initialize 'v' with a random tensor.
v = tf.Variable(tf.truncated_normal([10, 40]))
# Use `initialized_value` to guarantee that `v` has been
# initialized before its value is used to initialize `w`.
# The random values are picked only once.
w = tf.Variable(v.initialized_value() * 2.0)
```
Returns:
A `Tensor` holding the value of this variable after its initializer
has run.
"""
raise NotImplementedError
@property
def initial_value(self):
"""Returns the Tensor used as the initial value for the variable.
Note that this is different from `initialized_value()` which runs
the op that initializes the variable before returning its value.
This method returns the tensor that is used by the op that initializes
the variable.
Returns:
A `Tensor`.
"""
raise NotImplementedError
@property
def constraint(self):
"""Returns the constraint function associated with this variable.
Returns:
The constraint function that was passed to the variable constructor.
Can be `None` if no constraint was passed.
"""
raise NotImplementedError
def assign(self, value, use_locking=False, name=None, read_value=True):
"""Assigns a new value to the variable.
This is essentially a shortcut for `assign(self, value)`.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the
new value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the assignment has completed.
"""
raise NotImplementedError
def assign_add(self, delta, use_locking=False, name=None, read_value=True):
"""Adds a value to this variable.
This is essentially a shortcut for `assign_add(self, delta)`.
Args:
delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the
new value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the addition has completed.
"""
raise NotImplementedError
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
"""Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the
new value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
"""
raise NotImplementedError
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
"""Subtracts `IndexedSlices` from this variable.
Args:
sparse_delta: `IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_add(self, sparse_delta, use_locking=False, name=None):
"""Adds `IndexedSlices` to this variable.
Args:
sparse_delta: `IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `IndexedSlices` to this variable.
Args:
sparse_delta: `IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_nd_sub(self, indices, updates, name=None):
"""Applies sparse subtraction to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_sub(indices, updates)
with tf.Session() as sess:
print sess.run(op)
```
The resulting update to ref would look like this:
[1, -9, 3, -6, -6, 6, 7, -4]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_nd_add(self, indices, updates, name=None):
"""Applies sparse addition to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
add = ref.scatter_nd_add(indices, updates)
with tf.Session() as sess:
print sess.run(add)
```
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def scatter_nd_update(self, indices, updates, name=None):
"""Applies sparse assignment to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_assign(indices, updates)
with tf.Session() as sess:
print sess.run(op)
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
raise NotImplementedError
def count_up_to(self, limit):
"""Increments this variable until it reaches `limit`.
When that Op is run it tries to increment the variable by `1`. If
incrementing the variable would bring it above `limit` then the Op raises
the exception `OutOfRangeError`.
If no error is raised, the Op outputs the value of the variable before
the increment.
This is essentially a shortcut for `count_up_to(self, limit)`.
Args:
limit: value at which incrementing the variable raises an error.
Returns:
A `Tensor` that will hold the variable value before the increment. If no
other Op modifies this variable, the values produced will all be
distinct.
"""
raise NotImplementedError
def load(self, value, session=None):
"""Load new value into this variable.
Writes new value to variable's memory. Doesn't add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used. See `tf.Session` for more
information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
v.load([2, 3], sess)
print(v.eval(sess)) # prints [2 3]
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
v.load([3, 4], sess)
print(v.eval()) # prints [3 4]
```
Args:
value: New variable value
session: The session to use to evaluate this variable. If
none, the default session is used.
Raises:
ValueError: Session is not passed and no default session
"""
raise NotImplementedError
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
"""Utility function for converting a Variable to a Tensor."""
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
return v._ref() # pylint: disable=protected-access
else:
return v.value()
  @classmethod
  def _OverloadAllOperators(cls):  # pylint: disable=invalid-name
    """Register overloads for all operators.

    Copies every operator named in `ops.Tensor.OVERLOADABLE_OPERATORS`
    onto the class via `_OverloadOperator`, then binds `__getitem__`
    separately so that slicing a variable uses the variable-aware slice
    helper rather than the plain tensor one.
    """
    for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
      cls._OverloadOperator(operator)
    # For slicing, bind getitem differently than a tensor (use SliceHelperVar
    # instead)
    # pylint: disable=protected-access
    setattr(cls, "__getitem__", array_ops._SliceHelperVar)
@classmethod
def _OverloadOperator(cls, operator): # pylint: disable=invalid-name
"""Defer an operator overload to `ops.Tensor`.
We pull the operator out of ops.Tensor dynamically to avoid ordering issues.
Args:
operator: string. The operator name.
"""
tensor_oper = getattr(ops.Tensor, operator)
def _run_op(a, *args, **kwargs):
# pylint: disable=protected-access
return tensor_oper(a._AsTensor(), *args, **kwargs)
functools.update_wrapper(_run_op, tensor_oper)
setattr(cls, operator, _run_op)
def __iter__(self):
"""Dummy method to prevent iteration. Do not call.
NOTE(mrry): If we register __getitem__ as an overloaded operator,
Python will valiantly attempt to iterate over the variable's Tensor from 0
to infinity. Declaring this method prevents this unintended behavior.
Raises:
TypeError: when invoked.
"""
raise TypeError("'Variable' object is not iterable.")
# NOTE(mrry): This enables the Variable's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Variable class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Variables interact
# with ndarrays.
__array_priority__ = 100
  # The following read-only properties form the abstract metadata interface
  # of a variable; each raises NotImplementedError here and must be
  # overridden by concrete implementations (e.g. RefVariable below).
  @property
  def name(self):
    """The name of this variable."""
    raise NotImplementedError
  @property
  def initializer(self):
    """The initializer operation for this variable."""
    raise NotImplementedError
  @property
  def device(self):
    """The device of this variable."""
    raise NotImplementedError
  @property
  def dtype(self):
    """The `DType` of this variable."""
    raise NotImplementedError
  @property
  def op(self):
    """The `Operation` of this variable."""
    raise NotImplementedError
  @property
  def graph(self):
    """The `Graph` of this variable."""
    raise NotImplementedError
  @property
  def shape(self):
    """The `TensorShape` of this variable.
    Returns:
      A `TensorShape`.
    """
    raise NotImplementedError
  def get_shape(self):
    """Alias of Variable.shape."""
    raise NotImplementedError
def to_proto(self, export_scope=None):
"""Converts a `Variable` to a `VariableDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `VariableDef` protocol buffer, or `None` if the `Variable` is not
in the specified name scope.
"""
raise NotImplementedError
  @staticmethod
  def from_proto(variable_def, import_scope=None):
    """Returns a `Variable` object created from `variable_def`."""
    # Deserialization always yields the ref-based implementation; see
    # RefVariable._init_from_proto for how graph elements are looked up.
    return RefVariable(variable_def=variable_def,
                       import_scope=import_scope)
class SaveSliceInfo(object):
"""Information on how to save this Variable as a slice.
Provides internal support for saving variables as slices of a larger
variable. This API is not public and is subject to change.
Available properties:
* full_name
* full_shape
* var_offset
* var_shape
"""
def __init__(self,
full_name=None,
full_shape=None,
var_offset=None,
var_shape=None,
save_slice_info_def=None,
import_scope=None):
"""Create a `SaveSliceInfo`.
Args:
full_name: Name of the full variable of which this `Variable` is a
slice.
full_shape: Shape of the full variable, as a list of int.
var_offset: Offset of this `Variable` into the full variable, as a
list of int.
var_shape: Shape of this `Variable`, as a list of int.
save_slice_info_def: `SaveSliceInfoDef` protocol buffer. If not `None`,
recreates the SaveSliceInfo object its contents.
`save_slice_info_def` and other arguments are mutually
exclusive.
import_scope: Optional `string`. Name scope to add. Only used
when initializing from protocol buffer.
"""
if save_slice_info_def:
assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
self.full_name = ops.prepend_name_scope(
save_slice_info_def.full_name, import_scope=import_scope)
self.full_shape = [i for i in save_slice_info_def.full_shape]
self.var_offset = [i for i in save_slice_info_def.var_offset]
self.var_shape = [i for i in save_slice_info_def.var_shape]
else:
self.full_name = full_name
self.full_shape = full_shape
self.var_offset = var_offset
self.var_shape = var_shape
@property
def spec(self):
"""Computes the spec string used for saving."""
full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
sl_spec = ":".join([
"%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
])
return full_shape_str + sl_spec
def to_proto(self, export_scope=None):
"""Returns a SaveSliceInfoDef() proto.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `SaveSliceInfoDef` protocol buffer, or None if the `Variable` is not
in the specified name scope.
"""
if (export_scope is None or
self.full_name.startswith(export_scope)):
save_slice_info_def = variable_pb2.SaveSliceInfoDef()
save_slice_info_def.full_name = ops.strip_name_scope(
self.full_name, export_scope)
for i in self.full_shape:
save_slice_info_def.full_shape.append(i)
for i in self.var_offset:
save_slice_info_def.var_offset.append(i)
for i in self.var_shape:
save_slice_info_def.var_shape.append(i)
return save_slice_info_def
else:
return None
  # Augmented assignment (+=, -=, *=, /=, **=) is deliberately unsupported
  # on the abstract base class; each stub raises rather than letting Python
  # fall back to rebinding the name via the non-in-place operator.
  def __iadd__(self, other):
    raise NotImplementedError
  def __isub__(self, other):
    raise NotImplementedError
  def __imul__(self, other):
    raise NotImplementedError
  def __idiv__(self, other):
    raise NotImplementedError
  def __itruediv__(self, other):
    raise NotImplementedError
  def __irealdiv__(self, other):
    raise NotImplementedError
  def __ipow__(self, other):
    raise NotImplementedError
@tf_export(v1=["Variable"])
class VariableV1(Variable):
  """See the [Variables Guide](https://tensorflow.org/guide/variables).
  A variable maintains state in the graph across calls to `run()`. You add a
  variable to the graph by constructing an instance of the class `Variable`.
  The `Variable()` constructor requires an initial value for the variable,
  which can be a `Tensor` of any type and shape. The initial value defines the
  type and shape of the variable. After construction, the type and shape of
  the variable are fixed. The value can be changed using one of the assign
  methods.
  If you want to change the shape of a variable later you have to use an
  `assign` Op with `validate_shape=False`.
  Just like any `Tensor`, variables created with `Variable()` can be used as
  inputs for other Ops in the graph. Additionally, all the operators
  overloaded for the `Tensor` class are carried over to variables, so you can
  also add nodes to the graph by just doing arithmetic on variables.
  ```python
  import tensorflow as tf
  # Create a variable.
  w = tf.Variable(<initial-value>, name=<optional-name>)
  # Use the variable in the graph like any Tensor.
  y = tf.matmul(w, ...another variable or tensor...)
  # The overloaded operators are available too.
  z = tf.sigmoid(w + y)
  # Assign a new value to the variable with `assign()` or a related method.
  w.assign(w + 1.0)
  w.assign_add(1.0)
  ```
  When you launch the graph, variables have to be explicitly initialized before
  you can run Ops that use their value. You can initialize a variable by
  running its *initializer op*, restoring the variable from a save file, or
  simply running an `assign` Op that assigns a value to the variable. In fact,
  the variable *initializer op* is just an `assign` Op that assigns the
  variable's initial value to the variable itself.
  ```python
  # Launch the graph in a session.
  with tf.Session() as sess:
      # Run the variable initializer.
      sess.run(w.initializer)
      # ...you now can run ops that use the value of 'w'...
  ```
  The most common initialization pattern is to use the convenience function
  `global_variables_initializer()` to add an Op to the graph that initializes
  all the variables. You then run that Op after launching the graph.
  ```python
  # Add an Op to initialize global variables.
  init_op = tf.global_variables_initializer()
  # Launch the graph in a session.
  with tf.Session() as sess:
      # Run the Op that initializes global variables.
      sess.run(init_op)
      # ...you can now run any Op that uses variable values...
  ```
  If you need to create a variable with an initial value dependent on another
  variable, use the other variable's `initialized_value()`. This ensures that
  variables are initialized in the right order.
  All variables are automatically collected in the graph where they are
  created. By default, the constructor adds the new variable to the graph
  collection `GraphKeys.GLOBAL_VARIABLES`. The convenience function
  `global_variables()` returns the contents of that collection.
  When building a machine learning model it is often convenient to distinguish
  between variables holding the trainable model parameters and other variables
  such as a `global step` variable used to count training steps. To make this
  easier, the variable constructor supports a `trainable=<bool>` parameter. If
  `True`, the new variable is also added to the graph collection
  `GraphKeys.TRAINABLE_VARIABLES`. The convenience function
  `trainable_variables()` returns the contents of this collection. The
  various `Optimizer` classes use this collection as the default list of
  variables to optimize.
  WARNING: tf.Variable objects by default have a non-intuitive memory model. A
  Variable is represented internally as a mutable Tensor which can
  non-deterministically alias other Tensors in a graph. The set of operations
  which consume a Variable and can lead to aliasing is undetermined and can
  change across TensorFlow versions. Avoid writing code which relies on the
  value of a Variable either changing or not changing as other operations
  happen. For example, using Variable objects or simple functions thereof as
  predicates in a `tf.cond` is dangerous and error-prone:
  ```
  v = tf.Variable(True)
  tf.cond(v, lambda: v.assign(False), my_false_fn)  # Note: this is broken.
  ```
  Here replacing adding `use_resource=True` when constructing the variable will
  fix any nondeterminism issues:
  ```
  v = tf.Variable(True, use_resource=True)
  tf.cond(v, lambda: v.assign(False), my_false_fn)
  ```
  To use the replacement for variables which does
  not have these issues:
  * Add `use_resource=True` when constructing `tf.Variable`;
  * Call `tf.get_variable_scope().set_use_resource(True)` inside a
    `tf.variable_scope` before the `tf.get_variable()` call.
  """
  def __init__(self,  # pylint: disable=super-init-not-called
               initial_value=None,
               trainable=True,
               collections=None,
               validate_shape=True,
               caching_device=None,
               name=None,
               variable_def=None,
               dtype=None,
               expected_shape=None,
               import_scope=None,
               constraint=None,
               use_resource=None,
               synchronization=VariableSynchronization.AUTO,
               aggregation=VariableAggregation.NONE):
    """Creates a new variable with value `initial_value`.
    The new variable is added to the graph collections listed in `collections`,
    which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    If `trainable` is `True` the variable is also added to the graph collection
    `GraphKeys.TRAINABLE_VARIABLES`.
    This constructor creates both a `variable` Op and an `assign` Op to set the
    variable to its initial value.
    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called. In
        that case, `dtype` must be specified. (Note that initializer functions
        from init_ops.py must first be bound to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string describing where the Variable
        should be cached for reading. Defaults to the Variable's device.
        If not `None`, caches on another device. Typical use is to cache
        on the device where the Ops using the Variable reside, to deduplicate
        copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      variable_def: `VariableDef` protocol buffer. If not `None`, recreates
        the Variable object with its contents, referencing the variable's nodes
        in the graph, which must already exist. The graph is not changed.
        `variable_def` and the other arguments are mutually exclusive.
      dtype: If set, initial_value will be converted to the given type.
        If `None`, either the datatype will be kept (if `initial_value` is
        a Tensor), or `convert_to_tensor` will decide.
      expected_shape: A TensorShape. If set, initial_value is expected
        to have this shape.
      import_scope: Optional `string`. Name scope to add to the
        `Variable.` Only used when initializing from protocol buffer.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
      use_resource: whether to use resource variables.
      synchronization: unused
      aggregation: unused
    Raises:
      ValueError: If both `variable_def` and initial_value are specified.
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If eager execution is enabled.
    """
  # NOTE(review): `__init__` intentionally has no body beyond its docstring;
  # instance construction is presumably intercepted elsewhere (e.g. by a
  # metaclass on `Variable`) — confirm before relying on this constructor
  # for side effects.
  # Reuse the base class's slice-saving helper unchanged.
  SaveSliceInfo = Variable.SaveSliceInfo
# TODO(apassos): do not repeat all comments here
class RefVariable(VariableV1):
"""Ref-based implementation of variables."""
def __init__(self, # pylint: disable=super-init-not-called
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
expected_shape=None,
import_scope=None,
constraint=None):
"""Creates a new variable with value `initial_value`.
The new variable is added to the graph collections listed in `collections`,
which defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
If `trainable` is `True` the variable is also added to the graph collection
`GraphKeys.TRAINABLE_VARIABLES`.
This constructor creates both a `variable` Op and an `assign` Op to set the
variable to its initial value.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called. In
that case, `dtype` must be specified. (Note that initializer functions
from init_ops.py must first be bound to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: If `False`, allows the variable to be initialized with a
value of unknown shape. If `True`, the default, the shape of
`initial_value` must be known.
caching_device: Optional device string describing where the Variable
should be cached for reading. Defaults to the Variable's device.
If not `None`, caches on another device. Typical use is to cache
on the device where the Ops using the Variable reside, to deduplicate
copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
variable_def: `VariableDef` protocol buffer. If not `None`, recreates
the Variable object with its contents, referencing the variable's nodes
in the graph, which must already exist. The graph is not changed.
`variable_def` and the other arguments are mutually exclusive.
dtype: If set, initial_value will be converted to the given type.
If `None`, either the datatype will be kept (if `initial_value` is
a Tensor), or `convert_to_tensor` will decide.
expected_shape: A TensorShape. If set, initial_value is expected
to have this shape.
import_scope: Optional `string`. Name scope to add to the
`Variable.` Only used when initializing from protocol buffer.
constraint: An optional projection function to be applied to the variable
after being updated by an `Optimizer` (e.g. used to implement norm
constraints or value constraints for layer weights). The function must
take as input the unprojected Tensor representing the value of the
variable and return the Tensor for the projected value
(which must have the same shape). Constraints are not safe to
use when doing asynchronous distributed training.
Raises:
ValueError: If both `variable_def` and initial_value are specified.
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
RuntimeError: If eager execution is enabled.
"""
self._in_graph_mode = True
if variable_def:
# If variable_def is provided, recreates the variable from its fields.
if initial_value:
raise ValueError("variable_def and initial_value are mutually "
"exclusive.")
self._init_from_proto(variable_def, import_scope=import_scope)
else:
# Create from initial_value.
self._init_from_args(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
expected_shape=expected_shape,
constraint=constraint)
def __repr__(self):
if context.executing_eagerly() and not self._in_graph_mode:
return "<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>" % (
self.name, self.get_shape(), self.dtype.name,
ops.numpy_text(self.read_value(), is_repr=True))
else:
return "<tf.Variable '%s' shape=%s dtype=%s>" % (
self.name, self.get_shape(), self.dtype.name)
  def _init_from_args(self,
                      initial_value=None,
                      trainable=True,
                      collections=None,
                      validate_shape=True,
                      caching_device=None,
                      name=None,
                      dtype=None,
                      expected_shape=None,
                      constraint=None):
    """Creates a new variable from arguments.
    Args:
      initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
        which is the initial value for the Variable. The initial value must have
        a shape specified unless `validate_shape` is set to False. Can also be a
        callable with no argument that returns the initial value when called.
        (Note that initializer functions from init_ops.py must first be bound
         to a shape before being used here.)
      trainable: If `True`, the default, also adds the variable to the graph
        collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
        the default list of variables to use by the `Optimizer` classes.
      collections: List of graph collections keys. The new variable is added to
        these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
      validate_shape: If `False`, allows the variable to be initialized with a
        value of unknown shape. If `True`, the default, the shape of
        `initial_value` must be known.
      caching_device: Optional device string or function describing where the
        Variable should be cached for reading. Defaults to the Variable's
        device. If not `None`, caches on another device. Typical use is to
        cache on the device where the Ops using the Variable reside, to
        deduplicate copying through `Switch` and other conditional statements.
      name: Optional name for the variable. Defaults to `'Variable'` and gets
        uniquified automatically.
      dtype: If set, initial_value will be converted to the given type.
        If None, either the datatype will be kept (if initial_value is
        a Tensor) or float32 will be used (if it is a Python object convertible
        to a Tensor).
      expected_shape: Deprecated. Ignored.
      constraint: An optional projection function to be applied to the variable
        after being updated by an `Optimizer` (e.g. used to implement norm
        constraints or value constraints for layer weights). The function must
        take as input the unprojected Tensor representing the value of the
        variable and return the Tensor for the projected value
        (which must have the same shape). Constraints are not safe to
        use when doing asynchronous distributed training.
    Raises:
      ValueError: If the initial value is not specified, or does not have a
        shape and `validate_shape` is `True`.
      RuntimeError: If lifted into the eager context.
    """
    # expected_shape is deprecated and deliberately ignored (see docstring).
    _ = expected_shape
    if initial_value is None:
      raise ValueError("initial_value must be specified.")
    init_from_fn = callable(initial_value)
    if collections is None:
      collections = [ops.GraphKeys.GLOBAL_VARIABLES]
    if not isinstance(collections, (list, tuple, set)):
      raise ValueError(
          "collections argument to Variable constructor must be a list, tuple, "
          "or set. Got %s of type %s" % (collections, type(collections)))
    if constraint is not None and not callable(constraint):
      raise ValueError("The `constraint` argument must be a callable.")
    # Store the graph key so optimizers know how to only retrieve variables from
    # this graph.
    self._graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
    # Unwrap checkpoint-restored initial values, recording the restore UID so
    # the checkpointing machinery can track this variable's update.
    if isinstance(initial_value, checkpointable.CheckpointInitialValue):
      self._maybe_initialize_checkpointable()
      self._update_uid = initial_value.checkpoint_position.restore_uid
      initial_value = initial_value.wrapped_value
    self._trainable = trainable
    if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
      collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
    with ops.init_scope():
      # Ensure that we weren't lifted into the eager context.
      if context.executing_eagerly():
        raise RuntimeError(
            "RefVariable not supported when eager execution is enabled. ")
      with ops.name_scope(name, "Variable", [] if init_from_fn else
                          [initial_value]) as name:
        if init_from_fn:
          # Use attr_scope and device(None) to simulate the behavior of
          # colocate_with when the variable we want to colocate with doesn't
          # yet exist.
          true_name = ops._name_from_scope_name(name)  # pylint: disable=protected-access
          attr = attr_value_pb2.AttrValue(
              list=attr_value_pb2.AttrValue.ListValue(
                  s=[compat.as_bytes("loc:@%s" % true_name)]))
          # pylint: disable=protected-access
          with ops.get_default_graph()._attr_scope({"_class": attr}):
            with ops.name_scope("Initializer"), ops.device(None):
              # The callable is evaluated here, inside the temporary
              # device/colocation scope.
              self._initial_value = ops.convert_to_tensor(
                  initial_value(), name="initial_value", dtype=dtype)
              shape = (self._initial_value.get_shape()
                       if validate_shape else tensor_shape.unknown_shape())
            self._variable = state_ops.variable_op_v2(
                shape,
                self._initial_value.dtype.base_dtype,
                name=name)
          # pylint: enable=protected-access
        # Or get the initial value from a Tensor or Python object.
        else:
          self._initial_value = ops.convert_to_tensor(
              initial_value, name="initial_value", dtype=dtype)
          # pylint: disable=protected-access
          if self._initial_value.op._get_control_flow_context() is not None:
            raise ValueError(
                "Initializer for variable %s is from inside a control-flow "
                "construct, such as a loop or conditional. When creating a "
                "variable inside a loop or conditional, use a lambda as the "
                "initializer." % name)
          # pylint: enable=protected-access
          shape = (self._initial_value.get_shape()
                   if validate_shape else tensor_shape.unknown_shape())
          # In this case, the variable op can't be created until after the
          # initial_value has been converted to a Tensor with a known type.
          self._variable = state_ops.variable_op_v2(
              shape,
              self._initial_value.dtype.base_dtype,
              name=name)
        # Manually overrides the variable's shape with the initial value's.
        if validate_shape:
          initial_value_shape = self._initial_value.get_shape()
          if not initial_value_shape.is_fully_defined():
            raise ValueError("initial_value must have a shape specified: %s" %
                             self._initial_value)
        # If 'initial_value' makes use of other variables, make sure we don't
        # have an issue if these other variables aren't initialized first by
        # using their initialized_value() method.
        self._initializer_op = state_ops.assign(
            self._variable,
            self._try_guard_against_uninitialized_dependencies(
                self._initial_value),
            validate_shape=validate_shape).op
        # TODO(vrv): Change this class to not take caching_device, but
        # to take the op to colocate the snapshot with, so we can use
        # colocation rather than devices.
        if caching_device is not None:
          with ops.device(caching_device):
            self._snapshot = array_ops.identity(self._variable, name="read")
        else:
          with ops.colocate_with(self._variable.op):
            self._snapshot = array_ops.identity(self._variable, name="read")
      ops.add_to_collections(collections, self)
    # Cache constructor arguments needed after construction.
    self._caching_device = caching_device
    self._save_slice_info = None
    self._constraint = constraint
  def _init_from_proto(self, variable_def, import_scope=None):
    """Recreates the Variable object from a `VariableDef` protocol buffer.

    Only looks up references to graph elements that must already exist in
    the default graph; no new ops are added.

    Args:
      variable_def: `VariableDef` protocol buffer, describing a variable
        whose nodes already exists in the graph.
      import_scope: Optional `string`. Name scope to add.
    """
    assert isinstance(variable_def, variable_pb2.VariableDef)
    # Create from variable_def.
    g = ops.get_default_graph()
    self._variable = g.as_graph_element(
        ops.prepend_name_scope(variable_def.variable_name,
                               import_scope=import_scope))
    self._initializer_op = g.as_graph_element(
        ops.prepend_name_scope(variable_def.initializer_name,
                               import_scope=import_scope))
    # Tests whether initial_value_name exists first for backwards compatibility.
    if (hasattr(variable_def, "initial_value_name") and
        variable_def.initial_value_name):
      self._initial_value = g.as_graph_element(
          ops.prepend_name_scope(variable_def.initial_value_name,
                                 import_scope=import_scope))
    else:
      self._initial_value = None
    self._trainable = getattr(variable_def, "trainable", True)
    self._snapshot = g.as_graph_element(
        ops.prepend_name_scope(variable_def.snapshot_name,
                               import_scope=import_scope))
    if variable_def.HasField("save_slice_info_def"):
      self._save_slice_info = Variable.SaveSliceInfo(
          save_slice_info_def=variable_def.save_slice_info_def,
          import_scope=import_scope)
    else:
      self._save_slice_info = None
    # Caching device and constraint are not serialized in the proto.
    self._caching_device = None
    self._constraint = None
  def _as_graph_element(self):
    """Conversion function for Graph.as_graph_element().

    Returns:
      The underlying variable ref tensor.
    """
    return self._variable
  def _AsTensor(self):  # pylint: disable=invalid-name
    """Converts this variable to a Tensor.

    See `tf.Variable.value`.

    Returns:
      A `Tensor` containing the value of the variable (the cached snapshot).
    """
    return self._snapshot
  def value(self):
    """Returns the last snapshot of this variable.

    You usually do not need to call this method as all ops that need the value
    of the variable call it automatically through a `convert_to_tensor()` call.

    Returns a `Tensor` which holds the value of the variable. You can not
    assign a new value to this tensor as it is not a reference to the variable.

    To avoid copies, if the consumer of the returned value is on the same device
    as the variable, this actually returns the live value of the variable, not
    a copy. Updates to the variable are seen by the consumer. If the consumer
    is on a different device it will get a copy of the variable.

    Returns:
      A `Tensor` containing the value of the variable.
    """
    return self._snapshot
  def read_value(self):
    """Returns the value of this variable, read in the current context.

    Can be different from value() if it's on another device, with control
    dependencies, etc.

    Returns:
      A `Tensor` containing the value of the variable.
    """
    # Unlike value(), which returns the cached snapshot, this creates a fresh
    # identity read in the current device/control-dependency context.
    return array_ops.identity(self._variable, name="read")
  def _ref(self):
    """Returns a reference to this variable.

    You usually do not need to call this method as all ops that need a reference
    to the variable call it automatically.

    Returns is a `Tensor` which holds a reference to the variable. You can
    assign a new value to the variable by passing the tensor to an assign op.
    See `tf.Variable.value` if you want to get the value of the
    variable.

    Returns:
      A `Tensor` that is a reference to the variable.
    """
    return self._variable
def set_shape(self, shape):
"""Overrides the shape for this variable.
Args:
shape: the `TensorShape` representing the overridden shape.
"""
self._ref().set_shape(shape)
self.value().set_shape(shape)
  @property
  def trainable(self):
    """Returns `True` if this variable was marked trainable at construction."""
    return self._trainable
  def eval(self, session=None):
    """In a session, computes and returns the value of this variable.

    This is not a graph construction method, it does not add ops to the graph.

    This convenience method requires a session where the graph
    containing this variable has been launched. If no session is
    passed, the default session is used. See `tf.Session` for more
    information on launching a graph and on sessions.

    ```python
    v = tf.Variable([1, 2])
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # Usage passing the session explicitly.
        print(v.eval(sess))
        # Usage with the default session. The 'with' block
        # above makes 'sess' the default session.
        print(v.eval())
    ```

    Args:
      session: The session to use to evaluate this variable. If
        none, the default session is used.

    Returns:
      A numpy `ndarray` with a copy of the value of this variable.
    """
    # Delegates to Tensor.eval on the variable ref.
    return self._variable.eval(session=session)
  def initialized_value(self):
    """Returns the value of the initialized variable.

    You should use this instead of the variable itself to initialize another
    variable with a value that depends on the value of this variable.

    ```python
    # Initialize 'v' with a random tensor.
    v = tf.Variable(tf.truncated_normal([10, 40]))
    # Use `initialized_value` to guarantee that `v` has been
    # initialized before its value is used to initialize `w`.
    # The random values are picked only once.
    w = tf.Variable(v.initialized_value() * 2.0)
    ```

    Returns:
      A `Tensor` holding the value of this variable after its initializer
      has run.
    """
    # NOTE(review): init_scope presumably lifts the cond out of any
    # enclosing function-building graph — confirm before changing.
    with ops.init_scope():
      return control_flow_ops.cond(is_variable_initialized(self),
                                   self.read_value,
                                   lambda: self.initial_value)
  @property
  def initial_value(self):
    """Returns the Tensor used as the initial value for the variable.

    Note that this is different from `initialized_value()` which runs
    the op that initializes the variable before returning its value.
    This method returns the tensor that is used by the op that initializes
    the variable.

    Returns:
      A `Tensor`.
    """
    return self._initial_value
  @property
  def constraint(self):
    """Returns the constraint function associated with this variable.

    Returns:
      The constraint function that was passed to the variable constructor.
      Can be `None` if no constraint was passed.
    """
    return self._constraint
def assign(self, value, use_locking=False, name=None, read_value=True):
"""Assigns a new value to the variable.
This is essentially a shortcut for `assign(self, value)`.
Args:
value: A `Tensor`. The new value for this variable.
use_locking: If `True`, use locking during the assignment.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the
new value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the assignment has completed.
"""
assign = state_ops.assign(self._variable, value, use_locking=use_locking,
name=name)
if read_value:
return assign
return assign.op
def assign_add(self, delta, use_locking=False, name=None, read_value=True):
"""Adds a value to this variable.
This is essentially a shortcut for `assign_add(self, delta)`.
Args:
delta: A `Tensor`. The value to add to this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the
new value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the addition has completed.
"""
assign = state_ops.assign_add(
self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
"""Subtracts a value from this variable.
This is essentially a shortcut for `assign_sub(self, delta)`.
Args:
delta: A `Tensor`. The value to subtract from this variable.
use_locking: If `True`, use locking during the operation.
name: The name of the operation to be created
read_value: if True, will return something which evaluates to the
new value of the variable; if False will return the assign op.
Returns:
A `Tensor` that will hold the new value of this variable after
the subtraction has completed.
"""
assign = state_ops.assign_sub(
self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
"""Subtracts `IndexedSlices` from this variable.
Args:
sparse_delta: `IndexedSlices` to be subtracted from this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return gen_state_ops.scatter_sub(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
"""Adds `IndexedSlices` from this variable.
Args:
sparse_delta: `IndexedSlices` to be added to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return gen_state_ops.scatter_add(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
"""Assigns `IndexedSlices` to this variable.
Args:
sparse_delta: `IndexedSlices` to be assigned to this variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return gen_state_ops.scatter_update(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name)
def scatter_nd_sub(self, indices, updates, name=None):
"""Applies sparse subtraction to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_sub(indices, updates)
with tf.Session() as sess:
print sess.run(op)
```
The resulting update to ref would look like this:
[1, -9, 3, -6, -6, 6, 7, -4]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
return gen_state_ops.scatter_nd_sub(
self._variable, indices, updates, use_locking=True, name=name)
def scatter_nd_add(self, indices, updates, name=None):
"""Applies sparse addition to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
add = ref.scatter_nd_add(indices, updates)
with tf.Session() as sess:
print sess.run(add)
```
The resulting update to ref would look like this:
[1, 13, 3, 14, 14, 6, 7, 20]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
return gen_state_ops.scatter_nd_add(
self._variable, indices, updates, use_locking=True, name=name)
def scatter_nd_update(self, indices, updates, name=None):
"""Applies sparse assignment to individual values or slices in a Variable.
`ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`.
`indices` must be integer tensor, containing indices into `ref`.
It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`.
The innermost dimension of `indices` (with length `K`) corresponds to
indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th
dimension of `ref`.
`updates` is `Tensor` of rank `Q-1+P-K` with shape:
```
[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
```
For example, say we want to add 4 scattered elements to a rank-1 tensor to
8 elements. In Python, that update would look like this:
```python
ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
indices = tf.constant([[4], [3], [1] ,[7]])
updates = tf.constant([9, 10, 11, 12])
op = ref.scatter_nd_update(indices, updates)
with tf.Session() as sess:
print sess.run(op)
```
The resulting update to ref would look like this:
[1, 11, 3, 10, 9, 6, 7, 12]
See `tf.scatter_nd` for more details about how to make updates to
slices.
Args:
indices: The indices to be used in the operation.
updates: The values to be used in the operation.
name: the name of the operation.
Returns:
A `Tensor` that will hold the new value of this variable after
the scattered subtraction has completed.
Raises:
ValueError: if `sparse_delta` is not an `IndexedSlices`.
"""
return gen_state_ops.scatter_nd_update(
self._variable, indices, updates, use_locking=True, name=name)
def _strided_slice_assign(self,
begin,
end,
strides,
value,
name,
begin_mask,
end_mask,
ellipsis_mask,
new_axis_mask,
shrink_axis_mask):
return gen_array_ops.strided_slice_assign(ref=self._ref(),
begin=begin,
end=end,
strides=strides,
value=value,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
def count_up_to(self, limit):
"""Increments this variable until it reaches `limit`.
When that Op is run it tries to increment the variable by `1`. If
incrementing the variable would bring it above `limit` then the Op raises
the exception `OutOfRangeError`.
If no error is raised, the Op outputs the value of the variable before
the increment.
This is essentially a shortcut for `count_up_to(self, limit)`.
Args:
limit: value at which incrementing the variable raises an error.
Returns:
A `Tensor` that will hold the variable value before the increment. If no
other Op modifies this variable, the values produced will all be
distinct.
"""
return state_ops.count_up_to(self._variable, limit=limit)
def load(self, value, session=None):
"""Load new value into this variable.
Writes new value to variable's memory. Doesn't add ops to the graph.
This convenience method requires a session where the graph
containing this variable has been launched. If no session is
passed, the default session is used. See `tf.Session` for more
information on launching a graph and on sessions.
```python
v = tf.Variable([1, 2])
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
# Usage passing the session explicitly.
v.load([2, 3], sess)
print(v.eval(sess)) # prints [2 3]
# Usage with the default session. The 'with' block
# above makes 'sess' the default session.
v.load([3, 4], sess)
print(v.eval()) # prints [3 4]
```
Args:
value: New variable value
session: The session to use to evaluate this variable. If
none, the default session is used.
Raises:
ValueError: Session is not passed and no default session
"""
if context.executing_eagerly():
self.assign(value)
else:
session = session or ops.get_default_session()
if session is None:
raise ValueError(
"Either session argument should be provided or default session "
"should be established")
session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
"""Utility function for converting a Variable to a Tensor."""
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
return v._ref() # pylint: disable=protected-access
else:
return v.value()
  def _gather_saveables_for_checkpoint(self):
    """For implementing `Checkpointable`. This object is saveable on its own."""
    # Map the canonical value key to this variable itself; no slot or
    # auxiliary objects are involved.
    return {checkpointable.VARIABLE_VALUE_KEY: self}
def _try_guard_against_uninitialized_dependencies(self, initial_value):
"""Attempt to guard against dependencies on uninitialized variables.
Replace references to variables in `initial_value` with references to the
variable's initialized values. The initialized values are essentially
conditional TensorFlow graphs that return a variable's value if it is
initialized or its `initial_value` if it hasn't been initialized. This
replacement is done on a best effort basis:
- If the `initial_value` graph contains cycles, we don't do any
replacements for that graph.
- If the variables that `initial_value` depends on are not present in the
`GLOBAL_VARIABLES` or `LOCAL_VARIABLES` we don't replace them.
In these cases, it is up to the caller to ensure that the `initial_value`
graph uses initialized variables or that they guard access to variables
using their `initialized_value` method.
Args:
initial_value: `Tensor`. The initial value.
Returns:
A `Tensor` suitable to initialize a variable.
Raises:
TypeError: If `initial_value` is not a `Tensor`.
"""
if not isinstance(initial_value, ops.Tensor):
raise TypeError("initial_value needs to be a Tensor: %s" % initial_value)
# Don't modify initial_value if it contains any cyclic dependencies.
if _has_cycle(initial_value.op, path=set()):
return initial_value
return self._safe_initial_value_from_tensor(initial_value, op_cache={})
def _safe_initial_value_from_tensor(self, tensor, op_cache):
"""Replace dependencies on variables with their initialized values.
Args:
tensor: A `Tensor`. The tensor to replace.
op_cache: A dict mapping operation names to `Operation`s. Used to memoize
the results so as to avoid creating redundant operations.
Returns:
A `Tensor` compatible with `tensor`. Any inputs that lead to variable
values will be replaced with a corresponding graph that uses the
variable's initialized values. This is done on a best-effort basis. If no
modifications need to be made then `tensor` will be returned unchanged.
"""
op = tensor.op
new_op = op_cache.get(op.name)
if new_op is None:
new_op = self._safe_initial_value_from_op(op, op_cache)
op_cache[op.name] = new_op
return new_op.outputs[tensor.value_index]
  def _safe_initial_value_from_op(self, op, op_cache):
    """Replace dependencies on variables with their initialized values.

    Args:
      op: An `Operation`. The operation to replace.
      op_cache: A dict mapping operation names to `Operation`s. Used to
        memoize the results so as to avoid creating redundant operations.

    Returns:
      An `Operation` compatible with `op`. Any inputs that lead to variable
      values will be replaced with a corresponding graph that uses the
      variable's initialized values. This is done on a best-effort basis. If
      no modifications need to be made then `op` will be returned unchanged.
    """
    op_type = op.node_def.op
    # Ops that already observe initialization state are safe to keep as-is.
    if op_type in ("IsVariableInitialized", "VarIsInitializedOp",
                   "ReadVariableOp"):
      return op

    # Attempt to find the initialized_value of any variable reference / handles.
    # TODO(b/70206927): Fix handling of ResourceVariables.
    if op_type in ("Variable", "VariableV2", "VarHandleOp"):
      initialized_value = self._find_initialized_value_for_variable(op)
      return op if initialized_value is None else initialized_value.op

    # Recursively build initializer expressions for inputs.
    modified = False
    new_op_inputs = []
    for op_input in op.inputs:
      new_op_input = self._safe_initial_value_from_tensor(op_input, op_cache)
      new_op_inputs.append(new_op_input)
      modified = modified or (new_op_input != op_input)

    # If at least one input was modified, replace the op.
    if modified:
      new_op_type = op_type
      # A ref-typed Switch must become a value-typed Switch once its inputs
      # are replaced by (non-ref) initialized values.
      if new_op_type == "RefSwitch":
        new_op_type = "Switch"
      # Derive a unique, legal name for the replacement op (":" is not
      # allowed in op names).
      new_op_name = op.node_def.name + "_" + self.name
      new_op_name = new_op_name.replace(":", "_")
      return self.graph.create_op(
          new_op_type, new_op_inputs,
          op._output_types,  # pylint: disable=protected-access
          name=new_op_name, attrs=op.node_def.attr)

    return op
def _find_initialized_value_for_variable(self, variable_op):
"""Find the initialized value for a variable op.
To do so, lookup the variable op in the variables collection.
Args:
variable_op: A variable `Operation`.
Returns:
A `Tensor` representing the initialized value for the variable or `None`
if the initialized value could not be found.
"""
try:
var_names = [variable_op.node_def.name, variable_op.node_def.name + ":0"]
for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES):
for var in self.graph.get_collection(collection_name):
if var.name in var_names:
return var.initialized_value()
except AttributeError:
# Return None when an incomplete user-defined variable type was put in
# the collection.
return None
return None
# NOTE(mrry): This enables the Variable's overloaded "right" binary
# operators to run when the left operand is an ndarray, because it
# accords the Variable class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
# mechanism, which allows more control over how Variables interact
# with ndarrays.
__array_priority__ = 100
@property
def name(self):
"""The name of this variable."""
return self._variable.name
@property
def _shared_name(self):
"""The shared name of the variable.
Unlike name(), shared_name doesn't have ":0" suffix. It is user-specified
name with name scope prefix.
Returns:
variable name.
"""
return self.name[:-2]
@property
def initializer(self):
"""The initializer operation for this variable."""
return self._initializer_op
@property
def device(self):
"""The device of this variable."""
return self._variable.device
@property
def dtype(self):
"""The `DType` of this variable."""
return self._variable.dtype
@property
def op(self):
"""The `Operation` of this variable."""
return self._variable.op
@property
def graph(self):
"""The `Graph` of this variable."""
return self._variable.graph
@property
def shape(self):
"""The `TensorShape` of this variable.
Returns:
A `TensorShape`.
"""
return self._variable.get_shape()
def get_shape(self):
"""Alias of Variable.shape."""
return self.shape
def to_proto(self, export_scope=None):
"""Converts a `Variable` to a `VariableDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `VariableDef` protocol buffer, or `None` if the `Variable` is not
in the specified name scope.
"""
if (export_scope is None or
self._variable.name.startswith(export_scope)):
var_def = variable_pb2.VariableDef()
var_def.variable_name = ops.strip_name_scope(
self._variable.name, export_scope)
if self._initial_value is not None:
# For backwards compatibility.
var_def.initial_value_name = ops.strip_name_scope(
self._initial_value.name, export_scope)
var_def.trainable = self.trainable
var_def.initializer_name = ops.strip_name_scope(
self.initializer.name, export_scope)
var_def.snapshot_name = ops.strip_name_scope(
self._snapshot.name, export_scope)
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(
export_scope=export_scope))
return var_def
else:
return None
def __iadd__(self, other):
logging.log_first_n(
logging.WARN,
"Variable += will be deprecated. Use variable.assign_add"
" if you want assignment to the variable value or 'x = x + y'"
" if you want a new python Tensor object.", 1)
return self + other
def __isub__(self, other):
logging.log_first_n(
logging.WARN,
"Variable -= will be deprecated. Use variable.assign_sub"
" if you want assignment to the variable value or 'x = x - y'"
" if you want a new python Tensor object.", 1)
return self - other
def __imul__(self, other):
logging.log_first_n(
logging.WARN,
"Variable *= will be deprecated. Use `var.assign(var * other)`"
" if you want assignment to the variable value or `x = x * y`"
" if you want a new python Tensor object.", 1)
return self * other
def __idiv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __itruediv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __irealdiv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __ipow__(self, other):
logging.log_first_n(
logging.WARN,
"Variable **= will be deprecated. Use `var.assign(var ** other)`"
" if you want assignment to the variable value or `x = x ** y`"
" if you want a new python Tensor object.", 1)
return self ** other
  def _set_save_slice_info(self, save_slice_info):
    """Sets the slice info for this `Variable`.

    Args:
      save_slice_info: A `Variable.SaveSliceInfo` object.
    """
    self._save_slice_info = save_slice_info
  def _get_save_slice_info(self):
    """Returns the `SaveSliceInfo` set on this variable, or `None`."""
    return self._save_slice_info
class PartitionedVariable(object):
  """A container for partitioned `Variable` objects.

  @compatibility(eager) `tf.PartitionedVariable` is not compatible with
  eager execution.  Use `tf.Variable` instead which is compatible
  with both eager execution and graph construction.  See [the
  TensorFlow Eager Execution
  guide](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/contrib/eager/python/g3doc/guide.md#variables-and-optimizers)
  for details on how variables work in eager execution.
  @end_compatibility
  """

  def __init__(self, name, shape, dtype, variable_list, partitions):
    """Creates a new partitioned variable wrapper.

    Variables passed via the variable_list must contain a save_slice_info
    field.  Concatenation and iteration is in lexicographic order according
    to the var_offset property of the save_slice_info.

    Args:
      name: String. Overall name of the variables.
      shape: List of integers.  Overall shape of the variables.
      dtype: Type of the variables.
      variable_list: List of `Variable` that comprise this partitioned
        variable.
      partitions: List of integers.  Number of partitions for each dimension.

    Raises:
      TypeError: If `variable_list` is not a list of `Variable` objects, or
        `partitions` is not a list.
      ValueError: If `variable_list` is empty, or the `Variable` shape
        information does not match `shape`, or `partitions` has invalid
        values.
    """
    if not isinstance(variable_list, (list, tuple)):
      raise TypeError(
          "variable_list is not a list or tuple: %s" % variable_list)
    if not isinstance(partitions, (list, tuple)):
      raise TypeError("partitions is not a list or tuple: %s" % partitions)
    if not all(p >= 1 for p in partitions):
      raise ValueError("partition values must be positive: %s" % partitions)
    if not variable_list:
      raise ValueError("variable_list may not be empty")
    # pylint: disable=protected-access
    # These two checks are loop-invariant; run them once instead of once per
    # variable (the original `all(...)` scan inside the loop was quadratic
    # in len(variable_list)).
    if not all(v._get_save_slice_info() is not None for v in variable_list):
      raise ValueError(
          "All variables must have a save_slice_info available: %s"
          % [v.name for v in variable_list])
    if len(shape) != len(partitions):
      raise ValueError("len(shape) != len(partitions): %s vs. %s"
                       % (shape, partitions))
    # Each variable's full shape must agree with the overall shape.
    for v in variable_list:
      if v._get_save_slice_info().full_shape != shape:
        raise ValueError(
            "All variables' full shapes must match shape: %s; "
            "but full shapes were: %s"
            % (shape, str([v._get_save_slice_info().full_shape])))
    # Sort the variable_list lexicographically according to var offset value.
    self._variable_list = sorted(
        variable_list, key=lambda v: v._get_save_slice_info().var_offset)
    # pylint: enable=protected-access
    self._name = name
    self._shape = shape
    self._dtype = dtype
    self._partitions = partitions
    self._as_tensor = None

  def __iter__(self):
    """Return an iterable for accessing the underlying partition Variables."""
    return iter(self._variable_list)

  def __len__(self):
    num_partition_axes = len(self._partition_axes())
    if num_partition_axes > 1:
      raise ValueError("Cannot get a length for %d > 1 partition axes"
                       % num_partition_axes)
    return len(self._variable_list)

  def _partition_axes(self):
    """Returns the list of axes along which this variable is partitioned."""
    if all(p == 1 for p in self._partitions):
      return [0]
    else:
      return [i for i, p in enumerate(self._partitions) if p > 1]

  def _concat(self):
    """Returns the overall concatenated value as a `Tensor`.

    This is different from using the partitioned variable directly as a tensor
    (through tensor conversion and `as_tensor`) in that it creates a new set
    of operations that keeps the control dependencies from its scope.

    Returns:
      `Tensor` containing the concatenated value.
    """
    if len(self._variable_list) == 1:
      with ops.name_scope(None):
        return array_ops.identity(self._variable_list[0], name=self._name)

    partition_axes = self._partition_axes()

    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot concatenate along more than one dimension: %s.  "
          "Multi-axis partition concat is not supported" % str(partition_axes))
    partition_ix = partition_axes[0]

    with ops.name_scope(self._name + "/ConcatPartitions/"):
      concatenated = array_ops.concat(self._variable_list, partition_ix)

    with ops.name_scope(None):
      return array_ops.identity(concatenated, name=self._name)

  def as_tensor(self):
    """Returns the overall concatenated value as a `Tensor`.

    The returned tensor will not inherit the control dependencies from the
    scope where the value is used, which is similar to getting the value of
    `Variable`.

    Returns:
      `Tensor` containing the concatenated value.
    """
    with ops.control_dependencies(None):
      return self._concat()

  @staticmethod
  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
    # pylint: disable=invalid-name
    _ = name
    if dtype is not None and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      raise NotImplementedError(
          "PartitionedVariable doesn't support being used as a reference.")
    else:
      return v.as_tensor()

  @property
  def name(self):
    return self._name

  @property
  def dtype(self):
    return self._dtype

  @property
  def shape(self):
    return self.get_shape()

  def get_shape(self):
    return self._shape

  def _get_variable_list(self):
    return self._variable_list

  def _get_partitions(self):
    return self._partitions

  def _apply_assign_fn(self, assign_fn, value):
    """Applies `assign_fn` per partition, splitting `value` when needed."""
    partition_axes = self._partition_axes()
    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot do assign action along more than one dimension: %s.  "
          "Multi-axis partition assign action is not supported " %
          str(partition_axes))
    if isinstance(value, list):
      assert len(value) == len(self._variable_list)
      value_list = value
    elif isinstance(value, PartitionedVariable):
      value_list = [var_part for var_part in value]
    else:
      # A single dense value: split it to match each partition's extent
      # along the partitioned axis.
      partition_ix = partition_axes[0]
      size_splits_list = [
          tensor_shape.dimension_value(var.shape[partition_ix])
          for var in self._variable_list
      ]
      value_list = array_ops.split(value, size_splits_list, axis=partition_ix)

    op_list = [
        assign_fn(var, value_list[idx])
        for idx, var in enumerate(self._variable_list)
    ]
    return op_list

  def assign(self, value, use_locking=False, name=None, read_value=True):
    assign_fn = lambda var, r_value: var.assign(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_add(self, value, use_locking=False, name=None, read_value=True):
    assign_fn = lambda var, r_value: var.assign_add(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_sub(self, value, use_locking=False, name=None, read_value=True):
    assign_fn = lambda var, r_value: var.assign_sub(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]
@tf_export(v1=["global_variables"])
def global_variables(scope=None):
  """Returns global variables.

  Global variables are variables shared across machines in a distributed
  environment.  The `Variable()` constructor and `get_variable()` add new
  variables to the `GraphKeys.GLOBAL_VARIABLES` graph collection; this
  convenience function returns the contents of that collection.

  An alternative to global variables are local variables. See
  `tf.local_variables`.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope`
      without special tokens filters by prefix.

  Returns:
    A list of `Variable` objects.
  """
  return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)
@tf_export(v1=["all_variables"])
@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
  """See `tf.global_variables`."""
  # Deprecated alias kept for backwards compatibility.
  return global_variables()
def _all_saveable_objects(scope=None):
  """Returns all variables and `SaveableObject`s that must be checkpointed.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope`
      without special tokens filters by prefix.

  Returns:
    A list of `Variable` and `SaveableObject` to be checkpointed.
  """
  # TODO(andreasst): make this function public once things are settled.
  variables = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)
  saveables = ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope)
  return variables + saveables
@tf_export(v1=["local_variables"])
def local_variables(scope=None):
  """Returns local variables.

  Local variables are per-process variables, usually not saved/restored to
  checkpoint and used for temporary or intermediate values — for example,
  as counters for metrics computation or the number of epochs this machine
  has read data.  `tf.contrib.framework.local_variable()` adds new variables
  to `GraphKeys.LOCAL_VARIABLES`; this convenience function returns the
  contents of that collection.

  An alternative to local variables are global variables. See
  `tf.global_variables`.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope`
      without special tokens filters by prefix.

  Returns:
    A list of local `Variable` objects.
  """
  return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)
@tf_export(v1=["model_variables"])
def model_variables(scope=None):
  """Returns all variables in the MODEL_VARIABLES collection.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope`
      without special tokens filters by prefix.

  Returns:
    A list of local Variable objects.
  """
  return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)
@tf_export(v1=["trainable_variables"])
def trainable_variables(scope=None):
  """Returns all variables created with `trainable=True`.

  When passed `trainable=True`, the `Variable()` constructor automatically
  adds new variables to the `GraphKeys.TRAINABLE_VARIABLES` graph
  collection; this convenience function returns the contents of that
  collection.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope`
      without special tokens filters by prefix.

  Returns:
    A list of Variable objects.
  """
  return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)
@tf_export(v1=["moving_average_variables"])
def moving_average_variables(scope=None):
  """Returns all variables that maintain their moving averages.

  When an `ExponentialMovingAverage` object's `apply()` method is called on
  a list of variables, those variables are added to the
  `GraphKeys.MOVING_AVERAGE_VARIABLES` collection; this convenience function
  returns the contents of that collection.

  Args:
    scope: (Optional.) A string. If supplied, the resulting list is filtered
      to include only items whose `name` attribute matches `scope` using
      `re.match`. Items without a `name` attribute are never returned if a
      scope is supplied. The choice of `re.match` means that a `scope`
      without special tokens filters by prefix.

  Returns:
    A list of Variable objects.
  """
  return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)
@tf_export(v1=["initializers.variables", "variables_initializer"])
def variables_initializer(var_list, name="init"):
  """Returns an Op that initializes a list of variables.

  After you launch the graph in a session, running the returned Op runs the
  initializers of every variable in `var_list` in parallel — equivalent to
  handing the initializers to `Group()`. When `var_list` is empty (or eager
  execution is active) a runnable no-op is returned instead.

  Args:
    var_list: List of `Variable` objects to initialize.
    name: Optional name for the returned operation.

  Returns:
    An Op that runs the initializers of all the specified variables.
  """
  # Guard clause: nothing to group, or graph-mode grouping does not apply.
  if not var_list or context.executing_eagerly():
    return control_flow_ops.no_op(name=name)
  initializer_ops = [v.initializer for v in var_list]
  return control_flow_ops.group(*initializer_ops, name=name)
@tf_export(v1=["initialize_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
def initialize_variables(var_list, name="init"):
  """Deprecated alias for `tf.variables_initializer`."""
  return variables_initializer(var_list=var_list, name=name)
@tf_export(v1=["initializers.global_variables", "global_variables_initializer"])
def global_variables_initializer():
  """Returns an Op that initializes global variables.

  Shortcut for `variables_initializer(global_variables())`. Under eager
  execution a runnable no-op is returned instead.

  Returns:
    An Op that initializes global variables in the graph.
  """
  if not context.executing_eagerly():
    return variables_initializer(global_variables())
  return control_flow_ops.no_op(name="global_variables_initializer")
@tf_export(v1=["initialize_all_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
  """Deprecated alias for `tf.global_variables_initializer`."""
  return global_variables_initializer()
@tf_export(v1=["initializers.local_variables", "local_variables_initializer"])
def local_variables_initializer():
  """Returns an Op that initializes all local variables.

  Shortcut for `variables_initializer(local_variables())`. Under eager
  execution a runnable no-op is returned instead.

  Returns:
    An Op that initializes all local variables in the graph.
  """
  if not context.executing_eagerly():
    return variables_initializer(local_variables())
  return control_flow_ops.no_op(name="local_variables_initializer")
@tf_export(v1=["initialize_local_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
def initialize_local_variables():
  """Deprecated alias for `tf.local_variables_initializer`."""
  return local_variables_initializer()
@tf_export(v1=["is_variable_initialized"])
@tf_should_use.should_use_result
def is_variable_initialized(variable):
  """Tests if a variable has been initialized.

  Args:
    variable: A `Variable`.

  Returns:
    A scalar boolean Tensor: `True` if the variable has been initialized,
    `False` otherwise.
  """
  init_check = state_ops.is_variable_initialized(variable)
  return init_check
@tf_export(v1=["assert_variables_initialized"])
@tf_should_use.should_use_result
def assert_variables_initialized(var_list=None):
  """Returns an Op to check if variables are initialized.

  NOTE: This function is obsolete and will be removed in 6 months. Please
  change your implementation to use `report_uninitialized_variables()`.

  When run, the returned Op raises `FailedPreconditionError` if any of the
  variables has not yet been initialized. The check works by fetching a value
  derived from each variable (its rank), so the C++ runtime may log a message
  for an uninitialized variable; that is expected.

  Args:
    var_list: List of `Variable` objects to check. Defaults to the
      value of `global_variables().`

  Returns:
    An Op, or None if there are no variables.
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
  # Backwards compatibility: also pick up old-style variable ops found
  # directly in the graph when the collections are empty.
  if not var_list:
    var_list = [
        op.outputs[0]
        for op in ops.get_default_graph().get_operations()
        if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]
    ]
  if not var_list:
    return None
  rank_tensors = []
  for variable in var_list:
    with ops.colocate_with(variable.op):
      rank_tensors.append(array_ops.rank_internal(variable, optimize=False))
  if len(rank_tensors) == 1:
    return rank_tensors[0]
  return array_ops.stack(rank_tensors)
@tf_export(v1=["report_uninitialized_variables"])
@tf_should_use.should_use_result
def report_uninitialized_variables(var_list=None,
                                   name="report_uninitialized_variables"):
  """Adds ops to list the names of uninitialized variables.

  When run, it returns a 1-D tensor containing the names of uninitialized
  variables if there are any, or an empty array if there are none.

  Args:
    var_list: List of `Variable` objects to check. Defaults to the
      value of `global_variables() + local_variables()`
    name: Optional name of the `Operation`.

  Returns:
    A 1-D tensor containing names of the uninitialized variables, or an empty
    1-D tensor if there are no variables or no uninitialized variables.
  """
  if var_list is None:
    var_list = global_variables() + local_variables()
  # Backwards compatibility for old-style variables. TODO(touts): remove.
  if not var_list:
    var_list = []
    for op in ops.get_default_graph().get_operations():
      if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
        var_list.append(op.outputs[0])
  with ops.name_scope(name):
    if var_list:
      init_vars = [state_ops.is_variable_initialized(v) for v in var_list]
    # Fix: compute the reporting device unconditionally. It was previously
    # assigned only inside the `if var_list:` branch above, so an empty
    # `var_list` raised NameError at `ops.device(...)` below instead of
    # returning the documented empty string tensor.
    local_device = os.environ.get(
        "TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING", "/cpu:0")
    # Run all checking/reporting operations on the chosen device (CPU by
    # default).
    with ops.device(local_device):
      if not var_list:
        # Return an empty tensor so we only need to check for returned tensor
        # size being 0 as an indication of model ready.
        return array_ops.constant([], dtype=dtypes.string)
      # 1-D boolean tensor: True where the variable is NOT yet initialized.
      variables_mask = math_ops.logical_not(array_ops.stack(init_vars))
      # 1-D string tensor of all variable names; keep only masked entries.
      variable_names_tensor = array_ops.constant(
          [s.op.name for s in var_list])
      return array_ops.boolean_mask(variable_names_tensor, variables_mask)
# pylint: disable=protected-access
# Install Tensor operator overloads (`+`, `*`, comparisons, `__getitem__`
# slicing, ...) on Variable so expressions involving variables build the
# corresponding graph ops.
Variable._OverloadAllOperators()
# Register implicit PartitionedVariable -> Tensor conversion so a partitioned
# variable can be used wherever a Tensor is expected.
ops.register_tensor_conversion_function(
    PartitionedVariable, PartitionedVariable._TensorConversionFunction)
# pylint: enable=protected-access
ops.register_dense_tensor_like_type(Variable)
| 38.135857 | 134 | 0.682659 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum
import functools
import os
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.framework import variable_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.checkpointable import base as checkpointable
from tensorflow.python.util import compat
from tensorflow.python.util import tf_should_use
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.tf_export import tf_export
def default_variable_creator(_, **kwds):
  """Bottom-of-stack variable creator; replaced when `variable_scope` loads.

  Raises:
    NotImplementedError: always — the real creator is installed by importing
      `variable_scope`, which monkey-patches this symbol.
  """
  del kwds  # Unused by the stub.
  raise NotImplementedError("variable_scope needs to be imported")
def default_variable_creator_v2(_, **kwds):
  """Bottom-of-stack v2 variable creator; replaced when `variable_scope` loads.

  Raises:
    NotImplementedError: always — the real creator is installed by importing
      `variable_scope`, which monkey-patches this symbol.
  """
  del kwds  # Unused by the stub.
  raise NotImplementedError("variable_scope needs to be imported")
def _make_getter(captured_getter, captured_previous):
def getter(**kwargs):
return captured_getter(captured_previous, **kwargs)
return getter
def _has_cycle(op, path):
if op.name in path:
return True
path.add(op.name)
for op_input in op.inputs:
if _has_cycle(op_input.op, path):
return True
for op_control_input in op.control_inputs:
if _has_cycle(op_control_input, path):
return True
path.remove(op.name)
return False
@tf_export("VariableSynchronization")
class VariableSynchronization(enum.Enum):
  """Indicates when a distributed variable will be synced.

  Passed as the `synchronization` argument of the `Variable`/`VariableV1`
  constructors (see `VariableMetaclass._variable_v1_call` /
  `_variable_v2_call` below). Member semantics follow the names: `ON_WRITE`
  syncs when written, `ON_READ` when read, `AUTO` lets the runtime choose,
  `NONE` never syncs.
  """
  AUTO = 0
  NONE = 1
  ON_WRITE = 2
  ON_READ = 3
@tf_export("VariableAggregation", v1=[])
class VariableAggregationV2(enum.Enum):
  """Indicates how a distributed variable will be aggregated.

  Passed as the `aggregation` argument of the `Variable` constructors. This
  docstring is also reused further down in this module as the base of
  `VariableAggregation.__doc__`, so it ends with the member list:

  * `NONE`: no aggregation (the default).
  * `SUM`: sum the updates across replicas.
  * `MEAN`: average the updates across replicas.
  * `ONLY_FIRST_REPLICA`: keep only the first replica's update.
  """
  NONE = 0
  SUM = 1
  MEAN = 2
  ONLY_FIRST_REPLICA = 3
@tf_export(v1=["VariableAggregation"])
class VariableAggregation(enum.Enum):
  """v1 aggregation enum; mirrors `VariableAggregationV2`.

  Note: this docstring is replaced at import time by the module-level
  `VariableAggregation.__doc__ = ...` assignment below, which appends the
  deprecated-alias note. `ONLY_FIRST_TOWER` deliberately shares the value 3
  with `ONLY_FIRST_REPLICA`, making it an enum alias.
  """
  NONE = 0
  SUM = 1
  MEAN = 2
  ONLY_FIRST_REPLICA = 3
  ONLY_FIRST_TOWER = 3
# Extend the v2 docstring with the v1-only deprecated alias. Guard against a
# missing (None) __doc__ — e.g. when docstrings are stripped (`python -OO`) —
# which would otherwise make the `+` raise `TypeError: unsupported operand`.
VariableAggregation.__doc__ = (
    (VariableAggregationV2.__doc__ or "") +
    "* `ONLY_FIRST_TOWER`: Deprecated alias for `ONLY_FIRST_REPLICA`.\n  ")
class VariableMetaclass(type):
  """Metaclass that routes `Variable`/`VariableV1` construction.

  Calling `Variable(...)` or `VariableV1(...)` does not run those classes'
  own `__init__`: `__call__` below intercepts the call and threads the
  arguments through the default graph's variable-creator stack, whose
  innermost getter (`default_variable_creator[_v2]`, normally monkey-patched
  by `variable_scope`) picks the concrete implementation to instantiate.
  """

  def _variable_v1_call(cls,
                        initial_value=None,
                        trainable=None,
                        collections=None,
                        validate_shape=True,
                        caching_device=None,
                        name=None,
                        variable_def=None,
                        dtype=None,
                        expected_shape=None,
                        import_scope=None,
                        constraint=None,
                        use_resource=None,
                        synchronization=VariableSynchronization.AUTO,
                        aggregation=VariableAggregation.NONE):
    """Builds a v1 variable through the variable-creator stack."""
    previous_getter = lambda **kwargs: default_variable_creator(None, **kwargs)
    # Wrap each registered creator around the default one, innermost first.
    for getter in ops.get_default_graph()._variable_creator_stack:
      previous_getter = _make_getter(getter, previous_getter)
    # Normalize an explicit `aggregation=None` to the enum default.
    if aggregation is None:
      aggregation = VariableAggregation.NONE
    return previous_getter(
        initial_value=initial_value,
        trainable=trainable,
        collections=collections,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        variable_def=variable_def,
        dtype=dtype,
        expected_shape=expected_shape,
        import_scope=import_scope,
        constraint=constraint,
        use_resource=use_resource,
        synchronization=synchronization,
        aggregation=aggregation)

  def _variable_v2_call(cls,
                        initial_value=None,
                        trainable=None,
                        validate_shape=True,
                        caching_device=None,
                        name=None,
                        variable_def=None,
                        dtype=None,
                        import_scope=None,
                        constraint=None,
                        synchronization=VariableSynchronization.AUTO,
                        aggregation=VariableAggregation.NONE):
    """Builds a v2 variable (no `collections`/`use_resource`/`expected_shape`)."""
    previous_getter = lambda **kws: default_variable_creator_v2(None, **kws)
    for getter in ops.get_default_graph()._variable_creator_stack:
      previous_getter = _make_getter(getter, previous_getter)
    if aggregation is None:
      aggregation = VariableAggregation.NONE
    return previous_getter(
        initial_value=initial_value,
        trainable=trainable,
        validate_shape=validate_shape,
        caching_device=caching_device,
        name=name,
        variable_def=variable_def,
        dtype=dtype,
        import_scope=import_scope,
        constraint=constraint,
        synchronization=synchronization,
        aggregation=aggregation)

  def __call__(cls, *args, **kwargs):
    # Only the two public entry points are redirected through the creator
    # stack; concrete subclasses (e.g. RefVariable) construct normally.
    if cls is VariableV1:
      return cls._variable_v1_call(*args, **kwargs)
    elif cls is Variable:
      return cls._variable_v2_call(*args, **kwargs)
    else:
      return super(VariableMetaclass, cls).__call__(*args, **kwargs)
@tf_export("Variable", v1=[])
class Variable(six.with_metaclass(VariableMetaclass,
                                  checkpointable.CheckpointableBase)):
  """A variable maintains state across calls to `run()` — v2 API surface.

  Almost every method below raises `NotImplementedError` on purpose: this
  class is an interface. `VariableMetaclass.__call__` never instantiates it
  directly; construction is dispatched to a concrete subclass (e.g.
  `RefVariable`), which overrides these methods. Only the operator
  overloading machinery, `from_proto` and the nested `SaveSliceInfo` carry
  real implementations here.
  """

  # --- Abstract interface: concrete subclasses override all of these. ---
  def __init__(self,
               initial_value=None,
               trainable=True,
               validate_shape=True,
               caching_device=None,
               name=None,
               variable_def=None,
               dtype=None,
               import_scope=None,
               constraint=None,
               synchronization=VariableSynchronization.AUTO,
               aggregation=VariableAggregation.NONE):
    raise NotImplementedError
  def __repr__(self):
    raise NotImplementedError
  def value(self):
    raise NotImplementedError
  def read_value(self):
    raise NotImplementedError
  def set_shape(self, shape):
    raise NotImplementedError
  @property
  def trainable(self):
    raise NotImplementedError
  def eval(self, session=None):
    raise NotImplementedError
  def initialized_value(self):
    raise NotImplementedError
  @property
  def initial_value(self):
    raise NotImplementedError
  @property
  def constraint(self):
    raise NotImplementedError
  def assign(self, value, use_locking=False, name=None, read_value=True):
    raise NotImplementedError
  def assign_add(self, delta, use_locking=False, name=None, read_value=True):
    raise NotImplementedError
  def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
    raise NotImplementedError
  def scatter_sub(self, sparse_delta, use_locking=False, name=None):
    raise NotImplementedError
  def scatter_add(self, sparse_delta, use_locking=False, name=None):
    raise NotImplementedError
  def scatter_update(self, sparse_delta, use_locking=False, name=None):
    raise NotImplementedError
  def scatter_nd_sub(self, indices, updates, name=None):
    raise NotImplementedError
  def scatter_nd_add(self, indices, updates, name=None):
    raise NotImplementedError
  def scatter_nd_update(self, indices, updates, name=None):
    raise NotImplementedError
  def count_up_to(self, limit):
    raise NotImplementedError
  def load(self, value, session=None):
    raise NotImplementedError

  @staticmethod
  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
    """Converts a `Variable` to a Tensor (or mutable ref) for graph building."""
    _ = name
    if dtype and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      return v._ref()
    else:
      return v.value()

  @classmethod
  def _OverloadAllOperators(cls):
    """Installs all overloadable `ops.Tensor` operators on this class."""
    for operator in ops.Tensor.OVERLOADABLE_OPERATORS:
      cls._OverloadOperator(operator)
    # Slicing (`v[...]`) gets a variable-aware helper rather than the plain
    # Tensor operator.
    setattr(cls, "__getitem__", array_ops._SliceHelperVar)

  @classmethod
  def _OverloadOperator(cls, operator):
    """Defers the named operator to the variable's snapshot Tensor."""
    tensor_oper = getattr(ops.Tensor, operator)
    def _run_op(a, *args, **kwargs):
      return tensor_oper(a._AsTensor(), *args, **kwargs)
    # Propagate __doc__/__name__ from the Tensor operator to the wrapper.
    functools.update_wrapper(_run_op, tensor_oper)
    setattr(cls, operator, _run_op)

  def __iter__(self):
    raise TypeError("'Variable' object is not iterable.")

  # NOTE(mrry): This enables the Variable's overloaded "right" binary
  # operators to run when the left operand is an ndarray, because it
  # accords the Variable class higher priority than an ndarray, or a
  # numpy matrix.
  # TODO(mrry): Convert this to using numpy's __numpy_ufunc__
  __array_priority__ = 100

  # --- Abstract read-only accessors (overridden by subclasses). ---
  @property
  def name(self):
    raise NotImplementedError
  @property
  def initializer(self):
    raise NotImplementedError
  @property
  def device(self):
    raise NotImplementedError
  @property
  def dtype(self):
    raise NotImplementedError
  @property
  def op(self):
    raise NotImplementedError
  @property
  def graph(self):
    raise NotImplementedError
  @property
  def shape(self):
    raise NotImplementedError
  def get_shape(self):
    raise NotImplementedError
  def to_proto(self, export_scope=None):
    raise NotImplementedError

  @staticmethod
  def from_proto(variable_def, import_scope=None):
    """Returns a `RefVariable` reconstructed from a `VariableDef` proto."""
    return RefVariable(variable_def=variable_def,
                       import_scope=import_scope)

  class SaveSliceInfo(object):
    """Information on how to save a variable as a slice of a larger one.

    Used by partitioned variables: records the full (unpartitioned) variable
    name/shape plus this slice's offset and shape within it.
    """

    def __init__(self,
                 full_name=None,
                 full_shape=None,
                 var_offset=None,
                 var_shape=None,
                 save_slice_info_def=None,
                 import_scope=None):
      """Either copies the explicit fields or decodes a SaveSliceInfoDef."""
      if save_slice_info_def:
        assert isinstance(save_slice_info_def, variable_pb2.SaveSliceInfoDef)
        self.full_name = ops.prepend_name_scope(
            save_slice_info_def.full_name, import_scope=import_scope)
        # Copy the repeated proto fields into plain Python lists.
        self.full_shape = [i for i in save_slice_info_def.full_shape]
        self.var_offset = [i for i in save_slice_info_def.var_offset]
        self.var_shape = [i for i in save_slice_info_def.var_shape]
      else:
        self.full_name = full_name
        self.full_shape = full_shape
        self.var_offset = var_offset
        self.var_shape = var_shape

    @property
    def spec(self):
      """Computes the checkpoint slice spec: "full_shape offset,size:...". """
      full_shape_str = " ".join(["%d" % d for d in self.full_shape]) + " "
      sl_spec = ":".join([
          "%d,%d" % (o, s) for o, s in zip(self.var_offset, self.var_shape)
      ])
      return full_shape_str + sl_spec

    def to_proto(self, export_scope=None):
      """Returns a SaveSliceInfoDef, or None if outside `export_scope`."""
      if (export_scope is None or
          self.full_name.startswith(export_scope)):
        save_slice_info_def = variable_pb2.SaveSliceInfoDef()
        save_slice_info_def.full_name = ops.strip_name_scope(
            self.full_name, export_scope)
        for i in self.full_shape:
          save_slice_info_def.full_shape.append(i)
        for i in self.var_offset:
          save_slice_info_def.var_offset.append(i)
        for i in self.var_shape:
          save_slice_info_def.var_shape.append(i)
        return save_slice_info_def
      else:
        return None

  # --- In-place operators: abstract here, warning wrappers in RefVariable. ---
  def __iadd__(self, other):
    raise NotImplementedError
  def __isub__(self, other):
    raise NotImplementedError
  def __imul__(self, other):
    raise NotImplementedError
  def __idiv__(self, other):
    raise NotImplementedError
  def __itruediv__(self, other):
    raise NotImplementedError
  def __irealdiv__(self, other):
    raise NotImplementedError
  def __ipow__(self, other):
    raise NotImplementedError
@tf_export(v1=["Variable"])
class VariableV1(Variable):
  """The v1 `tf.Variable` front-end.

  Construction is intercepted by `VariableMetaclass.__call__`, which forwards
  the arguments (including the v1-only `collections`, `expected_shape` and
  `use_resource`) through the variable-creator stack, so the `__init__` body
  below is never executed.
  """

  def __init__(self,
               initial_value=None,
               trainable=True,
               collections=None,
               validate_shape=True,
               caching_device=None,
               name=None,
               variable_def=None,
               dtype=None,
               expected_shape=None,
               import_scope=None,
               constraint=None,
               use_resource=None,
               synchronization=VariableSynchronization.AUTO,
               aggregation=VariableAggregation.NONE):
    """Creates a new variable (signature documentation only).

    NOTE: this docstring is deliberately the *entire* body of the method —
    the metaclass performs the real construction. It also keeps the `def`
    syntactically valid: without any body statement the module would fail to
    parse.
    """

  SaveSliceInfo = Variable.SaveSliceInfo
class RefVariable(VariableV1):
def __init__(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
variable_def=None,
dtype=None,
expected_shape=None,
import_scope=None,
constraint=None):
self._in_graph_mode = True
if variable_def:
if initial_value:
raise ValueError("variable_def and initial_value are mutually "
"exclusive.")
self._init_from_proto(variable_def, import_scope=import_scope)
else:
self._init_from_args(
initial_value=initial_value,
trainable=trainable,
collections=collections,
validate_shape=validate_shape,
caching_device=caching_device,
name=name,
dtype=dtype,
expected_shape=expected_shape,
constraint=constraint)
def __repr__(self):
if context.executing_eagerly() and not self._in_graph_mode:
return "<tf.Variable '%s' shape=%s dtype=%s, numpy=%s>" % (
self.name, self.get_shape(), self.dtype.name,
ops.numpy_text(self.read_value(), is_repr=True))
else:
return "<tf.Variable '%s' shape=%s dtype=%s>" % (
self.name, self.get_shape(), self.dtype.name)
def _init_from_args(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None,
expected_shape=None,
constraint=None):
_ = expected_shape
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
"collections argument to Variable constructor must be a list, tuple, "
"or set. Got %s of type %s" % (collections, type(collections)))
if constraint is not None and not callable(constraint):
raise ValueError("The `constraint` argument must be a callable.")
self._graph_key = ops.get_default_graph()._graph_key
if isinstance(initial_value, checkpointable.CheckpointInitialValue):
self._maybe_initialize_checkpointable()
self._update_uid = initial_value.checkpoint_position.restore_uid
initial_value = initial_value.wrapped_value
self._trainable = trainable
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
with ops.init_scope():
if context.executing_eagerly():
raise RuntimeError(
"RefVariable not supported when eager execution is enabled. ")
with ops.name_scope(name, "Variable", [] if init_from_fn else
[initial_value]) as name:
if init_from_fn:
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
true_name = ops._name_from_scope_name(name)
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % true_name)]))
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), ops.device(None):
self._initial_value = ops.convert_to_tensor(
initial_value(), name="initial_value", dtype=dtype)
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
else:
self._initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
if self._initial_value.op._get_control_flow_context() is not None:
raise ValueError(
"Initializer for variable %s is from inside a control-flow "
"construct, such as a loop or conditional. When creating a "
"variable inside a loop or conditional, use a lambda as the "
"initializer." % name)
shape = (self._initial_value.get_shape()
if validate_shape else tensor_shape.unknown_shape())
# initial_value has been converted to a Tensor with a known type.
self._variable = state_ops.variable_op_v2(
shape,
self._initial_value.dtype.base_dtype,
name=name)
# Manually overrides the variable's shape with the initial value's.
if validate_shape:
initial_value_shape = self._initial_value.get_shape()
if not initial_value_shape.is_fully_defined():
raise ValueError("initial_value must have a shape specified: %s" %
self._initial_value)
# If 'initial_value' makes use of other variables, make sure we don't
# using their initialized_value() method.
self._initializer_op = state_ops.assign(
self._variable,
self._try_guard_against_uninitialized_dependencies(
self._initial_value),
validate_shape=validate_shape).op
# TODO(vrv): Change this class to not take caching_device, but
# to take the op to colocate the snapshot with, so we can use
# colocation rather than devices.
if caching_device is not None:
with ops.device(caching_device):
self._snapshot = array_ops.identity(self._variable, name="read")
else:
with ops.colocate_with(self._variable.op):
self._snapshot = array_ops.identity(self._variable, name="read")
ops.add_to_collections(collections, self)
self._caching_device = caching_device
self._save_slice_info = None
self._constraint = constraint
def _init_from_proto(self, variable_def, import_scope=None):
assert isinstance(variable_def, variable_pb2.VariableDef)
# Create from variable_def.
g = ops.get_default_graph()
self._variable = g.as_graph_element(
ops.prepend_name_scope(variable_def.variable_name,
import_scope=import_scope))
self._initializer_op = g.as_graph_element(
ops.prepend_name_scope(variable_def.initializer_name,
import_scope=import_scope))
# Tests whether initial_value_name exists first for backwards compatibility.
if (hasattr(variable_def, "initial_value_name") and
variable_def.initial_value_name):
self._initial_value = g.as_graph_element(
ops.prepend_name_scope(variable_def.initial_value_name,
import_scope=import_scope))
else:
self._initial_value = None
self._trainable = getattr(variable_def, "trainable", True)
self._snapshot = g.as_graph_element(
ops.prepend_name_scope(variable_def.snapshot_name,
import_scope=import_scope))
if variable_def.HasField("save_slice_info_def"):
self._save_slice_info = Variable.SaveSliceInfo(
save_slice_info_def=variable_def.save_slice_info_def,
import_scope=import_scope)
else:
self._save_slice_info = None
self._caching_device = None
self._constraint = None
def _as_graph_element(self):
return self._variable
def _AsTensor(self): # pylint: disable=invalid-name
return self._snapshot
def value(self):
return self._snapshot
def read_value(self):
return array_ops.identity(self._variable, name="read")
def _ref(self):
return self._variable
def set_shape(self, shape):
self._ref().set_shape(shape)
self.value().set_shape(shape)
@property
def trainable(self):
return self._trainable
def eval(self, session=None):
return self._variable.eval(session=session)
def initialized_value(self):
with ops.init_scope():
return control_flow_ops.cond(is_variable_initialized(self),
self.read_value,
lambda: self.initial_value)
@property
def initial_value(self):
return self._initial_value
@property
def constraint(self):
return self._constraint
def assign(self, value, use_locking=False, name=None, read_value=True):
assign = state_ops.assign(self._variable, value, use_locking=use_locking,
name=name)
if read_value:
return assign
return assign.op
def assign_add(self, delta, use_locking=False, name=None, read_value=True):
assign = state_ops.assign_add(
self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
def assign_sub(self, delta, use_locking=False, name=None, read_value=True):
assign = state_ops.assign_sub(
self._variable, delta, use_locking=use_locking, name=name)
if read_value:
return assign
return assign.op
def scatter_sub(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return gen_state_ops.scatter_sub(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name)
def scatter_add(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return gen_state_ops.scatter_add(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name)
def scatter_update(self, sparse_delta, use_locking=False, name=None):
if not isinstance(sparse_delta, ops.IndexedSlices):
raise ValueError("sparse_delta is not IndexedSlices: %s" % sparse_delta)
return gen_state_ops.scatter_update(
self._variable,
sparse_delta.indices,
sparse_delta.values,
use_locking=use_locking,
name=name)
def scatter_nd_sub(self, indices, updates, name=None):
return gen_state_ops.scatter_nd_sub(
self._variable, indices, updates, use_locking=True, name=name)
def scatter_nd_add(self, indices, updates, name=None):
return gen_state_ops.scatter_nd_add(
self._variable, indices, updates, use_locking=True, name=name)
def scatter_nd_update(self, indices, updates, name=None):
return gen_state_ops.scatter_nd_update(
self._variable, indices, updates, use_locking=True, name=name)
def _strided_slice_assign(self,
begin,
end,
strides,
value,
name,
begin_mask,
end_mask,
ellipsis_mask,
new_axis_mask,
shrink_axis_mask):
return gen_array_ops.strided_slice_assign(ref=self._ref(),
begin=begin,
end=end,
strides=strides,
value=value,
name=name,
begin_mask=begin_mask,
end_mask=end_mask,
ellipsis_mask=ellipsis_mask,
new_axis_mask=new_axis_mask,
shrink_axis_mask=shrink_axis_mask)
def count_up_to(self, limit):
return state_ops.count_up_to(self._variable, limit=limit)
def load(self, value, session=None):
if context.executing_eagerly():
self.assign(value)
else:
session = session or ops.get_default_session()
if session is None:
raise ValueError(
"Either session argument should be provided or default session "
"should be established")
session.run(self._initializer_op, {self._initializer_op.inputs[1]: value})
# Conversion to tensor.
@staticmethod
def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False): # pylint: disable=invalid-name
_ = name
if dtype and not dtype.is_compatible_with(v.dtype):
raise ValueError(
"Incompatible type conversion requested to type '%s' for variable "
"of type '%s'" % (dtype.name, v.dtype.name))
if as_ref:
return v._ref() # pylint: disable=protected-access
else:
return v.value()
def _gather_saveables_for_checkpoint(self):
return {checkpointable.VARIABLE_VALUE_KEY: self}
def _try_guard_against_uninitialized_dependencies(self, initial_value):
if not isinstance(initial_value, ops.Tensor):
raise TypeError("initial_value needs to be a Tensor: %s" % initial_value)
# Don't modify initial_value if it contains any cyclic dependencies.
if _has_cycle(initial_value.op, path=set()):
return initial_value
return self._safe_initial_value_from_tensor(initial_value, op_cache={})
def _safe_initial_value_from_tensor(self, tensor, op_cache):
op = tensor.op
new_op = op_cache.get(op.name)
if new_op is None:
new_op = self._safe_initial_value_from_op(op, op_cache)
op_cache[op.name] = new_op
return new_op.outputs[tensor.value_index]
def _safe_initial_value_from_op(self, op, op_cache):
op_type = op.node_def.op
if op_type in ("IsVariableInitialized", "VarIsInitializedOp",
"ReadVariableOp"):
return op
if op_type in ("Variable", "VariableV2", "VarHandleOp"):
initialized_value = self._find_initialized_value_for_variable(op)
return op if initialized_value is None else initialized_value.op
modified = False
new_op_inputs = []
for op_input in op.inputs:
new_op_input = self._safe_initial_value_from_tensor(op_input, op_cache)
new_op_inputs.append(new_op_input)
modified = modified or (new_op_input != op_input)
if modified:
new_op_type = op_type
if new_op_type == "RefSwitch":
new_op_type = "Switch"
new_op_name = op.node_def.name + "_" + self.name
new_op_name = new_op_name.replace(":", "_")
return self.graph.create_op(
new_op_type, new_op_inputs,
op._output_types,
name=new_op_name, attrs=op.node_def.attr)
return op
def _find_initialized_value_for_variable(self, variable_op):
try:
var_names = [variable_op.node_def.name, variable_op.node_def.name + ":0"]
for collection_name in (ops.GraphKeys.GLOBAL_VARIABLES,
ops.GraphKeys.LOCAL_VARIABLES):
for var in self.graph.get_collection(collection_name):
if var.name in var_names:
return var.initialized_value()
except AttributeError:
return None
return None
# operators to run when the left operand is an ndarray, because it
# accords the Variable class higher priority than an ndarray, or a
# numpy matrix.
# TODO(mrry): Convert this to using numpy's __numpy_ufunc__
__array_priority__ = 100
@property
def name(self):
return self._variable.name
@property
def _shared_name(self):
return self.name[:-2]
@property
def initializer(self):
return self._initializer_op
@property
def device(self):
return self._variable.device
@property
def dtype(self):
return self._variable.dtype
@property
def op(self):
return self._variable.op
@property
def graph(self):
return self._variable.graph
@property
def shape(self):
return self._variable.get_shape()
def get_shape(self):
return self.shape
def to_proto(self, export_scope=None):
if (export_scope is None or
self._variable.name.startswith(export_scope)):
var_def = variable_pb2.VariableDef()
var_def.variable_name = ops.strip_name_scope(
self._variable.name, export_scope)
if self._initial_value is not None:
var_def.initial_value_name = ops.strip_name_scope(
self._initial_value.name, export_scope)
var_def.trainable = self.trainable
var_def.initializer_name = ops.strip_name_scope(
self.initializer.name, export_scope)
var_def.snapshot_name = ops.strip_name_scope(
self._snapshot.name, export_scope)
if self._save_slice_info:
var_def.save_slice_info_def.MergeFrom(self._save_slice_info.to_proto(
export_scope=export_scope))
return var_def
else:
return None
def __iadd__(self, other):
logging.log_first_n(
logging.WARN,
"Variable += will be deprecated. Use variable.assign_add"
" if you want assignment to the variable value or 'x = x + y'"
" if you want a new python Tensor object.", 1)
return self + other
def __isub__(self, other):
logging.log_first_n(
logging.WARN,
"Variable -= will be deprecated. Use variable.assign_sub"
" if you want assignment to the variable value or 'x = x - y'"
" if you want a new python Tensor object.", 1)
return self - other
def __imul__(self, other):
logging.log_first_n(
logging.WARN,
"Variable *= will be deprecated. Use `var.assign(var * other)`"
" if you want assignment to the variable value or `x = x * y`"
" if you want a new python Tensor object.", 1)
return self * other
def __idiv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __itruediv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __irealdiv__(self, other):
logging.log_first_n(
logging.WARN,
"Variable /= will be deprecated. Use `var.assign(var / other)`"
" if you want assignment to the variable value or `x = x / y`"
" if you want a new python Tensor object.", 1)
return self / other
def __ipow__(self, other):
logging.log_first_n(
logging.WARN,
"Variable **= will be deprecated. Use `var.assign(var ** other)`"
" if you want assignment to the variable value or `x = x ** y`"
" if you want a new python Tensor object.", 1)
return self ** other
def _set_save_slice_info(self, save_slice_info):
self._save_slice_info = save_slice_info
  def _get_save_slice_info(self):
    """Returns the save-slice info previously set, or None if never set."""
    return self._save_slice_info
class PartitionedVariable(object):
  """A wrapper around a list of partitioned `Variable` objects.

  The wrapped variables together form one logical variable of overall shape
  `shape`, split along at most one axis according to `partitions`.  Iteration
  yields the underlying partitions sorted by their save-slice offset, and
  `as_tensor()` concatenates them back into a single tensor.
  """

  def __init__(self, name, shape, dtype, variable_list, partitions):
    """Creates a new partitioned variable wrapper.

    Args:
      name: String. Overall name of the variables.
      shape: List of integers.  Overall shape of the variables.
      dtype: Type of the variables.
      variable_list: List of `Variable` objects that comprise the partitions;
        every entry must carry save-slice info with a full shape equal to
        `shape`.
      partitions: List of integers.  Number of partitions per dimension.

    Raises:
      TypeError: If `variable_list` or `partitions` is not a list or tuple.
      ValueError: If `variable_list` is empty, a partition count is < 1,
        a variable lacks save-slice info, `shape` and `partitions` disagree
        in rank, or a variable's full shape does not match `shape`.
    """
    if not isinstance(variable_list, (list, tuple)):
      raise TypeError(
          "variable_list is not a list or tuple: %s" % variable_list)
    if not isinstance(partitions, (list, tuple)):
      raise TypeError("partitions is not a list or tuple: %s" % partitions)
    if not all(p >= 1 for p in partitions):
      raise ValueError("partition values must be positive: %s" % partitions)
    if not variable_list:
      raise ValueError("variable_list may not be empty")
    # pylint: disable=protected-access
    # List-wide checks are performed once, up front.  (Previously they were
    # re-evaluated inside the per-variable loop below, which made the
    # validation accidentally O(n^2) in the number of partitions.)
    if not all(v._get_save_slice_info() is not None for v in variable_list):
      raise ValueError(
          "All variables must have a save_slice_info available: %s"
          % [v.name for v in variable_list])
    if len(shape) != len(partitions):
      raise ValueError("len(shape) != len(partitions): %s vs. %s"
                       % (shape, partitions))
    for v in variable_list:
      if v._get_save_slice_info().full_shape != shape:
        raise ValueError(
            "All variables' full shapes must match shape: %s; "
            "but full shapes were: %s"
            % (shape, str([v._get_save_slice_info().full_shape])))
    # Sort the variable_list lexicographically according to var offset value.
    self._variable_list = sorted(
        variable_list, key=lambda v: v._get_save_slice_info().var_offset)
    # pylint: enable=protected-access
    self._name = name
    self._shape = shape
    self._dtype = dtype
    self._partitions = partitions
    self._as_tensor = None

  def __iter__(self):
    """Returns an iterator over the underlying partitions (sorted by offset)."""
    return iter(self._variable_list)

  def __len__(self):
    """Number of partitions; only defined when one axis is partitioned."""
    num_partition_axes = len(self._partition_axes())
    if num_partition_axes > 1:
      raise ValueError("Cannot get a length for %d > 1 partition axes"
                       % num_partition_axes)
    return len(self._variable_list)

  def _partition_axes(self):
    """Returns the axes that are actually partitioned (partition count > 1).

    An unpartitioned variable reports axis 0 by convention.
    """
    if all(p == 1 for p in self._partitions):
      return [0]
    else:
      return [i for i, p in enumerate(self._partitions) if p > 1]

  def _concat(self):
    """Returns the overall concatenated value as a `Tensor`.

    A single-partition variable is returned via `identity` instead of
    `concat`.  The result is not cached.

    Raises:
      NotImplementedError: If more than one axis is partitioned.
    """
    if len(self._variable_list) == 1:
      with ops.name_scope(None):
        return array_ops.identity(self._variable_list[0], name=self._name)
    partition_axes = self._partition_axes()
    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot concatenate along more than one dimension: %s. "
          "Multi-axis partition concat is not supported" % str(partition_axes))
    partition_ix = partition_axes[0]
    with ops.name_scope(self._name + "/ConcatPartitions/"):
      concatenated = array_ops.concat(self._variable_list, partition_ix)
    with ops.name_scope(None):
      return array_ops.identity(concatenated, name=self._name)

  def as_tensor(self):
    """Returns the overall concatenated value as a `Tensor`.

    Control dependencies are lifted so the concat runs outside any
    surrounding control-flow context.
    """
    with ops.control_dependencies(None):
      return self._concat()

  @staticmethod
  def _TensorConversionFunction(v, dtype=None, name=None, as_ref=False):
    # pylint: disable=invalid-name
    """Conversion hook registered via `ops.register_tensor_conversion_function`."""
    _ = name
    if dtype is not None and not dtype.is_compatible_with(v.dtype):
      raise ValueError(
          "Incompatible type conversion requested to type '%s' for variable "
          "of type '%s'" % (dtype.name, v.dtype.name))
    if as_ref:
      raise NotImplementedError(
          "PartitionedVariable doesn't support being used as a reference.")
    else:
      return v.as_tensor()

  @property
  def name(self):
    """Overall name of the partitioned variable."""
    return self._name

  @property
  def dtype(self):
    """Data type of the partitioned variable."""
    return self._dtype

  @property
  def shape(self):
    """Overall shape; identical to `get_shape()`."""
    return self.get_shape()

  def get_shape(self):
    """Returns the overall shape of the partitioned variable."""
    return self._shape

  def _get_variable_list(self):
    """Returns the underlying partitions, sorted by save-slice offset."""
    return self._variable_list

  def _get_partitions(self):
    """Returns the per-axis partition counts."""
    return self._partitions

  def _apply_assign_fn(self, assign_fn, value):
    """Applies `assign_fn` to each partition with its matching slice of `value`.

    `value` may be a list of per-partition values, another
    `PartitionedVariable`, or a single tensor which is split along the
    partitioned axis to match the partitions.

    Returns:
      The list of per-partition results of `assign_fn`.

    Raises:
      NotImplementedError: If more than one axis is partitioned.
    """
    partition_axes = self._partition_axes()
    if len(partition_axes) > 1:
      raise NotImplementedError(
          "Cannot do assign action along more than one dimension: %s. "
          "Multi-axis partition assign action is not supported " %
          str(partition_axes))
    if isinstance(value, list):
      assert len(value) == len(self._variable_list)
      value_list = value
    elif isinstance(value, PartitionedVariable):
      value_list = [var_part for var_part in value]
    else:
      partition_ix = partition_axes[0]
      size_splits_list = [
          tensor_shape.dimension_value(var.shape[partition_ix])
          for var in self._variable_list
      ]
      value_list = array_ops.split(value, size_splits_list, axis=partition_ix)
    op_list = [
        assign_fn(var, value_list[idx])
        for idx, var in enumerate(self._variable_list)
    ]
    return op_list

  def assign(self, value, use_locking=False, name=None, read_value=True):
    """Assigns `value` partition-wise; see `Variable.assign` for semantics."""
    assign_fn = lambda var, r_value: var.assign(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_add(self, value, use_locking=False, name=None, read_value=True):
    """Adds `value` partition-wise; see `Variable.assign_add` for semantics."""
    assign_fn = lambda var, r_value: var.assign_add(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]

  def assign_sub(self, value, use_locking=False, name=None, read_value=True):
    """Subtracts `value` partition-wise; see `Variable.assign_sub` for semantics."""
    assign_fn = lambda var, r_value: var.assign_sub(
        r_value, use_locking=use_locking,
        name=name, read_value=read_value)
    assign_list = self._apply_assign_fn(assign_fn, value)
    if read_value:
      return assign_list
    return [assign.op for assign in assign_list]
@tf_export(v1=["global_variables"])
def global_variables(scope=None):
return ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope)
@tf_export(v1=["all_variables"])
@deprecated("2017-03-02", "Please use tf.global_variables instead.")
def all_variables():
return global_variables()
def _all_saveable_objects(scope=None):
  """Returns the union of the GLOBAL_VARIABLES and SAVEABLE_OBJECTS
  collections, i.e. everything that must be checkpointed.

  Args:
    scope: (Optional.) Name-scope filter passed through to
      `ops.get_collection`.
  """
  return (ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, scope) +
          ops.get_collection(ops.GraphKeys.SAVEABLE_OBJECTS, scope))
@tf_export(v1=["local_variables"])
def local_variables(scope=None):
return ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES, scope)
@tf_export(v1=["model_variables"])
def model_variables(scope=None):
return ops.get_collection(ops.GraphKeys.MODEL_VARIABLES, scope)
@tf_export(v1=["trainable_variables"])
def trainable_variables(scope=None):
return ops.get_collection(ops.GraphKeys.TRAINABLE_VARIABLES, scope)
@tf_export(v1=["moving_average_variables"])
def moving_average_variables(scope=None):
return ops.get_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, scope)
@tf_export(v1=["initializers.variables", "variables_initializer"])
def variables_initializer(var_list, name="init"):
if var_list and not context.executing_eagerly():
return control_flow_ops.group(*[v.initializer for v in var_list], name=name)
return control_flow_ops.no_op(name=name)
@tf_export(v1=["initialize_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.variables_initializer` instead.")
def initialize_variables(var_list, name="init"):
return variables_initializer(var_list, name=name)
@tf_export(v1=["initializers.global_variables", "global_variables_initializer"])
def global_variables_initializer():
if context.executing_eagerly():
return control_flow_ops.no_op(name="global_variables_initializer")
return variables_initializer(global_variables())
@tf_export(v1=["initialize_all_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.global_variables_initializer` instead.")
def initialize_all_variables():
return global_variables_initializer()
@tf_export(v1=["initializers.local_variables", "local_variables_initializer"])
def local_variables_initializer():
if context.executing_eagerly():
return control_flow_ops.no_op(name="local_variables_initializer")
return variables_initializer(local_variables())
@tf_export(v1=["initialize_local_variables"])
@tf_should_use.should_use_result
@deprecated("2017-03-02", "Use `tf.local_variables_initializer` instead.")
def initialize_local_variables():
return local_variables_initializer()
@tf_export(v1=["is_variable_initialized"])
@tf_should_use.should_use_result
def is_variable_initialized(variable):
return state_ops.is_variable_initialized(variable)
@tf_export(v1=["assert_variables_initialized"])
@tf_should_use.should_use_result
def assert_variables_initialized(var_list=None):
if var_list is None:
var_list = global_variables() + local_variables()
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
if not var_list:
return None
else:
ranks = []
for var in var_list:
with ops.colocate_with(var.op):
ranks.append(array_ops.rank_internal(var, optimize=False))
if len(ranks) == 1:
return ranks[0]
else:
return array_ops.stack(ranks)
@tf_export(v1=["report_uninitialized_variables"])
@tf_should_use.should_use_result
def report_uninitialized_variables(var_list=None,
name="report_uninitialized_variables"):
if var_list is None:
var_list = global_variables() + local_variables()
if not var_list:
var_list = []
for op in ops.get_default_graph().get_operations():
if op.type in ["Variable", "VariableV2", "AutoReloadVariable"]:
var_list.append(op.outputs[0])
with ops.name_scope(name):
if var_list:
init_vars = [state_ops.is_variable_initialized(v) for v in var_list]
local_device = os.environ.get(
"TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING", "/cpu:0")
with ops.device(local_device):
if not var_list:
return array_ops.constant([], dtype=dtypes.string)
else:
variables_mask = math_ops.logical_not(array_ops.stack(init_vars))
variable_names_tensor = array_ops.constant(
[s.op.name for s in var_list])
return array_ops.boolean_mask(variable_names_tensor, variables_mask)
# Module import-time registration hooks.
# Install the arithmetic/comparison operator overloads on Variable (see the
# _Overload* machinery on the class).
Variable._OverloadAllOperators()
# Allow a PartitionedVariable to be implicitly converted to a Tensor.
ops.register_tensor_conversion_function(
    PartitionedVariable, PartitionedVariable._TensorConversionFunction)
# Let the framework treat Variable as a dense tensor-like type.
ops.register_dense_tensor_like_type(Variable)
| true | true |
f72b1a1f689e870dc85c7c284ed9fdf8f206b085 | 4,540 | py | Python | python/tests/serialization/test_deserializers.py | aji-geo/incubator-sedona | ed7a1badf58f0c7efedf79a0a21a9ef6ebd1d6b1 | [
"Apache-2.0"
] | 1 | 2021-10-19T07:57:29.000Z | 2021-10-19T07:57:29.000Z | python/tests/serialization/test_deserializers.py | aji-geo/incubator-sedona | ed7a1badf58f0c7efedf79a0a21a9ef6ebd1d6b1 | [
"Apache-2.0"
] | null | null | null | python/tests/serialization/test_deserializers.py | aji-geo/incubator-sedona | ed7a1badf58f0c7efedf79a0a21a9ef6ebd1d6b1 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
from shapely.geometry import MultiPoint, Point, MultiLineString, LineString, Polygon, MultiPolygon
import geopandas as gpd
from tests.data import data_path
from tests.test_base import TestBase
class TestGeometryConvert(TestBase):
    """Round-trip tests for geometry (de)serialization between Spark SQL
    (Sedona st_* functions) and Shapely / GeoPandas objects."""
    def test_register_functions(self):
        """Smoke test: the st_geomfromtext SQL function is registered."""
        df = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""")
        df.show()
    def test_collect(self):
        """Smoke test: geometry columns survive a collect() to the driver."""
        df = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""")
        df.collect()
    def test_loading_from_file_deserialization(self):
        """Load WKT from CSV; area computed in SQL must match Shapely's area."""
        # NOTE(review): createOrReplaceTempView returns None, so `geom` is
        # always None here — the assignment is dead.
        geom = self.spark.read.\
            options(delimiter="|", header=True).\
            csv(os.path.join(data_path, "counties.csv")).\
            limit(1).\
            createOrReplaceTempView("counties")
        geom_area = self.spark.sql("SELECT st_area(st_geomFromWKT(geom)) as area from counties").collect()[0][0]
        polygon_shapely = self.spark.sql("SELECT st_geomFromWKT(geom) from counties").collect()[0][0]
        assert geom_area == polygon_shapely.area
    def test_polygon_with_holes_deserialization(self):
        """A polygon with an interior ring deserializes to a Shapely Polygon."""
        geom = self.spark.sql(
            """select st_geomFromWKT('POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10),
            (20 30, 35 35, 30 20, 20 30))') as geom"""
        ).collect()[0][0]
        assert geom.area == 675.0
        assert type(geom) == Polygon
    def test_multipolygon_with_holes_deserialization(self):
        """A multipolygon with holes deserializes to a Shapely MultiPolygon."""
        geom = self.spark.sql(
            """select st_geomFromWKT('MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),
            ((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),
            (30 20, 20 15, 20 25, 30 20)))')"""
        ).collect()[0][0]
        assert type(geom) == MultiPolygon
        assert geom.area == 712.5
    def test_multipolygon_deserialization(self):
        # NOTE(review): st_geomFromWKT() is called with no argument and the
        # result is never asserted — this test looks unfinished/truncated.
        geom = self.spark.sql(
            """select st_geomFromWKT()"""
        )
    def test_point_deserialization(self):
        """A point deserializes with WKT equal to the Shapely equivalent."""
        geom = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""").collect()[0][0]
        assert geom.wkt == Point(-6.0, 52.0).wkt
    def test_multipoint_deserialization(self):
        """A multipoint deserializes with WKT equal to the Shapely equivalent."""
        geom = self.spark.sql("""select st_geomFromWKT('MULTIPOINT(1 2, -2 3)') as geom""").collect()[0][0]
        assert geom.wkt == MultiPoint([(1, 2), (-2, 3)]).wkt
    def test_linestring_deserialization(self):
        """A linestring deserializes to a Shapely LineString with matching WKT."""
        geom = self.spark.sql(
            """select st_geomFromWKT('LINESTRING (30 10, 10 30, 40 40)')"""
        ).collect()[0][0]
        assert type(geom) == LineString
        assert geom.wkt == LineString([(30, 10), (10, 30), (40, 40)]).wkt
    def test_multilinestring_deserialization(self):
        """A multilinestring deserializes to a Shapely MultiLineString."""
        geom = self.spark.sql(
            """SELECT st_geomFromWKT('MULTILINESTRING ((10 10, 20 20, 10 40),
            (40 40, 30 30, 40 20, 30 10))') as geom"""
        ).collect()[0][0]
        assert type(geom) == MultiLineString
        assert geom.wkt == MultiLineString([
            ((10, 10), (20, 20), (10, 40)),
            ((40, 40), (30, 30), (40, 20), (30, 10))
        ]).wkt
    def test_from_geopandas_convert(self):
        """A GeoDataFrame read from a shapefile converts to a Spark DataFrame."""
        gdf = gpd.read_file(os.path.join(data_path, "gis_osm_pois_free_1.shp"))
        self.spark.createDataFrame(
            gdf
        ).show()
    def test_to_geopandas(self):
        """A Spark DataFrame with a geometry column converts to a GeoDataFrame."""
        counties = self.spark. \
            read. \
            option("delimiter", "|"). \
            option("header", "true"). \
            csv(os.path.join(data_path, "counties.csv")).limit(1)
        counties.createOrReplaceTempView("county")
        counties_geom = self.spark.sql(
            "SELECT *, st_geomFromWKT(geom) as geometry from county"
        )
        gdf = counties_geom.toPandas()
        print(gpd.GeoDataFrame(gdf, geometry="geometry"))
| 36.32 | 112 | 0.624009 |
import os
from shapely.geometry import MultiPoint, Point, MultiLineString, LineString, Polygon, MultiPolygon
import geopandas as gpd
from tests.data import data_path
from tests.test_base import TestBase
class TestGeometryConvert(TestBase):
def test_register_functions(self):
df = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""")
df.show()
def test_collect(self):
df = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""")
df.collect()
def test_loading_from_file_deserialization(self):
geom = self.spark.read.\
options(delimiter="|", header=True).\
csv(os.path.join(data_path, "counties.csv")).\
limit(1).\
createOrReplaceTempView("counties")
geom_area = self.spark.sql("SELECT st_area(st_geomFromWKT(geom)) as area from counties").collect()[0][0]
polygon_shapely = self.spark.sql("SELECT st_geomFromWKT(geom) from counties").collect()[0][0]
assert geom_area == polygon_shapely.area
def test_polygon_with_holes_deserialization(self):
geom = self.spark.sql(
"""select st_geomFromWKT('POLYGON ((35 10, 45 45, 15 40, 10 20, 35 10),
(20 30, 35 35, 30 20, 20 30))') as geom"""
).collect()[0][0]
assert geom.area == 675.0
assert type(geom) == Polygon
def test_multipolygon_with_holes_deserialization(self):
geom = self.spark.sql(
"""select st_geomFromWKT('MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),
((20 35, 10 30, 10 10, 30 5, 45 20, 20 35),
(30 20, 20 15, 20 25, 30 20)))')"""
).collect()[0][0]
assert type(geom) == MultiPolygon
assert geom.area == 712.5
def test_multipolygon_deserialization(self):
geom = self.spark.sql(
"""select st_geomFromWKT()"""
)
def test_point_deserialization(self):
geom = self.spark.sql("""SELECT st_geomfromtext('POINT(-6.0 52.0)') as geom""").collect()[0][0]
assert geom.wkt == Point(-6.0, 52.0).wkt
def test_multipoint_deserialization(self):
geom = self.spark.sql("""select st_geomFromWKT('MULTIPOINT(1 2, -2 3)') as geom""").collect()[0][0]
assert geom.wkt == MultiPoint([(1, 2), (-2, 3)]).wkt
def test_linestring_deserialization(self):
geom = self.spark.sql(
"""select st_geomFromWKT('LINESTRING (30 10, 10 30, 40 40)')"""
).collect()[0][0]
assert type(geom) == LineString
assert geom.wkt == LineString([(30, 10), (10, 30), (40, 40)]).wkt
def test_multilinestring_deserialization(self):
geom = self.spark.sql(
"""SELECT st_geomFromWKT('MULTILINESTRING ((10 10, 20 20, 10 40),
(40 40, 30 30, 40 20, 30 10))') as geom"""
).collect()[0][0]
assert type(geom) == MultiLineString
assert geom.wkt == MultiLineString([
((10, 10), (20, 20), (10, 40)),
((40, 40), (30, 30), (40, 20), (30, 10))
]).wkt
def test_from_geopandas_convert(self):
gdf = gpd.read_file(os.path.join(data_path, "gis_osm_pois_free_1.shp"))
self.spark.createDataFrame(
gdf
).show()
def test_to_geopandas(self):
counties = self.spark. \
read. \
option("delimiter", "|"). \
option("header", "true"). \
csv(os.path.join(data_path, "counties.csv")).limit(1)
counties.createOrReplaceTempView("county")
counties_geom = self.spark.sql(
"SELECT *, st_geomFromWKT(geom) as geometry from county"
)
gdf = counties_geom.toPandas()
print(gpd.GeoDataFrame(gdf, geometry="geometry"))
| true | true |
f72b1cc1e0211ba34f94051f87bc32ad2cbf8b6f | 60 | py | Python | src/FLABasicTools/__main__.py | Fair-Lines-America/FLA_basic_tools | 9aedc23ef4b9df2bd530c96fedd94e046eb545c8 | [
"MIT"
] | 17 | 2020-05-07T20:02:30.000Z | 2022-03-02T10:59:28.000Z | src/FLABasicTools/__main__.py | Fair-Lines-America/FLA_basic_tools | 9aedc23ef4b9df2bd530c96fedd94e046eb545c8 | [
"MIT"
] | 3 | 2021-05-06T17:44:23.000Z | 2022-01-27T15:14:44.000Z | src/FLABasicTools/__main__.py | Fair-Lines-America/FLA_basic_tools | 9aedc23ef4b9df2bd530c96fedd94e046eb545c8 | [
"MIT"
] | null | null | null | from .cli import main
if __name__ == '__main__':
main() | 15 | 26 | 0.65 | from .cli import main
if __name__ == '__main__':
main() | true | true |
f72b1d438ff6542f0231c5e19b54a4ca0fdfaef9 | 7,860 | py | Python | agent/segmentation.py | johnnylord/trytry-segmentation | a88d75571ddba92bd10ac2d7303bee9426188b62 | [
"MIT"
] | null | null | null | agent/segmentation.py | johnnylord/trytry-segmentation | a88d75571ddba92bd10ac2d7303bee9426188b62 | [
"MIT"
] | null | null | null | agent/segmentation.py | johnnylord/trytry-segmentation | a88d75571ddba92bd10ac2d7303bee9426188b62 | [
"MIT"
] | null | null | null | import os
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as T
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from data.segmentation import SegmentDataset
from model.segmentation.fcn import FCN32
from model.segmentation.unet import UNet, UNetVGG16
__all__ = [ "SegmentAgent" ]
class SegmentAgent:
    """Train Image Segmentation model

    Wraps dataset/loader/model/optimizer construction, the training and
    validation loops, checkpointing and tensorboard logging.

    Requirements:
        Simple baseline
        - (15%) validation mIoU > 0.635
        - (15%) testing mIoU > 0.625
    """
    def __init__(self, config):
        """Builds all training components from the nested `config` dict.

        Raises:
            RuntimeError: If CUDA is not available (training requires GPU).
        """
        self.config = config
        # Check environment
        if torch.cuda.is_available():
            self.device = torch.device(config['train']['device'])
        else:
            raise RuntimeError("Please train your model with GPU")
        # Create dataset (ImageNet normalization statistics)
        tr_transform = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]), ])
        te_transform = T.Compose([
            T.ToTensor(),
            T.Normalize(mean=[0.485, 0.456, 0.406],
                        std=[0.229, 0.224, 0.225]), ])
        train_dataset = SegmentDataset(root=config['dataset']['train']['root'],
                                    transform=tr_transform)
        valid_dataset = SegmentDataset(root=config['dataset']['valid']['root'],
                                    transform=te_transform)
        # Create dataloader
        self.train_loader = DataLoader(train_dataset,
                                batch_size=config['loader']['batch_size'],
                                num_workers=config['loader']['num_workers'],
                                shuffle=True)
        self.valid_loader = DataLoader(valid_dataset,
                                batch_size=config['loader']['batch_size'],
                                num_workers=config['loader']['num_workers'],
                                shuffle=False)
        # Create model (7 output classes)
        if config['train']['model'] == 'fcn':
            self.model = FCN32(n_classes=7)
        elif config['train']['model'] == 'unet':
            self.model = UNetVGG16(n_classes=7)
        self.model.to(self.device)
        # Create optimizer
        self.optimizer = optim.Adam(self.model.parameters(), lr=config['optim']['lr'])
        # Create loss function
        self.criterion = nn.CrossEntropyLoss()
        # Create tensorboard
        tensorboard_dir = osp.join(config['train']['log_dir'], config['train']['exp_name'])
        self.writer = SummaryWriter(tensorboard_dir)
        # Logging / loop bookkeeping
        self.start_epoch = 0
        self.current_epoch = -1
        self.current_loss = 10000
        # Resume training or not
        if config['train']['resume']:
            checkpoint_file = osp.join(config['train']['log_dir'],
                                    config['train']['checkpoint_dir'],
                                    'best.pth')
            checkpoint = torch.load(checkpoint_file)
            self.model.load_state_dict(checkpoint['model'])
            self.optimizer.load_state_dict(checkpoint['optimizer'])
            for param_group in self.optimizer.param_groups:
                param_group['lr'] = config['optim']['lr']
            # NOTE(review): current_epoch is set to saved+1 and start_epoch
            # to saved+2, so the epoch right after the checkpoint appears to
            # be skipped — looks like an off-by-one; confirm intent.
            self.current_epoch = checkpoint['current_epoch'] + 1
            self.start_epoch = self.current_epoch + 1
            print("Resume training at epoch {}".format(self.start_epoch))
    def train(self):
        """Runs the full training schedule: one train + validate per epoch."""
        for epoch in range(self.start_epoch, self.config['train']['n_epochs']):
            self.current_epoch = epoch
            self.train_one_epoch()
            self.validate()
    def train_one_epoch(self):
        """Runs one optimization pass over the training loader and logs the
        mean per-sample loss to stdout and tensorboard."""
        running_loss = 0
        self.model.train()
        for i, (imgs, targets) in enumerate(self.train_loader):
            imgs = imgs.to(self.device)
            targets = targets.to(self.device)
            # Forward & Backward
            self.optimizer.zero_grad()
            outputs = self.model(imgs) # (n, c, h, w)
            # Flatten (n, c, h, w) -> (n*h*w, 7) for CrossEntropyLoss
            preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
            labels = targets.flatten()
            loss = self.criterion(preds, labels)
            loss.backward()
            self.optimizer.step()
            # Cumulate result (weighted by batch size for a correct mean)
            running_loss += loss.item() * len(imgs)
            # Show training information
            if (i % self.config['train']['interval']) == 0:
                print("Epoch {}:{}({}%), Loss: {:.2f}".format(
                    self.current_epoch, self.config['train']['n_epochs'],
                    int(i*100/len(self.train_loader)), loss.item()))
        train_loss = running_loss / len(self.train_loader.dataset)
        print("Epoch {}:{}, Train Loss: {:.2f}".format(
            self.current_epoch, self.config['train']['n_epochs'], train_loss))
        # Export result to tensorboard
        self.writer.add_scalar("Train Loss", train_loss, self.current_epoch)
    def validate(self):
        """Evaluates on the validation loader, reports loss and mIoU, and
        checkpoints whenever the validation loss improves."""
        running_loss = 0
        pred_masks = []
        true_masks = []
        self.model.eval()
        with torch.no_grad():
            for imgs, targets in self.valid_loader:
                imgs = imgs.to(self.device)
                targets = targets.to(self.device)
                outputs = self.model(imgs) # (n, c, h, w)
                # Save segmentation mask (argmax over the class channel)
                pred_mask = np.argmax(outputs.detach().cpu().numpy(), axis=1)
                pred_masks.append(pred_mask)
                true_masks.append(targets.detach().cpu().numpy())
                # Compute loss
                preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
                labels = targets.flatten()
                loss = self.criterion(preds, labels)
                # Validation Loss
                running_loss += loss.item() * len(imgs)
        # Show validation result
        pred_masks = np.vstack(pred_masks)
        true_masks = np.vstack(true_masks)
        miou = self._mean_iou_score(pred_masks, true_masks)
        valid_loss = running_loss / len(self.valid_loader.dataset)
        print("Epoch {}:{}, Valid Loss: {:.2f}, mIoU: {:.3f}".format(
            self.current_epoch, self.config['train']['n_epochs'],
            valid_loss, miou))
        # Save training checkpoints (best-loss-so-far policy)
        if valid_loss < self.current_loss:
            self.current_loss = valid_loss
            self._save_checkpoint()
        # Export result to tensorboard
        self.writer.add_scalar("Valid Loss", valid_loss, self.current_epoch)
    def finalize(self):
        """Hook for end-of-training cleanup; intentionally a no-op."""
        pass
    def _save_checkpoint(self):
        """Writes model/optimizer state plus loop bookkeeping to best.pth,
        creating the checkpoint directory if needed."""
        checkpoints = { 'model': self.model.state_dict(),
                        'optimizer': self.optimizer.state_dict(),
                        'current_epoch': self.current_epoch,
                        'current_loss': self.current_loss }
        checkpoint_file = osp.join(self.config['train']['log_dir'],
                                self.config['train']['checkpoint_dir'],
                                'best.pth')
        if not osp.exists(osp.dirname(checkpoint_file)):
            os.makedirs(osp.dirname(checkpoint_file))
        torch.save(checkpoints, checkpoint_file)
        print("Save checkpoint to '{}'".format(checkpoint_file))
    def _mean_iou_score(self, pred_masks, true_masks):
        """Compute mean IoU score over 6 classes

        Classes 0..5 only — presumably class 6 is an ignore/unknown label;
        confirm against the dataset definition.
        NOTE(review): iou divides by (tp_fp + tp_fn - tp), which is zero
        when a class is absent from both masks.
        """
        mean_iou = 0
        for i in range(6):
            tp_fp = np.sum(pred_masks == i)
            tp_fn = np.sum(true_masks == i)
            tp = np.sum((pred_masks == i) * (true_masks == i))
            iou = tp / (tp_fp + tp_fn - tp)
            mean_iou += iou / 6
        return mean_iou
| 38.341463 | 91 | 0.549237 | import os
import os.path as osp
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as T
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from data.segmentation import SegmentDataset
from model.segmentation.fcn import FCN32
from model.segmentation.unet import UNet, UNetVGG16
__all__ = [ "SegmentAgent" ]
class SegmentAgent:
def __init__(self, config):
self.config = config
if torch.cuda.is_available():
self.device = torch.device(config['train']['device'])
else:
raise RuntimeError("Please train your model with GPU")
tr_transform = T.Compose([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]), ])
te_transform = T.Compose([
T.ToTensor(),
T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]), ])
train_dataset = SegmentDataset(root=config['dataset']['train']['root'],
transform=tr_transform)
valid_dataset = SegmentDataset(root=config['dataset']['valid']['root'],
transform=te_transform)
self.train_loader = DataLoader(train_dataset,
batch_size=config['loader']['batch_size'],
num_workers=config['loader']['num_workers'],
shuffle=True)
self.valid_loader = DataLoader(valid_dataset,
batch_size=config['loader']['batch_size'],
num_workers=config['loader']['num_workers'],
shuffle=False)
if config['train']['model'] == 'fcn':
self.model = FCN32(n_classes=7)
elif config['train']['model'] == 'unet':
self.model = UNetVGG16(n_classes=7)
self.model.to(self.device)
self.optimizer = optim.Adam(self.model.parameters(), lr=config['optim']['lr'])
self.criterion = nn.CrossEntropyLoss()
tensorboard_dir = osp.join(config['train']['log_dir'], config['train']['exp_name'])
self.writer = SummaryWriter(tensorboard_dir)
self.start_epoch = 0
self.current_epoch = -1
self.current_loss = 10000
if config['train']['resume']:
checkpoint_file = osp.join(config['train']['log_dir'],
config['train']['checkpoint_dir'],
'best.pth')
checkpoint = torch.load(checkpoint_file)
self.model.load_state_dict(checkpoint['model'])
self.optimizer.load_state_dict(checkpoint['optimizer'])
for param_group in self.optimizer.param_groups:
param_group['lr'] = config['optim']['lr']
self.current_epoch = checkpoint['current_epoch'] + 1
self.start_epoch = self.current_epoch + 1
print("Resume training at epoch {}".format(self.start_epoch))
def train(self):
for epoch in range(self.start_epoch, self.config['train']['n_epochs']):
self.current_epoch = epoch
self.train_one_epoch()
self.validate()
def train_one_epoch(self):
running_loss = 0
self.model.train()
for i, (imgs, targets) in enumerate(self.train_loader):
imgs = imgs.to(self.device)
targets = targets.to(self.device)
self.optimizer.zero_grad()
outputs = self.model(imgs)
preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
labels = targets.flatten()
loss = self.criterion(preds, labels)
loss.backward()
self.optimizer.step()
running_loss += loss.item() * len(imgs)
if (i % self.config['train']['interval']) == 0:
print("Epoch {}:{}({}%), Loss: {:.2f}".format(
self.current_epoch, self.config['train']['n_epochs'],
int(i*100/len(self.train_loader)), loss.item()))
train_loss = running_loss / len(self.train_loader.dataset)
print("Epoch {}:{}, Train Loss: {:.2f}".format(
self.current_epoch, self.config['train']['n_epochs'], train_loss))
self.writer.add_scalar("Train Loss", train_loss, self.current_epoch)
def validate(self):
running_loss = 0
pred_masks = []
true_masks = []
self.model.eval()
with torch.no_grad():
for imgs, targets in self.valid_loader:
imgs = imgs.to(self.device)
targets = targets.to(self.device)
outputs = self.model(imgs)
pred_mask = np.argmax(outputs.detach().cpu().numpy(), axis=1)
pred_masks.append(pred_mask)
true_masks.append(targets.detach().cpu().numpy())
preds = outputs.transpose(1, 2).transpose(2, 3).contiguous().view(-1, 7)
labels = targets.flatten()
loss = self.criterion(preds, labels)
running_loss += loss.item() * len(imgs)
pred_masks = np.vstack(pred_masks)
true_masks = np.vstack(true_masks)
miou = self._mean_iou_score(pred_masks, true_masks)
valid_loss = running_loss / len(self.valid_loader.dataset)
print("Epoch {}:{}, Valid Loss: {:.2f}, mIoU: {:.3f}".format(
self.current_epoch, self.config['train']['n_epochs'],
valid_loss, miou))
if valid_loss < self.current_loss:
self.current_loss = valid_loss
self._save_checkpoint()
self.writer.add_scalar("Valid Loss", valid_loss, self.current_epoch)
def finalize(self):
pass
def _save_checkpoint(self):
checkpoints = { 'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'current_epoch': self.current_epoch,
'current_loss': self.current_loss }
checkpoint_file = osp.join(self.config['train']['log_dir'],
self.config['train']['checkpoint_dir'],
'best.pth')
if not osp.exists(osp.dirname(checkpoint_file)):
os.makedirs(osp.dirname(checkpoint_file))
torch.save(checkpoints, checkpoint_file)
print("Save checkpoint to '{}'".format(checkpoint_file))
def _mean_iou_score(self, pred_masks, true_masks):
mean_iou = 0
for i in range(6):
tp_fp = np.sum(pred_masks == i)
tp_fn = np.sum(true_masks == i)
tp = np.sum((pred_masks == i) * (true_masks == i))
iou = tp / (tp_fp + tp_fn - tp)
mean_iou += iou / 6
return mean_iou
| true | true |
f72b1e7549524106a9f828129970b89627719521 | 51,593 | py | Python | temporal/core.py | karttur/geoimagine02-grass | 09c207707ddd0dae04a871e006e184409aa87d99 | [
"BSD-3-Clause"
] | null | null | null | temporal/core.py | karttur/geoimagine02-grass | 09c207707ddd0dae04a871e006e184409aa87d99 | [
"BSD-3-Clause"
] | null | null | null | temporal/core.py | karttur/geoimagine02-grass | 09c207707ddd0dae04a871e006e184409aa87d99 | [
"BSD-3-Clause"
] | null | null | null | """
This module provides the functionality to create the temporal
SQL database and to establish a connection to the database.
Usage:
.. code-block:: python
>>> import grass.temporal as tgis
>>> # Create the temporal database
>>> tgis.init()
>>> # Establish a database connection
>>> dbif, connected = tgis.init_dbif(None)
>>> dbif.connect()
>>> # Execute a SQL statement
>>> dbif.execute_transaction("SELECT datetime(0, 'unixepoch', 'localtime');")
>>> # Mogrify an SQL statement
>>> dbif.mogrify_sql_statement(["SELECT name from raster_base where name = ?",
... ("precipitation",)])
"SELECT name from raster_base where name = 'precipitation'"
>>> dbif.close()
(C) 2011-2014 by the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
:author: Soeren Gebbert
"""
#import traceback
import os
import sys
import grass.script as gscript
if sys.version_info.major == 3:
long = int
from .c_libraries_interface import *
from grass.pygrass import messages
from grass.script.utils import decode, encode
# Import all supported database backends
# Ignore import errors since they are checked later
try:
import sqlite3
except ImportError:
pass
# Postgresql is optional, existence is checked when needed
try:
import psycopg2
import psycopg2.extras
except:
pass
import atexit
from datetime import datetime
###############################################################################
def profile_function(func):
    """Execute *func*, profiling it when GRASS_TGIS_PROFILE is enabled.

    When the environment variable GRASS_TGIS_PROFILE is set to "True" or
    "1", the call runs under cProfile and the statistics, sorted by
    cumulative time, are printed to stdout.  Otherwise *func* is simply
    called.
    """
    enabled = os.getenv("GRASS_TGIS_PROFILE")
    if enabled not in ("True", "1"):
        func()
        return
    import cProfile, pstats
    try:
        import StringIO as io
    except ImportError:
        import io
    profiler = cProfile.Profile()
    profiler.enable()
    func()
    profiler.disable()
    stream = io.StringIO()
    stats = pstats.Stats(profiler, stream=stream).sort_stats('cumulative')
    stats.print_stats()
    print(stream.getvalue())
# Backend of the temporal GIS: either "sqlite" or "pg".
# Populated by init(); None until then.
tgis_backend = None


def get_tgis_backend():
    """Return which temporal GIS backend is configured.

    :returns: either "sqlite" or "pg" (None before init() ran)
    """
    return tgis_backend
# Database connection string of the temporal GIS (as set with t.connect).
tgis_database = None


def get_tgis_database():
    """Return the temporal database string specified with t.connect
    (None before init() ran).
    """
    return tgis_database
# Version of the temporal framework API.  Must stay a positive integer;
# increase it on backward incompatible changes in the TGIS API.
tgis_version = 2
# Version of the temporal database SQL layout.  Must stay a positive
# integer; increase it on backward incompatible schema changes.
tgis_db_version = 2
# DB-API "paramstyle" of the active database backend module,
# set by init(); None until then.
tgis_dbmi_paramstyle = None


def get_tgis_dbmi_paramstyle():
    """Return the temporal database backend parameter style

    :returns: "qmark" or "" (None before init() was called)
    """
    return tgis_dbmi_paramstyle
# The current mapset, location and gisdbase are needed all over the
# framework, so init() caches them in these globals to avoid repeated
# g.gisenv calls.
current_mapset = None
current_location = None
current_gisdbase = None

###############################################################################


def get_current_mapset():
    """Return the current mapset

    The value is cached by init() in a global variable; reading that
    cache is the fastest way to receive the current mapset.
    """
    return current_mapset
###############################################################################
def get_current_location():
    """Return the current location

    The value is cached by init() in a global variable; reading that
    cache is the fastest way to receive the current location.
    """
    return current_location
###############################################################################
def get_current_gisdbase():
    """Return the current gis database (gisdbase)

    The value is cached by init() in a global variable; reading that
    cache is the fastest way to receive the current gisdbase.
    """
    return current_gisdbase
###############################################################################
# When True, maps can only be registered in space time datasets of the
# same mapset, and only maps in the current mapset can be inserted,
# updated or deleted from the temporal database.
# Override with: g.gisenv set="TGIS_DISABLE_MAPSET_CHECK=True"
# ATTENTION: Disabling this check may corrupt the temporal database.
# This feature is highly experimental and violates the grass
# permission guidance.
enable_mapset_check = True
# When True, timestamps of maps inserted or updated in the temporal
# database are additionally written as text files through the C-library
# timestamp interface.
# Override with: g.gisenv set="TGIS_DISABLE_TIMESTAMP_WRITE=True"
# ATTENTION: Disabling this may corrupt the temporal database.
# This feature is highly experimental and violates the grass
# permission guidance.
enable_timestamp_write = True


def get_enable_mapset_check():
    """Return True if the mapsets should be checked while insert, update,
    delete requests and space time dataset registration.

    If this global variable is set True, then maps can only be registered
    in space time datasets with the same mapset. In addition, only maps in
    the current mapset can be inserted, updated or deleted from the
    temporal database.
    Overwrite this global variable by:
    g.gisenv set="TGIS_DISABLE_MAPSET_CHECK=True"

    ..warning::

        Be aware to face corrupted temporal database in case this
        global variable is set to False. This feature is highly
        experimental and violates the grass permission guidance.
    """
    return enable_mapset_check
def get_enable_timestamp_write():
    """Return True if the map timestamps should be written to the spatial
    database metadata as well.

    If this global variable is set True, the timestamps of maps will be
    written as textfiles for each map that will be inserted or updated in
    the temporal database using the C-library timestamp interface.
    Overwrite this global variable by:
    g.gisenv set="TGIS_DISABLE_TIMESTAMP_WRITE=True"

    ..warning::

        Be aware that C-libraries can not access timestamp information if
        they are not written as spatial database metadata, hence modules
        that make use of timestamps using the C-library interface will not
        work with maps that were created without writing the timestamps.
    """
    return enable_timestamp_write
###############################################################################
# Global PyGRASS Messenger object that provides a fast and exit safe
# interface to the C-library message functions.
message_interface = None


def _init_tgis_message_interface(raise_on_error=False):
    """Initiate the global message interface

    :param raise_on_error: If True raise a FatalError exception in case of
                           a fatal error, call sys.exit(1) otherwise
    """
    global message_interface
    if message_interface is not None:
        # Already initialized, nothing to do
        return
    message_interface = messages.get_msgr(raise_on_error=raise_on_error)
def get_tgis_message_interface():
    """Return the temporal GIS message interface which is of type
    grass.pygrass.message.Messenger()

    Use this message interface to print messages to stdout using the
    GRASS C-library messaging system.
    """
    return message_interface
###############################################################################
# Global C-library interface object that provides a fast and exit safe
# interface to the C-library libgis, libraster, libraster3d and
# libvector functions.
c_library_interface = None


def _init_tgis_c_library_interface():
    """Set the global C-library interface variable that
    provides a fast and exit safe interface to the C-library libgis,
    libraster, libraster3d and libvector functions
    """
    global c_library_interface
    if c_library_interface is not None:
        # Already initialized, nothing to do
        return
    c_library_interface = CLibrariesInterface()
def get_tgis_c_library_interface():
    """Return the C-library interface that
    provides a fast and exit safe interface to the C-library libgis,
    libraster, libraster3d and libvector functions
    """
    return c_library_interface
###############################################################################
# When True, a FatalError exception is raised in case a fatal error
# occurs using the messenger interface; otherwise sys.exit(1) is called.
raise_on_error = False


def set_raise_on_error(raise_exp=True):
    """Define behavior on fatal error, invoked using the tgis messenger
    interface (msgr.fatal())

    The messenger interface will be restarted using the new error policy

    :param raise_exp: True to raise a FatalError exception instead of calling
                      sys.exit(1) when using the tgis messenger interface

    .. code-block:: python

        >>> import grass.temporal as tgis
        >>> tgis.init()
        >>> ignore = tgis.set_raise_on_error(False)
        >>> msgr = tgis.get_tgis_message_interface()
        >>> tgis.get_raise_on_error()
        False
        >>> msgr.fatal("Ohh no no no!")
        Traceback (most recent call last):
          File "__init__.py", line 239, in fatal
            sys.exit(1)
        SystemExit: 1

        >>> tgis.set_raise_on_error(True)
        False
        >>> msgr.fatal("Ohh no no no!")
        Traceback (most recent call last):
          File "__init__.py", line 241, in fatal
            raise FatalError(message)
        FatalError: Ohh no no no!

    :returns: current status
    """
    global raise_on_error

    previous_policy = raise_on_error
    raise_on_error = raise_exp

    # Propagate the new policy to a running messenger, or start one with
    # the requested policy if none exists yet
    if message_interface:
        message_interface.set_raise_on_error(raise_on_error)
    else:
        _init_tgis_message_interface(raise_on_error)

    return previous_policy
def get_raise_on_error():
    """Return True if a FatalError exception is raised instead of calling
    sys.exit(1) in case a fatal error was invoked with msgr.fatal()
    """
    return raise_on_error
###############################################################################
def get_tgis_version():
    """Get the version number of the temporal framework API

    :returns: The version number of the temporal framework
    """
    return tgis_version
###############################################################################
def get_tgis_db_version():
    """Get the version number of the temporal database SQL layout

    :returns: The version number of the temporal database
    """
    return tgis_db_version
###############################################################################
def get_tgis_metadata(dbif=None):
    """Return the tgis metadata table as a list of rows (dicts) or None if not
    present

    :param dbif: The database interface to be used (opened on demand)
    :returns: The selected rows with key/value columns or None
    """
    dbif, connected = init_dbif(dbif)

    # The metadata table only exists in an initialized temporal database;
    # treat any query failure as "table not present".  Catch Exception
    # instead of a bare except so that SystemExit/KeyboardInterrupt
    # still propagate.
    try:
        statement = "SELECT * FROM tgis_metadata;\n"
        dbif.execute(statement)
        rows = dbif.fetchall()
    except Exception:
        rows = None

    if connected:
        dbif.close()

    return rows
###############################################################################
# The temporal database string set with t.connect with the GRASS
# variables gisdbase, location and mapset already substituted.
tgis_database_string = None


def get_tgis_database_string():
    """Return the preprocessed temporal database string

    This string is the temporal database string set with t.connect
    that was processed to substitue location, gisdbase and mapset
    variables.
    """
    return tgis_database_string
###############################################################################
def get_sql_template_path():
    """Return the directory containing the SQL template scripts,
    i.e. $GISBASE/etc/sql."""
    gisbase = os.getenv("GISBASE")
    return os.path.join(gisbase, "etc", "sql")
###############################################################################
def stop_subprocesses():
    """Stop the messenger and C-interface subprocesses
    that are started by tgis.init()
    """
    if message_interface:
        message_interface.stop()
    if c_library_interface:
        c_library_interface.stop()


# Make sure the subprocesses are terminated when the interpreter exits
atexit.register(stop_subprocesses)
def get_available_temporal_mapsets():
    """Return a list of of mapset names with temporal database driver and names
    that are accessible from the current mapset.

    :returns: A dictionary, mapset names are keys, the tuple (driver,
              database) are the values
    """
    global c_library_interface
    global message_interface

    mapsets = c_library_interface.available_mapsets()

    tgis_mapsets = {}

    for mapset in mapsets:
        driver = c_library_interface.get_driver_name(mapset)
        database = c_library_interface.get_database_name(mapset)

        message_interface.debug(1, "get_available_temporal_mapsets: "
                                "\n mapset %s\n driver %s\n database %s" % (mapset,
                                driver, database))
        if driver and database:
            # Accept an sqlite connection only if the database file exists,
            # except for the current mapset where a missing database will be
            # created later.
            if (driver == "sqlite" and os.path.exists(database)) or mapset == get_current_mapset():
                tgis_mapsets[mapset] = (driver, database)

            # Warn if the connection is defined but the sqlite database
            # file does not exist
            if driver == "sqlite" and not os.path.exists(database):
                message_interface.warning("Temporal database connection defined as:\n" +
                                          database + "\nBut database file does not exist.")

    return tgis_mapsets
###############################################################################
def init(raise_fatal_error=False):
    """This function set the correct database backend from GRASS environmental
    variables and creates the grass temporal database structure for raster,
    vector and raster3d maps as well as for the space-time datasets strds,
    str3ds and stvds in case it does not exist.

    Several global variables are initiated and the messenger and C-library
    interface subprocesses are spawned.

    Re-run this function in case the following GRASS variables change while
    the process runs:

    - MAPSET
    - LOCATION_NAME
    - GISDBASE
    - TGIS_DISABLE_MAPSET_CHECK
    - TGIS_DISABLE_TIMESTAMP_WRITE

    Re-run this function if the following t.connect variables change while
    the process runs:

    - temporal GIS driver (set by t.connect driver=)
    - temporal GIS database (set by t.connect database=)

    The following environmental variables are checked:

    - GRASS_TGIS_PROFILE (True, False, 1, 0)
    - GRASS_TGIS_RAISE_ON_ERROR (True, False, 1, 0)

    ..warning::

        This functions must be called before any spatio-temporal processing
        can be started

    :param raise_fatal_error: Set this True to assure that the init()
                              function does not kill a persistent process
                              like the GUI. If set True a
                              grass.pygrass.messages.FatalError
                              exception will be raised in case a fatal
                              error occurs in the init process, otherwise
                              sys.exit(1) will be called.
    """
    # We need to set the correct database backend and several global variables
    # from the GRASS mapset specific environment variables of g.gisenv and
    # t.connect
    global tgis_backend
    global tgis_database
    global tgis_database_string
    global tgis_dbmi_paramstyle
    global raise_on_error
    global enable_mapset_check
    global enable_timestamp_write
    global current_mapset
    global current_location
    global current_gisdbase

    raise_on_error = raise_fatal_error

    # We must run t.connect at first to create the temporal database and to
    # get the environmental variables
    gscript.run_command("t.connect", flags="c")
    grassenv = gscript.gisenv()

    # Cache mapset, location and gisdbase for faster access
    current_mapset = grassenv["MAPSET"]
    current_location = grassenv["LOCATION_NAME"]
    current_gisdbase = grassenv["GISDBASE"]

    # Check environment variable GRASS_TGIS_RAISE_ON_ERROR
    if os.getenv("GRASS_TGIS_RAISE_ON_ERROR") == "True" or \
       os.getenv("GRASS_TGIS_RAISE_ON_ERROR") == "1":
        raise_on_error = True

    # Check if the script library raises on error, if so we do the same
    if gscript.get_raise_on_error() is True:
        raise_on_error = True

    # Start the GRASS message interface server
    _init_tgis_message_interface(raise_on_error)
    # Start the C-library interface server
    _init_tgis_c_library_interface()
    msgr = get_tgis_message_interface()
    msgr.debug(1, "Initiate the temporal database")
    msgr.debug(1, ("Raise on error id: %s" % str(raise_on_error)))

    ciface = get_tgis_c_library_interface()
    driver_string = ciface.get_driver_name()
    database_string = ciface.get_database_name()

    # Set the mapset check and the timestamp write switches
    # NOTE(review): gscript.encode() may return bytes on Python 3, in which
    # case the comparison against the str literals below can never be True
    # -- confirm the encode() semantics before relying on these switches.
    if "TGIS_DISABLE_MAPSET_CHECK" in grassenv:
        if gscript.encode(grassenv["TGIS_DISABLE_MAPSET_CHECK"]) == "True" or \
           gscript.encode(grassenv["TGIS_DISABLE_MAPSET_CHECK"]) == "1":
            enable_mapset_check = False
            msgr.warning("TGIS_DISABLE_MAPSET_CHECK is True")

    if "TGIS_DISABLE_TIMESTAMP_WRITE" in grassenv:
        if gscript.encode(grassenv["TGIS_DISABLE_TIMESTAMP_WRITE"]) == "True" or \
           gscript.encode(grassenv["TGIS_DISABLE_TIMESTAMP_WRITE"]) == "1":
            enable_timestamp_write = False
            msgr.warning("TGIS_DISABLE_TIMESTAMP_WRITE is True")

    if driver_string is not None and driver_string != "":
        driver_string = decode(driver_string)
        if driver_string == "sqlite":
            tgis_backend = driver_string
            try:
                import sqlite3
            except ImportError:
                msgr.error("Unable to locate the sqlite SQL Python interface"
                           " module sqlite3.")
                raise
            dbmi = sqlite3
        elif driver_string == "pg":
            tgis_backend = driver_string
            try:
                import psycopg2
            except ImportError:
                msgr.error("Unable to locate the Postgresql SQL Python "
                           "interface module psycopg2.")
                raise
            dbmi = psycopg2
        else:
            msgr.fatal(_("Unable to initialize the temporal DBMI interface. "
                         "Please use t.connect to specify the driver and the"
                         " database string"))
    else:
        # Set the default sqlite3 connection in case nothing was defined
        gscript.run_command("t.connect", flags="d")
        driver_string = ciface.get_driver_name()
        database_string = ciface.get_database_name()
        tgis_backend = driver_string
        try:
            import sqlite3
        except ImportError:
            msgr.error("Unable to locate the sqlite SQL Python interface"
                       " module sqlite3.")
            raise
        dbmi = sqlite3

    tgis_database_string = database_string
    # Remember the parameter style of the backend module
    tgis_dbmi_paramstyle = dbmi.paramstyle

    # We do not know if the database already exists
    db_exists = False
    dbif = SQLDatabaseInterfaceConnection()

    # Check if the database already exists
    if tgis_backend == "sqlite":
        # Check path of the sqlite database
        if os.path.exists(tgis_database_string):
            dbif.connect()
            # Check for raster_base table
            dbif.execute("SELECT name FROM sqlite_master WHERE type='table' "
                         "AND name='raster_base';")
            name = dbif.fetchone()
            if name and name[0] == "raster_base":
                db_exists = True
            dbif.close()
    elif tgis_backend == "pg":
        # Connect to database
        dbif.connect()
        # Check for raster_base table
        dbif.execute("SELECT EXISTS(SELECT * FROM information_schema.tables "
                     "WHERE table_name=%s)", ('raster_base',))
        if dbif.fetchone()[0]:
            db_exists = True

    backup_howto = "The format of your actual temporal database is not " \
                   "supported any more.\nSolution: You need to export it by " \
                   "restoring the GRASS GIS version used for creating this DB"\
                   ". From there, create a backup of your temporal database "\
                   "to avoid the loss of your temporal data.\nNotes: Use " \
                   "t.rast.export and t.vect.export to make a backup of your" \
                   " existing space time datasets.To safe the timestamps of" \
                   " your existing maps and space time datasets, use " \
                   "t.rast.list, t.vect.list and t.rast3d.list. "\
                   "You can register the existing time stamped maps easily if"\
                   " you export columns=id,start_time,end_time into text "\
                   "files and use t.register to register them again in new" \
                   " created space time datasets (t.create). After the backup"\
                   " remove the existing temporal database, a new one will be"\
                   " created automatically.\n"

    if db_exists is True:
        # Check the version of the temporal database
        dbif.close()
        dbif.connect()
        metadata = get_tgis_metadata(dbif)
        dbif.close()
        if metadata is None:
            msgr.fatal(_("Unable to receive temporal database metadata.\n"
                         "Current temporal database info:%(info)s") % (
                       {"info": get_database_info_string()}))
        for entry in metadata:
            if "tgis_version" in entry and entry[1] != str(get_tgis_version()):
                msgr.fatal(_("Unsupported temporal database: version mismatch."
                             "\n %(backup)s Supported temporal API version is:"
                             " %(api)i.\nPlease update your GRASS GIS "
                             "installation.\nCurrent temporal database info:"
                             "%(info)s") % ({"backup": backup_howto,
                                             "api": get_tgis_version(),
                                             "info": get_database_info_string()}))
            if "tgis_db_version" in entry and entry[1] != str(get_tgis_db_version()):
                msgr.fatal(_("Unsupported temporal database: version mismatch."
                             "\n %(backup)sSupported temporal database version"
                             " is: %(tdb)i\nCurrent temporal database info:"
                             "%(info)s") % ({"backup": backup_howto,
                                             # BUGFIX: report the supported
                                             # *database* version here, not the
                                             # API version as before
                                             "tdb": get_tgis_db_version(),
                                             "info": get_database_info_string()}))
        return

    create_temporal_database(dbif)
###############################################################################
def get_database_info_string():
    """Return a human readable summary of the DBMI module in use and the
    temporal database string (used in error messages)."""
    dbif = SQLDatabaseInterfaceConnection()

    lines = ["\nDBMI interface:..... " + str(dbif.get_dbmi().__name__),
             "\nTemporal database:.. " + str(get_tgis_database_string())]
    return "".join(lines)
###############################################################################
def create_temporal_database(dbif):
    """This function will create the temporal database

    It will create all tables and triggers that are needed to run
    the temporal GIS

    :param dbif: The database interface to be used
    """
    global tgis_backend
    global tgis_version
    global tgis_db_version
    global tgis_database_string

    template_path = get_sql_template_path()
    msgr = get_tgis_message_interface()

    def read_sql_template(file_name):
        # Read one SQL template; the with-statement guarantees the file
        # handle is closed (the previous open().read() calls leaked handles)
        with open(os.path.join(template_path, file_name), 'r') as sql_file:
            return sql_file.read()

    # Read all SQL scripts and templates
    map_tables_template_sql = read_sql_template("map_tables_template.sql")
    raster_metadata_sql = read_sql_template("raster_metadata_table.sql")
    raster3d_metadata_sql = read_sql_template("raster3d_metadata_table.sql")
    vector_metadata_sql = read_sql_template("vector_metadata_table.sql")
    raster_views_sql = read_sql_template("raster_views.sql")
    raster3d_views_sql = read_sql_template("raster3d_views.sql")
    vector_views_sql = read_sql_template("vector_views.sql")

    stds_tables_template_sql = read_sql_template("stds_tables_template.sql")
    strds_metadata_sql = read_sql_template("strds_metadata_table.sql")
    str3ds_metadata_sql = read_sql_template("str3ds_metadata_table.sql")
    stvds_metadata_sql = read_sql_template("stvds_metadata_table.sql")
    strds_views_sql = read_sql_template("strds_views.sql")
    str3ds_views_sql = read_sql_template("str3ds_views.sql")
    stvds_views_sql = read_sql_template("stvds_views.sql")

    # Create the raster, raster3d and vector tables SQL statements
    raster_tables_sql = map_tables_template_sql.replace("GRASS_MAP", "raster")
    vector_tables_sql = map_tables_template_sql.replace("GRASS_MAP", "vector")
    raster3d_tables_sql = map_tables_template_sql.replace(
        "GRASS_MAP", "raster3d")

    # Create the space-time raster, raster3d and vector dataset tables
    # SQL statements
    strds_tables_sql = stds_tables_template_sql.replace("STDS", "strds")
    stvds_tables_sql = stds_tables_template_sql.replace("STDS", "stvds")
    str3ds_tables_sql = stds_tables_template_sql.replace("STDS", "str3ds")

    msgr.message(_("Creating temporal database: %s" % (str(tgis_database_string))))

    if tgis_backend == "sqlite":
        # We need to create the sqlite3 database path if it does not exist
        tgis_dir = os.path.dirname(tgis_database_string)
        if not os.path.exists(tgis_dir):
            try:
                os.makedirs(tgis_dir)
            except Exception as e:
                msgr.fatal(_("Unable to create SQLite temporal database\n"
                             "Exception: %s\nPlease use t.connect to set a "
                             "read- and writable temporal database path" % (e)))

        # Set up the trigger that takes care of
        # the correct deletion of entries across the different tables
        delete_trigger_sql = read_sql_template("sqlite3_delete_trigger.sql")
        indexes_sql = read_sql_template("sqlite3_indexes.sql")
    else:
        # Set up the trigger that takes care of
        # the correct deletion of entries across the different tables
        delete_trigger_sql = read_sql_template("postgresql_delete_trigger.sql")
        indexes_sql = read_sql_template("postgresql_indexes.sql")

    # Connect now to the database
    if dbif.connected is not True:
        dbif.connect()

    # Execute the SQL statements
    # Create the global tables for the native grass datatypes
    dbif.execute_transaction(raster_tables_sql)
    dbif.execute_transaction(raster_metadata_sql)
    dbif.execute_transaction(raster_views_sql)
    dbif.execute_transaction(vector_tables_sql)
    dbif.execute_transaction(vector_metadata_sql)
    dbif.execute_transaction(vector_views_sql)
    dbif.execute_transaction(raster3d_tables_sql)
    dbif.execute_transaction(raster3d_metadata_sql)
    dbif.execute_transaction(raster3d_views_sql)
    # Create the tables for the new space-time datatypes
    dbif.execute_transaction(strds_tables_sql)
    dbif.execute_transaction(strds_metadata_sql)
    dbif.execute_transaction(strds_views_sql)
    dbif.execute_transaction(stvds_tables_sql)
    dbif.execute_transaction(stvds_metadata_sql)
    dbif.execute_transaction(stvds_views_sql)
    dbif.execute_transaction(str3ds_tables_sql)
    dbif.execute_transaction(str3ds_metadata_sql)
    dbif.execute_transaction(str3ds_views_sql)

    # The delete trigger
    dbif.execute_transaction(delete_trigger_sql)
    # The indexes
    dbif.execute_transaction(indexes_sql)

    # Create the tgis metadata table to store the database
    # initial configuration
    # The metadata table content
    metadata = {}
    metadata["tgis_version"] = tgis_version
    metadata["tgis_db_version"] = tgis_db_version
    metadata["creation_time"] = datetime.today()
    _create_tgis_metadata_table(metadata, dbif)

    dbif.close()
###############################################################################
def _create_tgis_metadata_table(content, dbif=None):
    """Create the temporal gis metadata table which stores all metadata
    information about the temporal database.

    :param content: The dictionary that stores the key:value metadata
                    that should be stored in the metadata table
    :param dbif: The database interface to be used (opened on demand)
    """
    dbif, connected = init_dbif(dbif)

    dbif.execute_transaction(
        "CREATE TABLE tgis_metadata (key VARCHAR NOT NULL, value VARCHAR);\n")

    # Store every key/value pair as one row
    for key, value in content.items():
        dbif.execute_transaction(
            "INSERT INTO tgis_metadata (key, value) VALUES "
            "('%s' , '%s');\n" % (str(key), str(value)))

    if connected:
        dbif.close()
###############################################################################
class SQLDatabaseInterfaceConnection(object):
    """Facade over one DBConnection per accessible temporal mapset.

    On construction one DBConnection is created for every mapset reported
    by get_available_temporal_mapsets(); mapsets that share the same
    database string share the same underlying connection.  All query
    methods accept an optional mapset argument and dispatch to the
    connection of that mapset (default: the current mapset).
    """
    def __init__(self):
        # mapset name -> (driver, database string)
        self.tgis_mapsets = get_available_temporal_mapsets()
        self.current_mapset = get_current_mapset()
        # mapset name -> DBConnection (possibly shared between mapsets)
        self.connections = {}
        self.connected = False

        # database string -> DBConnection, so that mapsets pointing to the
        # same database share a single connection object
        self.unique_connections = {}

        for mapset in self.tgis_mapsets.keys():
            driver, dbstring = self.tgis_mapsets[mapset]

            if dbstring not in self.unique_connections.keys():
                self.unique_connections[dbstring] = DBConnection(backend=driver,
                                                                 dbstring=dbstring)

            self.connections[mapset] = self.unique_connections[dbstring]

        self.msgr = get_tgis_message_interface()

    def get_dbmi(self, mapset=None):
        # Return the DB-API module (e.g. sqlite3) used for the given
        # mapset; defaults to the current mapset
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        return self.connections[mapset].dbmi

    def rollback(self, mapset=None):
        """
        Roll back the last transaction. This must be called
        in case a new query should be performed after a db error.

        This is only relevant for postgresql database.
        """
        if mapset is None:
            mapset = self.current_mapset
        # NOTE(review): the mapset is resolved but nothing is rolled back
        # here -- confirm whether delegating to
        # self.connections[mapset].rollback() was intended.

    def connect(self):
        """Connect to the DBMI to execute SQL statements

        Supported backends are sqlite3 and postgresql
        """
        # Open every per-mapset connection that is not yet connected
        for mapset in self.tgis_mapsets.keys():
            driver, dbstring = self.tgis_mapsets[mapset]
            conn = self.connections[mapset]
            if conn.is_connected() is False:
                conn.connect(dbstring)

        self.connected = True

    def is_connected(self):
        # True after connect() succeeded and before close() was called
        return self.connected

    def close(self):
        """Close the DBMI connection

        There may be several temporal databases in a location, hence
        close all temporal databases that have been opened.
        """
        for key in self.unique_connections.keys():
            self.unique_connections[key].close()

        self.connected = False

    def mogrify_sql_statement(self, content, mapset=None):
        """Return the SQL statement and arguments as executable SQL string

        :param content: The content as tuple with two entries, the first
                        entry is the SQL statement with DBMI specific
                        place holder (?), the second entry is the argument
                        list that should substitute the place holder.
        :param mapset: The mapset of the abstract dataset or temporal
                       database location, if None the current mapset
                       will be used
        """
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to mogrify sql statement. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].mogrify_sql_statement(content)

    def check_table(self, table_name, mapset=None):
        """Check if a table exists in the temporal database

        :param table_name: The name of the table to be checked for existence
        :param mapset: The mapset of the abstract dataset or temporal
                       database location, if None the current mapset
                       will be used
        :returns: True if the table exists, False otherwise

        TODO:
        There may be several temporal databases in a location, hence
        the mapset is used to query the correct temporal database.
        """
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to check table. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].check_table(table_name)

    def execute(self, statement, args=None, mapset=None):
        """Execute an SQL statement on the connection of the given mapset.

        :param mapset: The mapset of the abstract dataset or temporal
                       database location, if None the current mapset
                       will be used
        """
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to execute sql statement. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].execute(statement, args)

    def fetchone(self, mapset=None):
        # Fetch a single result row from the connection of the given mapset
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to fetch one. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].fetchone()

    def fetchall(self, mapset=None):
        # Fetch all result rows from the connection of the given mapset
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to fetch all. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].fetchall()

    def execute_transaction(self, statement, mapset=None):
        """Execute a transactional SQL statement

        The BEGIN and END TRANSACTION statements will be added automatically
        to the sql statement

        :param statement: The executable SQL statement or SQL script
        """
        if mapset is None:
            mapset = self.current_mapset

        mapset = decode(mapset)
        if mapset not in self.tgis_mapsets.keys():
            self.msgr.fatal(_("Unable to execute transaction. " +
                              self._create_mapset_error_message(mapset)))

        return self.connections[mapset].execute_transaction(statement)

    def _create_mapset_error_message(self, mapset):
        # Build the explanation appended to fatal messages when a mapset
        # has no accessible temporal database
        return("You have no permission to "
               "access mapset <%(mapset)s>, or "
               "mapset <%(mapset)s> has no temporal database. "
               "Accessible mapsets are: <%(mapsets)s>" % \
               {"mapset": decode(mapset),
                "mapsets": ','.join(self.tgis_mapsets.keys())})
###############################################################################
class DBConnection(object):
"""This class represents the database interface connection
and provides access to the chosen backend modules.
The following DBMS are supported:
- sqlite via the sqlite3 standard library
- postgresql via psycopg2
"""
def __init__(self, backend=None, dbstring=None):
""" Constructor of a database connection
param backend:The database backend sqlite or pg
param dbstring: The database connection string
"""
self.connected = False
if backend is None:
global tgis_backend
if decode(tgis_backend) == "sqlite":
self.dbmi = sqlite3
else:
self.dbmi = psycopg2
else:
if decode(backend) == "sqlite":
self.dbmi = sqlite3
else:
self.dbmi = psycopg2
if dbstring is None:
global tgis_database_string
self.dbstring = tgis_database_string
self.dbstring = dbstring
self.msgr = get_tgis_message_interface()
self.msgr.debug(1, "DBConnection constructor:"\
"\n backend: %s"\
"\n dbstring: %s"%(backend, self.dbstring))
#"\n traceback:%s"%(backend, self.dbstring,
#str(" \n".join(traceback.format_stack()))))
def __del__(self):
if self.connected is True:
self.close()
    def is_connected(self):
        """Return True while a connection opened with connect() has not
        been closed again (tracked via the boolean ``connected`` flag)."""
        return self.connected
def rollback(self):
"""
Roll back the last transaction. This must be called
in case a new query should be performed after a db error.
This is only relevant for postgresql database.
"""
if self.dbmi.__name__ == "psycopg2":
if self.connected:
self.connection.rollback()
    def connect(self, dbstring=None):
        """Connect to the DBMI to execute SQL statements.

        Supported backends are sqlite3 and postgresql.

        :param dbstring: The database connection string; defaults to the
                         string stored on this object at construction time
        """
        # Connection in the current mapset
        if dbstring is None:
            dbstring = self.dbstring
        dbstring = decode(dbstring)
        try:
            if self.dbmi.__name__ == "sqlite3":
                self.connection = self.dbmi.connect(dbstring,
                        detect_types=self.dbmi.PARSE_DECLTYPES | self.dbmi.PARSE_COLNAMES)
                self.connection.row_factory = self.dbmi.Row
                # isolation_level = None puts sqlite3 in autocommit mode;
                # transactions are then managed explicitly by the callers.
                self.connection.isolation_level = None
                self.connection.text_factory = str
                self.cursor = self.connection.cursor()
                # Trade durability for speed — presumably acceptable because the
                # temporal database can be rebuilt (TODO confirm).
                self.cursor.execute("PRAGMA synchronous = OFF")
                self.cursor.execute("PRAGMA journal_mode = MEMORY")
            elif self.dbmi.__name__ == "psycopg2":
                self.connection = self.dbmi.connect(dbstring)
                #self.connection.set_isolation_level(dbmi.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
                # DictCursor allows rows to be addressed by column name,
                # mirroring the sqlite3 Row factory above.
                self.cursor = self.connection.cursor(
                    cursor_factory=self.dbmi.extras.DictCursor)
            self.connected = True
        except Exception as e:
            self.msgr.fatal(_("Unable to connect to %(db)s database: "
                              "%(string)s\nException: \"%(ex)s\"\nPlease use"
                              " t.connect to set a read- and writable "
                              "temporal database backend") % (
                {"db": self.dbmi.__name__,
                 "string": tgis_database_string, "ex": e, }))
    def close(self):
        """Commit any pending changes and close the DBMI connection.

        TODO:
        There may be several temporal databases in a location, hence
        close all temporal databases that have been opened. Use a dictionary
        to manage different connections.
        """
        # Commit before closing so short-lived connections persist their work.
        self.connection.commit()
        self.cursor.close()
        self.connected = False
    def mogrify_sql_statement(self, content):
        """Return the SQL statement with its arguments substituted in,
        i.e. as a single executable SQL string.

        :param content: A tuple with two entries: the first entry is the
                        SQL statement with DBMI specific place holders
                        (?), the second entry is the argument list that
                        should substitute the place holders.
        :returns: The SQL statement as a single string

        Usage:

        .. code-block:: python

            >>> init()
            >>> dbif = SQLDatabaseInterfaceConnection()
            >>> dbif.mogrify_sql_statement(["SELECT ctime FROM raster_base WHERE id = ?",
            ... ["soil@PERMANENT",]])
            "SELECT ctime FROM raster_base WHERE id = 'soil@PERMANENT'"
        """
        sql = content[0]
        args = content[1]
        if self.dbmi.__name__ == "psycopg2":
            if len(args) == 0:
                return sql
            else:
                if self.connected:
                    try:
                        # psycopg2 performs proper, driver-level substitution.
                        return self.cursor.mogrify(sql, args)
                    except Exception as exc:
                        print(sql, args)
                        raise exc
                else:
                    # Open a temporary connection just for mogrify().
                    self.connect()
                    statement = self.cursor.mogrify(sql, args)
                    self.close()
                    return statement
        elif self.dbmi.__name__ == "sqlite3":
            if len(args) == 0:
                return sql
            else:
                # Unfortunately as sqlite does not support
                # the transformation of sql strings and qmarked or
                # named arguments we must make our hands dirty
                # and do it by ourself. :(
                # Doors are open for SQL injection because of the
                # limited python sqlite3 implementation!!!
                pos = 0
                count = 0
                # Hard cap on substitutions to guarantee loop termination.
                maxcount = 100
                statement = sql
                while count < maxcount:
                    # NOTE(review): the search starts at pos + 1, so a "?"
                    # at index 0 of the statement would be missed — confirm
                    # whether that case can occur in practice.
                    pos = statement.find("?", pos + 1)
                    if pos == -1:
                        break
                    # Splice a literal for the next argument into the
                    # statement at the place-holder position.
                    if args[count] is None:
                        statement = "%sNULL%s" % (statement[0:pos],
                                                  statement[pos + 1:])
                    elif isinstance(args[count], (int, long)):
                        statement = "%s%d%s" % (statement[0:pos], args[count],
                                                statement[pos + 1:])
                    elif isinstance(args[count], float):
                        statement = "%s%f%s" % (statement[0:pos], args[count],
                                                statement[pos + 1:])
                    elif isinstance(args[count], datetime):
                        # Same quoting as the string default below; kept as a
                        # separate branch for explicitness.
                        statement = "%s\'%s\'%s" % (statement[0:pos], str(args[count]),
                                                    statement[pos + 1:])
                    else:
                        # Default is a string, this works for datetime
                        # objects too
                        statement = "%s\'%s\'%s" % (statement[0:pos],
                                                    str(args[count]),
                                                    statement[pos + 1:])
                    count += 1
                return statement
def check_table(self, table_name):
"""Check if a table exists in the temporal database
:param table_name: The name of the table to be checked for existence
:param mapset: The mapset of the abstract dataset or temporal
database location, if None the current mapset
will be used
:returns: True if the table exists, False otherwise
TODO:
There may be several temporal databases in a location, hence
the mapset is used to query the correct temporal database.
"""
table_exists = False
connected = False
if not self.connected:
self.connect()
connected = True
# Check if the database already exists
if self.dbmi.__name__ == "sqlite3":
self.cursor.execute("SELECT name FROM sqlite_master WHERE "
"type='table' AND name='%s';" % table_name)
name = self.cursor.fetchone()
if name and name[0] == table_name:
table_exists = True
else:
# Check for raster_base table
self.cursor.execute("SELECT EXISTS(SELECT * FROM information_schema.tables "
"WHERE table_name=%s)", ('%s' % table_name,))
if self.cursor.fetchone()[0]:
table_exists = True
if connected:
self.close()
return table_exists
def execute(self, statement, args=None):
"""Execute a SQL statement
:param statement: The executable SQL statement or SQL script
"""
connected = False
if not self.connected:
self.connect()
connected = True
try:
if args:
self.cursor.execute(statement, args)
else:
self.cursor.execute(statement)
except:
if connected:
self.close()
self.msgr.error(_("Unable to execute :\n %(sql)s" %
{"sql": statement}))
raise
if connected:
self.close()
def fetchone(self):
if self.connected:
return self.cursor.fetchone()
return None
def fetchall(self):
if self.connected:
return self.cursor.fetchall()
return None
def execute_transaction(self, statement, mapset=None):
"""Execute a transactional SQL statement
The BEGIN and END TRANSACTION statements will be added automatically
to the sql statement
:param statement: The executable SQL statement or SQL script
"""
connected = False
if not self.connected:
self.connect()
connected = True
sql_script = ""
sql_script += "BEGIN TRANSACTION;\n"
sql_script += statement
sql_script += "END TRANSACTION;"
try:
if self.dbmi.__name__ == "sqlite3":
self.cursor.executescript(statement)
else:
self.cursor.execute(statement)
self.connection.commit()
except:
if connected:
self.close()
self.msgr.error(_("Unable to execute transaction:\n %(sql)s" %
{"sql": statement}))
raise
if connected:
self.close()
###############################################################################
def init_dbif(dbif):
    """Return a connected database interface plus an ownership flag.

    If *dbif* is None a new SQLDatabaseInterfaceConnection is created
    and connected; if it exists but is not connected, it gets connected.
    In both cases the flag is True and the caller is responsible for
    closing the connection.  An already-connected *dbif* is returned
    unchanged with the flag False.

    :returns: the tuple (dbif, True|False)

    Usage code sample:

    .. code-block:: python

        dbif, connect = tgis.init_dbif(None)
        sql = dbif.mogrify_sql_statement(["SELECT * FROM raster_base WHERE id = ?",
                                          ["soil@PERMANENT"]])
        dbif.execute_transaction(sql)
        if connect:
            dbif.close()
    """
    if dbif is None:
        dbif = SQLDatabaseInterfaceConnection()
        dbif.connect()
        return dbif, True
    if not dbif.is_connected():
        dbif.connect()
        return dbif, True
    return dbif, False
###############################################################################
if __name__ == "__main__":
    # Run the doctests embedded in this module when executed directly.
    import doctest
    doctest.testmod()
| 37.170749 | 100 | 0.589111 |
import os
import sys
import grass.script as gscript
if sys.version_info.major == 3:
long = int
from .c_libraries_interface import *
from grass.pygrass import messages
from grass.script.utils import decode, encode
try:
import sqlite3
except ImportError:
pass
try:
import psycopg2
import psycopg2.extras
except:
pass
import atexit
from datetime import datetime
| true | true |
f72b21cb7cd90c4cedf514ee804f2b47f748ee67 | 4,395 | py | Python | runner.py | Robinson04/mdscript | 7a89a4453f0266a5ed318eceebc12b401e419ff4 | [
"MIT"
] | null | null | null | runner.py | Robinson04/mdscript | 7a89a4453f0266a5ed318eceebc12b401e419ff4 | [
"MIT"
] | 1 | 2021-07-27T21:03:40.000Z | 2021-07-27T21:03:40.000Z | runner.py | Robinson04/mdscript | 7a89a4453f0266a5ed318eceebc12b401e419ff4 | [
"MIT"
] | null | null | null | import logging
import os
import re
from pathlib import Path
from typing import Any
from mdscript.files_dependencies_manager import FilesDependenciesManager
from mdscript.watcher import Watcher
class Runner:
    """Renders mdscript markdown sources and keeps watching them.

    A Runner walks ``base_dirpath`` for source files whose name starts
    with ``__``, replaces every ``{{name::attribute::}}`` tag with the
    output of the transformer registered under ``name`` in
    ``config.transformers`` and writes the result next to the source
    file with the ``__`` prefix stripped.
    """

    def __init__(self, config: Any, base_dirpath: str):
        self.config = config
        self.base_dirpath = base_dirpath
        self.watcher = Watcher(runner=self)
        self.files_dependencies = FilesDependenciesManager(watcher=self.watcher)

    def _run_in_file(self, source_filepath: str, output_filepath: str, run_test: bool):
        """Render a single source file to *output_filepath*.

        :param run_test: when True, each transformer's test() hook runs
                         before its transform().
        """
        try:
            with open(source_filepath, 'r') as source_markdown_file:
                source_file_content = source_markdown_file.read()
                rendered_file_content = ""
                remaining_unprocessed_file_content = source_file_content
                # One combined regex finds any registered transformer tag.
                # re.escape keeps transformer names that contain regex
                # meta-characters from corrupting the pattern.
                transformers_names_selectors: str = '|'.join(
                    re.escape(name) for name in self.config.transformers.keys())
                # BUGFIX: the attribute group must be non-greedy ((.|\n)*?).
                # The previous greedy form swallowed everything up to the
                # LAST '::}}' in the file, so a file with two or more tags
                # collapsed into a single bogus match.
                transformers_regex = '({{)' + f'({transformers_names_selectors})' + '(::)((.|\n)*?)(::}})'
                for match in re.finditer(pattern=transformers_regex, string=source_file_content):
                    match_start = match.start()
                    match_end = match.end()
                    # Match offsets are absolute in the full source; convert
                    # them to positions inside the still-unconsumed tail.
                    index_relative_to_remaining_unprocessed = len(source_file_content) - len(remaining_unprocessed_file_content)
                    unprocessed_text_pre_match = remaining_unprocessed_file_content[0:match_start - index_relative_to_remaining_unprocessed]
                    remaining_unprocessed_file_content = remaining_unprocessed_file_content[match_end - index_relative_to_remaining_unprocessed:]
                    transformer_name = match[2]
                    transformer_attribute = match[4]
                    transformer_class_type = self.config.transformers.get(transformer_name, None)
                    if transformer_class_type is None:
                        raise Exception(f"No transformer found for {transformer_name}")
                    transformer_instance = transformer_class_type(
                        runner=self, source_filepath=source_filepath, attribute=transformer_attribute
                    )
                    if run_test is True:
                        transformer_instance.test()
                    transformed_content = transformer_instance.transform()
                    rendered_file_content += f"{unprocessed_text_pre_match}{transformed_content}"
                rendered_file_content += remaining_unprocessed_file_content
                with open(output_filepath, 'w+') as output_file:
                    output_file.write(rendered_file_content)
        except Exception as e:
            # Best effort: one broken source file must not stop the runner.
            logging.warning(e)

    def _run_with_filepath(self, source_filepath: str, run_test: bool):
        """Render one source file; the output name drops the '__' prefix."""
        source_filepath_object = Path(source_filepath)
        formatted_output_filename = source_filepath_object.name[2:]
        output_filepath = os.path.join(source_filepath_object.parent, formatted_output_filename)
        self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_test)

    def _run_in_folder(self, dirpath: str, run_tests: bool):
        """Recursively render every '__'-prefixed file below *dirpath*."""
        for root_dirpath, dirs, filenames in os.walk(dirpath):
            for filename in filenames:
                if filename[0:2] == '__':
                    source_filepath = os.path.join(root_dirpath, filename)
                    output_filename = filename[2:]
                    output_filepath = os.path.join(root_dirpath, output_filename)
                    self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_tests)

    def _start(self, run_tests: bool):
        # Build everything once so all dependency files are indexed by the
        # watcher, then keep watching base_dirpath for changes.
        self._run_in_folder(dirpath=self.base_dirpath, run_tests=run_tests)
        self.watcher.start()

    def start(self):
        """Build all files and start watching, skipping transformer tests."""
        self._start(run_tests=False)

    def start_with_tests(self):
        """Build all files and start watching, running transformer tests."""
        self._start(run_tests=True)
| 49.943182 | 145 | 0.669852 | import logging
import os
import re
from pathlib import Path
from typing import Any
from mdscript.files_dependencies_manager import FilesDependenciesManager
from mdscript.watcher import Watcher
class Runner:
def __init__(self, config: Any, base_dirpath: str):
self.config = config
self.base_dirpath = base_dirpath
self.watcher = Watcher(runner=self)
self.files_dependencies = FilesDependenciesManager(watcher=self.watcher)
def _run_in_file(self, source_filepath: str, output_filepath: str, run_test: bool):
try:
with open(source_filepath, 'r') as source_markdown_file:
source_file_content = source_markdown_file.read()
rendered_file_content = ""
remaining_unprocessed_file_content = source_file_content
transformers_names_selectors: str = '|'.join(self.config.transformers.keys())
transformers_regex = '({{)' + f'({transformers_names_selectors})' + '(::)((.|\n)*)(::}})'
for match in re.finditer(pattern=transformers_regex, string=source_file_content):
match_start = match.start()
match_end = match.end()
index_relative_to_remaining_unprocessed = len(source_file_content) - len(remaining_unprocessed_file_content)
unprocessed_text_pre_match = remaining_unprocessed_file_content[0:match_start - index_relative_to_remaining_unprocessed]
remaining_unprocessed_file_content = remaining_unprocessed_file_content[match_end - index_relative_to_remaining_unprocessed:]
transformer_name = match[2]
transformer_attribute = match[4]
transformer_class_type = self.config.transformers.get(transformer_name, None)
if transformer_class_type is None:
raise Exception(f"No transformer found for {transformer_name}")
transformer_instance = transformer_class_type(
runner=self, source_filepath=source_filepath, attribute=transformer_attribute
)
if run_test is True:
transformer_instance.test()
transformed_content = transformer_instance.transform()
rendered_file_content += f"{unprocessed_text_pre_match}{transformed_content}"
rendered_file_content += remaining_unprocessed_file_content
with open(output_filepath, 'w+') as output_file:
output_file.write(rendered_file_content)
except Exception as e:
logging.warning(e)
def _run_with_filepath(self, source_filepath: str, run_test: bool):
source_filepath_object = Path(source_filepath)
formatted_output_filename = source_filepath_object.name[2:]
output_filepath = os.path.join(source_filepath_object.parent, formatted_output_filename)
self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_test)
def _run_in_folder(self, dirpath: str, run_tests: bool):
for root_dirpath, dirs, filenames in os.walk(dirpath):
for filename in filenames:
if filename[0:2] == '__':
source_filepath = os.path.join(root_dirpath, filename)
output_filename = filename[2:]
output_filepath = os.path.join(root_dirpath, output_filename)
self._run_in_file(source_filepath=source_filepath, output_filepath=output_filepath, run_test=run_tests)
def _start(self, run_tests: bool):
self._run_in_folder(dirpath=self.base_dirpath, run_tests=run_tests)
self.watcher.start()
def start(self):
self._start(run_tests=False)
def start_with_tests(self):
self._start(run_tests=True)
| true | true |
f72b239857d42e26a3ecdb3d5902e5cf5b358e32 | 2,569 | py | Python | verilator/dut_gen.py | mlulaj/fuzzing | 81e17a3363490361475bfd9ae28a5ae495be27b8 | [
"BSD-3-Clause"
] | 48 | 2018-09-26T03:35:37.000Z | 2022-03-20T05:05:56.000Z | verilator/dut_gen.py | mlulaj/fuzzing | 81e17a3363490361475bfd9ae28a5ae495be27b8 | [
"BSD-3-Clause"
] | 10 | 2018-07-19T21:16:22.000Z | 2021-09-06T22:21:01.000Z | verilator/dut_gen.py | mlulaj/fuzzing | 81e17a3363490361475bfd9ae28a5ae495be27b8 | [
"BSD-3-Clause"
] | 6 | 2020-02-06T01:33:54.000Z | 2021-08-29T21:20:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2018, Kevin Laeufer <ekiwi@berkeley.edu>
# Generates the `dut.hpp` file which contains dut specific interface code
# from the TOML dut description file.
import os, sys, argparse
import toml
# C++ header template rendered by the __main__ block below.  The {name}
# place holders are filled via str.format; literal C braces are doubled.
template = """
// This file was generated from {conf_toml} using the dut_gen.py script.
// It contains DUt specific interface code for the verilator C++ test harness.
#ifndef DUT_CONF_HPP
#define DUT_CONF_HPP
#if defined(E2E)
#include <V{toplevel}_E2EHarness.h>
#define TOP_TYPE V{toplevel}_E2EHarness
#else
#include <V{toplevel}_VHarness.h>
#define TOP_TYPE V{toplevel}_VHarness
#endif
#define TOPLEVEL_STR "{toplevel}"
static constexpr size_t CoverageSize = {cov_size};
static constexpr size_t InputSize = {input_size};
static inline void apply_input(TOP_TYPE* top, const uint8_t* input) {{
{apply_input}
}}
static inline void read_coverage(TOP_TYPE* top, uint8_t* coverage) {{
{read_coverage}
}}
#endif // DUT_CONF_HPP
"""
# Buffers exchanged with the harness are padded to 8-byte words.
align = 8

def bits_to_size(bits):
    """Return the byte size needed to hold *bits* bits, rounded up to a
    multiple of ``align`` bytes."""
    # Renamed from `bytes`, which shadowed the builtin of the same name.
    num_bytes = (bits + 7) // 8                # bits -> bytes, rounding up
    words = (num_bytes + align - 1) // align   # bytes -> aligned words
    return words * align
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='generate DUT specific verilator code')
    parser.add_argument('-o', '--output', help='dut header file name', required=True)
    parser.add_argument('-i', '--input', help='toml dut description', required=True)
    args = parser.parse_args()
    conf_toml = args.input
    # Validate both paths up front and exit with a non-zero status on error.
    if not os.path.isfile(conf_toml):
        # NOTE(review): "dur" looks like a typo for "dut" in this message.
        sys.stderr.write("dur config file `{}` not found\n".format(conf_toml))
        sys.exit(1)
    header = args.output
    header_dir = os.path.dirname(os.path.abspath(header))
    if not os.path.isdir(header_dir):
        sys.stderr.write("output directory `{}` does not exist\n".format(header_dir))
        sys.exit(1)
    conf = toml.loads(open(conf_toml).read())
    # Total widths of all input pins / coverage counters, padded to words.
    input_bits = sum(ii['width'] for ii in conf['input'])
    input_size = bits_to_size(input_bits)
    cov_bits = sum(counter['width'] for counter in conf['counter'])
    # the cycles count in front of the coverage feedback takes 16bit
    cov_size = bits_to_size(cov_bits + 2 * 8) - 2
    # One C++ line per byte copied between the harness and the DUT ports.
    i_line = "\ttop->io_input_bytes_{0: <3} = input[{0: >3}];"
    c_line = "\tcoverage[{0: >3}] = top->io_coverage_bytes_{0};"
    dd = { 'conf_toml': conf_toml, 'toplevel': conf['general']['top'],
           'cov_size': cov_size, 'input_size': input_size,
           'apply_input': "\n".join(i_line.format(ii) for ii in range(input_size)),
           'read_coverage': "\n".join(c_line.format(ii) for ii in range(cov_size))
    }
    output = template.format(**dd)
open(header, 'w').write(output) | 30.583333 | 82 | 0.708836 |
import os, sys, argparse
import toml
template = """
// This file was generated from {conf_toml} using the dut_gen.py script.
// It contains DUt specific interface code for the verilator C++ test harness.
#ifndef DUT_CONF_HPP
#define DUT_CONF_HPP
#if defined(E2E)
#include <V{toplevel}_E2EHarness.h>
#define TOP_TYPE V{toplevel}_E2EHarness
#else
#include <V{toplevel}_VHarness.h>
#define TOP_TYPE V{toplevel}_VHarness
#endif
#define TOPLEVEL_STR "{toplevel}"
static constexpr size_t CoverageSize = {cov_size};
static constexpr size_t InputSize = {input_size};
static inline void apply_input(TOP_TYPE* top, const uint8_t* input) {{
{apply_input}
}}
static inline void read_coverage(TOP_TYPE* top, uint8_t* coverage) {{
{read_coverage}
}}
#endif // DUT_CONF_HPP
"""
align = 8
def bits_to_size(bits):
bytes = (bits + 7) // 8
words = (bytes + align - 1) // align
return words * align
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='generate DUT specific verilator code')
parser.add_argument('-o', '--output', help='dut header file name', required=True)
parser.add_argument('-i', '--input', help='toml dut description', required=True)
args = parser.parse_args()
conf_toml = args.input
if not os.path.isfile(conf_toml):
sys.stderr.write("dur config file `{}` not found\n".format(conf_toml))
sys.exit(1)
header = args.output
header_dir = os.path.dirname(os.path.abspath(header))
if not os.path.isdir(header_dir):
sys.stderr.write("output directory `{}` does not exist\n".format(header_dir))
sys.exit(1)
conf = toml.loads(open(conf_toml).read())
input_bits = sum(ii['width'] for ii in conf['input'])
input_size = bits_to_size(input_bits)
cov_bits = sum(counter['width'] for counter in conf['counter'])
cov_size = bits_to_size(cov_bits + 2 * 8) - 2
i_line = "\ttop->io_input_bytes_{0: <3} = input[{0: >3}];"
c_line = "\tcoverage[{0: >3}] = top->io_coverage_bytes_{0};"
dd = { 'conf_toml': conf_toml, 'toplevel': conf['general']['top'],
'cov_size': cov_size, 'input_size': input_size,
'apply_input': "\n".join(i_line.format(ii) for ii in range(input_size)),
'read_coverage': "\n".join(c_line.format(ii) for ii in range(cov_size))
}
output = template.format(**dd)
open(header, 'w').write(output) | true | true |
f72b24aadd868431479c08d35f7980c4d40e563c | 5,289 | py | Python | deepctr/models/din.py | BradyBromley/DeepCTR | 3d12ffc0e0a5e893dce8bd315824c180445b772e | [
"Apache-2.0"
] | 2 | 2019-11-07T10:17:40.000Z | 2020-04-13T14:25:14.000Z | deepctr/models/din.py | BradyBromley/DeepCTR | 3d12ffc0e0a5e893dce8bd315824c180445b772e | [
"Apache-2.0"
] | 7 | 2019-12-16T22:22:25.000Z | 2022-02-10T00:37:34.000Z | deepctr/models/din.py | BradyBromley/DeepCTR | 3d12ffc0e0a5e893dce8bd315824c180445b772e | [
"Apache-2.0"
] | 1 | 2020-01-07T09:12:21.000Z | 2020-01-07T09:12:21.000Z | # -*- coding:utf-8 -*-
"""
Author:
Weichen Shen,wcshen1994@163.com
Reference:
[1] Zhou G, Zhu X, Song C, et al. Deep interest network for click-through rate prediction[C]//Proceedings of the 24th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining. ACM, 2018: 1059-1068. (https://arxiv.org/pdf/1706.06978.pdf)
"""
from tensorflow.python.keras.layers import Dense,Concatenate, Flatten
from tensorflow.python.keras.models import Model
from ..inputs import build_input_features,create_embedding_matrix,SparseFeat,VarLenSparseFeat,DenseFeat,embedding_lookup,get_dense_input,varlen_embedding_lookup,get_varlen_pooling_list,combined_dnn_input
from ..layers.core import DNN, PredictionLayer
from ..layers.sequence import AttentionSequencePoolingLayer
from ..layers.utils import concat_fun, NoMask
def DIN(dnn_feature_columns, history_feature_list, embedding_size=8, hist_len_max=16, dnn_use_bn=False,
        dnn_hidden_units=(200, 80), dnn_activation='relu', att_hidden_size=(80, 40), att_activation="dice",
        att_weight_normalization=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024,
        task='binary'):
    """Instantiates the Deep Interest Network architecture.

    :param dnn_feature_columns: An iterable containing all the features used by deep part of the model.
    :param history_feature_list: list, to indicate sequence sparse fields
    :param embedding_size: positive integer, sparse feature embedding_size.
    :param hist_len_max: positive int, to indicate the max length of seq input
                         (NOTE: currently unused in this implementation)
    :param dnn_use_bn: bool. Whether use BatchNormalization before activation or not in deep net
    :param dnn_hidden_units: list, list of positive integer or empty list, the layer number and units in each layer of deep net
    :param dnn_activation: Activation function to use in deep net
    :param att_hidden_size: list, list of positive integer, the layer number and units in each layer of attention net
    :param att_activation: Activation function to use in attention net
    :param att_weight_normalization: bool. Whether normalize the attention score of local activation unit.
    :param l2_reg_dnn: float. L2 regularizer strength applied to DNN
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param init_std: float, to use as the initialize std of embedding vector
    :param seed: integer, to use as random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss
    :return: A Keras model instance.
    """
    features = build_input_features(dnn_feature_columns)
    # Partition the feature columns by kind.
    sparse_feature_columns = list(filter(lambda x:isinstance(x,SparseFeat),dnn_feature_columns)) if dnn_feature_columns else []
    dense_feature_columns = list(
        filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else []
    varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else []
    history_feature_columns = []
    sparse_varlen_feature_columns = []
    # Behavior-history sequence fields are named "hist_<field>" by convention.
    history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
    for fc in varlen_sparse_feature_columns:
        feature_name = fc.name
        if feature_name in history_fc_names:
            history_feature_columns.append(fc)
        else:
            sparse_varlen_feature_columns.append(fc)
    inputs_list = list(features.values())
    embedding_dict = create_embedding_matrix(dnn_feature_columns,l2_reg_embedding,init_std,seed,embedding_size, prefix="")
    # The query (candidate item) embeddings are looked up separately.
    query_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,history_feature_list,history_feature_list)
    keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns, history_fc_names, history_fc_names)
    dnn_input_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,mask_feat_list=history_feature_list)
    dense_value_list = get_dense_input(features, dense_feature_columns)
    # Pool the non-history variable-length sparse features and append them
    # to the flat DNN inputs.
    sequence_embed_dict = varlen_embedding_lookup(embedding_dict,features,sparse_varlen_feature_columns)
    sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, features, sparse_varlen_feature_columns)
    dnn_input_emb_list += sequence_embed_list
    keys_emb = concat_fun(keys_emb_list,mask=True)
    deep_input_emb = concat_fun(dnn_input_emb_list)
    query_emb = concat_fun(query_emb_list,mask=True)
    # Local-activation attention pools the behavior sequence (keys) with
    # respect to the candidate item (query).
    hist = AttentionSequencePoolingLayer(att_hidden_size, att_activation,
                                         weight_normalization=att_weight_normalization, supports_masking=True)([
        query_emb, keys_emb])
    deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])
    deep_input_emb = Flatten()(deep_input_emb)
    dnn_input = combined_dnn_input([deep_input_emb],dense_value_list)
    output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
                 dnn_dropout, dnn_use_bn, seed)(dnn_input)
    final_logit = Dense(1, use_bias=False)(output)
    output = PredictionLayer(task)(final_logit)
    model = Model(inputs=inputs_list, outputs=output)
    return model
| 52.366337 | 256 | 0.772169 |
from tensorflow.python.keras.layers import Dense,Concatenate, Flatten
from tensorflow.python.keras.models import Model
from ..inputs import build_input_features,create_embedding_matrix,SparseFeat,VarLenSparseFeat,DenseFeat,embedding_lookup,get_dense_input,varlen_embedding_lookup,get_varlen_pooling_list,combined_dnn_input
from ..layers.core import DNN, PredictionLayer
from ..layers.sequence import AttentionSequencePoolingLayer
from ..layers.utils import concat_fun, NoMask
def DIN(dnn_feature_columns, history_feature_list, embedding_size=8, hist_len_max=16, dnn_use_bn=False,
dnn_hidden_units=(200, 80), dnn_activation='relu', att_hidden_size=(80, 40), att_activation="dice",
att_weight_normalization=False, l2_reg_dnn=0, l2_reg_embedding=1e-6, dnn_dropout=0, init_std=0.0001, seed=1024,
task='binary'):
features = build_input_features(dnn_feature_columns)
sparse_feature_columns = list(filter(lambda x:isinstance(x,SparseFeat),dnn_feature_columns)) if dnn_feature_columns else []
dense_feature_columns = list(
filter(lambda x: isinstance(x, DenseFeat), dnn_feature_columns)) if dnn_feature_columns else []
varlen_sparse_feature_columns = list(filter(lambda x: isinstance(x, VarLenSparseFeat), dnn_feature_columns)) if dnn_feature_columns else []
history_feature_columns = []
sparse_varlen_feature_columns = []
history_fc_names = list(map(lambda x: "hist_" + x, history_feature_list))
for fc in varlen_sparse_feature_columns:
feature_name = fc.name
if feature_name in history_fc_names:
history_feature_columns.append(fc)
else:
sparse_varlen_feature_columns.append(fc)
inputs_list = list(features.values())
embedding_dict = create_embedding_matrix(dnn_feature_columns,l2_reg_embedding,init_std,seed,embedding_size, prefix="")
query_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,history_feature_list,history_feature_list)
keys_emb_list = embedding_lookup(embedding_dict, features, history_feature_columns, history_fc_names, history_fc_names)
dnn_input_emb_list = embedding_lookup(embedding_dict,features,sparse_feature_columns,mask_feat_list=history_feature_list)
dense_value_list = get_dense_input(features, dense_feature_columns)
sequence_embed_dict = varlen_embedding_lookup(embedding_dict,features,sparse_varlen_feature_columns)
sequence_embed_list = get_varlen_pooling_list(sequence_embed_dict, features, sparse_varlen_feature_columns)
dnn_input_emb_list += sequence_embed_list
keys_emb = concat_fun(keys_emb_list,mask=True)
deep_input_emb = concat_fun(dnn_input_emb_list)
query_emb = concat_fun(query_emb_list,mask=True)
hist = AttentionSequencePoolingLayer(att_hidden_size, att_activation,
weight_normalization=att_weight_normalization, supports_masking=True)([
query_emb, keys_emb])
deep_input_emb = Concatenate()([NoMask()(deep_input_emb), hist])
deep_input_emb = Flatten()(deep_input_emb)
dnn_input = combined_dnn_input([deep_input_emb],dense_value_list)
output = DNN(dnn_hidden_units, dnn_activation, l2_reg_dnn,
dnn_dropout, dnn_use_bn, seed)(dnn_input)
final_logit = Dense(1, use_bias=False)(output)
output = PredictionLayer(task)(final_logit)
model = Model(inputs=inputs_list, outputs=output)
return model
| true | true |
f72b252e105b7da5db34c619077f0de2012fa5c7 | 301 | py | Python | deeplab3/evaluators/__init__.py | crmauceri/pytorch-deeplab-xception | aec2cb7b0c09c346519c6bf22c2cbf419021fdc7 | [
"MIT"
] | 1 | 2021-12-11T08:21:19.000Z | 2021-12-11T08:21:19.000Z | deeplab3/evaluators/__init__.py | crmauceri/rgbd_deeplab | aec2cb7b0c09c346519c6bf22c2cbf419021fdc7 | [
"MIT"
] | null | null | null | deeplab3/evaluators/__init__.py | crmauceri/rgbd_deeplab | aec2cb7b0c09c346519c6bf22c2cbf419021fdc7 | [
"MIT"
] | null | null | null | from deeplab3.evaluators.segmentation_evaluator import SegmentationEvaluator
def make_evaluator(cfg, num_classes):
if cfg.EVALUATOR.NAME == "segmentation":
return SegmentationEvaluator(num_classes)
else:
raise ValueError("Model not implemented: {}".format(cfg.EVALUATOR.NAME)) | 43 | 80 | 0.76412 | from deeplab3.evaluators.segmentation_evaluator import SegmentationEvaluator
def make_evaluator(cfg, num_classes):
if cfg.EVALUATOR.NAME == "segmentation":
return SegmentationEvaluator(num_classes)
else:
raise ValueError("Model not implemented: {}".format(cfg.EVALUATOR.NAME)) | true | true |
f72b25859b28cd579a78605dc1ed921ca8af258c | 3,159 | py | Python | analytics/models.py | SmithJesko/volny-films | 7c50713eb1d2c2d5984700a5de20a12e4045e1b9 | [
"MIT"
] | 1 | 2021-02-23T00:12:43.000Z | 2021-02-23T00:12:43.000Z | analytics/models.py | SmithJesko/volny-films | 7c50713eb1d2c2d5984700a5de20a12e4045e1b9 | [
"MIT"
] | null | null | null | analytics/models.py | SmithJesko/volny-films | 7c50713eb1d2c2d5984700a5de20a12e4045e1b9 | [
"MIT"
] | 1 | 2021-02-23T06:04:13.000Z | 2021-02-23T06:04:13.000Z | from django.contrib.auth import get_user_model
from django.db import models
User = get_user_model()
class ClientConnection(models.Model):
    # One row per anonymous request: the client IP, the URL that was hit
    # and location fields for that IP (presumably from a GeoIP-style
    # lookup, given the field names — confirm against the caller).
    ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
    url = models.CharField(max_length=512, default="xxx", blank=True, null=True)
    # Set once when the row is inserted.
    timestamp = models.DateTimeField(auto_now_add=True)
    request_body = models.TextField(blank=True, null=True)
    # Location fields; all optional.
    country_code = models.CharField(max_length=512, blank=True, null=True)
    country_name = models.CharField(max_length=512, blank=True, null=True)
    region_code = models.CharField(max_length=512, blank=True, null=True)
    region_name = models.CharField(max_length=512, blank=True, null=True)
    city = models.CharField(max_length=512, blank=True, null=True)
    zip_code = models.CharField(max_length=512, blank=True, null=True)
    latitude = models.CharField(max_length=512, blank=True, null=True)
    longitude = models.CharField(max_length=512, blank=True, null=True)
    metro_code = models.CharField(max_length=512, blank=True, null=True)
    def __str__(self):
        return str(self.ip)
    class Meta:
        verbose_name = "Client Connection"
        verbose_name_plural = "Client Connections"
    @property
    def title(self):
        # Mirrors __str__ for templates that expect a `title` attribute.
        return str(self.ip)
# NOTE: UserClientConnection duplicates all ClientConnection fields and only
# adds a `user` foreign key; consider refactoring to a shared abstract base model.
class UserClientConnection(models.Model):
    # Same shape as ClientConnection but tied to an authenticated user.
    ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
    # The authenticated user who made the request; rows are removed with the user.
    user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
    url = models.CharField(max_length=512, default="xxx", blank=True, null=True)
    # Set once when the row is inserted.
    timestamp = models.DateTimeField(auto_now_add=True)
    request_body = models.TextField(blank=True, null=True)
    # Location fields resolved for `ip`; all optional.
    country_code = models.CharField(max_length=512, blank=True, null=True)
    country_name = models.CharField(max_length=512, blank=True, null=True)
    region_code = models.CharField(max_length=512, blank=True, null=True)
    region_name = models.CharField(max_length=512, blank=True, null=True)
    city = models.CharField(max_length=512, blank=True, null=True)
    zip_code = models.CharField(max_length=512, blank=True, null=True)
    latitude = models.CharField(max_length=512, blank=True, null=True)
    longitude = models.CharField(max_length=512, blank=True, null=True)
    metro_code = models.CharField(max_length=512, blank=True, null=True)
    def __str__(self):
        return str(self.ip)
    class Meta:
        verbose_name = "User Client Connection"
        verbose_name_plural = "User Client Connections"
    @property
    def title(self):
        # Mirrors __str__ for templates that expect a `title` attribute.
        return str(self.ip)
class MovieView(models.Model):
    """Log entry recording that a client IP viewed a movie/show page."""
    ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
    # Set once when the row is created.
    timestamp = models.DateTimeField(auto_now_add=True)
    movie_id = models.CharField(max_length=512)
    media_type = models.CharField(max_length=512, blank=True, null=True)

    def __str__(self):
        # Fix: ``ip`` is nullable, and __str__ must return a str -- returning
        # ``self.ip`` directly raised TypeError when it was None.  Wrapping in
        # str() also matches the sibling connection models.
        return str(self.ip)

    class Meta:
        verbose_name = "Movie View"
        verbose_name_plural = "Movie Views"

    @property
    def title(self):
        """Display name for this record; mirrors __str__."""
        return str(self.ip)
from django.db import models
User = get_user_model()
class ClientConnection(models.Model):
ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
url = models.CharField(max_length=512, default="xxx", blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
request_body = models.TextField(blank=True, null=True)
country_code = models.CharField(max_length=512, blank=True, null=True)
country_name = models.CharField(max_length=512, blank=True, null=True)
region_code = models.CharField(max_length=512, blank=True, null=True)
region_name = models.CharField(max_length=512, blank=True, null=True)
city = models.CharField(max_length=512, blank=True, null=True)
zip_code = models.CharField(max_length=512, blank=True, null=True)
latitude = models.CharField(max_length=512, blank=True, null=True)
longitude = models.CharField(max_length=512, blank=True, null=True)
metro_code = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return str(self.ip)
class Meta:
verbose_name = "Client Connection"
verbose_name_plural = "Client Connections"
@property
def title(self):
return str(self.ip)
class UserClientConnection(models.Model):
ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, blank=True, null=True)
url = models.CharField(max_length=512, default="xxx", blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
request_body = models.TextField(blank=True, null=True)
country_code = models.CharField(max_length=512, blank=True, null=True)
country_name = models.CharField(max_length=512, blank=True, null=True)
region_code = models.CharField(max_length=512, blank=True, null=True)
region_name = models.CharField(max_length=512, blank=True, null=True)
city = models.CharField(max_length=512, blank=True, null=True)
zip_code = models.CharField(max_length=512, blank=True, null=True)
latitude = models.CharField(max_length=512, blank=True, null=True)
longitude = models.CharField(max_length=512, blank=True, null=True)
metro_code = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return str(self.ip)
class Meta:
verbose_name = "User Client Connection"
verbose_name_plural = "User Client Connections"
@property
def title(self):
return str(self.ip)
class MovieView(models.Model):
ip = models.CharField(max_length=50, default="xxx", blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
movie_id = models.CharField(max_length=512)
media_type = models.CharField(max_length=512, blank=True, null=True)
def __str__(self):
return self.ip
class Meta:
verbose_name = "Movie View"
verbose_name_plural = "Movie Views"
@property
def title(self):
return str(self.ip) | true | true |
f72b2611795c1b7d27319858d6c69d00eadf80ef | 32,514 | py | Python | old_projects/eola/chapter8p2.py | thevivekpandey/manim | 483dbfc232fa684e7722969221bd416fde8bd55a | [
"MIT"
] | 9 | 2019-12-17T04:59:53.000Z | 2020-11-10T21:02:41.000Z | old_projects/eola/chapter8p2.py | Hammer7/manim | a19a6317ec187f65efb0c8f46bc613b4a978d22a | [
"MIT"
] | 5 | 2021-03-19T03:01:04.000Z | 2022-03-11T23:57:24.000Z | old_projects/eola/chapter8p2.py | Hammer7/manim | a19a6317ec187f65efb0c8f46bc613b4a978d22a | [
"MIT"
] | 3 | 2020-04-12T16:50:57.000Z | 2020-07-19T17:53:53.000Z | from manimlib.imports import *
from old_projects.eola.chapter5 import get_det_text
from old_projects.eola.chapter8 import *
class OpeningQuote(Scene):
    """Fade in the Deligne quote, then write the attribution beneath it."""
    def construct(self):
        # The quote is split into pieces so individual phrases can be colored.
        quote = TextMobject(
            "From [Grothendieck], I have also learned not",
            "to take glory in the ",
            "difficulty of a proof:",
            "difficulty means we have not understood.",
            "The idea is to be able to ",
            "paint a landscape",
            "in which the proof is obvious.",
            arg_separator = " "
        )
        quote.set_color_by_tex("difficulty of a proof:", RED)
        quote.set_color_by_tex("paint a landscape", GREEN)
        quote.set_width(FRAME_WIDTH - 2)
        quote.to_edge(UP)

        attribution = TextMobject("-Pierre Deligne")
        attribution.set_color(YELLOW)
        attribution.next_to(quote, DOWN, buff = 0.5)

        self.play(FadeIn(quote))
        self.wait(4)
        self.play(Write(attribution, run_time = 3))
        self.wait()
class CrossProductSymbols(Scene):
    """Display v x w = p, then brace-label p as a vector."""
    def construct(self):
        v_tex, w_tex, p_tex = get_vect_tex(*"vwp")
        cross_eq = TexMobject(
            v_tex, "\\times", w_tex, "=", p_tex
        )
        for tex, color in [(v_tex, V_COLOR), (w_tex, W_COLOR), (p_tex, P_COLOR)]:
            cross_eq.set_color_by_tex(tex, color)

        p_brace = Brace(cross_eq[-1])
        p_brace.stretch_to_fit_width(0.7)
        label = p_brace.get_text("Vector")
        label.set_color(RED)

        self.add(cross_eq)
        self.play(*[Write(mob) for mob in (p_brace, label)])
        self.wait()
class DeterminantTrickCopy(DeterminantTrick):
    # Re-render of the DeterminantTrick scene imported from chapter8, unchanged.
    pass
class BruteForceVerification(Scene):
    """
    Show the numerical cross-product formula on the left, next to the
    facts one could (painfully) verify about it computationally:
    orthogonality to v and w, and the |v||w|sin(theta) length identity.
    """
    def construct(self):
        v = Matrix(["v_1", "v_2", "v_3"])
        w = Matrix(["w_1", "w_2", "w_3"])
        v1, v2, v3 = v.get_entries()
        w1, w2, w3 = w.get_entries()
        v.set_color(V_COLOR)
        w.set_color(W_COLOR)

        def get_term(e1, e2, e3, e4):
            # Build the mobject for "e1 e2 - e3 e4" out of entry copies.
            group = VGroup(
                e1.copy(), e2.copy(),
                TexMobject("-"),
                e3.copy(), e4.copy(),
            )
            group.arrange()
            return group

        # Components of v x w: (v2w3 - v3w2, v3w1 - v1w3, v1w2 - v2w1).
        # Bug fix: the third row previously repeated the first row's entries.
        cross = Matrix(list(it.starmap(get_term, [
            (v2, w3, v3, w2),
            (v3, w1, v1, w3),
            (v1, w2, v2, w1),
        ])))
        cross_product = VGroup(
            v.copy(), TexMobject("\\times"), w.copy(),
            TexMobject("="), cross.copy()
        )
        cross_product.arrange()
        cross_product.scale(0.75)

        formula_word = TextMobject("Numerical formula")
        computation_words = TextMobject("""
            Facts you could (painfully)
            verify computationally
        """)
        computation_words.scale(0.75)
        # Split the screen into a labeled left/right half.
        h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
        v_line = Line(UP, DOWN).scale(FRAME_Y_RADIUS)
        computation_words.to_edge(UP, buff = MED_SMALL_BUFF/2)
        h_line.next_to(computation_words, DOWN)
        formula_word.next_to(h_line, UP, buff = MED_SMALL_BUFF)
        computation_words.shift(FRAME_X_RADIUS*RIGHT/2)
        formula_word.shift(FRAME_X_RADIUS*LEFT/2)
        cross_product.next_to(formula_word, DOWN, buff = LARGE_BUFF)

        self.add(formula_word, computation_words)
        self.play(
            ShowCreation(h_line),
            ShowCreation(v_line),
            Write(cross_product)
        )

        v_tex, w_tex = get_vect_tex(*"vw")
        # Orthogonality facts: v.(v x w) = 0 and w.(v x w) = 0.
        v_dot, w_dot = [
            TexMobject(
                tex, "\\cdot",
                "(", v_tex, "\\times", w_tex, ")",
                "= 0"
            )
            for tex in (v_tex, w_tex)
        ]
        theta_def = TexMobject(
            "\\theta",
            "= \\cos^{-1} \\big(", v_tex, "\\cdot", w_tex, "/",
            "(||", v_tex, "||", "\\cdot", "||", w_tex, "||)", "\\big)"
        )
        length_check = TexMobject(
            "||", "(", v_tex, "\\times", w_tex, ")", "|| = ",
            "(||", v_tex, "||)",
            "(||", w_tex, "||)",
            "\\sin(", "\\theta", ")"
        )
        # Stack the facts down the right half, shrinking any that overflow.
        last_point = h_line.get_center()+FRAME_X_RADIUS*RIGHT/2
        max_width = FRAME_X_RADIUS-1
        for mob in v_dot, w_dot, theta_def, length_check:
            mob.set_color_by_tex(v_tex, V_COLOR)
            mob.set_color_by_tex(w_tex, W_COLOR)
            mob.set_color_by_tex("\\theta", GREEN)
            mob.next_to(last_point, DOWN, buff = MED_SMALL_BUFF)
            if mob.get_width() > max_width:
                mob.set_width(max_width)
            last_point = mob
            self.play(FadeIn(mob))
        self.wait()
class ButWeCanDoBetter(TeacherStudentsScene):
    """Teacher promises a better approach; the students look pleased."""
    def construct(self):
        self.teacher_says("But we can do \\\\ better than that")
        self.change_student_modes("happy", "happy", "happy")
        self.random_blink(3)
class Prerequisites(Scene):
    """List the two prerequisite chapters as a pair of 16x9 video frames."""
    def construct(self):
        title = TextMobject("Prerequisites")
        title.to_edge(UP)
        title.set_color(YELLOW)

        # Placeholder frames for the two referenced videos.
        screen = Rectangle(width = 16, height = 9, color = BLUE)
        screen.set_width(FRAME_X_RADIUS - 1)
        left_rect, right_rect = [
            screen.copy().shift(DOWN/2).to_edge(edge)
            for edge in (LEFT, RIGHT)
        ]
        chapter5 = TextMobject("""
            \\centering
            Chapter 5
            Determinants
        """)
        chapter7 = TextMobject("""
            \\centering
            Chapter 7:
            Dot products and duality
        """)

        self.add(title)
        labels_and_frames = [(chapter5, left_rect), (chapter7, right_rect)]
        # Fit each label to its frame and park it above.
        for label, frame in labels_and_frames:
            if label.get_width() > frame.get_width():
                label.set_width(frame.get_width())
            label.next_to(frame, UP)
        for label, frame in labels_and_frames:
            self.play(Write(label), ShowCreation(frame))
        self.wait()
class DualityReview(TeacherStudentsScene):
    """Teacher announces a quick review of duality."""
    def construct(self):
        phrase = TextMobject("Quick", "duality", "review")
        phrase[1].set_color_by_gradient(BLUE, YELLOW)
        self.teacher_says(phrase, target_mode = "surprised")
        self.change_student_modes("pondering")
        self.random_blink(2)
class DotProductToTransformSymbol(Scene):
    """
    Morph the 1x2-matrix-times-vector picture into the dot-product
    picture, bracing one side "Transform" and the other "Dot product".
    """
    CONFIG = {
        # The coordinates used for both the 1x2 matrix and the dual vector.
        "vect_coords" : [2, 1]
    }
    def construct(self):
        v_mob = TexMobject(get_vect_tex("v"))
        v_mob.set_color(V_COLOR)
        # Same numbers presented two ways: a row matrix and a column vector.
        matrix = Matrix([self.vect_coords])
        vector = Matrix(self.vect_coords)
        matrix.set_column_colors(X_COLOR, Y_COLOR)
        vector.set_column_colors(YELLOW)
        _input = Matrix(["x", "y"])
        _input.get_entries().set_color_by_gradient(X_COLOR, Y_COLOR)
        left_input, right_input = [_input.copy() for x in range(2)]
        dot, equals = list(map(TexMobject, ["\\cdot", "="]))
        equation = VGroup(
            vector, dot, left_input, equals,
            matrix, right_input
        )
        equation.arrange()
        left_brace = Brace(VGroup(vector, left_input))
        right_brace = Brace(matrix, UP)
        left_words = left_brace.get_text("Dot product")
        right_words = right_brace.get_text("Transform")
        right_words.set_width(right_brace.get_width())
        # Label each input column with a braced "v" on top.
        right_v_brace = Brace(right_input, UP)
        right_v_mob = v_mob.copy()
        right_v_brace.put_at_tip(right_v_mob)
        right_input.add(right_v_brace, right_v_mob)
        left_v_brace = Brace(left_input, UP)
        left_v_mob = v_mob.copy()
        left_v_brace.put_at_tip(left_v_mob)
        left_input.add(left_v_brace, left_v_mob)
        self.add(matrix, right_input)
        self.play(
            GrowFromCenter(right_brace),
            Write(right_words, run_time = 1)
        )
        self.wait()
        # Copy the matrix side over to become the dot-product side.
        self.play(
            Write(equals),
            Write(dot),
            Transform(matrix.copy(), vector),
            Transform(right_input.copy(), left_input)
        )
        self.play(
            GrowFromCenter(left_brace),
            Write(left_words, run_time = 1)
        )
        self.wait()
class MathematicalWild(Scene):
    """
    Randolph spots a 3d-to-1d linear transformation "in the wild" and
    raises its dual vector in response.
    """
    def construct(self):
        title = TextMobject("In the mathematical wild")
        title.to_edge(UP)
        self.add(title)
        randy = Randolph()
        randy.shift(DOWN)
        bubble = ThoughtBubble(width = 5, height = 4)
        bubble.write("""
            \\centering
            Some linear
            transformation
            to the number line
        """)
        bubble.content.set_color(BLUE)
        bubble.content.shift(MED_SMALL_BUFF*UP/2)
        # Keep only the text, dropping the bubble outline sub-mobjects.
        bubble.remove(*bubble[:-1])
        bubble.add(bubble.content)
        bubble.next_to(randy.get_corner(UP+RIGHT), RIGHT)
        vector = Vector([1, 2])
        vector.move_to(randy.get_corner(UP+LEFT), aligned_edge = DOWN+LEFT)
        dual_words = TextMobject("Dual vector")
        dual_words.set_color_by_gradient(BLUE, YELLOW)
        dual_words.next_to(vector, LEFT)
        self.add(randy)
        self.play(Blink(randy))
        self.play(FadeIn(bubble))
        self.play(randy.change_mode, "sassy")
        self.play(Blink(randy))
        self.wait()
        self.play(randy.look, UP+LEFT)
        self.play(
            ShowCreation(vector),
            randy.change_mode, "raise_right_hand"
        )
        self.wait()
        self.play(Write(dual_words))
        self.play(Blink(randy))
        self.wait()
class ThreeStepPlan(Scene):
    """
    Lay out the three-step plan (define a 3d-to-1d map, find its dual,
    show the dual is v x w), then condense it to "Linear transformation
    <=> determinant" with a placeholder video frame.
    """
    def construct(self):
        title = TextMobject("The plan")
        title.set_color(YELLOW)
        title.to_edge(UP)
        h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
        h_line.next_to(title, DOWN)
        v_tex, w_tex = get_vect_tex(*"vw")
        v_text, w_text, cross_text = [
            "$%s$"%s
            for s in (v_tex, w_tex, v_tex + "\\times" + w_tex)
        ]
        steps = [
            TextMobject(
                "1. Define a 3d-to-1d", "linear \\\\", "transformation",
                "in terms of", v_text, "and", w_text
            ),
            TextMobject(
                "2. Find its", "dual vector"
            ),
            TextMobject(
                "3. Show that this dual is", cross_text
            )
        ]
        # Keep handles on the words "linear transformation" so they can
        # be transformed on their own later.
        linear, transformation = steps[0][1:1+2]
        steps[0].set_color_by_tex(v_text, V_COLOR)
        steps[0].set_color_by_tex(w_text, W_COLOR)
        steps[1][1].set_color_by_gradient(BLUE, YELLOW)
        steps[2].set_color_by_tex(cross_text, P_COLOR)
        VGroup(*steps).arrange(
            DOWN, aligned_edge = LEFT, buff = LARGE_BUFF
        ).next_to(h_line, DOWN, buff = MED_SMALL_BUFF)
        self.add(title)
        self.play(ShowCreation(h_line))
        for step in steps:
            self.play(Write(step, run_time = 2))
            self.wait()
        linear_transformation = TextMobject("Linear", "transformation")
        linear_transformation.next_to(h_line, DOWN, MED_SMALL_BUFF)
        det = self.get_det()
        rect = Rectangle(width = 16, height = 9, color = BLUE)
        rect.set_height(3.5)
        left_right_arrow = TexMobject("\\Leftrightarrow")
        left_right_arrow.shift(DOWN)
        det.next_to(left_right_arrow, LEFT)
        rect.next_to(left_right_arrow, RIGHT)
        steps[0].remove(linear, transformation)
        self.play(
            Transform(
                VGroup(linear, transformation),
                linear_transformation
            ),
            *list(map(FadeOut, steps))
        )
        self.wait()
        self.play(Write(left_right_arrow))
        self.play(Write(det))
        self.play(ShowCreation(rect))
        # NOTE(review): wait(0) is a zero-length pause -- possibly meant
        # self.wait(); confirm against the rendered video.
        self.wait(0)
    def get_det(self):
        """Build the det([i,j,k | v | w]) mobject used in this scene."""
        matrix = Matrix(np.array([
            ["\\hat{\\imath}", "\\hat{\\jmath}", "\\hat{k}"],
            ["v_%d"%d for d in range(1, 4)],
            ["w_%d"%d for d in range(1, 4)],
        ]).T)
        matrix.set_column_colors(X_COLOR, V_COLOR, W_COLOR)
        matrix.get_mob_matrix()[1, 0].set_color(Y_COLOR)
        matrix.get_mob_matrix()[2, 0].set_color(Z_COLOR)
        # Nudge the v and w rows down to line up with the basis symbols.
        VGroup(*matrix.get_mob_matrix()[1, 1:]).shift(0.15*DOWN)
        VGroup(*matrix.get_mob_matrix()[2, 1:]).shift(0.35*DOWN)
        det_text = get_det_text(matrix)
        det_text.add(matrix)
        return det_text
class DefineDualTransform(Scene):
    """
    The core scene of the chapter: define f(x, y, z) = det([xyz | v | w]),
    note that it is linear, introduce its dual vector p, expand p . (x,y,z)
    against the determinant expansion, and pose the question of which
    vector p makes the two sides agree.

    Methods run in sequence and communicate via attributes stored on self
    (self.title, self.det_text, self.u/v/w_entries, self.func_tex, ...),
    so their order in construct() must not change.
    """
    def construct(self):
        self.add_title()
        self.show_triple_cross_product()
        self.write_function()
        self.introduce_dual_vector()
        self.expand_dot_product()
        self.ask_question()
    def add_title(self):
        """Show the 'student might think' title; stash its replacement."""
        title = TextMobject("What a student might think")
        # The replacement text lives on the title so write_function can
        # Transform into it later.
        title.not_real = TextMobject("Not the real cross product")
        for mob in title, title.not_real:
            mob.set_width(FRAME_X_RADIUS - 1)
            mob.set_color(RED)
            mob.to_edge(UP)
        self.add(title)
        self.title = title
    def show_triple_cross_product(self):
        """Write u x v x w = det of the matrix with columns u, v, w."""
        colors = [WHITE, ORANGE, W_COLOR]
        tex_mobs = list(map(TexMobject, get_vect_tex(*"uvw")))
        u_tex, v_tex, w_tex = tex_mobs
        arrays = [
            Matrix(["%s_%d"%(s, d) for d in range(1, 4)])
            for s in "uvw"
        ]
        defs_equals = VGroup()
        definitions = VGroup()
        for array, tex_mob, color in zip(arrays, tex_mobs, colors):
            array.set_column_colors(color)
            tex_mob.set_color(color)
            equals = TexMobject("=")
            definition = VGroup(tex_mob, equals, array)
            definition.arrange(RIGHT)
            definitions.add(definition)
            defs_equals.add(equals)
        definitions.arrange(buff = MED_SMALL_BUFF)
        definitions.shift(2*DOWN)
        # Everything that will fly from the definitions into the matrix
        # gets a .target marking its destination.
        mobs_with_targets = list(it.chain(
            tex_mobs, *[a.get_entries() for a in arrays]
        ))
        for mob in mobs_with_targets:
            mob.target = mob.copy()
        matrix = Matrix(np.array([
            [e.target for e in array.get_entries()]
            for array in arrays
        ]).T)
        det_text = get_det_text(matrix, background_rect = False)
        syms = times1, times2, equals = [
            TexMobject(sym)
            for sym in ("\\times", "\\times", "=",)
        ]
        triple_cross = VGroup(
            u_tex.target, times1, v_tex.target, times2, w_tex.target, equals
        )
        triple_cross.arrange()
        final_mobs = VGroup(triple_cross, VGroup(det_text, matrix))
        final_mobs.arrange()
        final_mobs.next_to(self.title, DOWN, buff = MED_SMALL_BUFF)
        for mob in definitions, final_mobs:
            mob.set_width(FRAME_X_RADIUS - 1)
        for array in arrays:
            brackets = array.get_brackets()
            brackets.target = matrix.get_brackets()
            mobs_with_targets.append(brackets)
        for def_equals in defs_equals:
            def_equals.target = equals
            mobs_with_targets.append(def_equals)
        self.play(FadeIn(
            definitions,
            run_time = 2,
            lag_ratio = 0.5
        ))
        self.wait(2)
        self.play(*[
            Transform(mob.copy(), mob.target)
            for mob in tex_mobs
        ] + [
            Write(times1),
            Write(times2),
        ])
        triple_cross.add(*self.get_mobjects_from_last_animation()[:3])
        self.play(*[
            Transform(mob.copy(), mob.target)
            for mob in mobs_with_targets
            if mob not in tex_mobs
        ])
        # The first nine landed mobjects are the u, v, w column entries,
        # in that order; later methods re-use them.
        u_entries = self.get_mobjects_from_last_animation()[:3]
        v_entries = self.get_mobjects_from_last_animation()[3:6]
        w_entries = self.get_mobjects_from_last_animation()[6:9]
        self.play(Write(det_text))
        self.wait(2)
        self.det_text = det_text
        self.definitions = definitions
        self.u_entries = u_entries
        self.v_entries = v_entries
        self.w_entries = w_entries
        self.matrix = matrix
        self.triple_cross = triple_cross
        self.v_tex, self.w_tex = v_tex, w_tex
        self.equals = equals
    def write_function(self):
        """Replace u's column with variables x, y, z and write f([x,y,z])."""
        brace = Brace(self.det_text, DOWN)
        number_text = brace.get_text("Number")
        self.play(Transform(self.title, self.title.not_real))
        self.wait()
        self.play(FadeOut(self.definitions))
        self.play(
            GrowFromCenter(brace),
            Write(number_text)
        )
        self.wait()
        x, y, z = variables = list(map(TexMobject, "xyz"))
        for var, entry in zip(variables, self.u_entries):
            var.scale(0.8)
            var.move_to(entry)
            entry.target = var
        brace.target = Brace(z)
        brace.target.stretch_to_fit_width(0.5)
        number_text.target = brace.target.get_text("Variable")
        # Brace the v and w columns with their vector symbols.
        v_brace = Brace(self.matrix.get_mob_matrix()[0, 1], UP)
        w_brace = Brace(self.matrix.get_mob_matrix()[0, 2], UP)
        for vect_brace, tex in (v_brace, self.v_tex), (w_brace, self.w_tex):
            vect_brace.stretch_to_fit_width(brace.target.get_width())
            new_tex = tex.copy()
            vect_brace.put_at_tip(new_tex)
            vect_brace.tex = new_tex
        func_tex = TexMobject(
            "f\\left(%s\\right)"%matrix_to_tex_string(list("xyz"))
        )
        func_tex.scale(0.7)
        # Swap the rendered column inside f(...) for a real Matrix mobject
        # so its entries can be animated individually later.
        func_input = Matrix(list("xyz"))
        func_input_template = VGroup(*func_tex[3:-2])
        func_input.set_height(func_input_template.get_height())
        func_input.next_to(VGroup(*func_tex[:3]), RIGHT)
        VGroup(*func_tex[-2:]).next_to(func_input, RIGHT)
        func_tex[0].scale_in_place(1.5)
        func_tex = VGroup(
            VGroup(*[func_tex[i] for i in (0, 1, 2, -2, -1)]),
            func_input
        )
        func_tex.next_to(self.equals, LEFT)
        self.play(
            FadeOut(self.title),
            FadeOut(self.triple_cross),
            *[
                Transform(mob, mob.target)
                for mob in [brace, number_text]
            ]
        )
        self.play(*[
            Transform(mob, mob.target)
            for mob in self.u_entries
        ])
        self.play(*[
            Write(VGroup(vect_brace, vect_brace.tex))
            for vect_brace in (v_brace, w_brace)
        ])
        self.wait()
        self.play(Write(func_tex))
        self.wait()
        self.func_tex = func_tex
        self.variables_text = VGroup(brace, number_text)
    def introduce_dual_vector(self):
        """Turn f into a 1x3 matrix, then into the dual vector p."""
        everything = VGroup(*self.get_mobjects())
        colors = [X_COLOR, Y_COLOR, Z_COLOR]
        q_marks = VGroup(*list(map(TextMobject, "???")))
        q_marks.scale(2)
        q_marks.set_color_by_gradient(*colors)
        title = VGroup(TextMobject("This function is linear"))
        title.set_color(GREEN)
        title.to_edge(UP)
        # Unknown 1x3 matrix and unknown column vector, both filled with "?".
        matrix = Matrix([list(q_marks.copy())])
        matrix.set_height(self.func_tex.get_height()/2)
        dual_vector = Matrix(list(q_marks))
        dual_vector.set_height(self.func_tex.get_height())
        dual_vector.get_brackets()[0].shift(0.2*LEFT)
        dual_vector.get_entries().shift(0.1*LEFT)
        dual_vector.scale(1.25)
        dual_dot = VGroup(
            dual_vector,
            TexMobject("\\cdot").next_to(dual_vector)
        )
        matrix_words = TextMobject("""
            $1 \\times 3$ matrix encoding the
            3d-to-1d linear transformation
        """)
        self.play(
            Write(title, run_time = 2),
            everything.shift, DOWN
        )
        # Ungroup so later animations can grab members individually.
        self.remove(everything)
        self.add(*everything)
        self.wait()
        func, func_input = self.func_tex
        func_input.target = func_input.copy()
        func_input.target.scale(1.2)
        func_input.target.move_to(self.func_tex, aligned_edge = RIGHT)
        matrix.next_to(func_input.target, LEFT)
        dual_dot.next_to(func_input.target, LEFT)
        matrix_words.next_to(matrix, DOWN, buff = 1.5)
        matrix_words.shift_onto_screen()
        matrix_arrow = Arrow(
            matrix_words.get_top(),
            matrix.get_bottom(),
            color = WHITE
        )
        self.play(
            Transform(func, matrix),
            MoveToTarget(func_input),
            FadeOut(self.variables_text),
        )
        self.wait()
        self.play(
            Write(matrix_words),
            ShowCreation(matrix_arrow)
        )
        self.wait(2)
        self.play(*list(map(FadeOut, [matrix_words, matrix_arrow])))
        self.play(
            Transform(func, dual_vector),
            Write(dual_dot[1])
        )
        self.wait()
        # Name the unknown dual vector's coordinates p_1, p_2, p_3.
        p_coords = VGroup(*list(map(TexMobject, [
            "p_%d"%d for d in range(1, 4)
        ])))
        p_coords.set_color(RED)
        p_array = Matrix(list(p_coords))
        p_array.set_height(dual_vector.get_height())
        p_array.move_to(dual_vector, aligned_edge = RIGHT)
        p_brace = Brace(p_array, UP)
        p_tex = TexMobject(get_vect_tex("p"))
        p_tex.set_color(P_COLOR)
        p_brace.put_at_tip(p_tex)
        self.play(
            GrowFromCenter(p_brace),
            Write(p_tex)
        )
        self.play(Transform(
            func, p_array,
            run_time = 2,
            lag_ratio = 0.5
        ))
        self.remove(func)
        self.add(p_array)
        self.wait()
        self.play(FadeOut(title))
        self.wait()
        self.p_array = p_array
        self.input_array = func_input
    def expand_dot_product(self):
        """Write out p . (x,y,z) term by term against the det expansion."""
        everything = VGroup(*self.get_mobjects())
        self.play(everything.to_edge, UP)
        self.remove(everything)
        self.add(*everything)
        to_fade = VGroup()
        p_entries = self.p_array.get_entries()
        input_entries = self.input_array.get_entries()
        dot_components = VGroup()
        # Build "p1*x + p2*y + p3*z =" out of entry copies.
        for p, x, i in zip(p_entries, input_entries, it.count()):
            if i == 2:
                x.sym = TexMobject("=")
            else:
                x.sym = TexMobject("+")
            p.sym = TexMobject("\\cdot")
            p.target = p.copy().scale(2)
            x.target = x.copy().scale(2)
            component = VGroup(p.target, p.sym, x.target, x.sym)
            component.arrange()
            dot_components.add(component)
        dot_components.arrange()
        dot_components.next_to(ORIGIN, LEFT)
        dot_components.shift(1.5*DOWN)
        dot_arrow = Arrow(self.p_array.get_corner(DOWN+RIGHT), dot_components)
        to_fade.add(dot_arrow)
        self.play(ShowCreation(dot_arrow))
        new_ps = VGroup()
        for p, x in zip(p_entries, input_entries):
            self.play(
                MoveToTarget(p.copy()),
                MoveToTarget(x.copy()),
                Write(p.sym),
                Write(x.sym)
            )
            mobs = self.get_mobjects_from_last_animation()
            new_ps.add(mobs[0])
            to_fade.add(*mobs[1:])
        self.wait()
        # Cofactor expansion of the determinant along the first column:
        # x(v2w3 - v3w2) + y(v3w1 - v1w3) + z(v1w2 - v2w1).
        x, y, z = self.u_entries
        v1, v2, v3 = self.v_entries
        w1, w2, w3 = self.w_entries
        cross_components = VGroup()
        quints = [
            (x, v2, w3, v3, w2),
            (y, v3, w1, v1, w3),
            (z, v1, w2, v2, w1),
        ]
        quints = [
            [m.copy() for m in quint]
            for quint in quints
        ]
        for i, quint in enumerate(quints):
            sym_strings = ["(", "\\cdot", "-", "\\cdot", ")"]
            if i < 2:
                sym_strings[-1] += "+"
            syms = list(map(TexMobject, sym_strings))
            for mob, sym in zip(quint, syms):
                mob.target = mob.copy()
                mob.target.scale(1.5)
                mob.sym = sym
            quint_targets = [mob.target for mob in quint]
            component = VGroup(*it.chain(*list(zip(quint_targets, syms))))
            component.arrange()
            cross_components.add(component)
            to_fade.add(syms[0], syms[-1], quint[0])
        cross_components.arrange(DOWN, aligned_edge = LEFT, buff = MED_SMALL_BUFF)
        cross_components.next_to(dot_components, RIGHT)
        for quint in quints:
            self.play(*[
                ApplyMethod(mob.set_color, YELLOW)
                for mob in quint
            ])
            self.wait(0.5)
            self.play(*[
                MoveToTarget(mob)
                for mob in quint
            ] + [
                Write(mob.sym)
                for mob in quint
            ])
            self.wait()
        # Line the p_i's up against their matching cofactor terms.
        self.play(
            ApplyFunction(
                lambda m : m.arrange(
                    DOWN, buff = MED_SMALL_BUFF+SMALL_BUFF
                ).next_to(cross_components, LEFT),
                new_ps
            ),
            *list(map(FadeOut, to_fade))
        )
        self.play(*[
            Write(TexMobject("=").next_to(p, buff = 2*SMALL_BUFF))
            for p in new_ps
        ])
        equals = self.get_mobjects_from_last_animation()
        self.wait(2)
        everything = everything.copy()
        self.play(
            FadeOut(VGroup(*self.get_mobjects())),
            Animation(everything)
        )
        self.clear()
        self.add(everything)
    def ask_question(self):
        """Pose the defining question: which vector p has this property?"""
        everything = VGroup(*self.get_mobjects())
        p_tex = "$%s$"%get_vect_tex("p")
        question = TextMobject(
            "What vector",
            p_tex,
            "has \\\\ the property that"
        )
        question.to_edge(UP)
        question.set_color(YELLOW)
        question.set_color_by_tex(p_tex, P_COLOR)
        everything.target = everything.copy()
        everything.target.next_to(
            question, DOWN, buff = MED_SMALL_BUFF
        )
        self.play(
            MoveToTarget(everything),
            Write(question)
        )
        self.wait()
class WhyAreWeDoingThis(TeacherStudentsScene):
    """A confused student questions the point; the class reacts."""
    def construct(self):
        self.student_says(
            "Um...why are \\\\ we doing this?",
            target_mode = "confused"
        )
        self.random_blink()
        self.play(self.get_teacher().change_mode, "erm")
        other_modes = ["confused", "raise_left_hand"]
        self.change_student_modes("plain", *other_modes)
        self.random_blink()
        self.change_student_modes("pondering", *other_modes)
        self.random_blink(5)
class ThreeDTripleCrossProduct(Scene):
    pass #Placeholder: simple parallelepiped (3d footage presumably produced separately)
class ThreeDMovingVariableVector(Scene):
    pass #Placeholder: white u vector moves around (3d footage presumably produced separately)
class ThreeDMovingVariableVectorWithCrossShowing(Scene):
    pass #Placeholder: white u moves around while red p is shown (3d footage presumably separate)
class NowForTheCoolPart(TeacherStudentsScene):
    """Teacher transitions to answering the same question geometrically."""
    def construct(self):
        self.teacher_says(
            "Now for the\\\\",
            "cool part"
        )
        self.change_student_modes("happy", "happy", "happy")
        self.random_blink(2)
        self.teacher_says(
            "Let's answer the same question,\\\\",
            "but this time geometrically"
        )
        self.change_student_modes("pondering", "pondering", "pondering")
        self.random_blink(2)
class ThreeDDotProductProjection(Scene):
    pass #Placeholder scene (3d dot-product-as-projection footage presumably produced separately)
class DotProductWords(Scene):
    """
    Write p . [x,y,z] = (Length of projection) x (Length of p),
    the geometric reading of the dot product.
    """
    def construct(self):
        p_tex = "$%s$"%get_vect_tex("p")
        p_mob = TextMobject(p_tex)
        p_mob.scale(1.5)
        p_mob.set_color(P_COLOR)
        input_array = Matrix(list("xyz"))
        dot_product = VGroup(p_mob, Dot(radius = 0.07), input_array)
        dot_product.arrange(buff = MED_SMALL_BUFF/2)
        equals = TexMobject("=")
        dot_product.next_to(equals, LEFT)
        # Two factor labels; the second mentions p and gets its color.
        words = VGroup(*it.starmap(TextMobject, [
            ("(Length of projection)",),
            ("(Length of ", p_tex, ")",)
        ]))
        times = TexMobject("\\times")
        words[1].set_color_by_tex(p_tex, P_COLOR)
        words[0].next_to(equals, RIGHT)
        words[1].next_to(words[0], DOWN, aligned_edge = LEFT)
        times.next_to(words[0], RIGHT)
        everyone = VGroup(dot_product, equals, times, words)
        everyone.center().set_width(FRAME_X_RADIUS - 1)
        self.add(dot_product)
        self.play(Write(equals))
        self.play(Write(words[0]))
        self.wait()
        self.play(
            Write(times),
            Write(words[1])
        )
        self.wait()
class ThreeDProjectToPerpendicular(Scene):
    pass #Placeholder scene (3d projection footage presumably produced separately)
class GeometricVolumeWords(Scene):
    """
    Write the geometric volume reading: (Area of parallelogram) x
    (Component of [x,y,z] perpendicular to v and w).
    """
    def construct(self):
        v_tex, w_tex = [
            "$%s$"%s
            for s in get_vect_tex(*"vw")
        ]
        words = VGroup(
            TextMobject("(Area of", "parallelogram", ")$\\times$"),
            TextMobject(
                "(Component of $%s$"%matrix_to_tex_string(list("xyz")),
                "perpendicular to", v_tex, "and", w_tex, ")"
            )
        )
        words[0].set_color_by_tex("parallelogram", BLUE)
        words[1].set_color_by_tex(v_tex, ORANGE)
        words[1].set_color_by_tex(w_tex, W_COLOR)
        words.arrange(RIGHT)
        words.set_width(FRAME_WIDTH - 1)
        words.to_edge(DOWN, buff = SMALL_BUFF)
        for word in words:
            self.play(Write(word))
            self.wait()
class WriteXYZ(Scene):
    """Write the column vector with entries x, y, z."""
    def construct(self):
        xyz_column = Matrix(list("xyz"))
        self.play(Write(xyz_column))
        self.wait()
class ThreeDDotProductWithCross(Scene):
    # Placeholder scene (3d footage presumably produced separately).
    pass
class CrossVectorEmphasisWords(Scene):
    """
    Flash the two defining properties of v x w, one at a time:
    perpendicular to v and w, and length equal to the parallelogram area.
    """
    def construct(self):
        v_tex, w_tex = ["$%s$"%s for s in get_vect_tex(*"vw")]
        words = [
            TextMobject("Perpendicular to", v_tex, "and", w_tex),
            TextMobject("Length = (Area of ", "parallelogram", ")")
        ]
        for word in words:
            word.set_color_by_tex(v_tex, ORANGE)
            word.set_color_by_tex(w_tex, W_COLOR)
            word.set_color_by_tex("parallelogram", BLUE)
            self.play(Write(word))
            self.wait()
            self.play(FadeOut(word))
class NextVideo(Scene):
    """Tease the next chapter: a title above an empty 16x9 video frame."""
    def construct(self):
        title = TextMobject("""
            Next video: Change of basis
        """)
        title.to_edge(UP, buff = MED_SMALL_BUFF/2)

        frame = Rectangle(width = 16, height = 9, color = BLUE)
        frame.set_height(6)
        frame.next_to(title, DOWN)

        self.add(title)
        self.play(ShowCreation(frame))
        self.wait()
class ChangeOfBasisPreview(LinearTransformationScene):
    """
    Preview of the change-of-basis chapter: Randolph and Mortimer read
    the same coordinates [-1, 2] against different basis vectors, shown
    before and after applying t_matrix.
    """
    CONFIG = {
        "include_background_plane" : False,
        "foreground_plane_kwargs" : {
            "x_radius" : FRAME_WIDTH,
            "y_radius" : FRAME_WIDTH,
            "secondary_line_ratio" : 0
        },
        # Transposed matrix of the change of basis applied mid-scene.
        "t_matrix" : [[2, 1], [-1, 1]],
        "i_target_color" : YELLOW,
        "j_target_color" : MAROON_B,
        "sum_color" : PINK,
        # The coordinates both characters interpret in their own basis.
        "vector" : [-1, 2],
    }
    def construct(self):
        randy = Randolph()
        pinky = Mortimer(color = PINK)
        randy.to_corner(DOWN+LEFT)
        pinky.to_corner(DOWN+RIGHT)
        self.plane.fade()
        self.add_foreground_mobject(randy, pinky)
        coords = Matrix(self.vector)
        coords.add_to_back(BackgroundRectangle(coords))
        self.add_foreground_mobject(coords)
        # The coordinate array starts by Randolph and travels to Mortimer
        # when the transformation is applied.
        coords.move_to(
            randy.get_corner(UP+RIGHT),
            aligned_edge = DOWN+LEFT
        )
        coords.target = coords.copy()
        coords.target.move_to(
            pinky.get_corner(UP+LEFT),
            aligned_edge = DOWN+RIGHT
        )
        self.play(
            Write(coords),
            randy.change_mode, "speaking"
        )
        self.scale_basis_vectors()
        self.apply_transposed_matrix(
            self.t_matrix,
            added_anims = [
                MoveToTarget(coords),
                ApplyMethod(pinky.change_mode, "speaking"),
                ApplyMethod(randy.change_mode, "plain"),
            ]
        )
        # Recolor the basis vectors to mark them as the new basis.
        self.play(
            randy.change_mode, "erm",
            self.i_hat.set_color, self.i_target_color,
            self.j_hat.set_color, self.j_target_color,
        )
        self.i_hat.color = self.i_target_color
        self.j_hat.color = self.j_target_color
        self.scale_basis_vectors()
    def scale_basis_vectors(self):
        """Animate vector[0]*i_hat + vector[1]*j_hat, then restore the basis."""
        for vect in self.i_hat, self.j_hat:
            vect.save_state()
        self.play(self.i_hat.scale, self.vector[0])
        self.play(self.j_hat.scale, self.vector[1])
        # Tip-to-tail addition of the two scaled basis vectors.
        self.play(self.j_hat.shift, self.i_hat.get_end())
        sum_vect = Vector(self.j_hat.get_end(), color = self.sum_color)
        self.play(ShowCreation(sum_vect))
        self.wait(2)
        self.play(
            FadeOut(sum_vect),
            self.i_hat.restore,
            self.j_hat.restore,
        )
        self.wait()
| 32.975659 | 82 | 0.557637 | from manimlib.imports import *
from old_projects.eola.chapter5 import get_det_text
from old_projects.eola.chapter8 import *
class OpeningQuote(Scene):
def construct(self):
words = TextMobject(
"From [Grothendieck], I have also learned not",
"to take glory in the ",
"difficulty of a proof:",
"difficulty means we have not understood.",
"The idea is to be able to ",
"paint a landscape",
"in which the proof is obvious.",
arg_separator = " "
)
words.set_color_by_tex("difficulty of a proof:", RED)
words.set_color_by_tex("paint a landscape", GREEN)
words.set_width(FRAME_WIDTH - 2)
words.to_edge(UP)
author = TextMobject("-Pierre Deligne")
author.set_color(YELLOW)
author.next_to(words, DOWN, buff = 0.5)
self.play(FadeIn(words))
self.wait(4)
self.play(Write(author, run_time = 3))
self.wait()
class CrossProductSymbols(Scene):
def construct(self):
v_tex, w_tex, p_tex = get_vect_tex(*"vwp")
equation = TexMobject(
v_tex, "\\times", w_tex, "=", p_tex
)
equation.set_color_by_tex(v_tex, V_COLOR)
equation.set_color_by_tex(w_tex, W_COLOR)
equation.set_color_by_tex(p_tex, P_COLOR)
brace = Brace(equation[-1])
brace.stretch_to_fit_width(0.7)
vector_text = brace.get_text("Vector")
vector_text.set_color(RED)
self.add(equation)
self.play(*list(map(Write, [brace, vector_text])))
self.wait()
class DeterminantTrickCopy(DeterminantTrick):
pass
class BruteForceVerification(Scene):
def construct(self):
v = Matrix(["v_1", "v_2", "v_3"])
w = Matrix(["w_1", "w_2", "w_3"])
v1, v2, v3 = v.get_entries()
w1, w2, w3 = w.get_entries()
v.set_color(V_COLOR)
w.set_color(W_COLOR)
def get_term(e1, e2, e3, e4):
group = VGroup(
e1.copy(), e2.copy(),
TexMobject("-"),
e3.copy(), e4.copy(),
)
group.arrange()
return group
cross = Matrix(list(it.starmap(get_term, [
(v2, w3, v3, w2),
(v3, w1, v1, w3),
(v2, w3, v3, w2),
])))
cross_product = VGroup(
v.copy(), TexMobject("\\times"), w.copy(),
TexMobject("="), cross.copy()
)
cross_product.arrange()
cross_product.scale(0.75)
formula_word = TextMobject("Numerical formula")
computation_words = TextMobject("""
Facts you could (painfully)
verify computationally
""")
computation_words.scale(0.75)
h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
v_line = Line(UP, DOWN).scale(FRAME_Y_RADIUS)
computation_words.to_edge(UP, buff = MED_SMALL_BUFF/2)
h_line.next_to(computation_words, DOWN)
formula_word.next_to(h_line, UP, buff = MED_SMALL_BUFF)
computation_words.shift(FRAME_X_RADIUS*RIGHT/2)
formula_word.shift(FRAME_X_RADIUS*LEFT/2)
cross_product.next_to(formula_word, DOWN, buff = LARGE_BUFF)
self.add(formula_word, computation_words)
self.play(
ShowCreation(h_line),
ShowCreation(v_line),
Write(cross_product)
)
v_tex, w_tex = get_vect_tex(*"vw")
v_dot, w_dot = [
TexMobject(
tex, "\\cdot",
"(", v_tex, "\\times", w_tex, ")",
"= 0"
)
for tex in (v_tex, w_tex)
]
theta_def = TexMobject(
"\\theta",
"= \\cos^{-1} \\big(", v_tex, "\\cdot", w_tex, "/",
"(||", v_tex, "||", "\\cdot", "||", w_tex, "||)", "\\big)"
)
length_check = TexMobject(
"||", "(", v_tex, "\\times", w_tex, ")", "|| = ",
"(||", v_tex, "||)",
"(||", w_tex, "||)",
"\\sin(", "\\theta", ")"
)
last_point = h_line.get_center()+FRAME_X_RADIUS*RIGHT/2
max_width = FRAME_X_RADIUS-1
for mob in v_dot, w_dot, theta_def, length_check:
mob.set_color_by_tex(v_tex, V_COLOR)
mob.set_color_by_tex(w_tex, W_COLOR)
mob.set_color_by_tex("\\theta", GREEN)
mob.next_to(last_point, DOWN, buff = MED_SMALL_BUFF)
if mob.get_width() > max_width:
mob.set_width(max_width)
last_point = mob
self.play(FadeIn(mob))
self.wait()
class ButWeCanDoBetter(TeacherStudentsScene):
def construct(self):
self.teacher_says("But we can do \\\\ better than that")
self.change_student_modes(*["happy"]*3)
self.random_blink(3)
class Prerequisites(Scene):
def construct(self):
title = TextMobject("Prerequisites")
title.to_edge(UP)
title.set_color(YELLOW)
rect = Rectangle(width = 16, height = 9, color = BLUE)
rect.set_width(FRAME_X_RADIUS - 1)
left_rect, right_rect = [
rect.copy().shift(DOWN/2).to_edge(edge)
for edge in (LEFT, RIGHT)
]
chapter5 = TextMobject("""
\\centering
Chapter 5
Determinants
""")
chapter7 = TextMobject("""
\\centering
Chapter 7:
Dot products and duality
""")
self.add(title)
for chapter, rect in (chapter5, left_rect), (chapter7, right_rect):
if chapter.get_width() > rect.get_width():
chapter.set_width(rect.get_width())
chapter.next_to(rect, UP)
self.play(
Write(chapter5),
ShowCreation(left_rect)
)
self.play(
Write(chapter7),
ShowCreation(right_rect)
)
self.wait()
class DualityReview(TeacherStudentsScene):
    """Teacher announces a quick duality review; 'duality' gets a gradient."""
    def construct(self):
        words = TextMobject("Quick", "duality", "review")
        # Highlight only the word "duality".
        words[1].set_color_by_gradient(BLUE, YELLOW)
        self.teacher_says(words, target_mode = "surprised")
        self.change_student_modes("pondering")
        self.random_blink(2)
class DotProductToTransformSymbol(Scene):
    """Show symbolically that a dot product equals a 1x2 matrix transform.

    Builds the equation  [v] . [x,y] = [v1 v2][x,y]  and braces each side
    with the labels "Dot product" / "Transform".
    """
    CONFIG = {
        "vect_coords" : [2, 1]
    }
    def construct(self):
        v_mob = TexMobject(get_vect_tex("v"))
        v_mob.set_color(V_COLOR)
        # Same coordinates rendered as a row matrix and as a column vector.
        matrix = Matrix([self.vect_coords])
        vector = Matrix(self.vect_coords)
        matrix.set_column_colors(X_COLOR, Y_COLOR)
        vector.set_column_colors(YELLOW)
        _input = Matrix(["x", "y"])
        _input.get_entries().set_color_by_gradient(X_COLOR, Y_COLOR)
        left_input, right_input = [_input.copy() for x in range(2)]
        dot, equals = list(map(TexMobject, ["\\cdot", "="]))
        equation = VGroup(
            vector, dot, left_input, equals,
            matrix, right_input
        )
        equation.arrange()
        left_brace = Brace(VGroup(vector, left_input))
        right_brace = Brace(matrix, UP)
        left_words = left_brace.get_text("Dot product")
        right_words = right_brace.get_text("Transform")
        right_words.set_width(right_brace.get_width())
        # Decorate each [x, y] input with a small labelled v-brace on top.
        right_v_brace = Brace(right_input, UP)
        right_v_mob = v_mob.copy()
        right_v_brace.put_at_tip(right_v_mob)
        right_input.add(right_v_brace, right_v_mob)
        left_v_brace = Brace(left_input, UP)
        left_v_mob = v_mob.copy()
        left_v_brace.put_at_tip(left_v_mob)
        left_input.add(left_v_brace, left_v_mob)
        self.add(matrix, right_input)
        self.play(
            GrowFromCenter(right_brace),
            Write(right_words, run_time = 1)
        )
        self.wait()
        # Morph copies of the matrix side into the dot-product side.
        self.play(
            Write(equals),
            Write(dot),
            Transform(matrix.copy(), vector),
            Transform(right_input.copy(), left_input)
        )
        self.play(
            GrowFromCenter(left_brace),
            Write(left_words, run_time = 1)
        )
        self.wait()
class MathematicalWild(Scene):
    """Randolph encounters a 'dual vector' in the mathematical wild.

    A thought bubble describes a linear map to the number line while a
    vector labelled "Dual vector" appears beside him.
    """
    def construct(self):
        title = TextMobject("In the mathematical wild")
        title.to_edge(UP)
        self.add(title)
        randy = Randolph()
        randy.shift(DOWN)
        bubble = ThoughtBubble(width = 5, height = 4)
        bubble.write("""
            \\centering
            Some linear
            transformation
            to the number line
        """)
        bubble.content.set_color(BLUE)
        bubble.content.shift(MED_SMALL_BUFF*UP/2)
        # Keep only the bubble outline plus its written content.
        bubble.remove(*bubble[:-1])
        bubble.add(bubble.content)
        bubble.next_to(randy.get_corner(UP+RIGHT), RIGHT)
        vector = Vector([1, 2])
        vector.move_to(randy.get_corner(UP+LEFT), aligned_edge = DOWN+LEFT)
        dual_words = TextMobject("Dual vector")
        dual_words.set_color_by_gradient(BLUE, YELLOW)
        dual_words.next_to(vector, LEFT)
        self.add(randy)
        self.play(Blink(randy))
        self.play(FadeIn(bubble))
        self.play(randy.change_mode, "sassy")
        self.play(Blink(randy))
        self.wait()
        self.play(randy.look, UP+LEFT)
        self.play(
            ShowCreation(vector),
            randy.change_mode, "raise_right_hand"
        )
        self.wait()
        self.play(Write(dual_words))
        self.play(Blink(randy))
        self.wait()
class ThreeStepPlan(Scene):
    """Lay out the three-step plan for deriving the cross product via duality,
    then collapse it into 'Linear transformation <=> determinant' imagery.
    """
    def construct(self):
        title = TextMobject("The plan")
        title.set_color(YELLOW)
        title.to_edge(UP)
        h_line = Line(LEFT, RIGHT).scale(FRAME_X_RADIUS)
        h_line.next_to(title, DOWN)
        v_tex, w_tex = get_vect_tex(*"vw")
        v_text, w_text, cross_text = [
            "$%s$"%s
            for s in (v_tex, w_tex, v_tex + "\\times" + w_tex)
        ]
        steps = [
            TextMobject(
                "1. Define a 3d-to-1d", "linear \\\\", "transformation",
                "in terms of", v_text, "and", w_text
            ),
            TextMobject(
                "2. Find its", "dual vector"
            ),
            TextMobject(
                "3. Show that this dual is", cross_text
            )
        ]
        # Grab the "linear"/"transformation" submobjects for later reuse.
        linear, transformation = steps[0][1:1+2]
        steps[0].set_color_by_tex(v_text, V_COLOR)
        steps[0].set_color_by_tex(w_text, W_COLOR)
        steps[1][1].set_color_by_gradient(BLUE, YELLOW)
        steps[2].set_color_by_tex(cross_text, P_COLOR)
        VGroup(*steps).arrange(
            DOWN, aligned_edge = LEFT, buff = LARGE_BUFF
        ).next_to(h_line, DOWN, buff = MED_SMALL_BUFF)
        self.add(title)
        self.play(ShowCreation(h_line))
        for step in steps:
            self.play(Write(step, run_time = 2))
            self.wait()
        linear_transformation = TextMobject("Linear", "transformation")
        linear_transformation.next_to(h_line, DOWN, MED_SMALL_BUFF)
        det = self.get_det()
        rect = Rectangle(width = 16, height = 9, color = BLUE)
        rect.set_height(3.5)
        left_right_arrow = TexMobject("\\Leftrightarrow")
        left_right_arrow.shift(DOWN)
        det.next_to(left_right_arrow, LEFT)
        rect.next_to(left_right_arrow, RIGHT)
        # Remove the reused words from step 1 before fading the steps out.
        steps[0].remove(linear, transformation)
        self.play(
            Transform(
                VGroup(linear, transformation),
                linear_transformation
            ),
            *list(map(FadeOut, steps))
        )
        self.wait()
        self.play(Write(left_right_arrow))
        self.play(Write(det))
        self.play(ShowCreation(rect))
        self.wait(0)
    def get_det(self):
        """Return the det(i-hat, j-hat, k-hat | v | w) symbol with colors."""
        matrix = Matrix(np.array([
            ["\\hat{\\imath}", "\\hat{\\jmath}", "\\hat{k}"],
            ["v_%d"%d for d in range(1, 4)],
            ["w_%d"%d for d in range(1, 4)],
        ]).T)
        matrix.set_column_colors(X_COLOR, V_COLOR, W_COLOR)
        matrix.get_mob_matrix()[1, 0].set_color(Y_COLOR)
        matrix.get_mob_matrix()[2, 0].set_color(Z_COLOR)
        # Nudge rows down to visually align the basis-vector hats.
        VGroup(*matrix.get_mob_matrix()[1, 1:]).shift(0.15*DOWN)
        VGroup(*matrix.get_mob_matrix()[2, 1:]).shift(0.35*DOWN)
        det_text = get_det_text(matrix)
        det_text.add(matrix)
        return det_text
class DefineDualTransform(Scene):
    """Walk through the dual-vector derivation of the cross product.

    Steps (one helper per step, driven from construct):
      1. Show the (wrong) u x v x w triple product as a determinant.
      2. Replace u with variables (x, y, z) to define a 3d-to-1d function f.
      3. Introduce the dual vector p for that linear function.
      4. Expand p . [x,y,z] against the determinant's cofactor expansion.
      5. Ask what vector p has this property.
    Helpers communicate via attributes set on self (title, matrix, entries...).
    """
    def construct(self):
        self.add_title()
        self.show_triple_cross_product()
        self.write_function()
        self.introduce_dual_vector()
        self.expand_dot_product()
        self.ask_question()
    def add_title(self):
        """Add the red warning title; stash its replacement on title.not_real."""
        title = TextMobject("What a student might think")
        title.not_real = TextMobject("Not the real cross product")
        for mob in title, title.not_real:
            mob.set_width(FRAME_X_RADIUS - 1)
            mob.set_color(RED)
            mob.to_edge(UP)
        self.add(title)
        self.title = title
    def show_triple_cross_product(self):
        """Animate u, v, w column definitions merging into one determinant."""
        colors = [WHITE, ORANGE, W_COLOR]
        tex_mobs = list(map(TexMobject, get_vect_tex(*"uvw")))
        u_tex, v_tex, w_tex = tex_mobs
        arrays = [
            Matrix(["%s_%d"%(s, d) for d in range(1, 4)])
            for s in "uvw"
        ]
        defs_equals = VGroup()
        definitions = VGroup()
        for array, tex_mob, color in zip(arrays, tex_mobs, colors):
            array.set_column_colors(color)
            tex_mob.set_color(color)
            equals = TexMobject("=")
            definition = VGroup(tex_mob, equals, array)
            definition.arrange(RIGHT)
            definitions.add(definition)
            defs_equals.add(equals)
        definitions.arrange(buff = MED_SMALL_BUFF)
        definitions.shift(2*DOWN)
        # Everything that will morph into part of the determinant gets a .target.
        mobs_with_targets = list(it.chain(
            tex_mobs, *[a.get_entries() for a in arrays]
        ))
        for mob in mobs_with_targets:
            mob.target = mob.copy()
        matrix = Matrix(np.array([
            [e.target for e in array.get_entries()]
            for array in arrays
        ]).T)
        det_text = get_det_text(matrix, background_rect = False)
        syms = times1, times2, equals = [
            TexMobject(sym)
            for sym in ("\\times", "\\times", "=",)
        ]
        triple_cross = VGroup(
            u_tex.target, times1, v_tex.target, times2, w_tex.target, equals
        )
        triple_cross.arrange()
        final_mobs = VGroup(triple_cross, VGroup(det_text, matrix))
        final_mobs.arrange()
        final_mobs.next_to(self.title, DOWN, buff = MED_SMALL_BUFF)
        for mob in definitions, final_mobs:
            mob.set_width(FRAME_X_RADIUS - 1)
        for array in arrays:
            brackets = array.get_brackets()
            brackets.target = matrix.get_brackets()
            mobs_with_targets.append(brackets)
        for def_equals in defs_equals:
            def_equals.target = equals
            mobs_with_targets.append(def_equals)
        self.play(FadeIn(
            definitions,
            run_time = 2,
            lag_ratio = 0.5
        ))
        self.wait(2)
        self.play(*[
            Transform(mob.copy(), mob.target)
            for mob in tex_mobs
        ] + [
            Write(times1),
            Write(times2),
        ])
        triple_cross.add(*self.get_mobjects_from_last_animation()[:3])
        self.play(*[
            Transform(mob.copy(), mob.target)
            for mob in mobs_with_targets
            if mob not in tex_mobs
        ])
        # The copies created above, in column order: u, then v, then w entries.
        u_entries = self.get_mobjects_from_last_animation()[:3]
        v_entries = self.get_mobjects_from_last_animation()[3:6]
        w_entries = self.get_mobjects_from_last_animation()[6:9]
        self.play(Write(det_text))
        self.wait(2)
        self.det_text = det_text
        self.definitions = definitions
        self.u_entries = u_entries
        self.v_entries = v_entries
        self.w_entries = w_entries
        self.matrix = matrix
        self.triple_cross = triple_cross
        self.v_tex, self.w_tex = v_tex, w_tex
        self.equals = equals
    def write_function(self):
        """Replace the u column by (x, y, z) and write f([x, y, z]) = det."""
        brace = Brace(self.det_text, DOWN)
        number_text = brace.get_text("Number")
        self.play(Transform(self.title, self.title.not_real))
        self.wait()
        self.play(FadeOut(self.definitions))
        self.play(
            GrowFromCenter(brace),
            Write(number_text)
        )
        self.wait()
        x, y, z = variables = list(map(TexMobject, "xyz"))
        for var, entry in zip(variables, self.u_entries):
            var.scale(0.8)
            var.move_to(entry)
            entry.target = var
        brace.target = Brace(z)
        brace.target.stretch_to_fit_width(0.5)
        number_text.target = brace.target.get_text("Variable")
        # Label the v and w columns with braced vector symbols.
        v_brace = Brace(self.matrix.get_mob_matrix()[0, 1], UP)
        w_brace = Brace(self.matrix.get_mob_matrix()[0, 2], UP)
        for vect_brace, tex in (v_brace, self.v_tex), (w_brace, self.w_tex):
            vect_brace.stretch_to_fit_width(brace.target.get_width())
            new_tex = tex.copy()
            vect_brace.put_at_tip(new_tex)
            vect_brace.tex = new_tex
        func_tex = TexMobject(
            "f\\left(%s\\right)"%matrix_to_tex_string(list("xyz"))
        )
        func_tex.scale(0.7)
        # Swap the rendered column inside f(...) for a proper Matrix mobject.
        func_input = Matrix(list("xyz"))
        func_input_template = VGroup(*func_tex[3:-2])
        func_input.set_height(func_input_template.get_height())
        func_input.next_to(VGroup(*func_tex[:3]), RIGHT)
        VGroup(*func_tex[-2:]).next_to(func_input, RIGHT)
        func_tex[0].scale_in_place(1.5)
        func_tex = VGroup(
            VGroup(*[func_tex[i] for i in (0, 1, 2, -2, -1)]),
            func_input
        )
        func_tex.next_to(self.equals, LEFT)
        self.play(
            FadeOut(self.title),
            FadeOut(self.triple_cross),
            *[
                Transform(mob, mob.target)
                for mob in [brace, number_text]
            ]
        )
        self.play(*[
            Transform(mob, mob.target)
            for mob in self.u_entries
        ])
        self.play(*[
            Write(VGroup(vect_brace, vect_brace.tex))
            for vect_brace in (v_brace, w_brace)
        ])
        self.wait()
        self.play(Write(func_tex))
        self.wait()
        self.func_tex = func_tex
        self.variables_text = VGroup(brace, number_text)
    def introduce_dual_vector(self):
        """Morph f into a 1x3 matrix, then into the dual vector p (dot form)."""
        everything = VGroup(*self.get_mobjects())
        colors = [X_COLOR, Y_COLOR, Z_COLOR]
        q_marks = VGroup(*list(map(TextMobject, "???")))
        q_marks.scale(2)
        q_marks.set_color_by_gradient(*colors)
        title = VGroup(TextMobject("This function is linear"))
        title.set_color(GREEN)
        title.to_edge(UP)
        matrix = Matrix([list(q_marks.copy())])
        matrix.set_height(self.func_tex.get_height()/2)
        dual_vector = Matrix(list(q_marks))
        dual_vector.set_height(self.func_tex.get_height())
        dual_vector.get_brackets()[0].shift(0.2*LEFT)
        dual_vector.get_entries().shift(0.1*LEFT)
        dual_vector.scale(1.25)
        dual_dot = VGroup(
            dual_vector,
            TexMobject("\\cdot").next_to(dual_vector)
        )
        matrix_words = TextMobject("""
            $1 \\times 3$ matrix encoding the
            3d-to-1d linear transformation
        """)
        self.play(
            Write(title, run_time = 2),
            everything.shift, DOWN
        )
        # Ungroup so later animations can address submobjects individually.
        self.remove(everything)
        self.add(*everything)
        self.wait()
        func, func_input = self.func_tex
        func_input.target = func_input.copy()
        func_input.target.scale(1.2)
        func_input.target.move_to(self.func_tex, aligned_edge = RIGHT)
        matrix.next_to(func_input.target, LEFT)
        dual_dot.next_to(func_input.target, LEFT)
        matrix_words.next_to(matrix, DOWN, buff = 1.5)
        matrix_words.shift_onto_screen()
        matrix_arrow = Arrow(
            matrix_words.get_top(),
            matrix.get_bottom(),
            color = WHITE
        )
        self.play(
            Transform(func, matrix),
            MoveToTarget(func_input),
            FadeOut(self.variables_text),
        )
        self.wait()
        self.play(
            Write(matrix_words),
            ShowCreation(matrix_arrow)
        )
        self.wait(2)
        self.play(*list(map(FadeOut, [matrix_words, matrix_arrow])))
        self.play(
            Transform(func, dual_vector),
            Write(dual_dot[1])
        )
        self.wait()
        p_coords = VGroup(*list(map(TexMobject, [
            "p_%d"%d for d in range(1, 4)
        ])))
        p_coords.set_color(RED)
        p_array = Matrix(list(p_coords))
        p_array.set_height(dual_vector.get_height())
        p_array.move_to(dual_vector, aligned_edge = RIGHT)
        p_brace = Brace(p_array, UP)
        p_tex = TexMobject(get_vect_tex("p"))
        p_tex.set_color(P_COLOR)
        p_brace.put_at_tip(p_tex)
        self.play(
            GrowFromCenter(p_brace),
            Write(p_tex)
        )
        self.play(Transform(
            func, p_array,
            run_time = 2,
            lag_ratio = 0.5
        ))
        self.remove(func)
        self.add(p_array)
        self.wait()
        self.play(FadeOut(title))
        self.wait()
        self.p_array = p_array
        self.input_array = func_input
    def expand_dot_product(self):
        """Expand p . [x,y,z] and match terms against the determinant cofactors."""
        everything = VGroup(*self.get_mobjects())
        self.play(everything.to_edge, UP)
        self.remove(everything)
        self.add(*everything)
        to_fade = VGroup()
        p_entries = self.p_array.get_entries()
        input_entries = self.input_array.get_entries()
        dot_components = VGroup()
        for p, x, i in zip(p_entries, input_entries, it.count()):
            # Last term ends with "=", earlier terms are joined by "+".
            if i == 2:
                x.sym = TexMobject("=")
            else:
                x.sym = TexMobject("+")
            p.sym = TexMobject("\\cdot")
            p.target = p.copy().scale(2)
            x.target = x.copy().scale(2)
            component = VGroup(p.target, p.sym, x.target, x.sym)
            component.arrange()
            dot_components.add(component)
        dot_components.arrange()
        dot_components.next_to(ORIGIN, LEFT)
        dot_components.shift(1.5*DOWN)
        dot_arrow = Arrow(self.p_array.get_corner(DOWN+RIGHT), dot_components)
        to_fade.add(dot_arrow)
        self.play(ShowCreation(dot_arrow))
        new_ps = VGroup()
        for p, x in zip(p_entries, input_entries):
            self.play(
                MoveToTarget(p.copy()),
                MoveToTarget(x.copy()),
                Write(p.sym),
                Write(x.sym)
            )
            mobs = self.get_mobjects_from_last_animation()
            new_ps.add(mobs[0])
            to_fade.add(*mobs[1:])
        self.wait()
        x, y, z = self.u_entries
        v1, v2, v3 = self.v_entries
        w1, w2, w3 = self.w_entries
        cross_components = VGroup()
        # Each quint is one cofactor: var*(a*b - c*d).
        quints = [
            (x, v2, w3, v3, w2),
            (y, v3, w1, v1, w3),
            (z, v1, w2, v2, w1),
        ]
        quints = [
            [m.copy() for m in quint]
            for quint in quints
        ]
        for i, quint in enumerate(quints):
            sym_strings = ["(", "\\cdot", "-", "\\cdot", ")"]
            if i < 2:
                sym_strings[-1] += "+"
            syms = list(map(TexMobject, sym_strings))
            for mob, sym in zip(quint, syms):
                mob.target = mob.copy()
                mob.target.scale(1.5)
                mob.sym = sym
            quint_targets = [mob.target for mob in quint]
            component = VGroup(*it.chain(*list(zip(quint_targets, syms))))
            component.arrange()
            cross_components.add(component)
            to_fade.add(syms[0], syms[-1], quint[0])
        cross_components.arrange(DOWN, aligned_edge = LEFT, buff = MED_SMALL_BUFF)
        cross_components.next_to(dot_components, RIGHT)
        for quint in quints:
            self.play(*[
                ApplyMethod(mob.set_color, YELLOW)
                for mob in quint
            ])
            self.wait(0.5)
            self.play(*[
                MoveToTarget(mob)
                for mob in quint
            ] + [
                Write(mob.sym)
                for mob in quint
            ])
            self.wait()
        self.play(
            ApplyFunction(
                lambda m : m.arrange(
                    DOWN, buff = MED_SMALL_BUFF+SMALL_BUFF
                ).next_to(cross_components, LEFT),
                new_ps
            ),
            *list(map(FadeOut, to_fade))
        )
        self.play(*[
            Write(TexMobject("=").next_to(p, buff = 2*SMALL_BUFF))
            for p in new_ps
        ])
        equals = self.get_mobjects_from_last_animation()
        self.wait(2)
        # Flatten the scene into a single static copy for the next step.
        everything = everything.copy()
        self.play(
            FadeOut(VGroup(*self.get_mobjects())),
            Animation(everything)
        )
        self.clear()
        self.add(everything)
    def ask_question(self):
        """Pose the punchline question: which vector p satisfies all of this?"""
        everything = VGroup(*self.get_mobjects())
        p_tex = "$%s$"%get_vect_tex("p")
        question = TextMobject(
            "What vector",
            p_tex,
            "has \\\\ the property that"
        )
        question.to_edge(UP)
        question.set_color(YELLOW)
        question.set_color_by_tex(p_tex, P_COLOR)
        everything.target = everything.copy()
        everything.target.next_to(
            question, DOWN, buff = MED_SMALL_BUFF
        )
        self.play(
            MoveToTarget(everything),
            Write(question)
        )
        self.wait()
class WhyAreWeDoingThis(TeacherStudentsScene):
    """A confused student questions the point of the dual-vector detour."""
    def construct(self):
        self.student_says(
            "Um...why are \\\\ we doing this?",
            target_mode = "confused"
        )
        self.random_blink()
        self.play(self.get_teacher().change_mode, "erm")
        self.change_student_modes("plain", "confused", "raise_left_hand")
        self.random_blink()
        self.change_student_modes("pondering", "confused", "raise_left_hand")
        self.random_blink(5)
class ThreeDTripleCrossProduct(Scene):
    # Stub scene: presumably the 3d footage is rendered elsewhere — confirm.
    pass
class ThreeDMovingVariableVector(Scene):
    # Stub scene: presumably the 3d footage is rendered elsewhere — confirm.
    pass
class ThreeDMovingVariableVectorWithCrossShowing(Scene):
    # Stub scene: presumably the 3d footage is rendered elsewhere — confirm.
    pass
class NowForTheCoolPart(TeacherStudentsScene):
    """Teacher transitions from the algebraic to the geometric argument."""
    def construct(self):
        self.teacher_says(
            "Now for the\\\\",
            "cool part"
        )
        self.change_student_modes(*["happy"]*3)
        self.random_blink(2)
        self.teacher_says(
            "Let's answer the same question,\\\\",
            "but this time geometrically"
        )
        self.change_student_modes(*["pondering"]*3)
        self.random_blink(2)
class ThreeDDotProductProjection(Scene):
    pass  # Stub scene: presumably rendered by a separate 3d pipeline — confirm.
class DotProductWords(Scene):
    """Spell out p . [x,y,z] = (length of projection) x (length of p)."""
    def construct(self):
        p_tex = "$%s$"%get_vect_tex("p")
        p_label = TextMobject(p_tex)
        p_label.scale(1.5)
        p_label.set_color(P_COLOR)
        xyz_column = Matrix(list("xyz"))
        lhs = VGroup(p_label, Dot(radius = 0.07), xyz_column)
        lhs.arrange(buff = MED_SMALL_BUFF/2)
        equals = TexMobject("=")
        lhs.next_to(equals, LEFT)
        # The two right-hand-side factors, stacked vertically.
        proj_words = TextMobject("(Length of projection)")
        length_words = TextMobject("(Length of ", p_tex, ")")
        words = VGroup(proj_words, length_words)
        times = TexMobject("\\times")
        length_words.set_color_by_tex(p_tex, P_COLOR)
        proj_words.next_to(equals, RIGHT)
        length_words.next_to(proj_words, DOWN, aligned_edge = LEFT)
        times.next_to(proj_words, RIGHT)
        layout = VGroup(lhs, equals, times, words)
        layout.center().set_width(FRAME_X_RADIUS - 1)
        self.add(lhs)
        self.play(Write(equals))
        self.play(Write(proj_words))
        self.wait()
        self.play(
            Write(times),
            Write(length_words)
        )
        self.wait()
class ThreeDProjectToPerpendicular(Scene):
    pass  # Stub scene: presumably rendered by a separate 3d pipeline — confirm.
class GeometricVolumeWords(Scene):
    """Caption the volume interpretation: (area) x (perpendicular component)."""
    def construct(self):
        v_tex, w_tex = [
            "$%s$"%s
            for s in get_vect_tex(*"vw")
        ]
        words = VGroup(
            TextMobject("(Area of", "parallelogram", ")$\\times$"),
            TextMobject(
                "(Component of $%s$"%matrix_to_tex_string(list("xyz")),
                "perpendicular to", v_tex, "and", w_tex, ")"
            )
        )
        words[0].set_color_by_tex("parallelogram", BLUE)
        words[1].set_color_by_tex(v_tex, ORANGE)
        words[1].set_color_by_tex(w_tex, W_COLOR)
        words.arrange(RIGHT)
        words.set_width(FRAME_WIDTH - 1)
        words.to_edge(DOWN, buff = SMALL_BUFF)
        for word in words:
            self.play(Write(word))
        self.wait()
class WriteXYZ(Scene):
    """Animate writing the (x, y, z) column vector, nothing more."""
    def construct(self):
        self.play(Write(Matrix(list("xyz"))))
        self.wait()
class ThreeDDotProductWithCross(Scene):
    # Stub scene: presumably rendered by a separate 3d pipeline — confirm.
    pass
class CrossVectorEmphasisWords(Scene):
    """Flash the two defining properties of v x w, one after another."""
    def construct(self):
        v_tex, w_tex = ["$%s$"%s for s in get_vect_tex(*"vw")]
        words = [
            TextMobject("Perpendicular to", v_tex, "and", w_tex),
            TextMobject("Length = (Area of ", "parallelogram", ")")
        ]
        for word in words:
            word.set_color_by_tex(v_tex, ORANGE)
            word.set_color_by_tex(w_tex, W_COLOR)
            word.set_color_by_tex("parallelogram", BLUE)
            self.play(Write(word))
            self.wait()
            self.play(FadeOut(word))
class NextVideo(Scene):
    """Outro card: a titled placeholder rectangle for the next video."""
    def construct(self):
        title = TextMobject("""
            Next video: Change of basis
        """)
        title.to_edge(UP, buff = MED_SMALL_BUFF/2)
        rect = Rectangle(width = 16, height = 9, color = BLUE)
        rect.set_height(6)
        rect.next_to(title, DOWN)
        self.add(title)
        self.play(ShowCreation(rect))
        self.wait()
class ChangeOfBasisPreview(LinearTransformationScene):
    """Preview of the change-of-basis video: the same coordinates read off
    against two different basis pairs (Randolph's vs Pinky's).
    """
    CONFIG = {
        "include_background_plane" : False,
        "foreground_plane_kwargs" : {
            "x_radius" : FRAME_WIDTH,
            "y_radius" : FRAME_WIDTH,
            "secondary_line_ratio" : 0
        },
        "t_matrix" : [[2, 1], [-1, 1]],
        "i_target_color" : YELLOW,
        "j_target_color" : MAROON_B,
        "sum_color" : PINK,
        "vector" : [-1, 2],
    }
    def construct(self):
        randy = Randolph()
        pinky = Mortimer(color = PINK)
        randy.to_corner(DOWN+LEFT)
        pinky.to_corner(DOWN+RIGHT)
        self.plane.fade()
        self.add_foreground_mobject(randy, pinky)
        coords = Matrix(self.vector)
        coords.add_to_back(BackgroundRectangle(coords))
        self.add_foreground_mobject(coords)
        coords.move_to(
            randy.get_corner(UP+RIGHT),
            aligned_edge = DOWN+LEFT
        )
        # The coordinate label will travel over to Pinky after the transform.
        coords.target = coords.copy()
        coords.target.move_to(
            pinky.get_corner(UP+LEFT),
            aligned_edge = DOWN+RIGHT
        )
        self.play(
            Write(coords),
            randy.change_mode, "speaking"
        )
        self.scale_basis_vectors()
        self.apply_transposed_matrix(
            self.t_matrix,
            added_anims = [
                MoveToTarget(coords),
                ApplyMethod(pinky.change_mode, "speaking"),
                ApplyMethod(randy.change_mode, "plain"),
            ]
        )
        # Recolor the basis vectors to mark them as the "new" basis.
        self.play(
            randy.change_mode, "erm",
            self.i_hat.set_color, self.i_target_color,
            self.j_hat.set_color, self.j_target_color,
        )
        self.i_hat.color = self.i_target_color
        self.j_hat.color = self.j_target_color
        self.scale_basis_vectors()
    def scale_basis_vectors(self):
        """Show vector = v[0]*i_hat + v[1]*j_hat, then restore the basis."""
        for vect in self.i_hat, self.j_hat:
            vect.save_state()
        self.play(self.i_hat.scale, self.vector[0])
        self.play(self.j_hat.scale, self.vector[1])
        # Tip-to-tail addition: shift scaled j_hat onto the end of scaled i_hat.
        self.play(self.j_hat.shift, self.i_hat.get_end())
        sum_vect = Vector(self.j_hat.get_end(), color = self.sum_color)
        self.play(ShowCreation(sum_vect))
        self.wait(2)
        self.play(
            FadeOut(sum_vect),
            self.i_hat.restore,
            self.j_hat.restore,
        )
        self.wait()
f72b264401ddefa4e28e25f16a1019753ba3292c | 1,370 | py | Python | python/coffer/coins/btc.py | Steve132/wallet_standard | 09c909b24dc17cf6a0a433644d8f1912e886ab1c | [
"MIT"
] | null | null | null | python/coffer/coins/btc.py | Steve132/wallet_standard | 09c909b24dc17cf6a0a433644d8f1912e886ab1c | [
"MIT"
] | null | null | null | python/coffer/coins/btc.py | Steve132/wallet_standard | 09c909b24dc17cf6a0a433644d8f1912e886ab1c | [
"MIT"
] | null | null | null | from ..wallet import *
from _coin import *
from ..bip32 import Bip32
from blockchain._insight import InsightBlockchainInterface
from blockchain._interface import MultiBlockchainInterface
from impl._segwitcoin import *
class BTC(SegwitCoin):
def __init__(self,is_testnet=False):
#self.supported=True
if(not is_testnet):
pkh_prefix=0x00
sh_prefix=0x05
wif_prefix=0x80
bech32_prefix="bc"
else:
pkh_prefix=0x6F
sh_prefix=0xC4
wif_prefix=0xEF
bech32_prefix="tb"
sig_prefix=b'Bitcoin Signed Message:\n'
super(BTC,self).__init__('BTC',is_testnet=is_testnet,
pkh_prefix=pkh_prefix,
sh_prefix=sh_prefix,
wif_prefix=wif_prefix,
sig_prefix=sig_prefix,bech32_prefix=bech32_prefix)
def blockchain(self,*args,**kwargs):
subcoins=[]
if(not self.is_testnet):
insighturls=[
"https://insight.bitpay.com/api",
"https://blockexplorer.com/api",
"https://localbitcoinschain.com/api",
"https://bitcore2.trezor.io/api",
"https://btc.blockdozer.com/insight-api"
]
else:
insighturls=[
"https://tbtc.blockdozer.com/insight-api",
"https://testnet.blockexplorer.com/api"
#"https://test-insight.bitpay.com/api" This is testnetv1, doesn't work
]
insights=[InsightBlockchainInterface(self,insighturls)]
subcoins.extend(insights)
return MultiBlockchainInterface(self,subcoins).select()
| 26.346154 | 75 | 0.734307 | from ..wallet import *
from _coin import *
from ..bip32 import Bip32
from blockchain._insight import InsightBlockchainInterface
from blockchain._interface import MultiBlockchainInterface
from impl._segwitcoin import *
class BTC(SegwitCoin):
    """Bitcoin (BTC) network parameters and Insight-backed blockchain lookup."""

    def __init__(self, is_testnet=False):
        # (pkh, sh, wif, bech32) selected per network.
        net_params = (
            (0x6F, 0xC4, 0xEF, "tb") if is_testnet
            else (0x00, 0x05, 0x80, "bc")
        )
        pkh_prefix, sh_prefix, wif_prefix, bech32_prefix = net_params
        sig_prefix = b'Bitcoin Signed Message:\n'
        super(BTC, self).__init__(
            'BTC',
            is_testnet=is_testnet,
            pkh_prefix=pkh_prefix,
            sh_prefix=sh_prefix,
            wif_prefix=wif_prefix,
            sig_prefix=sig_prefix,
            bech32_prefix=bech32_prefix,
        )

    def blockchain(self, *args, **kwargs):
        """Pick whichever configured Insight endpoint responds."""
        if not self.is_testnet:
            insighturls = [
                "https://insight.bitpay.com/api",
                "https://blockexplorer.com/api",
                "https://localbitcoinschain.com/api",
                "https://bitcore2.trezor.io/api",
                "https://btc.blockdozer.com/insight-api"
            ]
        else:
            insighturls = [
                "https://tbtc.blockdozer.com/insight-api",
                "https://testnet.blockexplorer.com/api"
            ]
        subcoins = []
        subcoins.extend([InsightBlockchainInterface(self, insighturls)])
        return MultiBlockchainInterface(self, subcoins).select()
| true | true |
f72b27956bef78d99560b5b1289b72d9c87c03d4 | 1,672 | py | Python | adminmgr/media/code/A3/task2/BD_151_987_1496_1503_KYP9LpV.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 9 | 2019-11-08T02:05:27.000Z | 2021-12-13T12:06:35.000Z | adminmgr/media/code/A3/task2/BD_151_987_1496_1503_KYP9LpV.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 6 | 2019-11-27T03:23:16.000Z | 2021-06-10T19:15:13.000Z | adminmgr/media/code/A3/task2/BD_151_987_1496_1503_KYP9LpV.py | IamMayankThakur/test-bigdata | cef633eb394419b955bdce479699d0115d8f99c3 | [
"Apache-2.0"
] | 4 | 2019-11-26T17:04:27.000Z | 2021-12-13T11:57:03.000Z | from pyspark.sql import SparkSession
from pyspark.sql.functions import explode,split,desc,max
from pyspark.sql.types import *
from pyspark.sql.types import StringType, StructType, StructField
spark = SparkSession \
.builder \
.appName("StructuredStreaming") \
.getOrCreate()
inputpath="hdfs://localhost:9000/stream/"
schema = StructType([ StructField("ID", StringType(), True),
StructField("Lang", StringType(), True),
StructField("Date", StringType(), True),
StructField("Source", StringType(), True),
StructField("Len", StringType(), True),
StructField("Likes", StringType(), True),
StructField("RTs", StringType(), True),
StructField("Hashtags", StringType(), True),
StructField("UserMentionNames", StringType(), True),
StructField("UserMentionID", StringType(), True),
StructField("name", StringType(), True),
StructField("Place", StringType(), True),
StructField("Followers", StringType(), True),
StructField("Friends", StringType(), True)])
lines = spark \
.readStream \
.schema(schema) \
.option("sep", ";") \
.csv(inputpath)
inputDF = lines.withColumn("FRRatio",lines.Followers/lines.Friends)
inputDF = inputDF.groupBy("name").agg(max("FRRatio").alias("FRRatio")).sort(desc("FRRatio")).select("name","FRRatio")
query=inputDF.writeStream.outputMode("complete").option("numRows",1).format("console").start()
query.awaitTermination(60)
query.stop()
| 44 | 117 | 0.600478 | from pyspark.sql import SparkSession
from pyspark.sql.functions import explode,split,desc,max
from pyspark.sql.types import *
from pyspark.sql.types import StringType, StructType, StructField
spark = SparkSession \
.builder \
.appName("StructuredStreaming") \
.getOrCreate()
inputpath="hdfs://localhost:9000/stream/"
schema = StructType([ StructField("ID", StringType(), True),
StructField("Lang", StringType(), True),
StructField("Date", StringType(), True),
StructField("Source", StringType(), True),
StructField("Len", StringType(), True),
StructField("Likes", StringType(), True),
StructField("RTs", StringType(), True),
StructField("Hashtags", StringType(), True),
StructField("UserMentionNames", StringType(), True),
StructField("UserMentionID", StringType(), True),
StructField("name", StringType(), True),
StructField("Place", StringType(), True),
StructField("Followers", StringType(), True),
StructField("Friends", StringType(), True)])
lines = spark \
.readStream \
.schema(schema) \
.option("sep", ";") \
.csv(inputpath)
inputDF = lines.withColumn("FRRatio",lines.Followers/lines.Friends)
inputDF = inputDF.groupBy("name").agg(max("FRRatio").alias("FRRatio")).sort(desc("FRRatio")).select("name","FRRatio")
query=inputDF.writeStream.outputMode("complete").option("numRows",1).format("console").start()
query.awaitTermination(60)
query.stop()
| true | true |
f72b2840162bfc1b4ca923abd4640365761a2d0e | 19,645 | py | Python | wagtail_wordpress_import/test/tests/test_wordpress_item.py | fabienheureux/wagtail-wordpress-import | 3c27330258e24a6b52f3d580060f607706bbc9d0 | [
"MIT"
] | null | null | null | wagtail_wordpress_import/test/tests/test_wordpress_item.py | fabienheureux/wagtail-wordpress-import | 3c27330258e24a6b52f3d580060f607706bbc9d0 | [
"MIT"
] | null | null | null | wagtail_wordpress_import/test/tests/test_wordpress_item.py | fabienheureux/wagtail-wordpress-import | 3c27330258e24a6b52f3d580060f607706bbc9d0 | [
"MIT"
] | null | null | null | import json
import os
import re
import unittest
from collections import Counter
from datetime import datetime
from unittest import mock
from xml.dom import pulldom
from django.test import TestCase, override_settings
from wagtail.core.models import Page
from example.models import Category
from wagtail_wordpress_import.functions import node_to_dict
from wagtail_wordpress_import.importers.wordpress import (
DEFAULT_PREFILTERS,
WordpressImporter,
WordpressItem,
)
from wagtail_wordpress_import.logger import Logger
BASE_PATH = os.path.dirname(os.path.dirname(__file__))
FIXTURES_PATH = BASE_PATH + "/fixtures"
LOG_DIR = "fakedir"  # logger output directory; never inspected by these tests

# Default arguments passed to WordpressImporter.run() throughout this module.
IMPORTER_RUN_PARAMS_TEST = {
    "app_for_pages": "example",
    "model_for_pages": "TestPage",
    "parent_id": "2",
    "page_types": ["post", "page"],
    "page_statuses": ["publish", "draft"],
}
class WordpressItemTests(TestCase):
    """Unit tests for WordpressItem field cleaning and debug output."""

    def setUp(self):
        self.logger = Logger("fakedir")
        body_html = """<p>Dummmy text</p><p>Dummmy text</p><p>Dummmy text</p>"""
        # Node with fully valid values, as parsed from a Wordpress XML export.
        self.good_node = {
            "title": "Page Title",
            "wp:post_name": "page-title",
            "wp:post_date_gmt": "2017-03-12 17:53:57",
            "wp:post_modified_gmt": "2018-12-04 11:49:24",
            "content:encoded": body_html,
            "wp:post_id": "1000",
            "wp:post_type": "post",
            "link": "http://www.example.com",
        }
        # Node with blank/zero values that cleaning must repair or default.
        self.bad_node = {
            "title": "Page Title",
            "wp:post_name": "",
            "wp:post_date_gmt": "0000-00-00 00:00:00",
            "wp:post_modified_gmt": "0000-00-00 00:00:00",
            "content:encoded": body_html,
            "wp:post_id": "1000",
            "wp:post_type": "post",
            "link": "",
        }

    def test_all_fields_with_good_data(self):
        """Valid node values pass through cleaning with dates parsed and the
        body converted into a single rich_text block."""
        wordpress_item = WordpressItem(self.good_node, self.logger)
        title = wordpress_item.cleaned_data["title"]
        slug = wordpress_item.cleaned_data["slug"]
        first_published_at = wordpress_item.cleaned_data["first_published_at"]
        last_published_at = wordpress_item.cleaned_data["last_published_at"]
        latest_revision_created_at = wordpress_item.cleaned_data[
            "latest_revision_created_at"
        ]
        body = wordpress_item.cleaned_data["body"]
        wp_post_id = wordpress_item.cleaned_data["wp_post_id"]
        wp_post_type = wordpress_item.cleaned_data["wp_post_type"]
        wp_link = wordpress_item.cleaned_data["wp_link"]
        wp_raw_content = wordpress_item.debug_content["filter_linebreaks_wp"]
        wp_processed_content = wordpress_item.debug_content[
            "filter_transform_inline_styles"
        ]
        wp_block_json = wordpress_item.debug_content["block_json"]
        self.assertEqual(title, "Page Title")
        self.assertEqual(slug, "page-title")
        self.assertIsInstance(first_published_at, datetime)
        self.assertIsInstance(last_published_at, datetime)
        self.assertIsInstance(latest_revision_created_at, datetime)
        self.assertIsInstance(json.dumps(body), str)
        self.assertEqual(wp_post_id, 1000)
        self.assertEqual(wp_post_type, "post")
        self.assertEqual(wp_link, "http://www.example.com")
        self.assertIsInstance(wp_raw_content, str)
        self.assertIsInstance(wp_processed_content, str)
        self.assertIsInstance(wp_block_json, list)
        # Fixed: assertTrue(len(...), 1) always passed because the second
        # argument of assertTrue is the failure *message*, not an expected
        # value. The body is only consecutive paragraphs, so exactly one
        # (rich_text) block must be produced.
        self.assertEqual(len(wp_block_json), 1)

    def test_cleaned_fields(self):
        """Blank slug/link and zero dates are replaced with usable defaults."""
        wordpress_item = WordpressItem(self.bad_node, self.logger)
        slug = wordpress_item.cleaned_data["slug"]
        first_published_at = wordpress_item.cleaned_data["first_published_at"]
        last_published_at = wordpress_item.cleaned_data["last_published_at"]
        latest_revision_created_at = wordpress_item.cleaned_data[
            "latest_revision_created_at"
        ]
        wp_link = wordpress_item.cleaned_data["wp_link"]
        # Slug falls back to a slugified title; dates fall back to real datetimes.
        self.assertEqual(slug, "page-title")
        self.assertIsInstance(first_published_at, datetime)
        self.assertIsInstance(last_published_at, datetime)
        self.assertIsInstance(latest_revision_created_at, datetime)
        self.assertEqual(wp_link, "")
@override_settings(
    WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN="http://localhost:8000",
    WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_ENABLED=True,
    WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_MODEL="example.models.Category",
)  # testing requires a live domain for requests to use, this is something I need to change before package release
# mocking of somesort, using localhost:8000 for now
class WordpressItemImportTests(TestCase):
    """End-to-end import with the category plugin enabled: runs the importer
    against the raw_xml fixture and checks the resulting Category snippets."""
    from example.models import Category

    fixtures = [
        f"{FIXTURES_PATH}/dump.json",
    ]

    def setUp(self):
        # Run a full import; each test then inspects the created pages.
        self.importer = WordpressImporter(f"{FIXTURES_PATH}/raw_xml.xml")
        self.logger = Logger(LOG_DIR)
        self.importer.run(
            logger=self.logger,
            app_for_pages=IMPORTER_RUN_PARAMS_TEST["app_for_pages"],
            model_for_pages=IMPORTER_RUN_PARAMS_TEST["model_for_pages"],
            parent_id=IMPORTER_RUN_PARAMS_TEST["parent_id"],
            page_types=IMPORTER_RUN_PARAMS_TEST["page_types"],
            page_statuses=IMPORTER_RUN_PARAMS_TEST["page_statuses"],
        )
        self.parent_page = Page.objects.get(id=IMPORTER_RUN_PARAMS_TEST["parent_id"])
        self.imported_pages = self.parent_page.get_children().all()

    def test_category_snippets_are_saved(self):
        snippets = Category.objects.all()
        self.assertEqual(len(snippets), 4)

    def test_page_one_has_categories(self):
        page_one = self.imported_pages.get(title="Item one title")
        categories = page_one.specific.categories.all()
        self.assertEqual(2, categories.count())
        self.assertEqual(categories[0].name, "Blogging")
        self.assertEqual(categories[1].name, "Life")

    def test_page_two_has_categories(self):
        page_two = self.imported_pages.get(title="Item two title")
        categories = page_two.specific.categories.all()
        self.assertEqual(3, categories.count())
        self.assertEqual(categories[0].name, "Blogging")
        self.assertEqual(categories[1].name, "Cars")
        self.assertEqual(categories[2].name, "Computing")

    def test_short_category_is_not_imported(self):
        # Single-character category names are discarded by the plugin.
        page_one = self.imported_pages.get(title="Item one title")
        categories = [category.name for category in page_one.specific.categories.all()]
        self.assertNotIn("A", categories)

    def test_categories_have_no_duplicate_entries(self):
        categories = [category.name for category in Category.objects.all()]
        duplicates = [
            k for k, v in Counter(categories).items() if v > 1
        ]  # duplicates will be empty if no duplicate category names exist
        self.assertEqual(len(duplicates), 0)
@override_settings(
    WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN="http://localhost:8000",
    WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_ENABLED=True,
    WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_MODEL="example.models.Category",
)  # testing requires a live domain for requests to use, this is something I need to change before package release
# mocking of somesort, using localhost:8000 for now
class WordpressItemImportTestsNoCategories(TestCase):
    """Import run restricted to items that carry no categories: the plugin is
    enabled but no Category snippets should be created or attached."""

    # NOTE(review): Category is also imported at module level; this class-body
    # import looks redundant — confirm before removing.
    from example.models import Category

    fixtures = [
        f"{FIXTURES_PATH}/dump.json",
    ]

    def setUp(self):
        self.importer = WordpressImporter(f"{FIXTURES_PATH}/raw_xml.xml")
        self.logger = Logger(LOG_DIR)
        # Non-standard page type/status values select the fixture scenario
        # without categories (contrast with IMPORTER_RUN_PARAMS_TEST above).
        self.importer.run(
            logger=self.logger,
            app_for_pages=IMPORTER_RUN_PARAMS_TEST["app_for_pages"],
            model_for_pages=IMPORTER_RUN_PARAMS_TEST["model_for_pages"],
            parent_id=IMPORTER_RUN_PARAMS_TEST["parent_id"],
            page_types=["hasnocategories"],
            page_statuses=["hasnocategories"],
        )
        self.parent_page = Page.objects.get(id=IMPORTER_RUN_PARAMS_TEST["parent_id"])
        self.imported_pages = self.parent_page.get_children().all()

    def test_page_has_no_categories(self):
        page = self.imported_pages.first()
        categories = page.specific.categories.all()
        self.assertEqual(0, categories.count())

    def test_categories_count_is_zero(self):
        count = Category.objects.count()
        self.assertEqual(count, 0)
# Narrower run parameters than IMPORTER_RUN_PARAMS_TEST above: published
# posts only. Not referenced within this section of the file — presumably
# consumed by tests elsewhere in the module; confirm before removing.
IMPORTER_RUN_PARAMS_TEST_OVERRIDE_1 = {
    "app_for_pages": "example",
    "model_for_pages": "TestPage",
    "parent_id": "2",
    "page_types": ["post"],
    "page_statuses": ["publish"],
}
@override_settings(
    WAGTAIL_WORDPRESS_IMPORT_YOAST_PLUGIN_ENABLED=True,
)
class WordpressImporterTestsYoastMetaDescriptions(TestCase):
    """
    This tests when a wp:postmeta for none single or multiple keys in the XML file.
    If the meta key for yoast is not present the <description></description> content is returned.
    """

    fixtures = [
        f"{FIXTURES_PATH}/dump.json",
    ]

    def setUp(self):
        self.logger = Logger("fakedir")
        # Parse each <item> element of the fixture into a plain dict.
        # Use a context manager so the fixture file handle is always closed
        # (the previous version leaked it).
        self.items_dict = []
        with open(f"{FIXTURES_PATH}/post_meta.xml", "rb") as xml_file:
            xml_doc = pulldom.parse(xml_file)
            for event, node in xml_doc:
                if event == pulldom.START_ELEMENT and node.tagName == "item":
                    xml_doc.expandNode(node)
                    self.items_dict.append(node_to_dict(node))

    def test_items_dict_0(self):
        # self.items_dict[0] = the single item wp:post_meta without yoast
        wordpress_item = WordpressItem(self.items_dict[0], self.logger)
        self.assertEqual(
            wordpress_item.get_yoast_description_value(),
            "This page has a default description",
        )

    def test_items_dict_1(self):
        # self.items_dict[1] = the multiple item wp:post_meta (no yoast key)
        wordpress_item = WordpressItem(self.items_dict[1], self.logger)
        self.assertEqual(
            wordpress_item.get_yoast_description_value(),
            "This page has a default description",
        )

    def test_items_dict_2(self):
        # self.items_dict[2] = the single item wp:post_meta with yoast
        wordpress_item = WordpressItem(self.items_dict[2], self.logger)
        self.assertEqual(
            wordpress_item.get_yoast_description_value(),
            "This is a yoast metadesc!",
        )

    def test_items_dict_3(self):
        # self.items_dict[3] = the multiple item wp:post_meta with yoast
        wordpress_item = WordpressItem(self.items_dict[3], self.logger)
        self.assertEqual(
            wordpress_item.get_yoast_description_value(),
            "This is a yoast metadesc!",
        )

    def test_items_dict_4(self):
        # self.items_dict[4] = item without a yoast meta description, so the
        # default <description> content is returned. (The original comment
        # wrongly referred to index 3.)
        wordpress_item = WordpressItem(self.items_dict[4], self.logger)
        self.assertEqual(
            wordpress_item.get_yoast_description_value(),
            "This page has a default description",
        )
class WordpressImporterTestsCleanWpPostMeta(TestCase):
    """
    This tests the wp_post_meta field contents after cleaning in
    WordpressItem().clean_wp_post_meta()
    """

    fixtures = [
        f"{FIXTURES_PATH}/dump.json",
    ]

    def setUp(self):
        self.logger = Logger("fakedir")
        # Parse each <item> element of the fixture into a plain dict.
        # Use a context manager so the fixture file handle is always closed
        # (the previous version leaked it).
        self.items_dict = []
        with open(f"{FIXTURES_PATH}/post_meta.xml", "rb") as xml_file:
            xml_doc = pulldom.parse(xml_file)
            for event, node in xml_doc:
                if event == pulldom.START_ELEMENT and node.tagName == "item":
                    xml_doc.expandNode(node)
                    self.items_dict.append(node_to_dict(node))

    def test_items_dict_0(self):
        # self.items_dict[0] = the single item wp:post_meta without yoast
        wordpress_item = WordpressItem(self.items_dict[0], self.logger)
        thumbnail_id = wordpress_item.clean_wp_post_meta()["thumbnail_id"]
        self.assertEqual(thumbnail_id, 43124)

    def test_items_dict_1(self):
        # self.items_dict[1] = the multiple item wp:post_meta
        wordpress_item = WordpressItem(self.items_dict[1], self.logger)
        post_meta = wordpress_item.clean_wp_post_meta()
        self.assertEqual(post_meta["facebook_shares"], 100)
        self.assertEqual(post_meta["pinterest_shares"], 200)
        self.assertEqual(post_meta["twitter_shares"], 300)

    def test_items_dict_2(self):
        # self.items_dict[2] = the single item wp:post_meta with yoast
        wordpress_item = WordpressItem(self.items_dict[2], self.logger)
        post_meta = wordpress_item.clean_wp_post_meta()
        self.assertEqual(post_meta["yoast_wpseo_metadesc"], "This is a yoast metadesc!")

    def test_items_dict_3(self):
        # self.items_dict[3] = the multiple item wp:post_meta with yoast
        wordpress_item = WordpressItem(self.items_dict[3], self.logger)
        post_meta = wordpress_item.clean_wp_post_meta()
        self.assertEqual(post_meta["facebook_shares"], 10)
        self.assertEqual(post_meta["pinterest_shares"], 20)
        self.assertEqual(post_meta["twitter_shares"], 30)
        self.assertEqual(post_meta["yoast_wpseo_metadesc"], "This is a yoast metadesc!")

    def test_items_dict_4(self):
        # self.items_dict[4] = has no wp:post_meta items
        wordpress_item = WordpressItem(self.items_dict[4], self.logger)
        with self.assertRaises(KeyError):
            wordpress_item.clean_wp_post_meta()["wp:postmeta"]

    def test_items_dict_1_excluded_keys(self):
        wordpress_item = WordpressItem(self.items_dict[1], self.logger)
        cleaned_postmeta = wordpress_item.clean_wp_post_meta()
        # Keys that clean_wp_post_meta() must strip or rename away; a loop
        # with subTest replaces the five copy-pasted assertRaises blocks so
        # every missing key is reported, not just the first failure.
        for key in (
            "wp:postmeta",
            "wp_post_meta",
            "content:encoded",
            "dc:creator",
            "wp:post_id",
        ):
            with self.subTest(key=key):
                with self.assertRaises(KeyError):
                    cleaned_postmeta[key]

    def test_items_dict_1_included_keys(self):
        wordpress_item = WordpressItem(self.items_dict[1], self.logger)
        cleaned_postmeta = wordpress_item.clean_wp_post_meta()
        # assertIn gives clearer failure output than assertTrue("x" in d).
        for key in (
            "title",
            "dc_creator",
            "guid",
            "description",
            "wp_post_id",
            "wp_post_date",
            "category",
            "facebook_shares",
            "pinterest_shares",
            "twitter_shares",
        ):
            with self.subTest(key=key):
                self.assertIn(key, cleaned_postmeta)
class TestWordpressItemPrefilterConfig(TestCase):
    """With no settings override, prefilter_content applies the default
    pre-filter chain to the raw body."""

    def test_prefilter_content_default(self):
        # Plain text run through the default pre-filters comes back wrapped
        # in paragraph markup with a trailing newline.
        item = WordpressItem({"content:encoded": "foo bar baz"}, "")
        filtered = item.prefilter_content(item.raw_body)
        self.assertEqual(filtered, "<p>foo bar baz</p>\n")
class TestWordpressPrefilterDefaults(TestCase):
    """Pin the type, length, identity and order of the default pre-filter
    chain (DEFAULT_PREFILTERS)."""

    def test_default_prefilters(self):
        self.assertIsInstance(DEFAULT_PREFILTERS, list)
        # Bug fix: the original used assertTrue(len(DEFAULT_PREFILTERS), 4),
        # which only checks that the length is truthy — the 4 was silently
        # treated as the failure message, so the length was never verified.
        self.assertEqual(len(DEFAULT_PREFILTERS), 4)
        expected_functions = [
            "wagtail_wordpress_import.prefilters.linebreaks_wp",
            "wagtail_wordpress_import.prefilters.transform_shortcodes",
            "wagtail_wordpress_import.prefilters.transform_inline_styles",
            "wagtail_wordpress_import.prefilters.bleach_clean",
        ]
        for index, dotted_path in enumerate(expected_functions):
            self.assertEqual(DEFAULT_PREFILTERS[index]["FUNCTION"], dotted_path)
def foo_filter(content, options):
    """Identity pre-filter used by the override tests.

    Passes both arguments straight through as a ``(content, options)``
    tuple so tests can observe exactly what the filter machinery supplied.
    """
    passthrough = (content, options)
    return passthrough
def transform_foo(soup, tag):
    """Replace *tag* with a new ``<foo>`` element carrying the same string.

    Used by the override tests as a custom TRANSFORM_STYLES_MAPPING target:
    the test asserts the transformed output contains ``<foo>...</foo>``.
    """
    replacement = soup.new_tag("foo")
    replacement.string = tag.string
    tag.replace_with(replacement)
class TestWordpressItemPrefilterOverride(TestCase):
    """Test developers' ability to edit settings.WAGTAIL_WORDPRESS_IMPORT_PREFILTERS"""

    @override_settings(WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[])
    def test_prefilter_content_no_filters(self):
        """Remove all pre-filters

        The expected output is the same as the input because there are no prefilters to
        apply to the content
        """
        node = {"content:encoded": "foo bar baz"}
        wordpress_item = WordpressItem(node, "")
        output = wordpress_item.prefilter_content(wordpress_item.raw_body)
        self.assertEqual(output, "foo bar baz")

    @override_settings(
        WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
            {
                "FUNCTION": "wagtail_wordpress_import.test.tests.test_wordpress_item.foo_filter"
            }
        ]
    )
    def test_custom_provided_prefilter(self):
        """Provide a custom pre-filter

        The expected output is the same as the input because the applied filters do
        nothing and return the same value.
        """
        node = {"content:encoded": "foo bar baz"}
        wordpress_item = WordpressItem(node, "")
        # foo_filter returns (content, options); with no OPTIONS configured
        # the options element is None.
        output = wordpress_item.prefilter_content(wordpress_item.raw_body)
        self.assertEqual(output[0], "foo bar baz")
        self.assertEqual(output[1], None)

    @override_settings(
        WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
            {
                "FUNCTION": "wagtail_wordpress_import.test.tests.test_wordpress_item.foo_filter",
                "OPTIONS": {"foo": "bar"},
            }
        ]
    )
    def test_custom_provided_prefilter_with_options(self):
        """Provide a custom pre-filter with options

        The expected output is the same as the input because the applied filters do
        nothing and return the same value.
        """
        node = {"content:encoded": "foo bar baz"}
        wordpress_item = WordpressItem(node, "")
        # The configured OPTIONS dict is passed through to foo_filter and
        # returned as the second tuple element.
        output = wordpress_item.prefilter_content(wordpress_item.raw_body)
        self.assertEqual(output[0], "foo bar baz")
        self.assertEqual(output[1], {"foo": "bar"})

    @override_settings(
        WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
            {
                "FUNCTION": "wagtail_wordpress_import.prefilters.transform_inline_styles",
                "OPTIONS": {
                    "TRANSFORM_STYLES_MAPPING": [
                        (
                            re.compile(r"font-weight:bold", re.IGNORECASE),
                            "wagtail_wordpress_import.test.tests.test_wordpress_item.transform_foo",
                        )
                    ],
                },
            },
        ]
    )
    def test_transform_styles_filter_add_options(self):
        """Test that a developer can pass custom OPTIONS to transform_inline_styles.

        Here WAGTAIL_WORDPRESS_IMPORT_PREFILTERS contains only config for
        transform_inline_styles, so that other prefilters are not run, and it's easier
        to test the output.
        """
        node = {"content:encoded": '<p style="font-weight: bold">foo bar baz</p>'}
        wordpress_item = WordpressItem(node, "")
        # transform_foo (above) replaces the styled <p> with a <foo> element.
        output = wordpress_item.prefilter_content(wordpress_item.raw_body)
        self.assertEqual(output.strip(), "<foo>foo bar baz</foo>")
| 40.256148 | 114 | 0.67269 | import json
import os
import re
import unittest
from collections import Counter
from datetime import datetime
from unittest import mock
from xml.dom import pulldom
from django.test import TestCase, override_settings
from wagtail.core.models import Page
from example.models import Category
from wagtail_wordpress_import.functions import node_to_dict
from wagtail_wordpress_import.importers.wordpress import (
DEFAULT_PREFILTERS,
WordpressImporter,
WordpressItem,
)
from wagtail_wordpress_import.logger import Logger
BASE_PATH = os.path.dirname(os.path.dirname(__file__))
FIXTURES_PATH = BASE_PATH + "/fixtures"
LOG_DIR = "fakedir"
IMPORTER_RUN_PARAMS_TEST = {
"app_for_pages": "example",
"model_for_pages": "TestPage",
"parent_id": "2",
"page_types": ["post", "page"],
"page_statuses": ["publish", "draft"],
}
class WordpressItemTests(TestCase):
def setUp(self):
self.logger = Logger("fakedir")
body_html = """<p>Dummmy text</p><p>Dummmy text</p><p>Dummmy text</p>"""
self.good_node = {
"title": "Page Title",
"wp:post_name": "page-title",
"wp:post_date_gmt": "2017-03-12 17:53:57",
"wp:post_modified_gmt": "2018-12-04 11:49:24",
"content:encoded": body_html,
"wp:post_id": "1000",
"wp:post_type": "post",
"link": "http://www.example.com",
}
self.bad_node = {
"title": "Page Title",
"wp:post_name": "",
"wp:post_date_gmt": "0000-00-00 00:00:00",
"wp:post_modified_gmt": "0000-00-00 00:00:00",
"content:encoded": body_html,
"wp:post_id": "1000",
"wp:post_type": "post",
"link": "",
}
def test_all_fields_with_good_data(self):
wordpress_item = WordpressItem(self.good_node, self.logger)
title = wordpress_item.cleaned_data["title"]
slug = wordpress_item.cleaned_data["slug"]
first_published_at = wordpress_item.cleaned_data["first_published_at"]
last_published_at = wordpress_item.cleaned_data["last_published_at"]
latest_revision_created_at = wordpress_item.cleaned_data[
"latest_revision_created_at"
]
body = wordpress_item.cleaned_data["body"]
wp_post_id = wordpress_item.cleaned_data["wp_post_id"]
wp_post_type = wordpress_item.cleaned_data["wp_post_type"]
wp_link = wordpress_item.cleaned_data["wp_link"]
wp_raw_content = wordpress_item.debug_content["filter_linebreaks_wp"]
wp_processed_content = wordpress_item.debug_content[
"filter_transform_inline_styles"
]
wp_block_json = wordpress_item.debug_content["block_json"]
self.assertEqual(title, "Page Title")
self.assertEqual(slug, "page-title")
self.assertIsInstance(first_published_at, datetime)
self.assertIsInstance(last_published_at, datetime)
self.assertIsInstance(latest_revision_created_at, datetime)
self.assertIsInstance(json.dumps(body), str)
self.assertEqual(wp_post_id, 1000)
self.assertEqual(wp_post_type, "post")
self.assertEqual(wp_link, "http://www.example.com")
self.assertIsInstance(wp_raw_content, str)
self.assertIsInstance(wp_processed_content, str)
self.assertIsInstance(wp_block_json, list)
self.assertTrue(
len(wp_block_json), 1
)
def test_cleaned_fields(self):
wordpress_item = WordpressItem(self.bad_node, self.logger)
slug = wordpress_item.cleaned_data["slug"]
first_published_at = wordpress_item.cleaned_data["first_published_at"]
last_published_at = wordpress_item.cleaned_data["last_published_at"]
latest_revision_created_at = wordpress_item.cleaned_data[
"latest_revision_created_at"
]
wp_link = wordpress_item.cleaned_data["wp_link"]
self.assertEqual(slug, "page-title")
self.assertIsInstance(first_published_at, datetime)
self.assertIsInstance(last_published_at, datetime)
self.assertIsInstance(latest_revision_created_at, datetime)
self.assertEqual(wp_link, "")
@override_settings(
WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN="http://localhost:8000",
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_ENABLED=True,
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_MODEL="example.models.Category",
)
class WordpressItemImportTests(TestCase):
from example.models import Category
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.importer = WordpressImporter(f"{FIXTURES_PATH}/raw_xml.xml")
self.logger = Logger(LOG_DIR)
self.importer.run(
logger=self.logger,
app_for_pages=IMPORTER_RUN_PARAMS_TEST["app_for_pages"],
model_for_pages=IMPORTER_RUN_PARAMS_TEST["model_for_pages"],
parent_id=IMPORTER_RUN_PARAMS_TEST["parent_id"],
page_types=IMPORTER_RUN_PARAMS_TEST["page_types"],
page_statuses=IMPORTER_RUN_PARAMS_TEST["page_statuses"],
)
self.parent_page = Page.objects.get(id=IMPORTER_RUN_PARAMS_TEST["parent_id"])
self.imported_pages = self.parent_page.get_children().all()
def test_category_snippets_are_saved(self):
snippets = Category.objects.all()
self.assertEqual(len(snippets), 4)
def test_page_one_has_categories(self):
page_one = self.imported_pages.get(title="Item one title")
categories = page_one.specific.categories.all()
self.assertEqual(2, categories.count())
self.assertEqual(categories[0].name, "Blogging")
self.assertEqual(categories[1].name, "Life")
def test_page_two_has_categories(self):
page_two = self.imported_pages.get(title="Item two title")
categories = page_two.specific.categories.all()
self.assertEqual(3, categories.count())
self.assertEqual(categories[0].name, "Blogging")
self.assertEqual(categories[1].name, "Cars")
self.assertEqual(categories[2].name, "Computing")
def test_short_category_is_not_imported(self):
page_one = self.imported_pages.get(title="Item one title")
categories = [category.name for category in page_one.specific.categories.all()]
self.assertNotIn("A", categories)
def test_categories_have_no_duplicate_entries(self):
categories = [category.name for category in Category.objects.all()]
duplicates = [
k for k, v in Counter(categories).items() if v > 1
]
self.assertEqual(len(duplicates), 0)
@override_settings(
WAGTAIL_WORDPRESS_IMPORTER_SOURCE_DOMAIN="http://localhost:8000",
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_ENABLED=True,
WAGTAIL_WORDPRESS_IMPORT_CATEGORY_PLUGIN_MODEL="example.models.Category",
)
class WordpressItemImportTestsNoCategories(TestCase):
from example.models import Category
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.importer = WordpressImporter(f"{FIXTURES_PATH}/raw_xml.xml")
self.logger = Logger(LOG_DIR)
self.importer.run(
logger=self.logger,
app_for_pages=IMPORTER_RUN_PARAMS_TEST["app_for_pages"],
model_for_pages=IMPORTER_RUN_PARAMS_TEST["model_for_pages"],
parent_id=IMPORTER_RUN_PARAMS_TEST["parent_id"],
page_types=["hasnocategories"],
page_statuses=["hasnocategories"],
)
self.parent_page = Page.objects.get(id=IMPORTER_RUN_PARAMS_TEST["parent_id"])
self.imported_pages = self.parent_page.get_children().all()
def test_page_has_no_categories(self):
page = self.imported_pages.first()
categories = page.specific.categories.all()
self.assertEqual(0, categories.count())
def test_categories_count_is_zero(self):
count = Category.objects.count()
self.assertEqual(count, 0)
IMPORTER_RUN_PARAMS_TEST_OVERRIDE_1 = {
"app_for_pages": "example",
"model_for_pages": "TestPage",
"parent_id": "2",
"page_types": ["post"],
"page_statuses": ["publish"],
}
@override_settings(
WAGTAIL_WORDPRESS_IMPORT_YOAST_PLUGIN_ENABLED=True,
)
class WordpressImporterTestsYoastMetaDescriptions(TestCase):
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.logger = Logger("fakedir")
xml_file = open(f"{FIXTURES_PATH}/post_meta.xml", "rb")
xml_doc = pulldom.parse(xml_file)
self.items_dict = []
for event, node in xml_doc:
if event == pulldom.START_ELEMENT and node.tagName == "item":
xml_doc.expandNode(node)
self.items_dict.append(node_to_dict(node))
def test_items_dict_0(self):
wordpress_item = WordpressItem(self.items_dict[0], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This page has a default description",
)
def test_items_dict_1(self):
wordpress_item = WordpressItem(self.items_dict[1], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This page has a default description",
)
def test_items_dict_2(self):
wordpress_item = WordpressItem(self.items_dict[2], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This is a yoast metadesc!",
)
def test_items_dict_3(self):
wordpress_item = WordpressItem(self.items_dict[3], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This is a yoast metadesc!",
)
def test_items_dict_4(self):
wordpress_item = WordpressItem(self.items_dict[4], self.logger)
self.assertEqual(
wordpress_item.get_yoast_description_value(),
"This page has a default description",
)
class WordpressImporterTestsCleanWpPostMeta(TestCase):
fixtures = [
f"{FIXTURES_PATH}/dump.json",
]
def setUp(self):
self.logger = Logger("fakedir")
xml_file = open(f"{FIXTURES_PATH}/post_meta.xml", "rb")
xml_doc = pulldom.parse(xml_file)
self.items_dict = []
for event, node in xml_doc:
if event == pulldom.START_ELEMENT and node.tagName == "item":
xml_doc.expandNode(node)
self.items_dict.append(node_to_dict(node))
def test_items_dict_0(self):
wordpress_item = WordpressItem(self.items_dict[0], self.logger)
thumbnail_id = wordpress_item.clean_wp_post_meta()["thumbnail_id"]
self.assertEqual(thumbnail_id, 43124)
def test_items_dict_1(self):
wordpress_item = WordpressItem(self.items_dict[1], self.logger)
post_meta = wordpress_item.clean_wp_post_meta()
self.assertEqual(post_meta["facebook_shares"], 100)
self.assertEqual(post_meta["pinterest_shares"], 200)
self.assertEqual(post_meta["twitter_shares"], 300)
def test_items_dict_2(self):
wordpress_item = WordpressItem(self.items_dict[2], self.logger)
post_meta = wordpress_item.clean_wp_post_meta()
self.assertEqual(post_meta["yoast_wpseo_metadesc"], "This is a yoast metadesc!")
def test_items_dict_3(self):
wordpress_item = WordpressItem(self.items_dict[3], self.logger)
post_meta = wordpress_item.clean_wp_post_meta()
self.assertEqual(post_meta["facebook_shares"], 10)
self.assertEqual(post_meta["pinterest_shares"], 20)
self.assertEqual(post_meta["twitter_shares"], 30)
self.assertEqual(post_meta["yoast_wpseo_metadesc"], "This is a yoast metadesc!")
def test_items_dict_4(self):
wordpress_item = WordpressItem(self.items_dict[4], self.logger)
with self.assertRaises(KeyError):
wordpress_item.clean_wp_post_meta()["wp:postmeta"]
def test_items_dict_1_excluded_keys(self):
wordpress_item = WordpressItem(self.items_dict[1], self.logger)
cleaned_postmeta = wordpress_item.clean_wp_post_meta()
with self.assertRaises(KeyError):
cleaned_postmeta["wp:postmeta"]
with self.assertRaises(KeyError):
cleaned_postmeta["wp_post_meta"]
with self.assertRaises(KeyError):
cleaned_postmeta["content:encoded"]
with self.assertRaises(KeyError):
cleaned_postmeta["dc:creator"]
with self.assertRaises(KeyError):
cleaned_postmeta["wp:post_id"]
def test_items_dict_1_included_keys(self):
wordpress_item = WordpressItem(self.items_dict[1], self.logger)
cleaned_postmeta = wordpress_item.clean_wp_post_meta()
self.assertTrue("title" in cleaned_postmeta)
self.assertTrue("dc_creator" in cleaned_postmeta)
self.assertTrue("guid" in cleaned_postmeta)
self.assertTrue("description" in cleaned_postmeta)
self.assertTrue("wp_post_id" in cleaned_postmeta)
self.assertTrue("wp_post_date" in cleaned_postmeta)
self.assertTrue("category" in cleaned_postmeta)
self.assertTrue("facebook_shares" in cleaned_postmeta)
self.assertTrue("pinterest_shares" in cleaned_postmeta)
self.assertTrue("twitter_shares" in cleaned_postmeta)
class TestWordpressItemPrefilterConfig(TestCase):
def test_prefilter_content_default(self):
node = {"content:encoded": "foo bar baz"}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output, "<p>foo bar baz</p>\n")
class TestWordpressPrefilterDefaults(TestCase):
def test_default_prefilters(self):
self.assertIsInstance(DEFAULT_PREFILTERS, list)
self.assertTrue(len(DEFAULT_PREFILTERS), 4)
self.assertEqual(
DEFAULT_PREFILTERS[0]["FUNCTION"],
"wagtail_wordpress_import.prefilters.linebreaks_wp",
)
self.assertEqual(
DEFAULT_PREFILTERS[1]["FUNCTION"],
"wagtail_wordpress_import.prefilters.transform_shortcodes",
)
self.assertEqual(
DEFAULT_PREFILTERS[2]["FUNCTION"],
"wagtail_wordpress_import.prefilters.transform_inline_styles",
)
self.assertEqual(
DEFAULT_PREFILTERS[3]["FUNCTION"],
"wagtail_wordpress_import.prefilters.bleach_clean",
)
def foo_filter(content, options):
return content, options
def transform_foo(soup, tag):
new_tag = soup.new_tag("foo")
new_tag.string = tag.string
tag.replace_with(new_tag)
class TestWordpressItemPrefilterOverride(TestCase):
@override_settings(WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[])
def test_prefilter_content_no_filters(self):
node = {"content:encoded": "foo bar baz"}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output, "foo bar baz")
@override_settings(
WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
{
"FUNCTION": "wagtail_wordpress_import.test.tests.test_wordpress_item.foo_filter"
}
]
)
def test_custom_provided_prefilter(self):
node = {"content:encoded": "foo bar baz"}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output[0], "foo bar baz")
self.assertEqual(output[1], None)
@override_settings(
WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
{
"FUNCTION": "wagtail_wordpress_import.test.tests.test_wordpress_item.foo_filter",
"OPTIONS": {"foo": "bar"},
}
]
)
def test_custom_provided_prefilter_with_options(self):
node = {"content:encoded": "foo bar baz"}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output[0], "foo bar baz")
self.assertEqual(output[1], {"foo": "bar"})
@override_settings(
WAGTAIL_WORDPRESS_IMPORT_PREFILTERS=[
{
"FUNCTION": "wagtail_wordpress_import.prefilters.transform_inline_styles",
"OPTIONS": {
"TRANSFORM_STYLES_MAPPING": [
(
re.compile(r"font-weight:bold", re.IGNORECASE),
"wagtail_wordpress_import.test.tests.test_wordpress_item.transform_foo",
)
],
},
},
]
)
def test_transform_styles_filter_add_options(self):
node = {"content:encoded": '<p style="font-weight: bold">foo bar baz</p>'}
wordpress_item = WordpressItem(node, "")
output = wordpress_item.prefilter_content(wordpress_item.raw_body)
self.assertEqual(output.strip(), "<foo>foo bar baz</foo>")
| true | true |
f72b287c0755998110f1fa14c9a7bd080f42dee2 | 1,251 | py | Python | azure/mgmt/network/v2016_09_01/models/express_route_circuits_routes_table_summary_list_result.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 2 | 2020-07-29T14:22:17.000Z | 2020-11-06T18:47:40.000Z | azure/mgmt/network/v2016_09_01/models/express_route_circuits_routes_table_summary_list_result.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2016-08-01T07:37:04.000Z | 2016-08-01T07:37:04.000Z | azure/mgmt/network/v2016_09_01/models/express_route_circuits_routes_table_summary_list_result.py | EnjoyLifeFund/py36pkgs | 0ac677fbbfa7b6d8c527fe2c759ba05117b07fd2 | [
"MIT",
"BSD-2-Clause",
"BSD-3-Clause"
] | 1 | 2020-12-12T21:04:41.000Z | 2020-12-12T21:04:41.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableSummaryListResult(Model):
    """Response for ListRoutesTable associated with the Express Route Circuits
    API.

    :param value: A list of the routes table.
    :type value: list of :class:`ExpressRouteCircuitRoutesTableSummary
     <azure.mgmt.network.v2016_09_01.models.ExpressRouteCircuitRoutesTableSummary>`
    :param next_link: The URL to get the next set of results.
    :type next_link: str
    """

    # Attribute -> wire-format mapping consumed by msrest's Model
    # (de)serialization machinery (note the camelCase JSON key for next_link).
    _attribute_map = {
        'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTableSummary]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }

    def __init__(self, value=None, next_link=None):
        self.value = value
        self.next_link = next_link
| 36.794118 | 85 | 0.631495 |
from msrest.serialization import Model
class ExpressRouteCircuitsRoutesTableSummaryListResult(Model):
_attribute_map = {
'value': {'key': 'value', 'type': '[ExpressRouteCircuitRoutesTableSummary]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(self, value=None, next_link=None):
self.value = value
self.next_link = next_link
| true | true |
f72b28a897f88f7a2835dba9ffb1efe2af6ae2d4 | 4,626 | py | Python | purity_fb/purity_fb_1dot8dot1/models/alert_watcher_test_response.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 5 | 2017-09-08T20:47:22.000Z | 2021-06-29T02:11:05.000Z | purity_fb/purity_fb_1dot8dot1/models/alert_watcher_test_response.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 16 | 2017-11-27T20:57:48.000Z | 2021-11-23T18:46:43.000Z | purity_fb/purity_fb_1dot8dot1/models/alert_watcher_test_response.py | tlewis-ps/purity_fb_python_client | 652835cbd485c95a86da27f8b661679727ec6ea0 | [
"Apache-2.0"
] | 22 | 2017-10-13T15:33:05.000Z | 2021-11-08T19:56:21.000Z | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.8.1 Python SDK
Pure Storage FlashBlade REST 1.8.1 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8.1
Contact: info@purestorage.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AlertWatcherTestResponse(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    #BEGIN_CUSTOM
    # IR-51527: Prevent Pytest from attempting to collect this class based on name.
    __test__ = False
    #END_CUSTOM

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'pagination_info': 'PaginationInfo',
        'items': 'list[AlertWatcherTest]'
    }

    attribute_map = {
        'pagination_info': 'pagination_info',
        'items': 'items'
    }

    def __init__(self, pagination_info=None, items=None):  # noqa: E501
        """AlertWatcherTestResponse - a model defined in Swagger"""  # noqa: E501

        self._pagination_info = None
        self._items = None
        self.discriminator = None

        if pagination_info is not None:
            self.pagination_info = pagination_info
        if items is not None:
            self.items = items

    @property
    def pagination_info(self):
        """Gets the pagination_info of this AlertWatcherTestResponse.  # noqa: E501

        pagination information, only available in GET requests  # noqa: E501

        :return: The pagination_info of this AlertWatcherTestResponse.  # noqa: E501
        :rtype: PaginationInfo
        """
        return self._pagination_info

    @pagination_info.setter
    def pagination_info(self, pagination_info):
        """Sets the pagination_info of this AlertWatcherTestResponse.

        pagination information, only available in GET requests  # noqa: E501

        :param pagination_info: The pagination_info of this AlertWatcherTestResponse.  # noqa: E501
        :type: PaginationInfo
        """
        self._pagination_info = pagination_info

    @property
    def items(self):
        """Gets the items of this AlertWatcherTestResponse.  # noqa: E501

        a list of alert watcher test results  # noqa: E501

        :return: The items of this AlertWatcherTestResponse.  # noqa: E501
        :rtype: list[AlertWatcherTest]
        """
        return self._items

    @items.setter
    def items(self, items):
        """Sets the items of this AlertWatcherTestResponse.

        a list of alert watcher test results  # noqa: E501

        :param items: The items of this AlertWatcherTestResponse.  # noqa: E501
        :type: list[AlertWatcherTest]
        """
        self._items = items

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        # NOTE(review): this generated branch is unreachable here — the class
        # derives from object, not dict. Were it ever taken, self.items() would
        # invoke the `items` property's value rather than dict.items().
        if issubclass(AlertWatcherTestResponse, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, AlertWatcherTestResponse):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 30.635762 | 206 | 0.607436 |
import pprint
import re
import six
class AlertWatcherTestResponse(object):
__test__ = False
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[AlertWatcherTest]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None):
self._pagination_info = None
self._items = None
self.discriminator = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
self._pagination_info = pagination_info
@property
def items(self):
return self._items
@items.setter
def items(self, items):
self._items = items
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AlertWatcherTestResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, AlertWatcherTestResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f72b28d1d6a43d1771d51b5748f4617a33315439 | 7,739 | py | Python | code/searchJeuxDeMots.py | AnthonySigogne/HackatonIWCS2017 | d0683a1c8246b75d110984207ec1f1cee67accef | [
"MIT"
] | 1 | 2017-11-20T17:30:31.000Z | 2017-11-20T17:30:31.000Z | code/searchJeuxDeMots.py | AnthonySigogne/HackatonIWCS2017 | d0683a1c8246b75d110984207ec1f1cee67accef | [
"MIT"
] | null | null | null | code/searchJeuxDeMots.py | AnthonySigogne/HackatonIWCS2017 | d0683a1c8246b75d110984207ec1f1cee67accef | [
"MIT"
] | null | null | null | #!/usr/sfw/bin/python
# -*- coding: utf-8 -*-
#C:\python27\python.exe C:\Dropbox\Work\2012ExpressionsComposees\CreateGraph.py
import sys, os, re, string, time
from math import *
#------------------------------
# Chargement des paramètres
#------------------------------
args={}
i=1;
selectedRelations = {}
selectedRelations[6] = "r_isa"
selectedRelations[9] = "r_has_part"
selectedRelations[16] = "r_instr"
selectedRelations[17] = "r_carac"
selectedRelations[23] = "r_carac-1"
selectedRelations[15] = "r_lieu"
selectedRelations[24] = "r_agent-1"
selectedRelations[26] = "r_patient-1"
selectedRelations[41] = "r_conseq"
selectedRelations[53] = "r_make"
inputFolder = os.path.abspath(os.path.dirname(sys.argv[0]))
# Addess of the tagged text containing (almost) all text files of the Hackathon:
inputTaggedTexts = inputFolder + "\\tagged.txt"
# Address of the JeuxDeMots data file
# huge one :
#inputJeuxDeMots = inputFolder + "\\09032017-LEXICALNET-JEUXDEMOTS-FR-NOHTML.txt";
# big one :
#inputJeuxDeMots = inputFolder + "\\06252017-LEXICALNET-JEUXDEMOTS-FR-NOHTML.txt";
# small one :
inputJeuxDeMots = inputFolder + "\\08152011-LEXICALNET-JEUXDEMOTS-FR-NOHTML.txt";
letters = {}
letters["a"] = 1
letters["b"] = 1
letters["c"] = 1
letters["d"] = 1
letters["e"] = 1
letters["f"] = 1
letters["g"] = 1
letters["h"] = 1
letters["i"] = 1
letters["j"] = 1
letters["k"] = 1
letters["l"] = 1
letters["m"] = 1
letters["n"] = 1
letters["o"] = 1
letters["p"] = 1
letters["q"] = 1
letters["r"] = 1
letters["s"] = 1
letters["t"] = 1
letters["u"] = 1
letters["v"] = 1
letters["w"] = 1
letters["x"] = 1
letters["y"] = 1
letters["z"] = 1
replacements = {}
replacements["æ"] = "ae"
replacements["à"] = "a"
replacements["á"] = "a"
replacements["á"] = "a"
replacements["ã"] = "a"
replacements["ä"] = "a"
replacements["â"] = "a"
replacements["ç"] = "c"
replacements["é"] = "e"
replacements["è"] = "e"
replacements["ë"] = "e"
replacements["ê"] = "e"
replacements["ï"] = "i"
replacements["î"] = "i"
replacements["ì"] = "i"
replacements["ñ"] = "n"
replacements["ô"] = "o"
replacements["ö"] = "o"
replacements["ó"] = "o"
replacements["œ"] = "oe"
replacements["ü"] = "u"
replacements["ù"] = "u"
replacements["ú"] = "u"
def removeAccent(word, replacements):
for letter in replacements:
word = word.replace(letter, replacements[letter])
return word
def readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters):
allWords = {}
i = 0
# Associate all word indices with words in a dictionary
try :
for line in open(inputJeuxDeMots,"r"):
if i % 1000 == 0:
print("ligne "+str(i))
i+=1
# only take words with t=1 (real words)
res = re.search("eid=([0-9]*).n=.(.+)..t=1.w=([0-9]*).*",line)
if res:
id = res.group(1)
word = res.group(2)
# only take words whose first character is a letter
firstLetter = word[0].lower()
weight = int(res.group(3))
if firstLetter in letters or firstLetter in replacements:
allWords[id] = word
except ValueError:
print(str(ValueError))
pass
# Create a dictionary of the neighborhoods of all words according to the relations in selectedRelations
if 0 == 0:
i = 0
nbRelations = 0
neighbors = {}
for line in open(inputJeuxDeMots,"r"):
if i % 1000 == 0:
print("ligne "+str(i))
i+=1
# extract the edges of the graph, including type and weight
res = re.search("rid=([0-9]*).n1=([0-9]*).n2=([0-9]*).t=([0-9]*).w=([0-9]+).*",line)
if res:
try :
id1 = res.group(2)
id2 = res.group(3)
type = int(res.group(4))
weight = int(res.group(5))
edgeInfo = []
edgeInfo.append(type)
edgeInfo.append(weight)
# if the relation has positive weight, is of one of the expected types
# and links two indexed words, we memorize it by saving its weight and type in a dict of dict
if (weight>0) and (type in selectedRelations) and (id1 in allWords) and (id2 in allWords):
firstWord = allWords[id1]
secondWord = allWords[id2]
if firstWord not in neighbors:
neighbors[firstWord] = {}
neighbors[firstWord][secondWord] = edgeInfo
nbRelations += 1
#print(str(nbRelations) + "relations")
except ValueError:
print(str(ValueError) + line)
pass
print(str(nbRelations) + "relations")
# Extract all sentences of the tagged text, then check which words are indexed (themselves or their lemma) in JeuxDeMots
# and are in relation in JeuxDeMots
sentence = []
results = []
sentenceString = ""
for line in open(inputTaggedTexts,"r"):
res = re.search("([^;]+);([^;]+);([^;]+)",line)
if res:
token = res.group(1)
lemma = res.group(2)
pos = res.group(3)
position = []
position.append(token)
position.append(lemma)
# if the sentence is finished:
if token[0] == token[0].upper():
# check for each pair of token if it is in the dict of relations of JeuxDeMots
for loc1 in sentence:
for loc2 in sentence:
if not (loc1 == loc2):
word1 = ""
word2 = ""
if (loc1[0] in neighbors and loc2[0] in neighbors[loc1[0]]):
word1 = loc1[0]
word2 = loc2[0]
if (loc1[1] in neighbors and loc2[0] in neighbors[loc1[1]]):
word1 = loc1[1]
word2 = loc2[0]
if (loc1[0] in neighbors and loc2[1] in neighbors[loc1[0]]):
word1 = loc1[0]
word2 = loc2[1]
if (loc1[1] in neighbors and loc2[1] in neighbors[loc1[1]]):
word1 = loc1[1]
word2 = loc2[1]
if len(word1) > 0:
result = []
#print(word1+" found! ")
result.append(word1)
result.append(word2)
result.append(selectedRelations[neighbors[word1][word2][0]])
result.append(sentenceString)
results.append(result)
sentence = []
sentenceString = ""
if position[0] in neighbors or position[1] in neighbors :
sentence.append(position)
sentenceString += token+" "
outputFile = open(inputTaggedTexts+".output.txt","w")
for result in results:
for element in result:
outputFile.writelines(element+";")
outputFile.writelines("\n")
outputFile.close()
readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters) | 35.663594 | 124 | 0.505492 |
import sys, os, re, string, time
from math import *
args={}
i=1;
selectedRelations = {}
selectedRelations[6] = "r_isa"
selectedRelations[9] = "r_has_part"
selectedRelations[16] = "r_instr"
selectedRelations[17] = "r_carac"
selectedRelations[23] = "r_carac-1"
selectedRelations[15] = "r_lieu"
selectedRelations[24] = "r_agent-1"
selectedRelations[26] = "r_patient-1"
selectedRelations[41] = "r_conseq"
selectedRelations[53] = "r_make"
inputFolder = os.path.abspath(os.path.dirname(sys.argv[0]))
inputTaggedTexts = inputFolder + "\\tagged.txt"
inputJeuxDeMots = inputFolder + "\\08152011-LEXICALNET-JEUXDEMOTS-FR-NOHTML.txt";
letters = {}
letters["a"] = 1
letters["b"] = 1
letters["c"] = 1
letters["d"] = 1
letters["e"] = 1
letters["f"] = 1
letters["g"] = 1
letters["h"] = 1
letters["i"] = 1
letters["j"] = 1
letters["k"] = 1
letters["l"] = 1
letters["m"] = 1
letters["n"] = 1
letters["o"] = 1
letters["p"] = 1
letters["q"] = 1
letters["r"] = 1
letters["s"] = 1
letters["t"] = 1
letters["u"] = 1
letters["v"] = 1
letters["w"] = 1
letters["x"] = 1
letters["y"] = 1
letters["z"] = 1
replacements = {}
replacements["æ"] = "ae"
replacements["à"] = "a"
replacements["á"] = "a"
replacements["á"] = "a"
replacements["ã"] = "a"
replacements["ä"] = "a"
replacements["â"] = "a"
replacements["ç"] = "c"
replacements["é"] = "e"
replacements["è"] = "e"
replacements["ë"] = "e"
replacements["ê"] = "e"
replacements["ï"] = "i"
replacements["î"] = "i"
replacements["ì"] = "i"
replacements["ñ"] = "n"
replacements["ô"] = "o"
replacements["ö"] = "o"
replacements["ó"] = "o"
replacements["œ"] = "oe"
replacements["ü"] = "u"
replacements["ù"] = "u"
replacements["ú"] = "u"
def removeAccent(word, replacements):
for letter in replacements:
word = word.replace(letter, replacements[letter])
return word
def readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters):
allWords = {}
i = 0
try :
for line in open(inputJeuxDeMots,"r"):
if i % 1000 == 0:
print("ligne "+str(i))
i+=1
res = re.search("eid=([0-9]*).n=.(.+)..t=1.w=([0-9]*).*",line)
if res:
id = res.group(1)
word = res.group(2)
firstLetter = word[0].lower()
weight = int(res.group(3))
if firstLetter in letters or firstLetter in replacements:
allWords[id] = word
except ValueError:
print(str(ValueError))
pass
if 0 == 0:
i = 0
nbRelations = 0
neighbors = {}
for line in open(inputJeuxDeMots,"r"):
if i % 1000 == 0:
print("ligne "+str(i))
i+=1
res = re.search("rid=([0-9]*).n1=([0-9]*).n2=([0-9]*).t=([0-9]*).w=([0-9]+).*",line)
if res:
try :
id1 = res.group(2)
id2 = res.group(3)
type = int(res.group(4))
weight = int(res.group(5))
edgeInfo = []
edgeInfo.append(type)
edgeInfo.append(weight)
if (weight>0) and (type in selectedRelations) and (id1 in allWords) and (id2 in allWords):
firstWord = allWords[id1]
secondWord = allWords[id2]
if firstWord not in neighbors:
neighbors[firstWord] = {}
neighbors[firstWord][secondWord] = edgeInfo
nbRelations += 1
except ValueError:
print(str(ValueError) + line)
pass
print(str(nbRelations) + "relations")
sentence = []
results = []
sentenceString = ""
for line in open(inputTaggedTexts,"r"):
res = re.search("([^;]+);([^;]+);([^;]+)",line)
if res:
token = res.group(1)
lemma = res.group(2)
pos = res.group(3)
position = []
position.append(token)
position.append(lemma)
if token[0] == token[0].upper():
for loc1 in sentence:
for loc2 in sentence:
if not (loc1 == loc2):
word1 = ""
word2 = ""
if (loc1[0] in neighbors and loc2[0] in neighbors[loc1[0]]):
word1 = loc1[0]
word2 = loc2[0]
if (loc1[1] in neighbors and loc2[0] in neighbors[loc1[1]]):
word1 = loc1[1]
word2 = loc2[0]
if (loc1[0] in neighbors and loc2[1] in neighbors[loc1[0]]):
word1 = loc1[0]
word2 = loc2[1]
if (loc1[1] in neighbors and loc2[1] in neighbors[loc1[1]]):
word1 = loc1[1]
word2 = loc2[1]
if len(word1) > 0:
result = []
result.append(word1)
result.append(word2)
result.append(selectedRelations[neighbors[word1][word2][0]])
result.append(sentenceString)
results.append(result)
sentence = []
sentenceString = ""
if position[0] in neighbors or position[1] in neighbors :
sentence.append(position)
sentenceString += token+" "
outputFile = open(inputTaggedTexts+".output.txt","w")
for result in results:
for element in result:
outputFile.writelines(element+";")
outputFile.writelines("\n")
outputFile.close()
readFile(inputJeuxDeMots, inputFolder, inputTaggedTexts, replacements, letters) | true | true |
f72b28ec393014292fff2aac3ffa0f3a488e9bda | 170 | py | Python | handlers/sr.py | flaviopicci/xen-backup | 306667f6ce3fd81d98b7a73312e37ad01f91c287 | [
"Apache-2.0"
] | null | null | null | handlers/sr.py | flaviopicci/xen-backup | 306667f6ce3fd81d98b7a73312e37ad01f91c287 | [
"Apache-2.0"
] | null | null | null | handlers/sr.py | flaviopicci/xen-backup | 306667f6ce3fd81d98b7a73312e37ad01f91c287 | [
"Apache-2.0"
] | null | null | null | from handlers.common import Common
class SR(Common):
_type = "SR"
def __init__(self, xapi, ref=None, params=None):
super().__init__(xapi, ref, params)
| 18.888889 | 52 | 0.658824 | from handlers.common import Common
class SR(Common):
_type = "SR"
def __init__(self, xapi, ref=None, params=None):
super().__init__(xapi, ref, params)
| true | true |
f72b296dc9ecbc509d9451f3cf12c463f5785fef | 790 | py | Python | junk/pull_photos.py | simplegeo/betashapes | 25d964c6dc20281b8f4c0b9049cd417af3e21e35 | [
"PostgreSQL",
"Unlicense"
] | 14 | 2015-02-13T16:35:28.000Z | 2021-01-18T04:20:50.000Z | junk/pull_photos.py | simplegeo/betashapes | 25d964c6dc20281b8f4c0b9049cd417af3e21e35 | [
"PostgreSQL",
"Unlicense"
] | null | null | null | junk/pull_photos.py | simplegeo/betashapes | 25d964c6dc20281b8f4c0b9049cd417af3e21e35 | [
"PostgreSQL",
"Unlicense"
] | 1 | 2017-03-23T22:09:36.000Z | 2017-03-23T22:09:36.000Z | #!/usr/bin/python
import sys
import csv
#first arg: input file, csv. column woe_id should be the list of woe_ids we want to pull out of photos.txt
#second arg: output file, txt subset of photos.txt (also remove photoid. samplr not expecting it)
def main():
infile = sys.argv[1]
outfile = sys.argv[2]
photofile = "photos.txt"
woes = []
ireader = csv.DictReader(open(infile, 'r'))
for line in ireader:
woes.append(line['woe_id'])
pfh = open(photofile, 'r')
ofh = open(outfile, 'w')
outstr = "%s\t%s\t%s\n"
for row in pfh:
photoid, placeid, lon, lat = row.strip().split()
if placeid in woes:
out = outstr % (placeid, lon, lat)
ofh.write(out)
if __name__ == "__main__":
sys.exit(main())
| 22.571429 | 106 | 0.605063 |
import sys
import csv
def main():
infile = sys.argv[1]
outfile = sys.argv[2]
photofile = "photos.txt"
woes = []
ireader = csv.DictReader(open(infile, 'r'))
for line in ireader:
woes.append(line['woe_id'])
pfh = open(photofile, 'r')
ofh = open(outfile, 'w')
outstr = "%s\t%s\t%s\n"
for row in pfh:
photoid, placeid, lon, lat = row.strip().split()
if placeid in woes:
out = outstr % (placeid, lon, lat)
ofh.write(out)
if __name__ == "__main__":
sys.exit(main())
| true | true |
f72b29d93a56efc5fafb086551352e0cba9256da | 7,352 | py | Python | electrum/plugins/labels/labels.py | hodlwave/electrum | 52f8aafb604d05487a0612f65bacb966c0d0f569 | [
"MIT"
] | 4 | 2020-06-27T22:43:34.000Z | 2021-04-12T02:29:30.000Z | electrum/plugins/labels/labels.py | hodlwave/electrum | 52f8aafb604d05487a0612f65bacb966c0d0f569 | [
"MIT"
] | 21 | 2020-06-20T15:02:50.000Z | 2021-04-07T10:14:59.000Z | electrum/plugins/labels/labels.py | hodlwave/electrum | 52f8aafb604d05487a0612f65bacb966c0d0f569 | [
"MIT"
] | 13 | 2020-06-28T08:13:28.000Z | 2021-12-28T00:11:56.000Z | import asyncio
import hashlib
import json
import sys
import traceback
from typing import Union, TYPE_CHECKING
import base64
from electrum.plugin import BasePlugin, hook
from electrum.crypto import aes_encrypt_with_iv, aes_decrypt_with_iv
from electrum.i18n import _
from electrum.util import log_exceptions, ignore_exceptions, make_aiohttp_session
from electrum.network import Network
if TYPE_CHECKING:
from electrum.wallet import Abstract_Wallet
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('Labels')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}: {reason}" if reason else header
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.electrum.org'
self.wallets = {}
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = aes_encrypt_with_iv(password, iv, msg.encode('utf8'))
return base64.b64encode(encrypted).decode()
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
# nonce is the nonce to be used with the next change
nonce = wallet.db.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.logger.info(f"set {wallet.basename()} nonce to {nonce}")
wallet.db.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if wallet not in self.wallets:
return
if not item:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
asyncio.run_coroutine_threadsafe(self.do_post_safe("/label", bundle), wallet.network.asyncio_loop)
# Caller will write the wallet
self.set_nonce(wallet, nonce + 1)
@ignore_exceptions
@log_exceptions
async def do_post_safe(self, *args):
await self.do_post(*args)
async def do_get(self, url = "/labels"):
url = 'https://' + self.target_host + url
network = Network.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy) as session:
async with session.get(url) as result:
return await result.json()
async def do_post(self, url = "/labels", data=None):
url = 'https://' + self.target_host + url
network = Network.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy) as session:
async with session.post(url, json=data) as result:
try:
return await result.json()
except Exception as e:
raise Exception('Could not decode: ' + await result.text()) from e
async def push_thread(self, wallet):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.items():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.logger.info(f'cannot encode {repr(key)} {repr(value)}')
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
await self.do_post("/labels", bundle)
async def pull_thread(self, wallet, force):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.logger.info(f"asking for labels since nonce {nonce}")
try:
response = await self.do_get("/labels/since/%d/for/%s" % (nonce, wallet_id))
except Exception as e:
raise ErrorConnectingServer(e) from e
if response["labels"] is None:
self.logger.info('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f'error: no json {key}')
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.logger.info(f"received {len(response)} labels")
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
def on_pulled(self, wallet: 'Abstract_Wallet') -> None:
raise NotImplementedError()
@ignore_exceptions
@log_exceptions
async def pull_safe_thread(self, wallet, force):
try:
await self.pull_thread(wallet, force)
except ErrorConnectingServer as e:
self.logger.info(repr(e))
def pull(self, wallet, force):
if not wallet.network: raise Exception(_('You are offline.'))
return asyncio.run_coroutine_threadsafe(self.pull_thread(wallet, force), wallet.network.asyncio_loop).result()
def push(self, wallet):
if not wallet.network: raise Exception(_('You are offline.'))
return asyncio.run_coroutine_threadsafe(self.push_thread(wallet), wallet.network.asyncio_loop).result()
def start_wallet(self, wallet):
if not wallet.network: return # 'offline' mode
nonce = self.get_nonce(wallet)
self.logger.info(f"wallet {wallet.basename()} nonce is {nonce}")
mpk = wallet.get_fingerprint()
if not mpk:
return
mpk = mpk.encode('ascii')
password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).hexdigest()
self.wallets[wallet] = (password, iv, wallet_id)
# If there is an auth token we can try to actually start syncing
asyncio.run_coroutine_threadsafe(self.pull_safe_thread(wallet, False), wallet.network.asyncio_loop)
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
| 37.896907 | 118 | 0.616159 | import asyncio
import hashlib
import json
import sys
import traceback
from typing import Union, TYPE_CHECKING
import base64
from electrum.plugin import BasePlugin, hook
from electrum.crypto import aes_encrypt_with_iv, aes_decrypt_with_iv
from electrum.i18n import _
from electrum.util import log_exceptions, ignore_exceptions, make_aiohttp_session
from electrum.network import Network
if TYPE_CHECKING:
from electrum.wallet import Abstract_Wallet
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('Labels')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}: {reason}" if reason else header
class LabelsPlugin(BasePlugin):
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.target_host = 'labels.electrum.org'
self.wallets = {}
def encode(self, wallet, msg):
password, iv, wallet_id = self.wallets[wallet]
encrypted = aes_encrypt_with_iv(password, iv, msg.encode('utf8'))
return base64.b64encode(encrypted).decode()
def decode(self, wallet, message):
password, iv, wallet_id = self.wallets[wallet]
decoded = base64.b64decode(message)
decrypted = aes_decrypt_with_iv(password, iv, decoded)
return decrypted.decode('utf8')
def get_nonce(self, wallet):
nonce = wallet.db.get('wallet_nonce')
if nonce is None:
nonce = 1
self.set_nonce(wallet, nonce)
return nonce
def set_nonce(self, wallet, nonce):
self.logger.info(f"set {wallet.basename()} nonce to {nonce}")
wallet.db.put("wallet_nonce", nonce)
@hook
def set_label(self, wallet, item, label):
if wallet not in self.wallets:
return
if not item:
return
nonce = self.get_nonce(wallet)
wallet_id = self.wallets[wallet][2]
bundle = {"walletId": wallet_id,
"walletNonce": nonce,
"externalId": self.encode(wallet, item),
"encryptedLabel": self.encode(wallet, label)}
asyncio.run_coroutine_threadsafe(self.do_post_safe("/label", bundle), wallet.network.asyncio_loop)
self.set_nonce(wallet, nonce + 1)
@ignore_exceptions
@log_exceptions
async def do_post_safe(self, *args):
await self.do_post(*args)
async def do_get(self, url = "/labels"):
url = 'https://' + self.target_host + url
network = Network.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy) as session:
async with session.get(url) as result:
return await result.json()
async def do_post(self, url = "/labels", data=None):
url = 'https://' + self.target_host + url
network = Network.get_instance()
proxy = network.proxy if network else None
async with make_aiohttp_session(proxy) as session:
async with session.post(url, json=data) as result:
try:
return await result.json()
except Exception as e:
raise Exception('Could not decode: ' + await result.text()) from e
async def push_thread(self, wallet):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
bundle = {"labels": [],
"walletId": wallet_id,
"walletNonce": self.get_nonce(wallet)}
for key, value in wallet.labels.items():
try:
encoded_key = self.encode(wallet, key)
encoded_value = self.encode(wallet, value)
except:
self.logger.info(f'cannot encode {repr(key)} {repr(value)}')
continue
bundle["labels"].append({'encryptedLabel': encoded_value,
'externalId': encoded_key})
await self.do_post("/labels", bundle)
async def pull_thread(self, wallet, force):
wallet_data = self.wallets.get(wallet, None)
if not wallet_data:
raise Exception('Wallet {} not loaded'.format(wallet))
wallet_id = wallet_data[2]
nonce = 1 if force else self.get_nonce(wallet) - 1
self.logger.info(f"asking for labels since nonce {nonce}")
try:
response = await self.do_get("/labels/since/%d/for/%s" % (nonce, wallet_id))
except Exception as e:
raise ErrorConnectingServer(e) from e
if response["labels"] is None:
self.logger.info('no new labels')
return
result = {}
for label in response["labels"]:
try:
key = self.decode(wallet, label["externalId"])
value = self.decode(wallet, label["encryptedLabel"])
except:
continue
try:
json.dumps(key)
json.dumps(value)
except:
self.logger.info(f'error: no json {key}')
continue
result[key] = value
for key, value in result.items():
if force or not wallet.labels.get(key):
wallet.labels[key] = value
self.logger.info(f"received {len(response)} labels")
self.set_nonce(wallet, response["nonce"] + 1)
self.on_pulled(wallet)
def on_pulled(self, wallet: 'Abstract_Wallet') -> None:
raise NotImplementedError()
@ignore_exceptions
@log_exceptions
async def pull_safe_thread(self, wallet, force):
try:
await self.pull_thread(wallet, force)
except ErrorConnectingServer as e:
self.logger.info(repr(e))
def pull(self, wallet, force):
if not wallet.network: raise Exception(_('You are offline.'))
return asyncio.run_coroutine_threadsafe(self.pull_thread(wallet, force), wallet.network.asyncio_loop).result()
def push(self, wallet):
if not wallet.network: raise Exception(_('You are offline.'))
return asyncio.run_coroutine_threadsafe(self.push_thread(wallet), wallet.network.asyncio_loop).result()
def start_wallet(self, wallet):
if not wallet.network: return
nonce = self.get_nonce(wallet)
self.logger.info(f"wallet {wallet.basename()} nonce is {nonce}")
mpk = wallet.get_fingerprint()
if not mpk:
return
mpk = mpk.encode('ascii')
password = hashlib.sha1(mpk).hexdigest()[:32].encode('ascii')
iv = hashlib.sha256(password).digest()[:16]
wallet_id = hashlib.sha256(mpk).hexdigest()
self.wallets[wallet] = (password, iv, wallet_id)
asyncio.run_coroutine_threadsafe(self.pull_safe_thread(wallet, False), wallet.network.asyncio_loop)
def stop_wallet(self, wallet):
self.wallets.pop(wallet, None)
| true | true |
f72b2ad2a58898693037001dda7e833ae44efbc4 | 682 | py | Python | pyntcloud/structures/kdtree.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 1,142 | 2016-10-10T08:55:30.000Z | 2022-03-30T04:46:16.000Z | pyntcloud/structures/kdtree.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 195 | 2016-10-10T08:30:37.000Z | 2022-02-17T12:51:17.000Z | pyntcloud/structures/kdtree.py | bernssolg/pyntcloud-master | 84cf000b7a7f69a2c1b36f9624f05f65160bf992 | [
"MIT"
] | 215 | 2017-02-28T00:50:29.000Z | 2022-03-22T17:01:31.000Z | from scipy.spatial import cKDTree
from .base import Structure
class KDTree(cKDTree, Structure):
def __init__(self, *, points, leafsize=16, compact_nodes=False, balanced_tree=False):
Structure.__init__(self, points=points)
self._leafsize = leafsize
self._compact_nodes = compact_nodes
self._balanced_tree = balanced_tree
def compute(self):
self.id = "K({},{},{})".format(self._leafsize, self._compact_nodes, self._balanced_tree)
cKDTree.__init__(
self,
self._points,
leafsize=self._leafsize,
compact_nodes=self._compact_nodes,
balanced_tree=self._balanced_tree)
| 31 | 96 | 0.66129 | from scipy.spatial import cKDTree
from .base import Structure
class KDTree(cKDTree, Structure):
def __init__(self, *, points, leafsize=16, compact_nodes=False, balanced_tree=False):
Structure.__init__(self, points=points)
self._leafsize = leafsize
self._compact_nodes = compact_nodes
self._balanced_tree = balanced_tree
def compute(self):
self.id = "K({},{},{})".format(self._leafsize, self._compact_nodes, self._balanced_tree)
cKDTree.__init__(
self,
self._points,
leafsize=self._leafsize,
compact_nodes=self._compact_nodes,
balanced_tree=self._balanced_tree)
| true | true |
f72b2b40cbc83a0f7d47d5e52998f5659b19648e | 1,216 | py | Python | facemask.py | bhargavyagnik/FaceMaskDetection | 990c41a921a2a8a7760492a8dd21e4ab51391e51 | [
"MIT"
] | null | null | null | facemask.py | bhargavyagnik/FaceMaskDetection | 990c41a921a2a8a7760492a8dd21e4ab51391e51 | [
"MIT"
] | null | null | null | facemask.py | bhargavyagnik/FaceMaskDetection | 990c41a921a2a8a7760492a8dd21e4ab51391e51 | [
"MIT"
] | null | null | null | import tensorflow as tf
import cv2
import numpy as np
model = tf.keras.models.load_model('saved_model/model_3.h5')
face_clsfr = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
source = cv2.VideoCapture(1)
labels_dict = {0: 'with_mask', 1: 'without_mask'}
color_dict = {0: (0, 255, 0), 1: (0, 0, 255)}
while (True):
ret, img = source.read()
faces = face_clsfr.detectMultiScale(img)
print(img.shape)
for x, y, w, h in faces:
face_img = img[y:y + w, x:x + w]
resized = cv2.resize(face_img, (128, 128))
normalized = resized / 255.0
reshaped = np.reshape(normalized, (1, 128, 128, 3))
result = model.predict(reshaped)
print(result)
label=int(result.round().flatten())
cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)
cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)
cv2.putText(
img, labels_dict[label],
(x, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
cv2.imshow('LIVE', img)
key = cv2.waitKey(1)
if (key == 27):
break
cv2.destroyAllWindows()
source.release() | 30.4 | 75 | 0.578947 | import tensorflow as tf
import cv2
import numpy as np
model = tf.keras.models.load_model('saved_model/model_3.h5')
face_clsfr = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
source = cv2.VideoCapture(1)
labels_dict = {0: 'with_mask', 1: 'without_mask'}
color_dict = {0: (0, 255, 0), 1: (0, 0, 255)}
while (True):
ret, img = source.read()
faces = face_clsfr.detectMultiScale(img)
print(img.shape)
for x, y, w, h in faces:
face_img = img[y:y + w, x:x + w]
resized = cv2.resize(face_img, (128, 128))
normalized = resized / 255.0
reshaped = np.reshape(normalized, (1, 128, 128, 3))
result = model.predict(reshaped)
print(result)
label=int(result.round().flatten())
cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)
cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)
cv2.putText(
img, labels_dict[label],
(x, y - 10),
cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255), 2)
cv2.imshow('LIVE', img)
key = cv2.waitKey(1)
if (key == 27):
break
cv2.destroyAllWindows()
source.release() | true | true |
f72b2cd039ad9416819b474d149c3f6fbea635ff | 20,451 | py | Python | archive/canvas_test_6.py | bperez7/moments_models | d83e67b5d85f611ebf8dc10bc0d7569c962a37c2 | [
"BSD-2-Clause"
] | null | null | null | archive/canvas_test_6.py | bperez7/moments_models | d83e67b5d85f611ebf8dc10bc0d7569c962a37c2 | [
"BSD-2-Clause"
] | null | null | null | archive/canvas_test_6.py | bperez7/moments_models | d83e67b5d85f611ebf8dc10bc0d7569c962a37c2 | [
"BSD-2-Clause"
] | null | null | null | import cv2
import os
import time
import subprocess
#from matplotlib import pyplot as plt
import numpy as np
#from test_video import get_predictions_results
#cam_capture = cv2.VideoCapture(0)
#cv2.destroyAllWindows()
""" TODO:
1. Start video at specified time
2. Right click to indicate trimming points
3. Output file name
"""
frame_time = 10
frame_count = 0
global_trim_time = None
crop_started = False
class VideoCropTool:
    """Interactive tool for spatially cropping and temporally trimming a video.

    The user drags a crop rectangle with the left mouse button and marks the
    start/end of the time segment with two right clicks.  The selected clip is
    then cut out by the external ``crop_tool.sh`` script and either filed under
    a label folder (``crop_and_label``) or run through the Moments-in-Time
    model for action prediction (``crop_and_predict``).
    """
    def __init__(self, video_path, output_file, output_folder, video_start_time,
                 capture, output_label, time_window_on = False,time_window=3):
        """
        Args:
            video_path: path of the input video file.
            output_file: base name (no extension) for the trimmed clip.
            output_folder: directory the trimmed clip is written into.
            video_start_time: playback start offset in seconds (informational).
            capture: an already-opened ``cv2.VideoCapture`` for ``video_path``.
            output_label: action label used to sort clips into folders.
            time_window_on: unused flag -- kept for backward compatibility.
            time_window: fallback trim length in seconds.
        """
        self.video_path = video_path
        self.output_file = output_file
        self.output_folder = output_folder
        self.output_label=output_label
        self.video_start_time = video_start_time
        self.cap = capture
        # Crop-rectangle state, driven by the mouse callback.
        self.box_started = False
        self.box_created = False
        self.box_finished = False
        self.start = None
        self.end = None
        # Temporal trim state (positions inside the video, string and seconds).
        self.global_trim_time = None
        self.global_trim_time_secs = None
        self.crop_started = False
        self.start_trim_time = None
        self.end_trim_time = None
        self.start_trim_time_secs = None
        self.end_trim_time_secs = None
        self.time_window = time_window
        self.time_crop_secs = 0
        self.recording = False
        # Last model prediction rendered onto the frame.
        self.result_text = ""
        # Frame dimensions, filled in once playback starts.
        self.frame_width = 0
        self.frame_height = 0
    def click_box(self,event, x,y, flags, param):
        """Mouse callback: left-drag draws the crop box; right clicks toggle trimming.

        Args:
            event: OpenCV mouse event code.
            x: cursor x position within the frame.
            y: cursor y position within the frame.
            flags: OpenCV event flags (unused).
            param: user data from ``setMouseCallback`` (unused).
        Returns: None
        """
        # Start drawing the box when the left button goes down.
        if event == cv2.EVENT_LBUTTONDOWN:
            self.start = (x, y)
            self.box_started = True
        # Track the free corner while the mouse moves.
        elif event == cv2.EVENT_MOUSEMOVE:
            self.end = (x, y)
        # Freeze the box when the left button is released.
        elif event == cv2.EVENT_LBUTTONUP:
            self.final_end = (x, y)
            self.box_created = True
        elif event == cv2.EVENT_RBUTTONDOWN:
            # First right click starts the time segment ...
            if self.crop_started != True:
                self.crop_started = True
                self.start_trim_time = self.global_trim_time
                self.start_trim_time_secs = self.global_trim_time_secs
                self.recording = True
            # ... the second right click ends it and computes its length.
            else:
                self.crop_started = False
                self.trim_end_time = self.global_trim_time
                self.box_finished = True
                self.end_trim_time = self.global_trim_time
                self.end_trim_time_secs = self.global_trim_time_secs
                self.time_crop_secs = self.end_trim_time_secs-self.start_trim_time_secs
                print('crop time')
                print(self.time_crop_secs)
                self.recording = False
    def crop_and_label(self):
        """
        - Plays back the selected video in an OpenCV window and lets the user
          choose a crop rectangle and a time segment.
        - Cuts the selection with ``crop_tool.sh`` and sorts the clip into a
          folder named after ``self.output_label``, recording the label in
          ``custom_labels.txt``.
        Returns: None
        """
        while (self.cap.isOpened()):
            # Capture frame-by-frame.
            ret, frame = self.cap.read()
            cv2.namedWindow("Frame")
            cv2.setMouseCallback("Frame", self.click_box)
            self.frame_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float `width`
            self.frame_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float `height`
            if ret == True:
                # Draw the in-progress or finalized crop rectangle.
                if self.box_started:
                    rectangle_thickness=30
                    if self.box_created:
                        cv2.rectangle(frame, self.start, self.final_end, thickness=rectangle_thickness,color=333)
                    else:
                        cv2.rectangle(frame, self.start, self.end,thickness=rectangle_thickness, color=333)
                # Build "MM:SS"-style strings for the current playback position
                # and for `time_window` seconds earlier.
                current_time = self.cap.get(cv2.CAP_PROP_POS_MSEC)
                current_time_in_secs = round(current_time / 1000)
                self.global_trim_time_secs = current_time_in_secs
                current_time_secs = current_time_in_secs % 60
                current_time_mins = current_time_in_secs // 60
                prev_time_in_secs = current_time_in_secs - self.time_window
                prev_time_secs = prev_time_in_secs % 60
                prev_time_mins = prev_time_in_secs // 60
                if (current_time_mins // 10 == 0):  # single digit: zero-pad
                    current_time_mins_str = "0" + str(current_time_mins)
                else:
                    current_time_mins_str = str(current_time_mins)
                if (current_time_secs // 10 == 0):  # single digit: zero-pad
                    current_time_secs_str = "0" + str(current_time_secs)
                else:
                    current_time_secs_str = str(current_time_secs)
                if (prev_time_mins // 10 == 0):  # single digit: zero-pad
                    prev_time_mins_str = "0" + str(prev_time_mins)
                else:
                    prev_time_mins_str = str(prev_time_mins)
                if (prev_time_secs // 10 == 0):  # single digit: zero-pad
                    prev_time_secs_str = "0" + str(prev_time_secs)
                else:
                    prev_time_secs_str = str(prev_time_secs)
                # Duration of the user-selected segment as "00:00:SS".
                if (self.time_crop_secs<10):
                    TIME_WINDOW_STR = "00:00:"+"0" + str(self.time_crop_secs)
                else:
                    TIME_WINDOW_STR = "00:00:"+str(self.time_crop_secs)
                end_time = "00:" + current_time_mins_str + ":" + current_time_secs_str
                self.global_trim_time = end_time
                # NOTE(review): start_time is computed but not used below.
                start_time = "00:" + prev_time_mins_str + ":" + prev_time_secs_str
                # Overlay the playback time and last result text on the frame.
                text = str(round(current_time, 2))
                org = (50, 50)
                result_origin = (50, 200)
                color = (255, 0, 0)
                thickness = 2
                fontScale = 1
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, text, org, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                cv2.putText(frame, self.result_text, result_origin, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                # Red dot in the corner while a time segment is being recorded.
                if self.recording:
                    radius = 20
                    circle_center_coordinates = (int(self.frame_width) - radius - 20, 50)
                    circle_color = (0, 0, 255)
                    circle_thickness = -1  # filled circle
                    image = cv2.circle(frame, circle_center_coordinates, radius, circle_color, circle_thickness)
                cv2.imshow('Frame', frame)
                # Once both the box and the time segment are final, run the crop.
                if self.box_finished:
                    # Assemble arguments for crop_tool.sh (ffmpeg wrapper).
                    left_arg = "-l " + str(self.start[0]) + " "
                    top_arg = "-t " + str(self.start[1]) + " "
                    width_arg = "-w " + str(self.final_end[0] - self.start[0]) + " "
                    height_arg = "-h " + str(self.final_end[1] -self.start[1]) + " "
                    video_arg = "-f " + self.video_path + " "
                    output_arg = "-o " + self.output_folder + "/" + self.output_label + "/" + self.output_file + " "
                    beginning_arg = "-b " + str(self.start_trim_time_secs) + " "
                    end_arg = "-e " + TIME_WINDOW_STR
                    crop_time_start = time.time()
                    if not os.path.exists(self.output_folder+"/"+self.output_label):
                        os.makedirs(self.output_folder+"/"+self.output_label)
                    # Write the full command into a small shell script and run it.
                    command = "bash " + "crop_tool.sh " + video_arg + left_arg + top_arg + width_arg + height_arg + output_arg + beginning_arg + end_arg
                    os.chmod("./output_command.sh", 0o755)
                    with open("output_command.sh", "w") as text_file:
                        text_file.write('#!/bin/bash')
                        text_file.write("\n")
                        text_file.write(command + "\n")
                        text_file.write('#hello')
                    os.chmod("./output_command.sh", 0o755)
                    subprocess.check_call(["./output_command.sh"])
                    crop_time_end = time.time()
                    crop_elapsed_time = crop_time_end - crop_time_start
                    print("Crop Time: " + str(crop_elapsed_time))
                    # Reset box state so a new selection can be made.
                    self.box_created = False
                    self.box_started = False
                    self.box_finished = False
                    # Record the label if it is not already in custom_labels.txt.
                    # NOTE(review): "a+" opens positioned at EOF, so the loop below
                    # likely reads nothing and duplicates can slip in -- verify.
                    with open("custom_labels.txt", "a+") as text_file:
                        label_exists = False
                        for line in text_file:
                            if line==self.output_label:
                                label_exists=True
                                break
                        if not label_exists:
                            text_file.write("\n")
                            text_file.write(self.output_label)
                            print(self.output_label)
                # Press Q on the keyboard to exit.
                if cv2.waitKey(frame_time) & 0xFF == ord('q'):
                    break
            # Break the loop once frames stop arriving.
            else:
                break
        self.cap.release()
        cv2.destroyAllWindows()
    def crop_and_predict(self):
        """
        - Plays back the selected video in an OpenCV window and lets the user
          choose a crop rectangle and a time segment.
        - Cuts the selection with ``crop_tool.sh``, then runs the
          Moments-in-Time model (``test_video.py``) on the clip and prints the
          top predictions in the terminal.
        Returns: None
        """
        while (self.cap.isOpened()):
            # Capture frame-by-frame.
            ret, frame = self.cap.read()
            cv2.namedWindow("Frame")
            cv2.setMouseCallback("Frame", self.click_box)
            self.frame_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float `width`
            self.frame_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float `height`
            if ret == True:
                # Draw the in-progress or finalized crop rectangle.
                if self.box_started:
                    rectangle_thickness = 10
                    if self.box_created:
                        cv2.rectangle(frame, self.start, self.final_end, thickness=rectangle_thickness, color=333)
                    else:
                        cv2.rectangle(frame, self.start, self.end, thickness=rectangle_thickness, color=333)
                # Build "MM:SS"-style strings for the current playback position
                # and for `time_window` seconds earlier.
                current_time = self.cap.get(cv2.CAP_PROP_POS_MSEC)
                current_time_in_secs = round(current_time / 1000)
                current_time_secs = current_time_in_secs % 60
                current_time_mins = current_time_in_secs // 60
                self.global_trim_time_secs = current_time_in_secs
                prev_time_in_secs = current_time_in_secs - self.time_window
                prev_time_secs = prev_time_in_secs % 60
                prev_time_mins = prev_time_in_secs // 60
                if (current_time_mins // 10 == 0):  # single digit: zero-pad
                    current_time_mins_str = "0" + str(current_time_mins)
                else:
                    current_time_mins_str = str(current_time_mins)
                if (current_time_secs // 10 == 0):  # single digit: zero-pad
                    current_time_secs_str = "0" + str(current_time_secs)
                else:
                    current_time_secs_str = str(current_time_secs)
                if (prev_time_mins // 10 == 0):  # single digit: zero-pad
                    prev_time_mins_str = "0" + str(prev_time_mins)
                else:
                    prev_time_mins_str = str(prev_time_mins)
                if (prev_time_secs // 10 == 0):  # single digit: zero-pad
                    prev_time_secs_str = "0" + str(prev_time_secs)
                else:
                    prev_time_secs_str = str(prev_time_secs)
                # Duration of the user-selected segment as "00:00:SS".
                if (self.time_crop_secs < 10):
                    TIME_WINDOW_STR = "00:00:"+"0" + str(self.time_crop_secs)
                else:
                    TIME_WINDOW_STR = "00:00:"+str(self.time_crop_secs)
                end_time = "00:" + current_time_mins_str + ":" + current_time_secs_str
                self.global_trim_time = end_time
                # NOTE(review): start_time is computed but not used below.
                start_time = "00:" + prev_time_mins_str + ":" + prev_time_secs_str
                # Overlay the playback time and last result text on the frame.
                text = str(round(current_time, 2))
                org = (50, 50)
                result_origin = (50, 200)
                color = (255, 0, 0)
                thickness = 2
                fontScale = 1
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, text, org, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                cv2.putText(frame, self.result_text, result_origin, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                # Red dot in the corner while a time segment is being recorded.
                if self.recording:
                    radius = 20
                    circle_center_coordinates = (int(self.frame_width) - radius - 20, 50)
                    circle_color = (0, 0, 255)
                    circle_thickness = -1  # filled circle
                    cv2.circle(frame, circle_center_coordinates, radius, circle_color, circle_thickness)
                cv2.imshow('Frame', frame)
                # Once both the box and the time segment are final, crop + predict.
                if self.box_finished:
                    # Assemble arguments for crop_tool.sh (ffmpeg wrapper).
                    left_arg = "-l " + str(self.start[0]) + " "
                    top_arg = "-t " + str(self.start[1]) + " "
                    width_arg = "-w " + str(self.final_end[0] - self.start[0]) + " "
                    height_arg = "-h " + str(self.final_end[1] -self.start[1]) + " "
                    video_arg = "-f " + self.video_path + " "
                    output_arg = "-o " + self.output_folder + "/" + self.output_file + " "
                    beginning_arg = "-b " + str(self.start_trim_time_secs)+ " "
                    end_arg = "-e " + TIME_WINDOW_STR
                    print("beginning and end ")
                    print(beginning_arg)
                    print(end_arg)
                    crop_time_start = time.time()
                    # Write the full command into a small shell script and run it.
                    command = "bash " + "crop_tool.sh " + video_arg + left_arg + top_arg + width_arg + height_arg + output_arg + beginning_arg + end_arg
                    os.chmod("./output_command.sh", 0o755)
                    with open("output_command.sh", "w") as text_file:
                        text_file.write('#!/bin/bash')
                        text_file.write("\n")
                        text_file.write(command + "\n")
                        text_file.write('#hello')
                    os.chmod("./output_command.sh", 0o755)
                    subprocess.check_call(["./output_command.sh"])
                    crop_time_end = time.time()
                    crop_elapsed_time = crop_time_end - crop_time_start
                    print("Crop Time: " + str(crop_elapsed_time))
                    # Run the Moments-in-Time model on the cropped clip.
                    prediction_time_start = time.time()
                    os.system("python test_video.py --video_file " + self.output_folder+"/"+self.output_file + ".mp4 " + "--arch resnet3d50")
                    prediction_time_end = time.time()
                    prediction_elapsed_time = prediction_time_end - prediction_time_start
                    print("Prediction Time: " + str(prediction_elapsed_time))
                    # Read back the prediction file written by test_video.py.
                    # NOTE(review): file handle is never closed; consider a with-block.
                    file1 = open('predictions.txt', 'r')
                    result_text = ""
                    for line in file1:
                        print(line)
                        result_text += line
                        break  # keep only the first (top-1) prediction
                    # Reset box state so a new selection can be made.
                    self.box_created = False
                    self.box_started = False
                    self.box_finished = False
                # Press Q on the keyboard to exit.
                if cv2.waitKey(frame_time) & 0xFF == ord('q'):
                    break
            # Break the loop once frames stop arriving.
            else:
                break
        self.cap.release()
        cv2.destroyAllWindows()
def main():
    """Open the demo video at the configured offset and start the
    interactive crop-and-predict workflow.

    Fixes: removed unused locals (``TIME_WINDOW``, ``result_text``) and
    commented-out code; pass ``video_start_time`` through instead of a
    hard-coded ``0`` so the offset is configured in one place.
    """
    video_file_path = 'videos/IMG_4884.MOV'
    output_file = "demo_clip"
    output_folder = "trimmed_videos"
    output_label = "tossing"
    video_start_time = 0  # in secs
    fps = 30  # assumed frame rate of the input video -- TODO confirm
    video_start_frame = video_start_time*fps
    cap = cv2.VideoCapture(video_file_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, video_start_frame)
    my_crop_tool = VideoCropTool(video_file_path, output_file, output_folder, video_start_time, cap, output_label)
    my_crop_tool.crop_and_predict()
    # Alternative workflow: my_crop_tool.crop_and_label()
if __name__=="__main__":
    main()
| 33.691928 | 156 | 0.509413 | import cv2
import os
import time
import subprocess
import numpy as np
frame_time = 10
frame_count = 0
global_trim_time = None
crop_started = False
class VideoCropTool:
    """Interactive tool for spatially cropping and temporally trimming a video.

    The user drags a crop rectangle with the left mouse button and marks the
    start/end of the time segment with two right clicks.  The selected clip is
    then cut out by the external ``crop_tool.sh`` script and either filed under
    a label folder (``crop_and_label``) or run through the Moments-in-Time
    model for action prediction (``crop_and_predict``).
    """
    def __init__(self, video_path, output_file, output_folder, video_start_time,
                 capture, output_label, time_window_on = False,time_window=3):
        """
        Args:
            video_path: path of the input video file.
            output_file: base name (no extension) for the trimmed clip.
            output_folder: directory the trimmed clip is written into.
            video_start_time: playback start offset in seconds (informational).
            capture: an already-opened ``cv2.VideoCapture`` for ``video_path``.
            output_label: action label used to sort clips into folders.
            time_window_on: unused flag -- kept for backward compatibility.
            time_window: fallback trim length in seconds.
        """
        self.video_path = video_path
        self.output_file = output_file
        self.output_folder = output_folder
        self.output_label=output_label
        self.video_start_time = video_start_time
        self.cap = capture
        # Crop-rectangle state, driven by the mouse callback.
        self.box_started = False
        self.box_created = False
        self.box_finished = False
        self.start = None
        self.end = None
        # Temporal trim state (positions inside the video, string and seconds).
        self.global_trim_time = None
        self.global_trim_time_secs = None
        self.crop_started = False
        self.start_trim_time = None
        self.end_trim_time = None
        self.start_trim_time_secs = None
        self.end_trim_time_secs = None
        self.time_window = time_window
        self.time_crop_secs = 0
        self.recording = False
        # Last model prediction rendered onto the frame.
        self.result_text = ""
        # Frame dimensions, filled in once playback starts.
        self.frame_width = 0
        self.frame_height = 0
    def click_box(self,event, x,y, flags, param):
        """Mouse callback: left-drag draws the crop box; right clicks toggle trimming.

        Args:
            event: OpenCV mouse event code.
            x: cursor x position within the frame.
            y: cursor y position within the frame.
            flags: OpenCV event flags (unused).
            param: user data from ``setMouseCallback`` (unused).
        Returns: None
        """
        # Start drawing the box when the left button goes down.
        if event == cv2.EVENT_LBUTTONDOWN:
            self.start = (x, y)
            self.box_started = True
        # Track the free corner while the mouse moves.
        elif event == cv2.EVENT_MOUSEMOVE:
            self.end = (x, y)
        # Freeze the box when the left button is released.
        elif event == cv2.EVENT_LBUTTONUP:
            self.final_end = (x, y)
            self.box_created = True
        elif event == cv2.EVENT_RBUTTONDOWN:
            # First right click starts the time segment ...
            if self.crop_started != True:
                self.crop_started = True
                self.start_trim_time = self.global_trim_time
                self.start_trim_time_secs = self.global_trim_time_secs
                self.recording = True
            # ... the second right click ends it and computes its length.
            else:
                self.crop_started = False
                self.trim_end_time = self.global_trim_time
                self.box_finished = True
                self.end_trim_time = self.global_trim_time
                self.end_trim_time_secs = self.global_trim_time_secs
                self.time_crop_secs = self.end_trim_time_secs-self.start_trim_time_secs
                print('crop time')
                print(self.time_crop_secs)
                self.recording = False
    def crop_and_label(self):
        """
        - Plays back the selected video in an OpenCV window and lets the user
          choose a crop rectangle and a time segment.
        - Cuts the selection with ``crop_tool.sh`` and sorts the clip into a
          folder named after ``self.output_label``, recording the label in
          ``custom_labels.txt``.
        Returns: None
        """
        while (self.cap.isOpened()):
            # Capture frame-by-frame.
            ret, frame = self.cap.read()
            cv2.namedWindow("Frame")
            cv2.setMouseCallback("Frame", self.click_box)
            self.frame_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float `width`
            self.frame_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float `height`
            if ret == True:
                # Draw the in-progress or finalized crop rectangle.
                if self.box_started:
                    rectangle_thickness=30
                    if self.box_created:
                        cv2.rectangle(frame, self.start, self.final_end, thickness=rectangle_thickness,color=333)
                    else:
                        cv2.rectangle(frame, self.start, self.end,thickness=rectangle_thickness, color=333)
                # Build "MM:SS"-style strings for the current playback position
                # and for `time_window` seconds earlier.
                current_time = self.cap.get(cv2.CAP_PROP_POS_MSEC)
                current_time_in_secs = round(current_time / 1000)
                self.global_trim_time_secs = current_time_in_secs
                current_time_secs = current_time_in_secs % 60
                current_time_mins = current_time_in_secs // 60
                prev_time_in_secs = current_time_in_secs - self.time_window
                prev_time_secs = prev_time_in_secs % 60
                prev_time_mins = prev_time_in_secs // 60
                if (current_time_mins // 10 == 0):  # single digit: zero-pad
                    current_time_mins_str = "0" + str(current_time_mins)
                else:
                    current_time_mins_str = str(current_time_mins)
                if (current_time_secs // 10 == 0):  # single digit: zero-pad
                    current_time_secs_str = "0" + str(current_time_secs)
                else:
                    current_time_secs_str = str(current_time_secs)
                if (prev_time_mins // 10 == 0):  # single digit: zero-pad
                    prev_time_mins_str = "0" + str(prev_time_mins)
                else:
                    prev_time_mins_str = str(prev_time_mins)
                if (prev_time_secs // 10 == 0):  # single digit: zero-pad
                    prev_time_secs_str = "0" + str(prev_time_secs)
                else:
                    prev_time_secs_str = str(prev_time_secs)
                # Duration of the user-selected segment as "00:00:SS".
                if (self.time_crop_secs<10):
                    TIME_WINDOW_STR = "00:00:"+"0" + str(self.time_crop_secs)
                else:
                    TIME_WINDOW_STR = "00:00:"+str(self.time_crop_secs)
                end_time = "00:" + current_time_mins_str + ":" + current_time_secs_str
                self.global_trim_time = end_time
                # NOTE(review): start_time is computed but not used below.
                start_time = "00:" + prev_time_mins_str + ":" + prev_time_secs_str
                # Overlay the playback time and last result text on the frame.
                text = str(round(current_time, 2))
                org = (50, 50)
                result_origin = (50, 200)
                color = (255, 0, 0)
                thickness = 2
                fontScale = 1
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, text, org, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                cv2.putText(frame, self.result_text, result_origin, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                # Red dot in the corner while a time segment is being recorded.
                if self.recording:
                    radius = 20
                    circle_center_coordinates = (int(self.frame_width) - radius - 20, 50)
                    circle_color = (0, 0, 255)
                    circle_thickness = -1  # filled circle
                    image = cv2.circle(frame, circle_center_coordinates, radius, circle_color, circle_thickness)
                cv2.imshow('Frame', frame)
                # Once both the box and the time segment are final, run the crop.
                if self.box_finished:
                    # Assemble arguments for crop_tool.sh (ffmpeg wrapper).
                    left_arg = "-l " + str(self.start[0]) + " "
                    top_arg = "-t " + str(self.start[1]) + " "
                    width_arg = "-w " + str(self.final_end[0] - self.start[0]) + " "
                    height_arg = "-h " + str(self.final_end[1] -self.start[1]) + " "
                    video_arg = "-f " + self.video_path + " "
                    output_arg = "-o " + self.output_folder + "/" + self.output_label + "/" + self.output_file + " "
                    beginning_arg = "-b " + str(self.start_trim_time_secs) + " "
                    end_arg = "-e " + TIME_WINDOW_STR
                    crop_time_start = time.time()
                    if not os.path.exists(self.output_folder+"/"+self.output_label):
                        os.makedirs(self.output_folder+"/"+self.output_label)
                    # Write the full command into a small shell script and run it.
                    command = "bash " + "crop_tool.sh " + video_arg + left_arg + top_arg + width_arg + height_arg + output_arg + beginning_arg + end_arg
                    os.chmod("./output_command.sh", 0o755)
                    with open("output_command.sh", "w") as text_file:
                        text_file.write('#!/bin/bash')
                        text_file.write("\n")
                        text_file.write(command + "\n")
                        text_file.write('#hello')
                    os.chmod("./output_command.sh", 0o755)
                    subprocess.check_call(["./output_command.sh"])
                    crop_time_end = time.time()
                    crop_elapsed_time = crop_time_end - crop_time_start
                    print("Crop Time: " + str(crop_elapsed_time))
                    # Reset box state so a new selection can be made.
                    self.box_created = False
                    self.box_started = False
                    self.box_finished = False
                    # Record the label if it is not already in custom_labels.txt.
                    # NOTE(review): "a+" opens positioned at EOF, so the loop below
                    # likely reads nothing and duplicates can slip in -- verify.
                    with open("custom_labels.txt", "a+") as text_file:
                        label_exists = False
                        for line in text_file:
                            if line==self.output_label:
                                label_exists=True
                                break
                        if not label_exists:
                            text_file.write("\n")
                            text_file.write(self.output_label)
                            print(self.output_label)
                # Press Q on the keyboard to exit.
                if cv2.waitKey(frame_time) & 0xFF == ord('q'):
                    break
            # Break the loop once frames stop arriving.
            else:
                break
        self.cap.release()
        cv2.destroyAllWindows()
    def crop_and_predict(self):
        """
        - Plays back the selected video in an OpenCV window and lets the user
          choose a crop rectangle and a time segment.
        - Cuts the selection with ``crop_tool.sh``, then runs the
          Moments-in-Time model (``test_video.py``) on the clip and prints the
          top predictions in the terminal.
        Returns: None
        """
        while (self.cap.isOpened()):
            # Capture frame-by-frame.
            ret, frame = self.cap.read()
            cv2.namedWindow("Frame")
            cv2.setMouseCallback("Frame", self.click_box)
            self.frame_width = self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)  # float `width`
            self.frame_height = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float `height`
            if ret == True:
                # Draw the in-progress or finalized crop rectangle.
                if self.box_started:
                    rectangle_thickness = 10
                    if self.box_created:
                        cv2.rectangle(frame, self.start, self.final_end, thickness=rectangle_thickness, color=333)
                    else:
                        cv2.rectangle(frame, self.start, self.end, thickness=rectangle_thickness, color=333)
                # Build "MM:SS"-style strings for the current playback position
                # and for `time_window` seconds earlier.
                current_time = self.cap.get(cv2.CAP_PROP_POS_MSEC)
                current_time_in_secs = round(current_time / 1000)
                current_time_secs = current_time_in_secs % 60
                current_time_mins = current_time_in_secs // 60
                self.global_trim_time_secs = current_time_in_secs
                prev_time_in_secs = current_time_in_secs - self.time_window
                prev_time_secs = prev_time_in_secs % 60
                prev_time_mins = prev_time_in_secs // 60
                if (current_time_mins // 10 == 0):  # single digit: zero-pad
                    current_time_mins_str = "0" + str(current_time_mins)
                else:
                    current_time_mins_str = str(current_time_mins)
                if (current_time_secs // 10 == 0):  # single digit: zero-pad
                    current_time_secs_str = "0" + str(current_time_secs)
                else:
                    current_time_secs_str = str(current_time_secs)
                if (prev_time_mins // 10 == 0):  # single digit: zero-pad
                    prev_time_mins_str = "0" + str(prev_time_mins)
                else:
                    prev_time_mins_str = str(prev_time_mins)
                if (prev_time_secs // 10 == 0):  # single digit: zero-pad
                    prev_time_secs_str = "0" + str(prev_time_secs)
                else:
                    prev_time_secs_str = str(prev_time_secs)
                # Duration of the user-selected segment as "00:00:SS".
                if (self.time_crop_secs < 10):
                    TIME_WINDOW_STR = "00:00:"+"0" + str(self.time_crop_secs)
                else:
                    TIME_WINDOW_STR = "00:00:"+str(self.time_crop_secs)
                end_time = "00:" + current_time_mins_str + ":" + current_time_secs_str
                self.global_trim_time = end_time
                # NOTE(review): start_time is computed but not used below.
                start_time = "00:" + prev_time_mins_str + ":" + prev_time_secs_str
                # Overlay the playback time and last result text on the frame.
                text = str(round(current_time, 2))
                org = (50, 50)
                result_origin = (50, 200)
                color = (255, 0, 0)
                thickness = 2
                fontScale = 1
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(frame, text, org, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                cv2.putText(frame, self.result_text, result_origin, font,
                            fontScale, color, thickness, cv2.LINE_AA)
                # Red dot in the corner while a time segment is being recorded.
                if self.recording:
                    radius = 20
                    circle_center_coordinates = (int(self.frame_width) - radius - 20, 50)
                    circle_color = (0, 0, 255)
                    circle_thickness = -1  # filled circle
                    cv2.circle(frame, circle_center_coordinates, radius, circle_color, circle_thickness)
                cv2.imshow('Frame', frame)
                # Once both the box and the time segment are final, crop + predict.
                if self.box_finished:
                    # Assemble arguments for crop_tool.sh (ffmpeg wrapper).
                    left_arg = "-l " + str(self.start[0]) + " "
                    top_arg = "-t " + str(self.start[1]) + " "
                    width_arg = "-w " + str(self.final_end[0] - self.start[0]) + " "
                    height_arg = "-h " + str(self.final_end[1] -self.start[1]) + " "
                    video_arg = "-f " + self.video_path + " "
                    output_arg = "-o " + self.output_folder + "/" + self.output_file + " "
                    beginning_arg = "-b " + str(self.start_trim_time_secs)+ " "
                    end_arg = "-e " + TIME_WINDOW_STR
                    print("beginning and end ")
                    print(beginning_arg)
                    print(end_arg)
                    crop_time_start = time.time()
                    # Write the full command into a small shell script and run it.
                    command = "bash " + "crop_tool.sh " + video_arg + left_arg + top_arg + width_arg + height_arg + output_arg + beginning_arg + end_arg
                    os.chmod("./output_command.sh", 0o755)
                    with open("output_command.sh", "w") as text_file:
                        text_file.write('#!/bin/bash')
                        text_file.write("\n")
                        text_file.write(command + "\n")
                        text_file.write('#hello')
                    os.chmod("./output_command.sh", 0o755)
                    subprocess.check_call(["./output_command.sh"])
                    crop_time_end = time.time()
                    crop_elapsed_time = crop_time_end - crop_time_start
                    print("Crop Time: " + str(crop_elapsed_time))
                    # Run the Moments-in-Time model on the cropped clip.
                    prediction_time_start = time.time()
                    os.system("python test_video.py --video_file " + self.output_folder+"/"+self.output_file + ".mp4 " + "--arch resnet3d50")
                    prediction_time_end = time.time()
                    prediction_elapsed_time = prediction_time_end - prediction_time_start
                    print("Prediction Time: " + str(prediction_elapsed_time))
                    # Read back the prediction file written by test_video.py.
                    # NOTE(review): file handle is never closed; consider a with-block.
                    file1 = open('predictions.txt', 'r')
                    result_text = ""
                    for line in file1:
                        print(line)
                        result_text += line
                        break  # keep only the first (top-1) prediction
                    # Reset box state so a new selection can be made.
                    self.box_created = False
                    self.box_started = False
                    self.box_finished = False
                # Press Q on the keyboard to exit.
                if cv2.waitKey(frame_time) & 0xFF == ord('q'):
                    break
            # Break the loop once frames stop arriving.
            else:
                break
        self.cap.release()
        cv2.destroyAllWindows()
def main():
    """Demo driver: open the sample video at the configured offset and start
    the interactive crop-and-predict workflow."""
    TIME_WINDOW = 3  # seconds; NOTE(review): unused in this function
    video_file_path = 'videos/IMG_4884.MOV'
    output_file = "demo_clip"
    output_folder = "trimmed_videos"
    output_label = "tossing"
    result_text = ""  # NOTE(review): unused in this function
    video_start_time = 0  # playback start offset, in seconds
    fps = 30  # assumed frame rate of the input video -- TODO confirm
    video_start_frame = video_start_time*fps
    cap = cv2.VideoCapture(video_file_path)
    cap.set(cv2.CAP_PROP_POS_FRAMES, video_start_frame)
    # NOTE(review): the 4th argument duplicates video_start_time as a literal 0.
    my_crop_tool = VideoCropTool(video_file_path, output_file, output_folder, 0, cap, output_label)
    my_crop_tool.crop_and_predict()
if __name__=="__main__":
    main()
| true | true |
f72b2cffb7796783443939305fa1035e7ad944b2 | 13,043 | py | Python | cltk/tests/test_nlp/test_tag.py | mcnorton05/cltk | 80dbbd6ee378ed4a6dd1723e4405e314b25f1638 | [
"MIT"
] | 1 | 2020-05-01T08:21:22.000Z | 2020-05-01T08:21:22.000Z | cltk/tests/test_nlp/test_tag.py | ecomp-shONgit/cltk | 7bc3ffd1bbbfa5d036297395d7e51b99b25b81ea | [
"MIT"
] | null | null | null | cltk/tests/test_nlp/test_tag.py | ecomp-shONgit/cltk | 7bc3ffd1bbbfa5d036297395d7e51b99b25b81ea | [
"MIT"
] | null | null | null | """Test cltk.tag."""
import os
import shutil
import unittest
from cltk.corpus.utils.importer import CorpusImporter
from cltk.stem.latin.j_v import JVReplacer
from cltk.tag import ner
from cltk.tag.ner import NamedEntityReplacer
from cltk.tag.pos import POSTag
__license__ = 'MIT License. See LICENSE.'
class TestSequenceFunctions(unittest.TestCase): # pylint: disable=R0904
"""Class for unittest"""
def setUp(self):
"""Clone Greek models in order to test pull function and other model
tests later.
"""
corpus_importer = CorpusImporter('greek')
corpus_importer.import_corpus('greek_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/greek/model/greek_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('latin')
corpus_importer.import_corpus('latin_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/latin/model/latin_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('french')
corpus_importer.import_corpus('french_data_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/french/text/french_data_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter("old_norse")
corpus_importer.import_corpus("old_norse_models_cltk")
file_rel = os.path.join(get_cltk_data_dir() + '/old_norse/model/old_norse_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('middle_low_german')
corpus_importer.import_corpus('middle_low_german_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/middle_low_german/model/middle_low_german_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('old_english')
corpus_importer.import_corpus('old_english_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/old_english/model/old_english_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
def test_pos_unigram_greek(self):
"""Test tagging Greek POS with unigram tagger."""
tagger = POSTag('greek')
tagged = tagger.tag_unigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_bigram_greek(self):
"""Test tagging Greek POS with bigram tagger."""
tagger = POSTag('greek')
tagged = tagger.tag_bigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_trigram_greek(self):
"""Test tagging Greek POS with trigram tagger."""
tagger = POSTag('greek')
tagged = tagger.tag_trigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_ngram123_tagger_greek(self):
"""Test tagging Greek POS with a 1-, 2-, and 3-gram backoff tagger."""
tagger = POSTag('greek')
tagged = tagger.tag_ngram_123_backoff('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_tnt_tagger_greek(self):
"""Test tagging Greek POS with TnT tagger."""
tagger = POSTag('greek')
tagged = tagger.tag_tnt('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_unigram_latin(self):
"""Test tagging Latin POS with unigram tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_unigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_bigram_latin(self):
"""Test tagging Latin POS with bigram tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_bigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_trigram_latin(self):
"""Test tagging Latin POS with trigram tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_trigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_latin(self):
"""Test tagging Latin POS with a 1-, 2-, and 3-gram backoff tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_ngram_123_backoff('Gallia est omnis divisa in partes tres') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_tnt_tagger_latin(self):
"""Test tagging Latin POS with TnT tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_tnt('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_crf_tagger_latin(self):
"""Test tagging Latin POS with CRF tagger."""
tagger = POSTag('latin')
tagged = tagger.tag_crf('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
    def test_check_latest_latin(self):
        """Test _check_latest_data().

        NOTE(review): this method is shadowed by the second
        ``test_check_latest_latin`` defined immediately below, so it never
        runs; rename one of the two to restore coverage.
        """
        ner._check_latest_data('latin')
        names_path = os.path.normpath(get_cltk_data_dir() + '/latin/model/latin_models_cltk/ner/proper_names.txt')
        self.assertTrue(os.path.isfile(names_path))
    def test_check_latest_latin(self):
        """Test _check_latest_data() after wiping the local Latin models,
        forcing a fresh download of the NER proper-name list.
        """
        path = get_cltk_data_dir() + '/latin/model/latin_models_cltk'
        names_dir = os.path.expanduser(path)
        # Remove any cached models so _check_latest_data must re-fetch them.
        shutil.rmtree(names_dir, ignore_errors=True)
        ner._check_latest_data('latin')
        names_path = os.path.join(names_dir, 'ner', 'proper_names.txt')
        self.assertTrue(os.path.isfile(names_path))
def test_tag_ner_str_list_latin(self):
"""Test make_ner(), str, list."""
text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
tokens = ner.tag_ner('latin', input_text=text_str_iu, output_type=list)
target = [('ut',), ('Uenus', 'Entity'), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity'), (',',), ('ut',), ('aliae',), ('quae',), ('primae',), ('dicuntur',), ('esse',), ('mangitudinis',), ('.',)]
self.assertEqual(tokens, target)
def test_tag_ner_list_list_latin(self):
"""Test make_ner(), list, list."""
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
tokens = ner.tag_ner('latin', input_text=text_list_iu, output_type=list)
target = [('ut',), ('Uenus', 'Entity'), ('Sirius', 'Entity')]
self.assertEqual(tokens, target)
def test_tag_ner_list_str_latin(self):
"""Test make_ner(), list, str."""
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
text = ner.tag_ner('latin', input_text=text_list_iu, output_type=str)
target = ' ut Uenus/Entity Sirius/Entity'
self.assertEqual(text, target)
def test_tag_ner_str_str_latin(self):
"""Test make_ner(), str, str."""
jv_replacer = JVReplacer()
text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
text = ner.tag_ner('latin', input_text=text_str_iu, output_type=str)
target = ' ut Uenus/Entity, ut Sirius/Entity, ut Spica/Entity, ut aliae quae primae dicuntur esse mangitudinis.'
self.assertEqual(text, target)
def test_tag_ner_str_list_greek(self):
"""Test make_ner(), str, list."""
text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
tokens = ner.tag_ner('greek', input_text=text_str, output_type=list)
target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity'), ('Κάππαρος', 'Entity'), ('Πρωτογενείας', 'Entity'), ('Διονυσιάδες', 'Entity'), ('τὴν',)]
self.assertEqual(tokens, target)
def test_tag_ner_list_list_greek(self):
"""Test make_ner(), list, list."""
text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
tokens = ner.tag_ner('greek', input_text=text_list, output_type=list)
target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity')]
self.assertEqual(tokens, target)
def test_tag_ner_list_str_greek(self):
"""Test make_ner(), list, str."""
text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
text = ner.tag_ner('greek', input_text=text_list, output_type=str)
target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity'
self.assertEqual(text, target)
def test_tag_ner_str_str_greek(self):
"""Test make_ner(), str, str."""
text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
text = ner.tag_ner('greek', input_text=text_str, output_type=str)
target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity Κάππαρος/Entity Πρωτογενείας/Entity Διονυσιάδες/Entity τὴν'
self.assertEqual(text, target)
def test_tag_ner_str_list_french(self):
"""Test make_ner(), str, list."""
text_str = """Berte fu mere Charlemaine, qui pukis tint France et tot le Maine."""
ner_replacer = NamedEntityReplacer()
tokens = ner_replacer.tag_ner_fr(input_text=text_str, output_type=list)
target = [[('Berte', 'entity', 'CHI')], ('fu',), ('mere',), [('Charlemaine', 'entity', 'CHI')], (',',), ('qui',), ('pukis',),
('tint',), [('France', 'entity', 'LOC')], ('et',), ('tot',), ('le',), [('Maine', 'entity', 'LOC')], ('.',)]
self.assertEqual(tokens, target)
def test_pos_tnt_tagger_old_norse(self):
"""Test tagging Old Norse POS with TnT tagger."""
tagger = POSTag('old_norse')
tagged = tagger.tag_tnt('Hlióðs bið ek allar.')
print(tagged)
self.assertTrue(tagged)
def test_pos_ngram12_tagger_middle_low_german(self):
""" Test MOG POS 12-backoff tagger"""
tagger = POSTag('middle_low_german')
tagged = tagger.tag_ngram_12_backoff('Jck Johannes preister verwarer vnde voirs tender des Juncfrouwen kloisters to Mariendale')
self.assertTrue(tagged)
def test_pos_unigram_old_english(self):
"""Test tagging Old English POS with unigram tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_unigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_bigram_old_english(self):
"""Test tagging Old English POS with bigram tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_bigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_trigram_old_english(self):
"""Test tagging old_english POS with trigram tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_trigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_old_english(self):
"""Test tagging Old English POS with a 1-, 2-, and 3-gram backoff tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_ngram_123_backoff('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.') # pylint: disable=line-too-long
self.assertTrue(tagged)
def test_pos_crf_tagger_old_english(self):
"""Test tagging Old English POS with CRF tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_crf('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_perceptron_tagger_old_english(self):
"""Test tagging Old English POS with Perceptron tagger."""
tagger = POSTag('old_english')
tagged = tagger.tag_perceptron('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
if __name__ == '__main__':
unittest.main()
| 47.952206 | 230 | 0.662501 |
import os
import shutil
import unittest
from cltk.corpus.utils.importer import CorpusImporter
from cltk.stem.latin.j_v import JVReplacer
from cltk.tag import ner
from cltk.tag.ner import NamedEntityReplacer
from cltk.tag.pos import POSTag
__license__ = 'MIT License. See LICENSE.'
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
corpus_importer = CorpusImporter('greek')
corpus_importer.import_corpus('greek_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/greek/model/greek_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('latin')
corpus_importer.import_corpus('latin_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/latin/model/latin_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('french')
corpus_importer.import_corpus('french_data_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/french/text/french_data_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter("old_norse")
corpus_importer.import_corpus("old_norse_models_cltk")
file_rel = os.path.join(get_cltk_data_dir() + '/old_norse/model/old_norse_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('middle_low_german')
corpus_importer.import_corpus('middle_low_german_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/middle_low_german/model/middle_low_german_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
corpus_importer = CorpusImporter('old_english')
corpus_importer.import_corpus('old_english_models_cltk')
file_rel = os.path.join(get_cltk_data_dir() + '/old_english/model/old_english_models_cltk/README.md')
file = os.path.expanduser(file_rel)
file_exists = os.path.isfile(file)
self.assertTrue(file_exists)
def test_pos_unigram_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_unigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_bigram_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_bigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_trigram_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_trigram('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_ngram_123_backoff('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_tnt_tagger_greek(self):
tagger = POSTag('greek')
tagged = tagger.tag_tnt('θεοὺς μὲν αἰτῶ τῶνδ᾽ ἀπαλλαγὴν πόνων φρουρᾶς ἐτείας μῆκος')
self.assertTrue(tagged)
def test_pos_unigram_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_unigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_bigram_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_bigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_trigram_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_trigram('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_ngram_123_backoff('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_tnt_tagger_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_tnt('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_pos_crf_tagger_latin(self):
tagger = POSTag('latin')
tagged = tagger.tag_crf('Gallia est omnis divisa in partes tres')
self.assertTrue(tagged)
def test_check_latest_latin(self):
ner._check_latest_data('latin')
names_path = os.path.normpath(get_cltk_data_dir() + '/latin/model/latin_models_cltk/ner/proper_names.txt')
self.assertTrue(os.path.isfile(names_path))
def test_check_latest_latin(self):
path = get_cltk_data_dir() + '/latin/model/latin_models_cltk'
names_dir = os.path.expanduser(path)
shutil.rmtree(names_dir, ignore_errors=True)
ner._check_latest_data('latin')
names_path = os.path.join(names_dir, 'ner', 'proper_names.txt')
self.assertTrue(os.path.isfile(names_path))
def test_tag_ner_str_list_latin(self):
text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
tokens = ner.tag_ner('latin', input_text=text_str_iu, output_type=list)
target = [('ut',), ('Uenus', 'Entity'), (',',), ('ut',), ('Sirius', 'Entity'), (',',), ('ut',), ('Spica', 'Entity'), (',',), ('ut',), ('aliae',), ('quae',), ('primae',), ('dicuntur',), ('esse',), ('mangitudinis',), ('.',)]
self.assertEqual(tokens, target)
def test_tag_ner_list_list_latin(self):
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
tokens = ner.tag_ner('latin', input_text=text_list_iu, output_type=list)
target = [('ut',), ('Uenus', 'Entity'), ('Sirius', 'Entity')]
self.assertEqual(tokens, target)
def test_tag_ner_list_str_latin(self):
text_list = ['ut', 'Venus', 'Sirius']
jv_replacer = JVReplacer()
text_list_iu = [jv_replacer.replace(x) for x in text_list]
text = ner.tag_ner('latin', input_text=text_list_iu, output_type=str)
target = ' ut Uenus/Entity Sirius/Entity'
self.assertEqual(text, target)
def test_tag_ner_str_str_latin(self):
jv_replacer = JVReplacer()
text_str = """ut Venus, ut Sirius, ut Spica, ut aliae quae primae dicuntur esse mangitudinis."""
jv_replacer = JVReplacer()
text_str_iu = jv_replacer.replace(text_str)
text = ner.tag_ner('latin', input_text=text_str_iu, output_type=str)
target = ' ut Uenus/Entity, ut Sirius/Entity, ut Spica/Entity, ut aliae quae primae dicuntur esse mangitudinis.'
self.assertEqual(text, target)
def test_tag_ner_str_list_greek(self):
text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
tokens = ner.tag_ner('greek', input_text=text_str, output_type=list)
target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity'), ('Κάππαρος', 'Entity'), ('Πρωτογενείας', 'Entity'), ('Διονυσιάδες', 'Entity'), ('τὴν',)]
self.assertEqual(tokens, target)
def test_tag_ner_list_list_greek(self):
text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
tokens = ner.tag_ner('greek', input_text=text_list, output_type=list)
target = [('τὰ',), ('Σίλαριν', 'Entity'), ('Σιννᾶν', 'Entity')]
self.assertEqual(tokens, target)
def test_tag_ner_list_str_greek(self):
text_list = ['τὰ', 'Σίλαριν', 'Σιννᾶν']
text = ner.tag_ner('greek', input_text=text_list, output_type=str)
target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity'
self.assertEqual(text, target)
def test_tag_ner_str_str_greek(self):
text_str = 'τὰ Σίλαριν Σιννᾶν Κάππαρος Πρωτογενείας Διονυσιάδες τὴν'
text = ner.tag_ner('greek', input_text=text_str, output_type=str)
target = ' τὰ Σίλαριν/Entity Σιννᾶν/Entity Κάππαρος/Entity Πρωτογενείας/Entity Διονυσιάδες/Entity τὴν'
self.assertEqual(text, target)
def test_tag_ner_str_list_french(self):
text_str = """Berte fu mere Charlemaine, qui pukis tint France et tot le Maine."""
ner_replacer = NamedEntityReplacer()
tokens = ner_replacer.tag_ner_fr(input_text=text_str, output_type=list)
target = [[('Berte', 'entity', 'CHI')], ('fu',), ('mere',), [('Charlemaine', 'entity', 'CHI')], (',',), ('qui',), ('pukis',),
('tint',), [('France', 'entity', 'LOC')], ('et',), ('tot',), ('le',), [('Maine', 'entity', 'LOC')], ('.',)]
self.assertEqual(tokens, target)
def test_pos_tnt_tagger_old_norse(self):
tagger = POSTag('old_norse')
tagged = tagger.tag_tnt('Hlióðs bið ek allar.')
print(tagged)
self.assertTrue(tagged)
def test_pos_ngram12_tagger_middle_low_german(self):
tagger = POSTag('middle_low_german')
tagged = tagger.tag_ngram_12_backoff('Jck Johannes preister verwarer vnde voirs tender des Juncfrouwen kloisters to Mariendale')
self.assertTrue(tagged)
def test_pos_unigram_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_unigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_bigram_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_bigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_trigram_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_trigram('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_ngram123_tagger_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_ngram_123_backoff('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_crf_tagger_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_crf('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
def test_pos_perceptron_tagger_old_english(self):
tagger = POSTag('old_english')
tagged = tagger.tag_perceptron('Hwæt! We Gardena in geardagum, þeodcyninga, þrym gefrunon, hu ða æþelingas ellen fremedon.')
self.assertTrue(tagged)
if __name__ == '__main__':
unittest.main()
| true | true |
f72b2f24626e265d01ae282b3f14a253aa950b3b | 307 | py | Python | src/dataleach/__init__.py | janies/dataleach | cf8c8784f3fe44cf8f89b7174ba36cb6c56d49d7 | [
"BSD-3-Clause"
] | 1 | 2021-11-08T13:57:52.000Z | 2021-11-08T13:57:52.000Z | src/dataleach/tests/dataleach/sources/__init__.py | janies/dataleach | cf8c8784f3fe44cf8f89b7174ba36cb6c56d49d7 | [
"BSD-3-Clause"
] | null | null | null | src/dataleach/tests/dataleach/sources/__init__.py | janies/dataleach | cf8c8784f3fe44cf8f89b7174ba36cb6c56d49d7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Copyright © 2010, RedJack, LLC.
# All rights reserved.
#
# Please see the LICENSE.txt file in this distribution for license
# details.
# ----------------------------------------------------------------------
| 34.111111 | 72 | 0.361564 | true | true | |
f72b2fafee0e530b65dccaf38409dffa74760181 | 3,545 | py | Python | hddcoin/timelord/timelord_launcher.py | JakubSido/hddcoin-blockchain | 7b9da03edee3512295c0f142c07c4759512ccbca | [
"Apache-2.0"
] | null | null | null | hddcoin/timelord/timelord_launcher.py | JakubSido/hddcoin-blockchain | 7b9da03edee3512295c0f142c07c4759512ccbca | [
"Apache-2.0"
] | null | null | null | hddcoin/timelord/timelord_launcher.py | JakubSido/hddcoin-blockchain | 7b9da03edee3512295c0f142c07c4759512ccbca | [
"Apache-2.0"
] | null | null | null | import asyncio
import logging
import pathlib
import signal
import socket
import time
from typing import Dict, List
import pkg_resources
from hddcoin.util.hddcoin_logging import initialize_logging
from hddcoin.util.config import load_config
from hddcoin.util.default_root import DEFAULT_ROOT_PATH
from hddcoin.util.setproctitle import setproctitle
active_processes: List = []
stopped = False
lock = asyncio.Lock()
log = logging.getLogger(__name__)
async def kill_processes():
global stopped
global active_processes
async with lock:
stopped = True
for process in active_processes:
try:
process.kill()
except ProcessLookupError:
pass
def find_vdf_client() -> pathlib.Path:
p = pathlib.Path(pkg_resources.get_distribution("chiavdf").location) / "vdf_client"
if p.is_file():
return p
raise FileNotFoundError("can't find vdf_client binary")
async def spawn_process(host: str, port: int, counter: int):
global stopped
global active_processes
path_to_vdf_client = find_vdf_client()
first_10_seconds = True
start_time = time.time()
while not stopped:
try:
dirname = path_to_vdf_client.parent
basename = path_to_vdf_client.name
resolved = socket.gethostbyname(host)
proc = await asyncio.create_subprocess_shell(
f"{basename} {resolved} {port} {counter}",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env={"PATH": dirname},
)
except Exception as e:
log.warning(f"Exception while spawning process {counter}: {(e)}")
continue
async with lock:
active_processes.append(proc)
stdout, stderr = await proc.communicate()
if stdout:
log.info(f"VDF client {counter}: {stdout.decode().rstrip()}")
if stderr:
if first_10_seconds:
if time.time() - start_time > 10:
first_10_seconds = False
else:
log.error(f"VDF client {counter}: {stderr.decode().rstrip()}")
log.info(f"Process number {counter} ended.")
async with lock:
if proc in active_processes:
active_processes.remove(proc)
await asyncio.sleep(0.1)
async def spawn_all_processes(config: Dict, net_config: Dict):
await asyncio.sleep(5)
hostname = net_config["self_hostname"] if "host" not in config else config["host"]
port = config["port"]
process_count = config["process_count"]
awaitables = [spawn_process(hostname, port, i) for i in range(process_count)]
await asyncio.gather(*awaitables)
def main():
root_path = DEFAULT_ROOT_PATH
setproctitle("hddcoin_timelord_launcher")
net_config = load_config(root_path, "config.yaml")
config = net_config["timelord_launcher"]
initialize_logging("TLauncher", config["logging"], root_path)
def signal_received():
asyncio.create_task(kill_processes())
loop = asyncio.get_event_loop()
try:
loop.add_signal_handler(signal.SIGINT, signal_received)
loop.add_signal_handler(signal.SIGTERM, signal_received)
except NotImplementedError:
log.info("signal handlers unsupported")
try:
loop.run_until_complete(spawn_all_processes(config, net_config))
finally:
log.info("Launcher fully closed.")
loop.close()
if __name__ == "__main__":
main()
| 30.560345 | 87 | 0.655289 | import asyncio
import logging
import pathlib
import signal
import socket
import time
from typing import Dict, List
import pkg_resources
from hddcoin.util.hddcoin_logging import initialize_logging
from hddcoin.util.config import load_config
from hddcoin.util.default_root import DEFAULT_ROOT_PATH
from hddcoin.util.setproctitle import setproctitle
active_processes: List = []
stopped = False
lock = asyncio.Lock()
log = logging.getLogger(__name__)
async def kill_processes():
global stopped
global active_processes
async with lock:
stopped = True
for process in active_processes:
try:
process.kill()
except ProcessLookupError:
pass
def find_vdf_client() -> pathlib.Path:
p = pathlib.Path(pkg_resources.get_distribution("chiavdf").location) / "vdf_client"
if p.is_file():
return p
raise FileNotFoundError("can't find vdf_client binary")
async def spawn_process(host: str, port: int, counter: int):
global stopped
global active_processes
path_to_vdf_client = find_vdf_client()
first_10_seconds = True
start_time = time.time()
while not stopped:
try:
dirname = path_to_vdf_client.parent
basename = path_to_vdf_client.name
resolved = socket.gethostbyname(host)
proc = await asyncio.create_subprocess_shell(
f"{basename} {resolved} {port} {counter}",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
env={"PATH": dirname},
)
except Exception as e:
log.warning(f"Exception while spawning process {counter}: {(e)}")
continue
async with lock:
active_processes.append(proc)
stdout, stderr = await proc.communicate()
if stdout:
log.info(f"VDF client {counter}: {stdout.decode().rstrip()}")
if stderr:
if first_10_seconds:
if time.time() - start_time > 10:
first_10_seconds = False
else:
log.error(f"VDF client {counter}: {stderr.decode().rstrip()}")
log.info(f"Process number {counter} ended.")
async with lock:
if proc in active_processes:
active_processes.remove(proc)
await asyncio.sleep(0.1)
async def spawn_all_processes(config: Dict, net_config: Dict):
await asyncio.sleep(5)
hostname = net_config["self_hostname"] if "host" not in config else config["host"]
port = config["port"]
process_count = config["process_count"]
awaitables = [spawn_process(hostname, port, i) for i in range(process_count)]
await asyncio.gather(*awaitables)
def main():
root_path = DEFAULT_ROOT_PATH
setproctitle("hddcoin_timelord_launcher")
net_config = load_config(root_path, "config.yaml")
config = net_config["timelord_launcher"]
initialize_logging("TLauncher", config["logging"], root_path)
def signal_received():
asyncio.create_task(kill_processes())
loop = asyncio.get_event_loop()
try:
loop.add_signal_handler(signal.SIGINT, signal_received)
loop.add_signal_handler(signal.SIGTERM, signal_received)
except NotImplementedError:
log.info("signal handlers unsupported")
try:
loop.run_until_complete(spawn_all_processes(config, net_config))
finally:
log.info("Launcher fully closed.")
loop.close()
if __name__ == "__main__":
main()
| true | true |
f72b3050bfccbe4c42d8488a0a707b9ddf77dbd2 | 485 | py | Python | scripts/venv/Scripts/easy_install-3.7-script.py | michaelfaerber/Agnos | b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf | [
"MIT"
] | null | null | null | scripts/venv/Scripts/easy_install-3.7-script.py | michaelfaerber/Agnos | b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf | [
"MIT"
] | 3 | 2021-12-10T01:22:05.000Z | 2021-12-14T21:33:16.000Z | scripts/venv/Scripts/easy_install-3.7-script.py | michaelfaerber/Agnos | b4b6ff9cdca9090fb426f1fc2cead8e5ef4ad9bf | [
"MIT"
] | null | null | null | #!K:\2018_SS\BMW_Thesis\workspace_bmw\Thesis_KG_Agnostic_EL\scripts\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install-3.7'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| 37.307692 | 91 | 0.709278 |
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install-3.7')()
)
| true | true |
f72b30581d8ef30df8d3b88fde755c65a6390087 | 15,737 | py | Python | dssm/data_input.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | dssm/data_input.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | dssm/data_input.py | nlpming/tensorflow-DSMM | dc982cc49bf03f474da2895e4dd4fb37061c0271 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding=utf-8
from inspect import getblock
import json
import os
from os import read
from numpy.core.fromnumeric import mean
import numpy as np
import paddlehub as hub
import six
import math
import random
import sys
from util import read_file
from config import Config
# 配置文件
conf = Config()
class Vocabulary(object):
    """Token vocabulary loaded from a one-token-per-line meta file.

    Provides token<->id maps plus helpers that encode sequences either as
    plain id lists or as BERT-style (ids, mask, segment, seq_len) tuples
    with [CLS]/[SEP] markers.
    """

    def __init__(self, meta_file, max_len, allow_unk=0, unk="$UNK$", pad="$PAD$",):
        # token -> id and id -> token maps, one token per line of meta_file
        self.voc2id = {}
        self.id2voc = {}
        self.unk = unk
        self.pad = pad
        self.max_len = max_len      # max sequence length used for truncation/padding
        self.allow_unk = allow_unk  # if truthy, map OOV tokens to `unk` instead of raising
        # Fix: `fit` read self.max_num_word but it was never initialised,
        # which raised AttributeError; -1 means "keep every distinct word".
        self.max_num_word = -1
        with open(meta_file, encoding='utf-8') as f:
            for i, line in enumerate(f):
                line = convert_to_unicode(line.strip("\n"))
                self.voc2id[line] = i
                self.id2voc[i] = line
        self.size = len(self.voc2id)
        self.oov_num = self.size + 1

    def fit(self, words_list):
        """Rebuild the vocabulary from a tokenised corpus.

        :param words_list: [[w11, w12, ...], [w21, w22, ...], ...]
        :return: self
        """
        from collections import Counter  # Fix: Counter was used but never imported
        word_lst = []
        word_lst_append = word_lst.append
        for words in words_list:
            if not isinstance(words, list):
                print(words)
                continue
            for word in words:
                word = convert_to_unicode(word)
                word_lst_append(word)
        word_counts = Counter(word_lst)
        if self.max_num_word < 0:
            self.max_num_word = len(word_counts)
        sorted_voc = [w for w, c in word_counts.most_common(self.max_num_word)]
        self.max_num_word = len(sorted_voc)
        self.oov_index = self.max_num_word + 1
        # ids start at 1 so that 0 stays reserved for padding
        self.voc2id = dict(zip(sorted_voc, range(1, self.max_num_word + 1)))
        return self

    def _transform2id(self, word):
        """Return the id of `word`; fall back to the UNK id when allow_unk is set."""
        word = convert_to_unicode(word)
        if word in self.voc2id:
            return self.voc2id[word]
        elif self.allow_unk:
            return self.voc2id[self.unk]
        else:
            print(word)
            raise ValueError("word:{} Not in voc2id, please check".format(word))

    def _transform_seq2id(self, words, padding=0):
        """Encode a sequence as ids, truncated to max_len and 0-padded if requested."""
        out_ids = []
        words = convert_to_unicode(words)
        if self.max_len:
            words = words[:self.max_len]
        for w in words:
            out_ids.append(self._transform2id(w))
        if padding and self.max_len:
            while len(out_ids) < self.max_len:
                out_ids.append(0)
        return out_ids

    def _transform_intent2ont_hot(self, words, padding=0):
        """Encode a multi-label intent set as a multi-hot float vector of size `self.size`."""
        out_ids = np.zeros(self.size, dtype=np.float32)
        words = convert_to_unicode(words)
        for w in words:
            out_ids[self._transform2id(w)] = 1.0
        return out_ids

    def _transform_seq2bert_id(self, words, padding=0):
        """Encode one sequence for BERT: prepend [CLS].

        Returns (out_ids, mask_ids, seg_ids, seq_len). With padding the output
        length is max_len + 1 — the extra slot accounts for the [CLS] token.
        """
        out_ids, seq_len = [], 0
        words = convert_to_unicode(words)
        if self.max_len:
            words = words[:self.max_len]
        seq_len = len(words)
        # insert [CLS] in front of the token ids
        out_ids.append(self._transform2id("[CLS]"))
        for w in words:
            out_ids.append(self._transform2id(w))
        mask_ids = [1 for _ in out_ids]
        if padding and self.max_len:
            while len(out_ids) < self.max_len + 1:
                out_ids.append(0)
                mask_ids.append(0)
        seg_ids = [0 for _ in out_ids]
        return out_ids, mask_ids, seg_ids, seq_len

    @staticmethod
    def _truncate_seq_pair(tokens_a, tokens_b, max_length):
        """Truncates a sequence pair in place to the maximum length,
        always trimming the currently longer sequence first."""
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()

    def _transform_2seq2bert_id(self, seq1, seq2, padding=0):
        """Encode a sentence pair as [CLS] seq1 [SEP] seq2.

        Returns (out_ids, mask_ids, seg_ids, seq_len).
        NOTE(review): seq_len is returned as 0 (never updated after
        initialisation) — confirm whether callers rely on it.
        """
        out_ids, seg_ids, seq_len = [], [1], 0
        seq1 = [x for x in convert_to_unicode(seq1)]
        seq2 = [x for x in convert_to_unicode(seq2)]
        # reserve room for the [CLS] and [SEP] markers
        self._truncate_seq_pair(seq1, seq2, self.max_len - 2)
        # insert [CLS] / [SEP]
        out_ids.append(self._transform2id("[CLS]"))
        for w in seq1:
            out_ids.append(self._transform2id(w))
            seg_ids.append(0)
        out_ids.append(self._transform2id("[SEP]"))
        seg_ids.append(0)
        for w in seq2:
            out_ids.append(self._transform2id(w))
            seg_ids.append(1)
        mask_ids = [1 for _ in out_ids]
        if padding and self.max_len:
            while len(out_ids) < self.max_len + 1:
                out_ids.append(0)
                mask_ids.append(0)
                seg_ids.append(0)
        return out_ids, mask_ids, seg_ids, seq_len

    def transform(self, seq_list, is_bert=0):
        """Encode a batch of sequences (BERT-style tuples when is_bert is truthy)."""
        if is_bert:
            return [self._transform_seq2bert_id(seq) for seq in seq_list]
        else:
            return [self._transform_seq2id(seq) for seq in seq_list]

    def __len__(self):
        return len(self.voc2id)
def convert_to_unicode(text):
    """Converts `text` to a unicode `str`, assuming utf-8 for bytes input.

    Simplification: the six-based Python 2 branch was dead code — this file
    already uses Python-3-only syntax (function annotations, keyword-only
    `encoding=` in open) — so the Python 3 behaviour is kept and the six
    dependency dropped. Invalid byte sequences are silently ignored, matching
    the original `decode("utf-8", "ignore")` behaviour.
    """
    if isinstance(text, str):
        return text
    elif isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    else:
        raise ValueError("Unsupported string type: %s" % (type(text)))
def gen_word_set(file_path, out_path='./data/words.txt'):
    """Collect the character set used by positive samples and write it to `out_path`.

    Each input line is: prefix \\t query_pred(json) \\t title \\t tag \\t label.
    Lines with label '0' (or fewer than 4 fields) are skipped. Characters are
    taken from the prefix and from the keys of the query_pred dict.

    NOTE(review): title characters are not collected — confirm this is intended.
    """
    word_set = set()
    with open(file_path, encoding='utf-8') as f:
        for line in f.readlines():
            spline = line.strip().split('\t')
            if len(spline) < 4:
                continue
            prefix, query_pred, title, tag, label = spline
            if label == '0':
                continue
            query_pred = json.loads(query_pred)
            for w in prefix:
                word_set.add(w)
            for each in query_pred:
                for w in each:
                    word_set.add(w)
    # Fix: the output file was opened as `open(word_set, ...)` — passing the
    # set itself as the path, which raised TypeError. Write to `out_path`.
    with open(out_path, 'w', encoding='utf-8') as o:
        for w in word_set:
            o.write(w + '\n')
def convert_word2id(query, vocab_map):
    """Encode the chars of `query` as ids (unknown chars map to conf.unk),
    padded with conf.pad and truncated to conf.max_seq_len."""
    ids = [vocab_map[w] if w in vocab_map else vocab_map[conf.unk] for w in query]
    while len(ids) < conf.max_seq_len:
        ids.append(vocab_map[conf.pad])
    return ids[:conf.max_seq_len]
def convert_seq2bow(query, vocab_map):
    """Build a bag-of-words count vector of size conf.nwords for `query`;
    unknown tokens are counted under conf.unk."""
    bow = np.zeros(conf.nwords)
    for w in query:
        idx = vocab_map[w] if w in vocab_map else vocab_map[conf.unk]
        bow[idx] += 1
    return bow
def get_data(file_path):
    """
    Build DSSM training data from a tab-separated file, converting words to ids.

    Each kept sample contributes one query, one positive title and exactly
    4 negative candidates (taken from the query_pred json keys).
    :param file_path: input file, lines of prefix\\tquery_pred\\ttitle\\ttag\\tlabel
    :return: dict with keys query/query_len/doc_pos/doc_pos_len/doc_neg/doc_neg_len
    """
    data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [],
                'doc_neg': [], 'doc_neg_len': []}

    def seq_len(text):
        # raw length, capped at the padded maximum
        return len(text) if len(text) < conf.max_seq_len else conf.max_seq_len

    with open(file_path, encoding='utf8') as fin:
        for raw_line in fin.readlines():
            fields = raw_line.strip().split('\t')
            if len(fields) < 4:
                continue
            prefix, query_pred, title, tag, label = fields
            if label == '0':
                continue
            # candidates other than the positive title act as negatives
            neg_ids, neg_lens = [], []
            for cand in json.loads(query_pred):
                if cand == title:
                    continue
                neg_ids.append(convert_word2id(cand, conf.vocab_map))
                neg_lens.append(seq_len(cand))
            if len(neg_ids) < 4:
                continue
            data_map['query'].append(convert_word2id(prefix, conf.vocab_map))
            data_map['query_len'].append(seq_len(prefix))
            data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))
            data_map['doc_pos_len'].append(seq_len(title))
            data_map['doc_neg'].extend(neg_ids[:4])
            data_map['doc_neg_len'].extend(neg_lens[:4])
    return data_map
def get_data_siamese_rnn(file_path):
    """
    Load (query_ids, title_ids, label) triples for siamese-RNN training.
    :param file_path: tab-separated input file
    :return: list of [prefix_ids, title_ids, label]
    """
    samples = []
    with open(file_path, encoding='utf8') as fin:
        for raw_line in fin.readlines():
            fields = raw_line.strip().split('\t')
            if len(fields) < 4:
                continue
            prefix, _, title, tag, label = fields
            samples.append([convert_word2id(prefix, conf.vocab_map),
                            convert_word2id(title, conf.vocab_map),
                            int(label)])
    return samples
def get_data_bow(file_path):
    """
    Load bag-of-words features for query/title pairs.
    :param file_path: tab-separated input file
    :return: list of [prefix_bow, title_bow, label]
    """
    samples = []
    with open(file_path, encoding='utf8') as fin:
        for raw_line in fin.readlines():
            fields = raw_line.strip().split('\t')
            if len(fields) < 4:
                continue
            prefix, _, title, tag, label = fields
            samples.append([convert_seq2bow(prefix, conf.vocab_map),
                            convert_seq2bow(title, conf.vocab_map),
                            int(label)])
    return samples
def trans_lcqmc(dataset):
    """Convert LCQMC examples into [t1_ids, t1_len, t2_ids, t2_len, label] rows
    and print simple length statistics for the corpus."""
    rows, lengths = [], []
    for example in dataset:
        text_a, text_b = example.text_a, example.text_b
        label = int(example.label)
        ids_a = convert_word2id(text_a, conf.vocab_map)
        len_a = min(len(text_a), conf.max_seq_len)
        ids_b = convert_word2id(text_b, conf.vocab_map)
        len_b = min(len(text_b), conf.max_seq_len)
        rows.append([ids_a, len_a, ids_b, len_b, label])
        lengths.extend([len(text_a), len(text_b)])
    print("max len", max(lengths), "avg len", mean(lengths), "cover rate:", np.mean([x <= conf.max_seq_len for x in lengths]))
    return rows
def get_lcqmc():
    """Load the LCQMC dataset via paddlehub and convert each split
    (train/dev/test) into word-id rows."""
    lcqmc = hub.dataset.LCQMC()
    return (trans_lcqmc(lcqmc.train_examples),
            trans_lcqmc(lcqmc.dev_examples),
            trans_lcqmc(lcqmc.test_examples))
def trans_lcqmc_bert(dataset:list, vocab:Vocabulary, is_merge=0):
    """Convert LCQMC examples into BERT-style id features.

    When is_merge is truthy the two texts are packed into a single
    [CLS] a [SEP] b input; otherwise each side is encoded separately.
    Also prints simple length statistics for the corpus.
    """
    rows, lengths = [], []
    for example in dataset:
        t1, t2, label = example.text_a, example.text_b, int(example.label)
        if is_merge:
            ids, mask, seg, seq_len = vocab._transform_2seq2bert_id(t1, t2, padding=1)
            rows.append([ids, mask, seg, seq_len, label])
            lengths.append(len(t1) + len(t2))
        else:
            ids1, mask1, seg1, len1 = vocab._transform_seq2bert_id(t1, padding=1)
            ids2, mask2, seg2, len2 = vocab._transform_seq2bert_id(t2, padding=1)
            rows.append([ids1, mask1, seg1, len1, ids2, mask2, seg2, len2, label])
            lengths.extend([len(t1), len(t2)])
    print("max len", max(lengths), "avg len", mean(lengths), "cover rate:", np.mean([x <= conf.max_seq_len for x in lengths]))
    return rows
def get_lcqmc_bert(vocab:Vocabulary, is_merge=0):
    """Load LCQMC via paddlehub and encode every split (train/dev/test)
    as BERT-style id features."""
    lcqmc = hub.dataset.LCQMC()
    return (trans_lcqmc_bert(lcqmc.train_examples, vocab, is_merge),
            trans_lcqmc_bert(lcqmc.dev_examples, vocab, is_merge),
            trans_lcqmc_bert(lcqmc.test_examples, vocab, is_merge))
def get_test(file_: str, vocab: Vocabulary):
    """Read a tab-separated query-pair file and encode each pair.

    Returns (encoded_rows, raw_rows); each encoded row is
    [t1_ids, t1_len, t2_ids, t2_len] with lengths clipped to vocab.max_len.
    Rows that do not have exactly two columns are reported but still encoded.
    """
    test_arr = read_file(file_, '\t')
    out_arr = []
    for pair in test_arr:
        if len(pair) != 2:
            print('wrong line size=', len(pair))
        t1, t2 = pair
        row = []
        for text in (t1, t2):
            row.append(vocab._transform_seq2id(text, padding=1))
            row.append(min(len(text), vocab.max_len))
        out_arr.append(row)
    return out_arr, test_arr
def get_test_bert(file_: str, vocab: Vocabulary, is_merge=0):
    """Read tab-separated query pairs from *file_* and BERT-encode them."""
    pairs = read_file(file_, '\t')
    encoded, _ = get_test_bert_by_arr(pairs, vocab, is_merge)
    return encoded, pairs
def get_test_bert_by_arr(test_arr: list, vocab: Vocabulary, is_merge=0):
    """BERT-encode already-loaded [q1, q2] pairs.

    Returns (encoded_rows, test_arr).  With is_merge the pair is packed into
    a single sequence; otherwise both sentences are encoded independently and
    their features concatenated into one row.
    """
    out_arr = []
    for pair in test_arr:
        if len(pair) != 2:
            print('wrong line size=', len(pair))
        t1, t2 = pair
        if is_merge:
            out_arr.append(list(vocab._transform_2seq2bert_id(t1, t2, padding=1)))
        else:
            row = list(vocab._transform_seq2bert_id(t1, padding=1))
            row.extend(vocab._transform_seq2bert_id(t2, padding=1))
            out_arr.append(row)
    return out_arr, test_arr
def get_test_bert_single(file_: str, vocab: Vocabulary, is_merge=0):
    """Read one query per line from *file_* and BERT-encode each line.

    is_merge is accepted for signature symmetry with get_test_bert but is
    unused here (single sequences only).
    """
    test_arr = read_file(file_)
    out_arr = [list(vocab._transform_seq2bert_id(query, padding=1)) for query in test_arr]
    return out_arr, test_arr
def get_batch(dataset, batch_size=None, is_test=0):
    """Yield mini-batches of feature columns from a list of feature rows.

    The dataset is shuffled in place unless is_test is truthy.  Each yielded
    value is ``zip(*rows)`` over one batch, i.e. an iterator of per-feature
    column tuples.  batch_size defaults to 32 when falsy.
    """
    size = batch_size or 32
    if not is_test:
        random.shuffle(dataset)
    for start in range(0, len(dataset), size):
        chunk = dataset[start:start + size]
        yield zip(*chunk)
if __name__ == '__main__':
    # OPPO data columns: prefix, query_prediction, title, tag, label
    # (query_prediction is a JSON-encoded string).
    file_train = './data/oppo_round1_train_20180929.txt'
    file_vali = './data/oppo_round1_vali_20180929.txt'
    # data_train = get_data(file_train)
    # data_train = get_data(file_vali)
    # print(len(data_train['query']), len(data_train['doc_pos']), len(data_train['doc_neg']))
    # Smoke test: load LCQMC and iterate a couple of dev-set batches.
    dataset = get_lcqmc()
    print(dataset[1][:3])
    for each in get_batch(dataset[1][:3], batch_size=2):
        t1_ids, t1_len, t2_ids, t2_len, label = each
        print(each)
    pass
| 37.20331 | 129 | 0.599797 |
from inspect import getblock
import json
import os
from os import read
from numpy.core.fromnumeric import mean
import numpy as np
import paddlehub as hub
import six
import math
import random
import sys
from util import read_file
from config import Config
conf = Config()
class Vocabulary(object):
    """Character-level vocabulary with plain-id and BERT-style encoders.

    Loads one token per line from *meta_file*; the line number becomes the
    token id.  Encoders truncate to *max_len* and can zero-pad.
    """
    def __init__(self, meta_file, max_len, allow_unk=0, unk="$UNK$", pad="$PAD$",):
        # token <-> id maps built from the meta file (line index = id).
        self.voc2id = {}
        self.id2voc = {}
        self.unk = unk
        self.pad = pad
        self.max_len = max_len
        self.allow_unk = allow_unk
        with open(meta_file, encoding='utf-8') as f:
            for i, line in enumerate(f):
                line = convert_to_unicode(line.strip("\n"))
                self.voc2id[line] = i
                self.id2voc[i] = line
        self.size = len(self.voc2id)
        # id reserved for out-of-vocabulary tokens (one past the last id).
        self.oov_num = self.size + 1
    def fit(self, words_list):
        """Rebuild the vocabulary from tokenised sentences, most frequent first.

        NOTE(review): relies on ``Counter`` and ``self.max_num_word`` which
        are not imported/initialised in this module — confirm this method is
        actually exercised anywhere before relying on it.
        """
        word_lst = []
        word_lst_append = word_lst.append
        for words in words_list:
            if not isinstance(words, list):
                print(words)
                continue
            for word in words:
                word = convert_to_unicode(word)
                word_lst_append(word)
        word_counts = Counter(word_lst)
        if self.max_num_word < 0:
            self.max_num_word = len(word_counts)
        sorted_voc = [w for w, c in word_counts.most_common(self.max_num_word)]
        self.max_num_word = len(sorted_voc)
        self.oov_index = self.max_num_word + 1
        # ids start at 1; 0 is implicitly left for padding.
        self.voc2id = dict(zip(sorted_voc, range(1, self.max_num_word + 1)))
        return self
    def _transform2id(self, word):
        """Map one token to its id; fall back to the UNK id when allowed,
        otherwise raise ValueError."""
        word = convert_to_unicode(word)
        if word in self.voc2id:
            return self.voc2id[word]
        elif self.allow_unk:
            return self.voc2id[self.unk]
        else:
            print(word)
            raise ValueError("word:{} Not in voc2id, please check".format(word))
    def _transform_seq2id(self, words, padding=0):
        """Encode a character sequence to ids, truncated to max_len and
        optionally zero-padded up to max_len."""
        out_ids = []
        words = convert_to_unicode(words)
        if self.max_len:
            words = words[:self.max_len]
        for w in words:
            out_ids.append(self._transform2id(w))
        if padding and self.max_len:
            while len(out_ids) < self.max_len:
                out_ids.append(0)
        return out_ids
    def _transform_intent2ont_hot(self, words, padding=0):
        """Multi-hot encode *words* over the full vocabulary (length self.size)."""
        out_ids = np.zeros(self.size, dtype=np.float32)
        words = convert_to_unicode(words)
        for w in words:
            out_ids[self._transform2id(w)] = 1.0
        return out_ids
    def _transform_seq2bert_id(self, words, padding=0):
        """Encode one sequence as BERT inputs: [CLS] + tokens.

        Returns (token_ids, attention_mask, segment_ids, seq_len); seq_len is
        the unpadded token count excluding [CLS].
        """
        out_ids, seq_len = [], 0
        words = convert_to_unicode(words)
        if self.max_len:
            words = words[:self.max_len]
        seq_len = len(words)
        out_ids.append(self._transform2id("[CLS]"))
        for w in words:
            out_ids.append(self._transform2id(w))
        mask_ids = [1 for _ in out_ids]
        if padding and self.max_len:
            # max_len + 1 accounts for the prepended [CLS] token.
            while len(out_ids) < self.max_len + 1:
                out_ids.append(0)
                mask_ids.append(0)
        seg_ids = [0 for _ in out_ids]
        return out_ids, mask_ids, seg_ids, seq_len
    @staticmethod
    def _truncate_seq_pair(tokens_a, tokens_b, max_length):
        """Trim the longer of the two token lists (in place) until their
        combined length fits within max_length."""
        while True:
            total_length = len(tokens_a) + len(tokens_b)
            if total_length <= max_length:
                break
            if len(tokens_a) > len(tokens_b):
                tokens_a.pop()
            else:
                tokens_b.pop()
    def _transform_2seq2bert_id(self, seq1, seq2, padding=0):
        """Encode a sentence pair as one BERT input: [CLS] a [SEP] b.

        NOTE(review): ``seq_len`` is initialised to 0 and never updated, so
        the returned length is always 0 (unlike _transform_seq2bert_id) —
        confirm callers do not rely on it.
        NOTE(review): ``seg_ids`` starts as [1], so the first position ([CLS])
        carries segment id 1 rather than the conventional 0 — verify intent.
        """
        out_ids, seg_ids, seq_len = [], [1], 0
        seq1 = [x for x in convert_to_unicode(seq1)]
        seq2 = [x for x in convert_to_unicode(seq2)]
        # Reserve 2 slots for [CLS] and [SEP].
        self._truncate_seq_pair(seq1, seq2, self.max_len - 2)
        out_ids.append(self._transform2id("[CLS]"))
        for w in seq1:
            out_ids.append(self._transform2id(w))
            seg_ids.append(0)
        out_ids.append(self._transform2id("[SEP]"))
        seg_ids.append(0)
        for w in seq2:
            out_ids.append(self._transform2id(w))
            seg_ids.append(1)
        mask_ids = [1 for _ in out_ids]
        if padding and self.max_len:
            while len(out_ids) < self.max_len + 1:
                out_ids.append(0)
                mask_ids.append(0)
                seg_ids.append(0)
        return out_ids, mask_ids, seg_ids, seq_len
    def transform(self, seq_list, is_bert=0):
        """Encode a list of sequences with the BERT or the plain encoder."""
        if is_bert:
            return [self._transform_seq2bert_id(seq) for seq in seq_list]
        else:
            return [self._transform_seq2id(seq) for seq in seq_list]
    def __len__(self):
        # Vocabulary size = number of known tokens.
        return len(self.voc2id)
def convert_to_unicode(text):
    """Return *text* as ``str``, decoding UTF-8 bytes if necessary.

    ``str`` input is returned unchanged, ``bytes`` are decoded as UTF-8 with
    undecodable bytes ignored, and anything else raises ValueError.

    The previous implementation carried a six-based Python 2 branch; this
    file already uses Python-3-only syntax (function annotations such as
    ``file_:str``), so the Python 2 branch was unreachable and has been
    removed.  Behaviour under Python 3 is identical.
    """
    if isinstance(text, str):
        return text
    if isinstance(text, bytes):
        return text.decode("utf-8", "ignore")
    raise ValueError("Unsupported string type: %s" % (type(text)))
def gen_word_set(file_path, out_path='./data/words.txt'):
    """Collect the character set of an OPPO-style TSV file into *out_path*.

    Reads lines of ``prefix \\t query_pred \\t title \\t tag \\t label``,
    skips short lines and label-'0' rows, and writes every character seen in
    `prefix` and in the keys of the JSON `query_pred` dict, one per line.

    Bug fix: the output handle used to be opened on the *set object* itself
    (``open(word_set, 'w', ...)``), which raises TypeError; it now opens
    *out_path*.  The dead ``cur_arr = [prefix, title]`` accumulator was
    removed.
    NOTE(review): characters from `title` are never collected even though the
    column is parsed — confirm whether that is intentional.
    """
    word_set = set()
    with open(file_path, encoding='utf-8') as f:
        for line in f.readlines():
            spline = line.strip().split('\t')
            # NOTE(review): the unpack below needs 5 columns but the guard
            # only rejects < 4 — a 4-column line would raise ValueError.
            if len(spline) < 4:
                continue
            prefix, query_pred, title, tag, label = spline
            if label == '0':
                continue
            query_pred = json.loads(query_pred)
            for w in prefix:
                word_set.add(w)
            for each in query_pred:
                for w in each:
                    word_set.add(w)
    with open(out_path, 'w', encoding='utf-8') as o:
        for w in word_set:
            o.write(w + '\n')
    pass
def convert_word2id(query, vocab_map):
    """Map each character of *query* to its id in *vocab_map*.

    Unknown characters map to the conf.unk id; the result is padded with the
    conf.pad id and truncated, so the output length is always
    conf.max_seq_len.
    """
    ids = [vocab_map[ch] if ch in vocab_map else vocab_map[conf.unk]
           for ch in query]
    if len(ids) < conf.max_seq_len:
        ids.extend([vocab_map[conf.pad]] * (conf.max_seq_len - len(ids)))
    return ids[:conf.max_seq_len]
def convert_seq2bow(query, vocab_map):
    """Build a bag-of-words count vector (length conf.nwords) for *query*.

    Unknown characters are counted into the conf.unk slot.
    """
    bow_ids = np.zeros(conf.nwords)
    for ch in query:
        slot = vocab_map[ch] if ch in vocab_map else vocab_map[conf.unk]
        bow_ids[slot] += 1
    return bow_ids
def get_data(file_path):
    """Build DSSM-style training data from an OPPO TSV file.

    For every positive line (label != '0') with at least 4 negative
    candidates, records the encoded query (prefix), the positive doc (title)
    and the first 4 negative docs drawn from the query_pred suggestions.
    Returns a dict of parallel lists.
    """
    data_map = {'query': [], 'query_len': [], 'doc_pos': [], 'doc_pos_len': [], 'doc_neg': [], 'doc_neg_len': []}
    with open(file_path, encoding='utf8') as f:
        for line in f.readlines():
            spline = line.strip().split('\t')
            # NOTE(review): the unpack below needs 5 columns but the guard
            # only rejects < 4 — a 4-column line would raise ValueError.
            if len(spline) < 4:
                continue
            prefix, query_pred, title, tag, label = spline
            if label == '0':
                continue
            cur_arr, cur_len = [], []
            query_pred = json.loads(query_pred)
            # Every suggested query other than the title is a negative doc.
            for each in query_pred:
                if each == title:
                    continue
                cur_arr.append(convert_word2id(each, conf.vocab_map))
                each_len = len(each) if len(each) < conf.max_seq_len else conf.max_seq_len
                cur_len.append(each_len)
            # Keep only examples with at least 4 negatives (4 are used).
            if len(cur_arr) >= 4:
                data_map['query'].append(convert_word2id(prefix, conf.vocab_map))
                data_map['query_len'].append(len(prefix) if len(prefix) < conf.max_seq_len else conf.max_seq_len)
                data_map['doc_pos'].append(convert_word2id(title, conf.vocab_map))
                data_map['doc_pos_len'].append(len(title) if len(title) < conf.max_seq_len else conf.max_seq_len)
                data_map['doc_neg'].extend(cur_arr[:4])
                data_map['doc_neg_len'].extend(cur_len[:4])
            pass
    return data_map
def get_data_siamese_rnn(file_path):
    """Read the OPPO TSV and return [prefix_ids, title_ids, label] rows for
    a siamese RNN.  Lines with fewer than 4 columns are skipped."""
    data_arr = []
    with open(file_path, encoding='utf8') as f:
        for raw in f.readlines():
            cols = raw.strip().split('\t')
            if len(cols) < 4:
                continue
            prefix, _, title, tag, label = cols
            data_arr.append([
                convert_word2id(prefix, conf.vocab_map),
                convert_word2id(title, conf.vocab_map),
                int(label),
            ])
    return data_arr
def get_data_bow(file_path):
    """Read the OPPO TSV and return [prefix_bow, title_bow, label] rows,
    where each bow is a conf.nwords-sized count vector."""
    data_arr = []
    with open(file_path, encoding='utf8') as f:
        for raw in f.readlines():
            cols = raw.strip().split('\t')
            if len(cols) < 4:
                continue
            prefix, _, title, tag, label = cols
            data_arr.append([
                convert_seq2bow(prefix, conf.vocab_map),
                convert_seq2bow(title, conf.vocab_map),
                int(label),
            ])
    return data_arr
def trans_lcqmc(dataset):
    """Convert LCQMC examples to [t1_ids, t1_len, t2_ids, t2_len, label]
    rows and print simple length statistics."""
    out_arr, text_len = [], []
    for example in dataset:
        t1, t2, label = example.text_a, example.text_b, int(example.label)
        row = []
        for text in (t1, t2):
            row.append(convert_word2id(text, conf.vocab_map))
            row.append(min(len(text), conf.max_seq_len))
        row.append(label)
        out_arr.append(row)
        text_len.extend([len(t1), len(t2)])
    print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
    return out_arr
def get_lcqmc():
    """Load the three LCQMC splits via PaddleHub and word-id encode them."""
    corpus = hub.dataset.LCQMC()
    return (trans_lcqmc(corpus.train_examples),
            trans_lcqmc(corpus.dev_examples),
            trans_lcqmc(corpus.test_examples))
def trans_lcqmc_bert(dataset:list, vocab:Vocabulary, is_merge=0):
    """Convert LCQMC examples into BERT input feature rows.

    With is_merge truthy the two sentences are packed into a single
    [CLS] a [SEP] b sequence; otherwise each sentence is encoded separately
    and the two feature tuples concatenated.  Prints length statistics.
    """
    out_arr, text_len = [], []
    for each in dataset:
        t1, t2, label = each.text_a, each.text_b, int(each.label)
        if is_merge:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
            out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, label])
            text_len.extend([len(t1) + len(t2)])
        else:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
            out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
            out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2, label])
            text_len.extend([len(t1), len(t2)])
        pass
    print("max len", max(text_len), "avg len", mean(text_len), "cover rate:", np.mean([x <= conf.max_seq_len for x in text_len]))
    return out_arr
def get_lcqmc_bert(vocab: Vocabulary, is_merge=0):
    """Load LCQMC via PaddleHub and BERT-encode every split."""
    corpus = hub.dataset.LCQMC()
    splits = [corpus.train_examples, corpus.dev_examples, corpus.test_examples]
    train_set, dev_set, test_set = [trans_lcqmc_bert(s, vocab, is_merge) for s in splits]
    return train_set, dev_set, test_set
def get_test(file_: str, vocab: Vocabulary):
    """Encode tab-separated query pairs from *file_* with plain vocab ids.

    Returns (encoded_rows, raw_rows); each encoded row is
    [t1_ids, t1_len, t2_ids, t2_len] with lengths clipped to vocab.max_len.
    """
    raw_rows = read_file(file_, '\t')
    encoded = []
    for row in raw_rows:
        if len(row) != 2:
            print('wrong line size=', len(row))
        first, second = row
        encoded.append([
            vocab._transform_seq2id(first, padding=1),
            min(len(first), vocab.max_len),
            vocab._transform_seq2id(second, padding=1),
            min(len(second), vocab.max_len),
        ])
    return encoded, raw_rows
def get_test_bert(file_: str, vocab: Vocabulary, is_merge=0):
    """Load tab-separated query pairs from *file_* and BERT-encode them."""
    raw_pairs = read_file(file_, '\t')
    encoded, _ = get_test_bert_by_arr(raw_pairs, vocab, is_merge)
    return encoded, raw_pairs
def get_test_bert_by_arr(test_arr: list, vocab: Vocabulary, is_merge=0):
    """BERT-encode already-loaded [q1, q2] pairs.

    Fix: the accumulator initialisation had been mangled into a stray ``]``
    (a syntax error); ``out_arr = []`` is restored.  Logic is otherwise
    unchanged: with is_merge the pair is packed into one sequence, otherwise
    both sentences are encoded separately and concatenated into a single row.
    Returns (encoded_rows, test_arr).
    """
    out_arr = []
    for line in test_arr:
        if len(line) != 2:
            print('wrong line size=', len(line))
        t1, t2 = line
        if is_merge:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_2seq2bert_id(t1, t2, padding=1)
            out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1])
        else:
            out_ids1, mask_ids1, seg_ids1, seq_len1 = vocab._transform_seq2bert_id(t1, padding=1)
            out_ids2, mask_ids2, seg_ids2, seq_len2 = vocab._transform_seq2bert_id(t2, padding=1)
            out_arr.append([out_ids1, mask_ids1, seg_ids1, seq_len1, out_ids2, mask_ids2, seg_ids2, seq_len2])
    return out_arr, test_arr
def get_test_bert_single(file_: str, vocab: Vocabulary, is_merge=0):
    """Encode each single query from *file_* as BERT features.

    is_merge is accepted for signature symmetry but unused here.
    """
    queries = read_file(file_)
    encoded = []
    for q in queries:
        encoded.append(list(vocab._transform_seq2bert_id(q, padding=1)))
    return encoded, queries
def get_batch(dataset, batch_size=None, is_test=0):
    """Yield zipped feature-column batches from a list of feature rows.

    Shuffles the dataset in place unless is_test; batch_size defaults to 32.
    """
    if not batch_size:
        batch_size = 32
    if not is_test:
        random.shuffle(dataset)
    total = len(dataset)
    start = 0
    while start < total:
        batch = dataset[start:start + batch_size]
        start += batch_size
        yield zip(*batch)
if __name__ == '__main__':
    # OPPO round-1 data files (prefix, query_prediction, title, tag, label).
    file_train = './data/oppo_round1_train_20180929.txt'
    file_vali = './data/oppo_round1_vali_20180929.txt'
    # Smoke test: load LCQMC and iterate a couple of dev-set batches.
    dataset = get_lcqmc()
    print(dataset[1][:3])
    for each in get_batch(dataset[1][:3], batch_size=2):
        t1_ids, t1_len, t2_ids, t2_len, label = each
        print(each)
    pass
| true | true |
f72b319c6f56785827dd2160e2b9d041dde23ada | 5,281 | py | Python | experiments/ashvin/icml2020/hand/brac/test_video1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/icml2020/hand/brac/test_video1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | experiments/ashvin/icml2020/hand/brac/test_video1.py | Asap7772/railrl_evalsawyer | baba8ce634d32a48c7dfe4dc03b123e18e96e0a3 | [
"MIT"
] | null | null | null | """
AWR + SAC from demo experiment
"""
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
    # Base hyperparameter configuration shared by every run; individual
    # fields are overridden by the search_space sweep below.
    variant = dict(
        num_epochs=5001,
        num_eval_steps_per_epoch=1000,
        num_trains_per_train_loop=1000,
        num_expl_steps_per_train_loop=1000,
        min_num_steps_before_training=1000,
        max_path_length=1000,
        batch_size=1024,
        replay_buffer_size=int(1E6),
        layer_size=256,
        policy_class=GaussianPolicy,
        policy_kwargs=dict(
            hidden_sizes=[256, 256, 256, 256],
            max_log_std=0,
            min_log_std=-6,
            std_architecture="values",
            # num_gaussians=1,
        ),
        qf_kwargs=dict(
            hidden_sizes=[256, 256, ],
        ),
        algorithm="SAC",
        version="normal",
        collection_mode='batch',
        trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=5e-3,
            target_update_period=1,
            policy_lr=3E-4,
            qf_lr=3E-4,
            reward_scale=1,
            beta=1,
            use_automatic_entropy_tuning=False,
            alpha=0,
            compute_bc=False,
            bc_num_pretrain_steps=0,
            q_num_pretrain1_steps=0,
            q_num_pretrain2_steps=25000,
            policy_weight_decay=1e-4,
            q_weight_decay=0,
            bc_loss_type="mse",
            rl_weight=1.0,
            use_awr_update=True,
            use_reparam_update=False,
            reparam_weight=0.0,
            awr_weight=0.0,
            bc_weight=1.0,
            post_bc_pretrain_hyperparams=dict(
                bc_weight=0.0,
                compute_bc=False,
            ),
            brac=True,
            reward_transform_kwargs=None, # r' = r + 1
            terminal_transform_kwargs=None, # t = 0
        ),
        launcher_config=dict(
            num_exps_per_instance=1,
            region='us-west-2',
        ),
        path_loader_class=DictToMDPPathLoader,
        path_loader_kwargs=dict(
            obs_key="state_observation",
            demo_paths=[
                # dict(
                #     path="demos/icml2020/hand/pen2_sparse.npy",
                #     obs_dict=True,
                #     is_demo=True,
                # ),
                # dict(
                #     path="demos/icml2020/hand/pen_bc5.npy",
                #     obs_dict=False,
                #     is_demo=False,
                #     train_split=0.9,
                # ),
            ],
        ),
        add_env_demos=True,
        add_env_offpolicy_data=True,
        save_video=True,
        image_env_kwargs=dict(
            imsize=84,
            init_camera=None, # the environment initializes the camera already
            transpose=True,
            normalize=True,
            recompute_reward=False,
            non_presampled_goal_img_is_garbage=True, # do not set_to_goal
        ),
        dump_video_kwargs=dict(
            exploration_goal_image_key="image_observation",
            evaluation_goal_image_key="image_observation",
            image_format="CWH",
        ),
        # renderer_kwargs=dict(
        #     # width=84,
        #     # height=84,
        #     init_camera=None, # the environment initializes the camera already
        #     # transpose=True,
        #     create_image_format="HWC",
        #     output_image_format="CHW",
        #     # normalize=True,
        # ),
        # logger_variant=dict(
        #     tensorboard=True,
        # ),
        load_demos=True,
        pretrain_policy=True,
        pretrain_rl=True,
        # save_pretrained_algorithm=True,
        # snapshot_mode="all",
    )
    # Hyperparameter grid: the sweeper takes the cross product of the lists
    # below (dotted keys override nested fields of `variant`).
    search_space = {
        'env': ["relocate-binary-old-v0", ],
        'trainer_kwargs.bc_loss_type': ["mle"],
        'trainer_kwargs.awr_loss_type': ["mle"],
        'seedid': range(3),
        'trainer_kwargs.beta': [0.1, ],
        'trainer_kwargs.reparam_weight': [0.0, ],
        'trainer_kwargs.awr_weight': [1.0],
        'trainer_kwargs.bc_weight': [1.0, ],
        'policy_kwargs.std_architecture': ["values", ],
        'trainer_kwargs.clip_score': [2, ],
        # 'trainer_kwargs.compute_bc': [True, ],
        'trainer_kwargs.awr_use_mle_for_vf': [True, ],
        'trainer_kwargs.awr_sample_actions': [False, ],
        'trainer_kwargs.awr_min_q': [True, ],
        'trainer_kwargs.q_weight_decay': [0, ],
        'trainer_kwargs.reward_transform_kwargs': [None, ],
        'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
        'qf_kwargs.output_activation': [Clamp(max=0)],
        'trainer_kwargs.train_bc_on_rl_buffer':[True],
        # 'policy_kwargs.num_gaussians': [1, ],
    }
    # Expand the grid into concrete variants and launch them all.
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    variants = []
    for variant in sweeper.iterate_hyperparameters():
        variants.append(variant)
    run_variants(experiment, variants, process_args)
| 30.883041 | 80 | 0.566938 |
from rlkit.demos.source.dict_to_mdp_path_loader import DictToMDPPathLoader
from rlkit.launchers.experiments.awac.awac_rl import experiment, process_args
import rlkit.misc.hyperparameter as hyp
from rlkit.launchers.arglauncher import run_variants
from rlkit.torch.sac.policies import GaussianPolicy
from rlkit.torch.networks import Clamp
if __name__ == "__main__":
    # Base configuration for the AWR+SAC experiment; fields are overridden
    # per-run by the search_space sweep below.
    variant = dict(
        num_epochs=5001,
        num_eval_steps_per_epoch=1000,
        num_trains_per_train_loop=1000,
        num_expl_steps_per_train_loop=1000,
        min_num_steps_before_training=1000,
        max_path_length=1000,
        batch_size=1024,
        replay_buffer_size=int(1E6),
        layer_size=256,
        policy_class=GaussianPolicy,
        policy_kwargs=dict(
            hidden_sizes=[256, 256, 256, 256],
            max_log_std=0,
            min_log_std=-6,
            std_architecture="values",
        ),
        qf_kwargs=dict(
            hidden_sizes=[256, 256, ],
        ),
        algorithm="SAC",
        version="normal",
        collection_mode='batch',
        trainer_kwargs=dict(
            discount=0.99,
            soft_target_tau=5e-3,
            target_update_period=1,
            policy_lr=3E-4,
            qf_lr=3E-4,
            reward_scale=1,
            beta=1,
            use_automatic_entropy_tuning=False,
            alpha=0,
            compute_bc=False,
            bc_num_pretrain_steps=0,
            q_num_pretrain1_steps=0,
            q_num_pretrain2_steps=25000,
            policy_weight_decay=1e-4,
            q_weight_decay=0,
            bc_loss_type="mse",
            rl_weight=1.0,
            use_awr_update=True,
            use_reparam_update=False,
            reparam_weight=0.0,
            awr_weight=0.0,
            bc_weight=1.0,
            post_bc_pretrain_hyperparams=dict(
                bc_weight=0.0,
                compute_bc=False,
            ),
            brac=True,
            reward_transform_kwargs=None,
            terminal_transform_kwargs=None, # t = 0
        ),
        launcher_config=dict(
            num_exps_per_instance=1,
            region='us-west-2',
        ),
        path_loader_class=DictToMDPPathLoader,
        path_loader_kwargs=dict(
            obs_key="state_observation",
            demo_paths=[
                # dict(
                #     path="demos/icml2020/hand/pen2_sparse.npy",
                #     obs_dict=True,
                #     is_demo=True,
                # ),
                # dict(
                #     path="demos/icml2020/hand/pen_bc5.npy",
                #     obs_dict=False,
                #     is_demo=False,
                #     train_split=0.9,
                # ),
            ],
        ),
        add_env_demos=True,
        add_env_offpolicy_data=True,
        save_video=True,
        image_env_kwargs=dict(
            imsize=84,
            init_camera=None, # the environment initializes the camera already
            transpose=True,
            normalize=True,
            recompute_reward=False,
            non_presampled_goal_img_is_garbage=True, # do not set_to_goal
        ),
        dump_video_kwargs=dict(
            exploration_goal_image_key="image_observation",
            evaluation_goal_image_key="image_observation",
            image_format="CWH",
        ),
        # renderer_kwargs=dict(
        #     # width=84,
        #     # height=84,
        #     init_camera=None, # the environment initializes the camera already
        #     # transpose=True,
        #     create_image_format="HWC",
        #     output_image_format="CHW",
        #     # normalize=True,
        # ),
        # logger_variant=dict(
        #     tensorboard=True,
        # ),
        load_demos=True,
        pretrain_policy=True,
        pretrain_rl=True,
        # save_pretrained_algorithm=True,
        # snapshot_mode="all",
    )
    # Sweep grid: cross product of these lists (dotted keys override nested
    # fields of `variant`).
    search_space = {
        'env': ["relocate-binary-old-v0", ],
        'trainer_kwargs.bc_loss_type': ["mle"],
        'trainer_kwargs.awr_loss_type': ["mle"],
        'seedid': range(3),
        'trainer_kwargs.beta': [0.1, ],
        'trainer_kwargs.reparam_weight': [0.0, ],
        'trainer_kwargs.awr_weight': [1.0],
        'trainer_kwargs.bc_weight': [1.0, ],
        'policy_kwargs.std_architecture': ["values", ],
        'trainer_kwargs.clip_score': [2, ],
        # 'trainer_kwargs.compute_bc': [True, ],
        'trainer_kwargs.awr_use_mle_for_vf': [True, ],
        'trainer_kwargs.awr_sample_actions': [False, ],
        'trainer_kwargs.awr_min_q': [True, ],
        'trainer_kwargs.q_weight_decay': [0, ],
        'trainer_kwargs.reward_transform_kwargs': [None, ],
        'trainer_kwargs.terminal_transform_kwargs': [dict(m=0, b=0), ],
        'qf_kwargs.output_activation': [Clamp(max=0)],
        'trainer_kwargs.train_bc_on_rl_buffer':[True],
        # 'policy_kwargs.num_gaussians': [1, ],
    }
    # Expand the grid into concrete variants and launch them all.
    sweeper = hyp.DeterministicHyperparameterSweeper(
        search_space, default_parameters=variant,
    )
    variants = []
    for variant in sweeper.iterate_hyperparameters():
        variants.append(variant)
    run_variants(experiment, variants, process_args)
| true | true |
f72b32a4095f35d7bed6ab5e19378d3c4f4d06be | 1,876 | py | Python | tests/test_airconditioning.py | izumi-system-arai/builelib | ae7c36df1ef7477e9a0356559b2694aabff11bb3 | [
"MIT"
] | 5 | 2020-09-04T13:56:45.000Z | 2022-03-06T05:46:55.000Z | tests/test_airconditioning.py | izumi-system-arai/builelib | ae7c36df1ef7477e9a0356559b2694aabff11bb3 | [
"MIT"
] | 1 | 2021-08-17T07:11:42.000Z | 2021-08-17T07:11:42.000Z | tests/test_airconditioning.py | izumi-system-arai/builelib | ae7c36df1ef7477e9a0356559b2694aabff11bb3 | [
"MIT"
] | 2 | 2021-07-06T09:41:20.000Z | 2021-08-02T08:47:13.000Z | import pandas as pd
import csv
from builelib import airconditioning
import pytest
import json
import xlrd
### Test-case workbook paths ###
# Mapping: test-suite name -> Excel workbook listing its cases.
testcase_dict = {
    "AHU_basic": "./tests/airconditioning/★空調設備テストケース一覧.xlsx",
}
def convert2number(x, default):
    """Return *x* as a float; an empty string (empty spreadsheet cell)
    falls back to *default*."""
    return default if x == "" else float(x)
def read_testcasefile(filename):
    """Read every row of Sheet1 in the given Excel workbook into a list."""
    book = xlrd.open_workbook(filename)
    sheet = book.sheet_by_name("Sheet1")
    return [sheet.row_values(i) for i in range(sheet.nrows)]
#### Load the test-case workbooks ####
test_to_try = []  # list of (input data, expected value) pairs
testcase_id = []  # matching list of test-case names
for case_name in testcase_dict:
    # Read the workbook for this suite.
    testfiledata = read_testcasefile(testcase_dict[case_name])
    # Drop the header row.
    testfiledata.pop(0)
    # One row per test case.
    for testdata in testfiledata:
        filename = "./tests/airconditioning/ACtest_" + testdata[0] + ".json"
        # Load the JSON input for this case.
        with open(filename, 'r', encoding='utf-8') as f:
            inputdata = json.load(f)
        # Expected value (column 4 of the sheet).
        expectedvalue = (testdata[4])
        # Collect the case...
        test_to_try.append( (inputdata, expectedvalue) )
        # ...and its display name.
        testcase_id.append(case_name + testdata[0])
# Run the tests: one parametrized case per collected (input, expected) pair.
@pytest.mark.parametrize('inputdata, expectedvalue', test_to_try, ids=testcase_id)
def test_calc(inputdata, expectedvalue):
    # Dump the input for post-mortem inspection.
    with open("inputdata.json",'w', encoding='utf-8') as fw:
        json.dump(inputdata, fw, indent=4, ensure_ascii=False)
    # Run the calculation under test.
    resultJson = airconditioning.calc_energy(inputdata)
    diff_Eac = (abs(resultJson["E_airconditioning"] - expectedvalue)) / abs( expectedvalue )
    # Compare within 0.01% relative error.
    assert diff_Eac < 0.0001
# Direct invocation just identifies the module; run via pytest to execute.
if __name__ == '__main__':
    print('--- test_airconditioning.py ---')
| 21.563218 | 92 | 0.647122 | import pandas as pd
import csv
from builelib import airconditioning
import pytest
import json
import xlrd
# Restored: comment stripping mangled this assignment, leaving only the
# value and closing brace.  Mapping: test-suite name -> Excel workbook.
testcase_dict = {
    "AHU_basic": "./tests/airconditioning/★空調設備テストケース一覧.xlsx",
}
def convert2number(x, default):
    """Coerce spreadsheet cell *x* to float; empty cells yield *default*."""
    if x == "":
        return default
    return float(x)
def read_testcasefile(filename):
    """Return all rows of Sheet1 from the Excel workbook *filename*."""
    workbook = xlrd.open_workbook(filename)
    sheet = workbook.sheet_by_name("Sheet1")
    rows = [sheet.row_values(idx) for idx in range(sheet.nrows)]
    return rows
# Restored: comment stripping mangled this section — the two accumulator
# initialisations were dropped and the for-statement was truncated to
# "me in testcase_dict:".  Collect (input, expected) pairs and matching
# test ids from every workbook listed in testcase_dict.
test_to_try = []
testcase_id = []
for case_name in testcase_dict:
    testfiledata = read_testcasefile(testcase_dict[case_name])
    testfiledata.pop(0)
    for testdata in testfiledata:
        filename = "./tests/airconditioning/ACtest_" + testdata[0] + ".json"
        with open(filename, 'r', encoding='utf-8') as f:
            inputdata = json.load(f)
        expectedvalue = (testdata[4])
        test_to_try.append( (inputdata, expectedvalue) )
        testcase_id.append(case_name + testdata[0])
# One parametrized pytest case per collected (input, expected) pair.
@pytest.mark.parametrize('inputdata, expectedvalue', test_to_try, ids=testcase_id)
def test_calc(inputdata, expectedvalue):
    # Dump the input for post-mortem inspection, run the calculation and
    # require the result to match within 0.01% relative error.
    with open("inputdata.json",'w', encoding='utf-8') as fw:
        json.dump(inputdata, fw, indent=4, ensure_ascii=False)
    resultJson = airconditioning.calc_energy(inputdata)
    diff_Eac = (abs(resultJson["E_airconditioning"] - expectedvalue)) / abs( expectedvalue )
    assert diff_Eac < 0.0001
# Direct invocation only identifies the module; run via pytest to execute.
if __name__ == '__main__':
    print('--- test_airconditioning.py ---')
| true | true |
f72b338ae3488cd29a445fe80006558b89a53eb0 | 3,209 | py | Python | API/moviepiapi/utils.py | theoarmengou/MoviePi | b889ed1609e3db096b86452e3ca608822edcdb1a | [
"MIT"
] | 1 | 2020-01-08T12:09:14.000Z | 2020-01-08T12:09:14.000Z | API/moviepiapi/utils.py | theoarmengou/MoviePi | b889ed1609e3db096b86452e3ca608822edcdb1a | [
"MIT"
] | null | null | null | API/moviepiapi/utils.py | theoarmengou/MoviePi | b889ed1609e3db096b86452e3ca608822edcdb1a | [
"MIT"
] | 1 | 2020-10-30T10:33:19.000Z | 2020-10-30T10:33:19.000Z | ##
# EPITECH PROJECT, 2019
# MoviePi
# File description:
# utils.py
##
import datetime
import jwt
from moviepiapi.dbHelper import dbHelper
from moviepiapi.userHelper import userHelper
# Template for every API response; fill_return_packet overwrites these
# fields ('data' is seeded with the builtin `any` as a placeholder).
ret_packet = {'responseStatus': 0, 'message': "", 'data': any}
# Secret used to sign/verify JWT auth tokens.
Key = 'MoviePiTheoAudreyHicham'
# Maximum accepted length for user-supplied fields.
LEN_MAX_USER = 255
# NOTE(review): database credentials are hard-coded here — consider moving
# them to configuration/environment variables.
db = dbHelper('moviepi_api', 'moviepi_api', 'moviepi', '51.75.141.254')
userH = userHelper(db, LEN_MAX_USER)
def fill_return_packet(iswork, typeoferror, data):
    """Build the standard API response dict.

    The previous implementation mutated and returned the module-level
    ``ret_packet`` dict, so every call clobbered the dict previously handed
    to callers (and was unsafe under concurrent requests).  A fresh dict is
    now returned on every call; keys and values are unchanged.
    """
    return {'responseStatus': iswork, 'message': typeoferror, 'data': data}
def encode_auth_token(user_id):
    """Create a signed JWT for *user_id*, valid for one day.

    Returns the encoded token as a str on success; on failure the exception
    object itself is returned (kept for backward compatibility — callers
    should check the type of the result).
    """
    try:
        claims = {
            'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),
            'iat': datetime.datetime.utcnow(),
            'sub': user_id,
        }
        token = jwt.encode(claims, Key, algorithm='HS256')
        return token.decode('utf-8')
    except Exception as err:
        return err
def check_auth_token(request):
    """Validate the Bearer token from the request's Authorization header.

    Returns the token subject (user id) when valid, None when the header is
    absent/malformed, and False when the token is expired or invalid.
    """
    parts = request.headers.get('Authorization', '').split()
    if len(parts) != 2:
        return None
    try:
        claims = jwt.decode(parts[1], Key)
        return claims['sub']
    except (jwt.ExpiredSignatureError, jwt.InvalidTokenError):
        return False
def make_average_weight(list):
    """Return the arithmetic mean of *list*, coercing items to float.

    Returns -1 for an empty list (sentinel kept from the original API).
    The parameter name shadows the ``list`` builtin but is preserved for
    backward compatibility with keyword callers.

    Fix: the manual index-based accumulation loop is replaced with the
    built-in ``sum``; the debug ``print`` is kept so observable output is
    unchanged.
    """
    if not list:
        return -1
    result = sum(float(item) for item in list) / len(list)
    print(result)
    return result
def adjust_weight_user(film_id, note, id_user):
    """Update a user's per-genre weights after rating film *film_id* with *note*.

    Reads the film's genres and the user's existing genre weights from the
    database, recomputes weights, then upserts them into users_genres.
    Returns True on success, False when the user has no genre rows, or an
    error packet when the film has no genres.
    """
    weight_list = []
    idgenre_list = []
    already_genre = []
    all_genre_user = []
    new_weight = []
    # Genres attached to the film (stored as a comma-separated id string).
    result = db.request(
        "SELECT fk_genres FROM films_genres WHERE fk_films=%s", str(film_id))
    if not result:
        return fill_return_packet(0, "Pas de genre trouvés pour ce film", None)
    idgenre_list = result[0]['fk_genres'].split(',')
    for i in range(len(idgenre_list)):
        idgenre_list[i] = int(idgenre_list[i])
    # Genres the user already has weights for.
    result_user = db.request(
        "SELECT fk_genres, weight FROM users_genres WHERE fk_users=%s", str(id_user))
    if not result_user:
        return False
    for i in range(len(result_user)):
        already_genre.append(int(result_user[i]['fk_genres']))
    # Union of the film's genres and the user's known genres.
    final_list = list(set(idgenre_list).union(set(already_genre)))
    # NOTE(review): the inner loop iterates range(len(result)) but indexes
    # result_user[y] — if the two result sets differ in length this either
    # skips rows or raises IndexError; confirm intent.
    # NOTE(review): new_weight grows len(final_list) * len(result) entries
    # while the write-back loop pairs new_weight[i] with final_list[i] —
    # verify the alignment is what was intended.
    for i in range(len(final_list)):
        for y in range(len(result)):
            if final_list[i] == result_user[y]['fk_genres']:
                new_weight.append(
                    (int(result_user[y]['weight']) / len(final_list)) * int(note))
            else:
                new_weight.append(1)
    # Upsert the recomputed weights.
    for i in range(len(new_weight)):
        print(id_user, final_list[i], new_weight[i])
        if final_list[i] in already_genre:
            db.request("UPDATE users_genres SET weight = %s WHERE fk_users = %s AND fk_genres = %s",
                       new_weight[i], id_user, final_list[i])
        else:
            db.insert("INSERT INTO users_genres (fk_users, fk_genres, weight) VALUES (%s, %s, %s)",
                      id_user, final_list[i], new_weight[i])
    return True
| 30.561905 | 100 | 0.621377 |
import datetime
import jwt
from moviepiapi.dbHelper import dbHelper
from moviepiapi.userHelper import userHelper
# Response template mutated by fill_return_packet ('data' seeded with the
# builtin `any` as a placeholder).
ret_packet = {'responseStatus': 0, 'message': "", 'data': any}
# JWT signing secret.
Key = 'MoviePiTheoAudreyHicham'
# Maximum accepted length for user-supplied fields.
LEN_MAX_USER = 255
# NOTE(review): hard-coded DB credentials — move to configuration.
db = dbHelper('moviepi_api', 'moviepi_api', 'moviepi', '51.75.141.254')
userH = userHelper(db, LEN_MAX_USER)
def fill_return_packet(iswork, typeoferror, data):
    """Build the standard API response dict.

    Previously this mutated and returned the shared module-level
    ``ret_packet`` dict, so a later call silently changed the dict an earlier
    caller still held (and it was not safe under concurrent requests).
    Building a fresh dict per call fixes that; keys/values are unchanged.
    """
    return {'responseStatus': iswork, 'message': typeoferror, 'data': data}
def encode_auth_token(user_id):
    """Create a signed JWT for *user_id*, valid for one day.

    Returns the encoded token as a str on success.  On failure the exception
    object itself is returned — callers must check the result's type.
    """
    try:
        payload = {
            'exp': datetime.datetime.utcnow() + datetime.timedelta(days=1),
            'iat': datetime.datetime.utcnow(),
            'sub': user_id
        }
        # HS256-sign with the module-level Key; decode to str for transport.
        return jwt.encode(
            payload,
            Key,
            algorithm='HS256'
        ).decode('utf-8')
    except Exception as e:
        return e
def check_auth_token(request):
    """Validate the Bearer token from the request's Authorization header.

    Returns the token subject (user id) when valid, None when the header is
    absent/malformed, and False when the token is expired or invalid.
    """
    auth_headers = request.headers.get('Authorization', '').split()
    if len(auth_headers) != 2:
        return None
    try:
        payload = jwt.decode(auth_headers[1], Key)
        return payload['sub']
    except jwt.ExpiredSignatureError:
        return False
    except jwt.InvalidTokenError:
        return False
    # Unreachable: every path above either returns or raises past this point.
    return False
def make_average_weight(list):
    """Return the arithmetic mean of *list* items coerced to float.

    An empty list returns the sentinel -1, as in the original API.  The
    parameter name shadows the ``list`` builtin but is kept so keyword
    callers keep working.

    Fix: replaces the manual ``for i in range(len(...))`` accumulation with
    the built-in ``sum``; the debug ``print`` is retained so the observable
    output does not change.
    """
    if not list:
        return -1
    result = sum(float(value) for value in list) / len(list)
    print(result)
    return result
def adjust_weight_user(film_id, note, id_user):
    """Recompute and upsert a user's per-genre weights after a film rating.

    Returns True on success, False when the user has no genre rows, or an
    error packet when the film has no genres.
    """
    weight_list = []
    idgenre_list = []
    already_genre = []
    all_genre_user = []
    new_weight = []
    # Film's genres, stored as a comma-separated id string.
    result = db.request(
        "SELECT fk_genres FROM films_genres WHERE fk_films=%s", str(film_id))
    if not result:
        return fill_return_packet(0, "Pas de genre trouvés pour ce film", None)
    idgenre_list = result[0]['fk_genres'].split(',')
    for i in range(len(idgenre_list)):
        idgenre_list[i] = int(idgenre_list[i])
    # Genres the user already has weights for.
    result_user = db.request(
        "SELECT fk_genres, weight FROM users_genres WHERE fk_users=%s", str(id_user))
    if not result_user:
        return False
    for i in range(len(result_user)):
        already_genre.append(int(result_user[i]['fk_genres']))
    final_list = list(set(idgenre_list).union(set(already_genre)))
    # NOTE(review): inner loop bounds come from len(result) but the index is
    # applied to result_user — probable bug if the two differ; also
    # new_weight can end up longer than final_list while the write-back loop
    # pairs new_weight[i] with final_list[i].  Verify both.
    for i in range(len(final_list)):
        for y in range(len(result)):
            if final_list[i] == result_user[y]['fk_genres']:
                new_weight.append(
                    (int(result_user[y]['weight']) / len(final_list)) * int(note))
            else:
                new_weight.append(1)
    # Upsert the recomputed weights.
    for i in range(len(new_weight)):
        print(id_user, final_list[i], new_weight[i])
        if final_list[i] in already_genre:
            db.request("UPDATE users_genres SET weight = %s WHERE fk_users = %s AND fk_genres = %s",
                       new_weight[i], id_user, final_list[i])
        else:
            db.insert("INSERT INTO users_genres (fk_users, fk_genres, weight) VALUES (%s, %s, %s)",
                      id_user, final_list[i], new_weight[i])
    return True
| true | true |
f72b33a87fd87b89f914f36a973b364e5a397d6d | 471 | py | Python | basics/src/simple_action_client.py | jescasany/rosbook | a79258e7fa80eb4f8745850125d6b2e462a62dee | [
"Apache-2.0"
] | null | null | null | basics/src/simple_action_client.py | jescasany/rosbook | a79258e7fa80eb4f8745850125d6b2e462a62dee | [
"Apache-2.0"
] | null | null | null | basics/src/simple_action_client.py | jescasany/rosbook | a79258e7fa80eb4f8745850125d6b2e462a62dee | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
# Minimal actionlib client example: sends a 5-second Timer goal and waits.
import roslib; roslib.load_manifest('basics')
import rospy
import actionlib
from basics.msg import TimerAction, TimerGoal, TimerResult
# Register this process as a ROS node.
rospy.init_node('timer_action_client')
# Connect to the 'timer' action server and block until it is available.
client = actionlib.SimpleActionClient('timer', TimerAction)
client.wait_for_server()
# Ask the server to wait 5 seconds, then block until the result arrives.
goal = TimerGoal()
goal.time_to_wait = rospy.Duration.from_sec(5.0)
client.send_goal(goal)
client.wait_for_result()
print('Time elapsed: %f'%(client.get_result().time_elapsed.to_sec()))
| 27.705882 | 69 | 0.794055 |
import roslib; roslib.load_manifest('basics')
import rospy
import actionlib
from basics.msg import TimerAction, TimerGoal, TimerResult
rospy.init_node('timer_action_client')
client = actionlib.SimpleActionClient('timer', TimerAction)
client.wait_for_server()
goal = TimerGoal()
goal.time_to_wait = rospy.Duration.from_sec(5.0)
client.send_goal(goal)
client.wait_for_result()
print('Time elapsed: %f'%(client.get_result().time_elapsed.to_sec()))
| true | true |
f72b34ac6ea7004cf31e6dccd1805b12ef0d95bf | 2,106 | py | Python | gmprocess/waveform_processing/clipping/clipping_check.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | null | null | null | gmprocess/waveform_processing/clipping/clipping_check.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | null | null | null | gmprocess/waveform_processing/clipping/clipping_check.py | baagaard-usgs/groundmotion-processing | 6be2b4460d598bba0935135efa85af2655578565 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from obspy.geodetics.base import gps2dist_azimuth
from gmprocess.waveform_processing.clipping.clipping_ann import clipNet
from gmprocess.waveform_processing.clipping.max_amp import Max_Amp
from gmprocess.waveform_processing.clipping.histogram import Histogram
from gmprocess.waveform_processing.clipping.ping import Ping
M_TO_KM = 1.0 / 1000  # conversion factor: meters -> kilometers
def check_clipping(st, origin, threshold=0.2):
    """Apply clipping check.

    Runs three clipping heuristics (max amplitude, histogram, ping) and feeds
    their boolean results, plus event magnitude and epicentral distance, into
    a small neural net that estimates the probability that the stream is
    clipped.  Every trace in the stream is failed when that probability
    reaches ``threshold``.

    Lower thresholds will pass fewer streams but will give less false negatives
    (i.e., streams in which clipping actually occurred but were missed).

    Args:
        st (StationStream):
            Trace of data.
        origin (ScalarEvent):
            ScalarEvent object.
        threshold (float):
            Threshold probability.

    Returns:
        StationStream checked for clipping.
    """
    # Don't bother with test for strong motion instruments
    # (second letter "N" in the channel code).
    chan_code = st.get_id().split(".")[2]
    if chan_code[1] == "N":
        return st
    # Don't bother with test if it has already failed
    if not st.passed:
        return st
    event_mag = origin.magnitude
    event_lon = origin.longitude
    event_lat = origin.latitude
    # Epicentral distance in km from the event to the station coordinates.
    dist = (
        gps2dist_azimuth(
            lat1=event_lat,
            lon1=event_lon,
            lat2=st[0].stats["coordinates"]["latitude"],
            lon2=st[0].stats["coordinates"]["longitude"],
        )[0]
        * M_TO_KM
    )
    # Clip mag/dist to range of training dataset
    event_mag = np.clip(event_mag, 4.0, 8.8)
    dist = np.clip(dist, 0.0, 445.0)
    clip_nnet = clipNet()
    # Individual clipping detectors; each exposes a boolean `is_clipped`.
    max_amp_method = Max_Amp(st, max_amp_thresh=6e6)
    hist_method = Histogram(st)
    ping_method = Ping(st)
    inputs = [
        event_mag,
        dist,
        max_amp_method.is_clipped,
        hist_method.is_clipped,
        ping_method.is_clipped,
    ]
    # Network output is P(clipped); fail all traces at or above the threshold.
    prob_clip = clip_nnet.evaluate(inputs)[0][0]
    if prob_clip >= threshold:
        for tr in st:
            tr.fail(f"Failed clipping check: prob_clip = {prob_clip:.2f}.")
    return st
| 26.658228 | 79 | 0.646724 |
import numpy as np
from obspy.geodetics.base import gps2dist_azimuth
from gmprocess.waveform_processing.clipping.clipping_ann import clipNet
from gmprocess.waveform_processing.clipping.max_amp import Max_Amp
from gmprocess.waveform_processing.clipping.histogram import Histogram
from gmprocess.waveform_processing.clipping.ping import Ping
M_TO_KM = 1.0 / 1000
def check_clipping(st, origin, threshold=0.2):
chan_code = st.get_id().split(".")[2]
if chan_code[1] == "N":
return st
# Don't bother with test if it has already failed
if not st.passed:
return st
event_mag = origin.magnitude
event_lon = origin.longitude
event_lat = origin.latitude
dist = (
gps2dist_azimuth(
lat1=event_lat,
lon1=event_lon,
lat2=st[0].stats["coordinates"]["latitude"],
lon2=st[0].stats["coordinates"]["longitude"],
)[0]
* M_TO_KM
)
event_mag = np.clip(event_mag, 4.0, 8.8)
dist = np.clip(dist, 0.0, 445.0)
clip_nnet = clipNet()
max_amp_method = Max_Amp(st, max_amp_thresh=6e6)
hist_method = Histogram(st)
ping_method = Ping(st)
inputs = [
event_mag,
dist,
max_amp_method.is_clipped,
hist_method.is_clipped,
ping_method.is_clipped,
]
prob_clip = clip_nnet.evaluate(inputs)[0][0]
if prob_clip >= threshold:
for tr in st:
tr.fail(f"Failed clipping check: prob_clip = {prob_clip:.2f}.")
return st
| true | true |
f72b35700339e44cd46bed837a41ec9eb436c1cc | 12,143 | py | Python | Lab0/Example3/top_block.py | RadiumScriptTang/Wireless-communication-systems-Lab | 37afc4e3cc9fa8759b22ec2737b747d2628e01df | [
"MIT"
] | 47 | 2019-08-01T12:24:20.000Z | 2022-03-22T14:21:54.000Z | Lab0/Example3/top_block.py | aboulogeorgos/Wireless-communication-systems-Lab | 37afc4e3cc9fa8759b22ec2737b747d2628e01df | [
"MIT"
] | null | null | null | Lab0/Example3/top_block.py | aboulogeorgos/Wireless-communication-systems-Lab | 37afc4e3cc9fa8759b22ec2737b747d2628e01df | [
"MIT"
] | 13 | 2020-03-04T20:20:10.000Z | 2022-02-23T14:22:02.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
##################################################
# GNU Radio Python Flow Graph
# Title: Combination of two signal sources
# Author: Alexandros-Apostolos A. Boulogeorgos
# Generated: Tue Nov 5 13:35:41 2019
##################################################
if __name__ == '__main__':
    import ctypes
    import sys
    if sys.platform.startswith('linux'):
        # Initialise X11 threading support before any Qt widgets are created;
        # failure is non-fatal, so only warn.
        try:
            x11 = ctypes.cdll.LoadLibrary('libX11.so')
            x11.XInitThreads()
        except:
            print "Warning: failed to XInitThreads()"
from PyQt4 import Qt
from PyQt4.QtCore import QObject, pyqtSlot
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio import qtgui
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import sip
import sys
class top_block(gr.top_block, Qt.QWidget):
    """GRC-generated Qt flow graph that sums two selectable signal sources.

    Two ``analog.sig_source_c`` blocks (1000 Hz and 800 Hz, amplitude 0.5)
    feed an adder; each source and the sum are displayed in qtgui sinks, and
    the waveform of each source is selectable from a toolbar combo box.
    NOTE(review): this class is auto-generated by GNU Radio Companion;
    regenerating from the .grc file will overwrite manual edits.
    """
    def __init__(self):
        gr.top_block.__init__(self, "Combination of two signal sources")
        Qt.QWidget.__init__(self)
        self.setWindowTitle("Combination of two signal sources")
        try:
            self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
        except:
            pass
        # Scrollable top-level widget hierarchy that hosts the GUI elements.
        self.top_scroll_layout = Qt.QVBoxLayout()
        self.setLayout(self.top_scroll_layout)
        self.top_scroll = Qt.QScrollArea()
        self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
        self.top_scroll_layout.addWidget(self.top_scroll)
        self.top_scroll.setWidgetResizable(True)
        self.top_widget = Qt.QWidget()
        self.top_scroll.setWidget(self.top_widget)
        self.top_layout = Qt.QVBoxLayout(self.top_widget)
        self.top_grid_layout = Qt.QGridLayout()
        self.top_layout.addLayout(self.top_grid_layout)
        # Restore the window geometry persisted by closeEvent().
        self.settings = Qt.QSettings("GNU Radio", "top_block")
        self.restoreGeometry(self.settings.value("geometry").toByteArray())
        ##################################################
        # Variables
        ##################################################
        # Waveform ids 101..105 correspond to the combo-box labels below
        # (Sine, Cosine, Rectangular, Triangular, Saw tooth).
        self.waveform2 = waveform2 = 102
        self.waveform1 = waveform1 = 102
        self.samp_rate = samp_rate = 48000
        ##################################################
        # Blocks
        ##################################################
        # Toolbar combo box selecting the waveform of signal source 2.
        self._waveform2_options = (101, 102, 103, 104, 105, )
        self._waveform2_labels = ('Sine', 'Cosine', 'Rectangular', 'Triangular', 'Saw tooth', )
        self._waveform2_tool_bar = Qt.QToolBar(self)
        self._waveform2_tool_bar.addWidget(Qt.QLabel('Waveform of signal source 2'+": "))
        self._waveform2_combo_box = Qt.QComboBox()
        self._waveform2_tool_bar.addWidget(self._waveform2_combo_box)
        for label in self._waveform2_labels: self._waveform2_combo_box.addItem(label)
        self._waveform2_callback = lambda i: Qt.QMetaObject.invokeMethod(self._waveform2_combo_box, "setCurrentIndex", Qt.Q_ARG("int", self._waveform2_options.index(i)))
        self._waveform2_callback(self.waveform2)
        self._waveform2_combo_box.currentIndexChanged.connect(
            lambda i: self.set_waveform2(self._waveform2_options[i]))
        self.top_grid_layout.addWidget(self._waveform2_tool_bar, 0,1,1,1)
        # Toolbar combo box selecting the waveform of signal source 1.
        self._waveform1_options = (101, 102, 103, 104, 105, )
        self._waveform1_labels = ('Sine', 'Cosine', 'Rectangular', 'Triangular', 'Saw tooth', )
        self._waveform1_tool_bar = Qt.QToolBar(self)
        self._waveform1_tool_bar.addWidget(Qt.QLabel('Waveform of signal source 1'+": "))
        self._waveform1_combo_box = Qt.QComboBox()
        self._waveform1_tool_bar.addWidget(self._waveform1_combo_box)
        for label in self._waveform1_labels: self._waveform1_combo_box.addItem(label)
        self._waveform1_callback = lambda i: Qt.QMetaObject.invokeMethod(self._waveform1_combo_box, "setCurrentIndex", Qt.Q_ARG("int", self._waveform1_options.index(i)))
        self._waveform1_callback(self.waveform1)
        self._waveform1_combo_box.currentIndexChanged.connect(
            lambda i: self.set_waveform1(self._waveform1_options[i]))
        self.top_grid_layout.addWidget(self._waveform1_tool_bar, 0,0,1,1)
        # Time-domain display for signal source 2.
        self.qtgui_time_sink_x_0_0 = qtgui.time_sink_c(
            1024, #size
            samp_rate, #samp_rate
            "", #name
            1 #number of inputs
        )
        self.qtgui_time_sink_x_0_0.set_update_time(0.10)
        self.qtgui_time_sink_x_0_0.set_y_axis(-1, 1)
        self.qtgui_time_sink_x_0_0.set_y_label('Amplitude', "")
        self.qtgui_time_sink_x_0_0.enable_tags(-1, True)
        self.qtgui_time_sink_x_0_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
        self.qtgui_time_sink_x_0_0.enable_autoscale(False)
        self.qtgui_time_sink_x_0_0.enable_grid(False)
        self.qtgui_time_sink_x_0_0.enable_axis_labels(True)
        self.qtgui_time_sink_x_0_0.enable_control_panel(False)
        if not True:
          self.qtgui_time_sink_x_0_0.disable_legend()
        labels = ['', '', '', '', '',
                  '', '', '', '', '']
        widths = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        colors = ["blue", "red", "green", "black", "cyan",
                  "magenta", "yellow", "dark red", "dark green", "blue"]
        styles = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        markers = [-1, -1, -1, -1, -1,
                   -1, -1, -1, -1, -1]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        # Two lines per complex input (real and imaginary parts).
        for i in xrange(2*1):
            if len(labels[i]) == 0:
                if(i % 2 == 0):
                    self.qtgui_time_sink_x_0_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
                else:
                    self.qtgui_time_sink_x_0_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
            else:
                self.qtgui_time_sink_x_0_0.set_line_label(i, labels[i])
            self.qtgui_time_sink_x_0_0.set_line_width(i, widths[i])
            self.qtgui_time_sink_x_0_0.set_line_color(i, colors[i])
            self.qtgui_time_sink_x_0_0.set_line_style(i, styles[i])
            self.qtgui_time_sink_x_0_0.set_line_marker(i, markers[i])
            self.qtgui_time_sink_x_0_0.set_line_alpha(i, alphas[i])
        self._qtgui_time_sink_x_0_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0_0.pyqwidget(), Qt.QWidget)
        self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_0_win, 1,1,1,1)
        # Time-domain display for signal source 1.
        self.qtgui_time_sink_x_0 = qtgui.time_sink_c(
            1024, #size
            samp_rate, #samp_rate
            "", #name
            1 #number of inputs
        )
        self.qtgui_time_sink_x_0.set_update_time(0.10)
        self.qtgui_time_sink_x_0.set_y_axis(-1, 1)
        self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")
        self.qtgui_time_sink_x_0.enable_tags(-1, True)
        self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, qtgui.TRIG_SLOPE_POS, 0.0, 0, 0, "")
        self.qtgui_time_sink_x_0.enable_autoscale(False)
        self.qtgui_time_sink_x_0.enable_grid(False)
        self.qtgui_time_sink_x_0.enable_axis_labels(True)
        self.qtgui_time_sink_x_0.enable_control_panel(False)
        if not True:
          self.qtgui_time_sink_x_0.disable_legend()
        labels = ['', '', '', '', '',
                  '', '', '', '', '']
        widths = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        colors = ["blue", "red", "green", "black", "cyan",
                  "magenta", "yellow", "dark red", "dark green", "blue"]
        styles = [1, 1, 1, 1, 1,
                  1, 1, 1, 1, 1]
        markers = [-1, -1, -1, -1, -1,
                   -1, -1, -1, -1, -1]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0,
                  1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(2*1):
            if len(labels[i]) == 0:
                if(i % 2 == 0):
                    self.qtgui_time_sink_x_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
                else:
                    self.qtgui_time_sink_x_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
            else:
                self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
            self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
            self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
            self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
            self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
            self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
        self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
        self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_win, 1,0,1,1)
        # Combined sink (freq/waterfall/time/constellation) for the summed signal.
        self.qtgui_sink_x_0 = qtgui.sink_c(
            1024, #fftsize
            firdes.WIN_BLACKMAN_hARRIS, #wintype
            0, #fc
            samp_rate, #bw
            "", #name
            True, #plotfreq
            True, #plotwaterfall
            True, #plottime
            True, #plotconst
        )
        self.qtgui_sink_x_0.set_update_time(1.0/10)
        self._qtgui_sink_x_0_win = sip.wrapinstance(self.qtgui_sink_x_0.pyqwidget(), Qt.QWidget)
        self.top_layout.addWidget(self._qtgui_sink_x_0_win)
        self.qtgui_sink_x_0.enable_rf_freq(False)
        # Throttle keeps the graph from running faster than samp_rate
        # (no hardware block paces this flow graph).
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
        self.blocks_add_xx_0 = blocks.add_vcc(1)
        # Source 2: 800 Hz, amplitude 0.5; Source 1: 1000 Hz, amplitude 0.5.
        self.analog_sig_source_x_0_0 = analog.sig_source_c(samp_rate, waveform2, 800, 0.500, 0)
        self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, waveform1, 1000, 0.500, 0)
        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_sig_source_x_0, 0), (self.blocks_add_xx_0, 0))
        self.connect((self.analog_sig_source_x_0, 0), (self.qtgui_time_sink_x_0, 0))
        self.connect((self.analog_sig_source_x_0_0, 0), (self.blocks_add_xx_0, 1))
        self.connect((self.analog_sig_source_x_0_0, 0), (self.qtgui_time_sink_x_0_0, 0))
        self.connect((self.blocks_add_xx_0, 0), (self.blocks_throttle_0, 0))
        self.connect((self.blocks_throttle_0, 0), (self.qtgui_sink_x_0, 0))
    def closeEvent(self, event):
        """Persist the window geometry so __init__ can restore it next run."""
        self.settings = Qt.QSettings("GNU Radio", "top_block")
        self.settings.setValue("geometry", self.saveGeometry())
        event.accept()
    def get_waveform2(self):
        """Return the current waveform id of signal source 2."""
        return self.waveform2
    def set_waveform2(self, waveform2):
        """Set the waveform of source 2 and keep the GUI combo box in sync."""
        self.waveform2 = waveform2
        self._waveform2_callback(self.waveform2)
        self.analog_sig_source_x_0_0.set_waveform(self.waveform2)
    def get_waveform1(self):
        """Return the current waveform id of signal source 1."""
        return self.waveform1
    def set_waveform1(self, waveform1):
        """Set the waveform of source 1 and keep the GUI combo box in sync."""
        self.waveform1 = waveform1
        self._waveform1_callback(self.waveform1)
        self.analog_sig_source_x_0.set_waveform(self.waveform1)
    def get_samp_rate(self):
        """Return the current sample rate in samples/second."""
        return self.samp_rate
    def set_samp_rate(self, samp_rate):
        """Propagate a new sample rate to every rate-dependent block."""
        self.samp_rate = samp_rate
        self.qtgui_time_sink_x_0_0.set_samp_rate(self.samp_rate)
        self.qtgui_time_sink_x_0.set_samp_rate(self.samp_rate)
        self.qtgui_sink_x_0.set_frequency_range(0, self.samp_rate)
        self.blocks_throttle_0.set_sample_rate(self.samp_rate)
        self.analog_sig_source_x_0_0.set_sampling_freq(self.samp_rate)
        self.analog_sig_source_x_0.set_sampling_freq(self.samp_rate)
def main(top_block_cls=top_block, options=None):
    """Instantiate the flow graph, run the Qt event loop, and stop cleanly.

    :param top_block_cls: flow-graph class to instantiate (default top_block).
    :param options: unused; kept for GRC-generated signature compatibility.
    """
    from distutils.version import StrictVersion
    if StrictVersion(Qt.qVersion()) >= StrictVersion("4.5.0"):
        # Honour the qtgui style preference on Qt >= 4.5.
        style = gr.prefs().get_string('qtgui', 'style', 'raster')
        Qt.QApplication.setGraphicsSystem(style)
    qapp = Qt.QApplication(sys.argv)
    tb = top_block_cls()
    tb.start()
    tb.show()
    def quitting():
        # Stop the flow graph and join its threads before Qt tears down.
        tb.stop()
        tb.wait()
    qapp.connect(qapp, Qt.SIGNAL("aboutToQuit()"), quitting)
    qapp.exec_()
# Script entry point.
if __name__ == '__main__':
    main()
| 43.060284 | 169 | 0.612781 |
i]) == 0:
if(i % 2 == 0):
self.qtgui_time_sink_x_0.set_line_label(i, "Re{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0.set_line_label(i, "Im{{Data {0}}}".format(i/2))
else:
self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])
self._qtgui_time_sink_x_0_win = sip.wrapinstance(self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_grid_layout.addWidget(self._qtgui_time_sink_x_0_win, 1,0,1,1)
self.qtgui_sink_x_0 = qtgui.sink_c(
1024,
firdes.WIN_BLACKMAN_hARRIS,
0,
samp_rate,
"",
True,
True,
True,
True,
)
self.qtgui_sink_x_0.set_update_time(1.0/10)
self._qtgui_sink_x_0_win = sip.wrapinstance(self.qtgui_sink_x_0.pyqwidget(), Qt.QWidget)
self.top_layout.addWidget(self._qtgui_sink_x_0_win)
self.qtgui_sink_x_0.enable_rf_freq(False)
self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, samp_rate,True)
self.blocks_add_xx_0 = blocks.add_vcc(1)
self.analog_sig_source_x_0_0 = analog.sig_source_c(samp_rate, waveform2, 800, 0.500, 0)
self.analog_sig_source_x_0 = analog.sig_source_c(samp_rate, waveform1, 1000, 0.500, 0)
| false | true |
f72b36d5fb0cf98eeb2d459b179cfde55b038f13 | 2,311 | py | Python | model.py | bhardwajRahul/RestaurantAPI | 28d7fcd3fbe0524750321102625d8475515f54ed | [
"MIT"
] | 15 | 2018-06-03T16:35:16.000Z | 2022-02-13T16:36:37.000Z | model.py | bhardwajRahul/RestaurantAPI | 28d7fcd3fbe0524750321102625d8475515f54ed | [
"MIT"
] | 2 | 2019-02-11T07:03:09.000Z | 2021-02-25T09:16:15.000Z | model.py | navi25/RestaurantAPI | 28d7fcd3fbe0524750321102625d8475515f54ed | [
"MIT"
] | 9 | 2019-02-08T11:17:34.000Z | 2022-01-29T00:27:14.000Z | from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from flask_redis import FlaskRedis
# Module-level extension singletons shared by the models and schemas below.
# NOTE(review): presumably bound to the Flask app via init_app() elsewhere —
# not visible in this module.
ma = Marshmallow()
db = SQLAlchemy()
redis_cache = FlaskRedis()
class FoodModel(db.Model):
    """A food item offered by a restaurant, listed under one of its menus."""
    __tablename__ = 'foods'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), nullable=False)
    description = db.Column(db.String(250))  # optional free-text description
    # Set by the database at insert time.
    creation_date = db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
    # Owning restaurant; rows are removed when the restaurant is deleted.
    restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
    restaurant = db.relationship('RestaurantModel', backref=db.backref('foods', lazy='dynamic' ))
    # Menu this food appears on; rows are removed when the menu is deleted.
    menu_id = db.Column(db.Integer, db.ForeignKey('menus.id', ondelete='CASCADE'), nullable=False)
    menu = db.relationship('MenuModel')
    def __init__(self, name, description, restaurant_id, menu_id):
        self.name = name
        self.description = description
        self.restaurant_id = restaurant_id
        self.menu_id = menu_id
class MenuModel(db.Model):
    """A named menu belonging to a single restaurant."""
    __tablename__ = 'menus'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(250), nullable=False)
    # Owning restaurant; rows are removed when the restaurant is deleted.
    restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
    restaurant = db.relationship('RestaurantModel')
    def __init__(self, name, restaurant_id):
        self.name = name
        self.restaurant_id = restaurant_id
class RestaurantModel(db.Model):
    """A restaurant, identified by a unique name."""
    __tablename__ = 'restaurants'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(150), unique=True, nullable=False)
    def __init__(self, name):
        self.name = name
class RestaurantSchema(ma.Schema):
    """Marshmallow schema for serialising/validating RestaurantModel data."""
    id = fields.Integer()
    name = fields.String(required=True)
class MenuSchema(ma.Schema):
    """Marshmallow schema for serialising/validating MenuModel data."""
    id = fields.Integer()
    restaurant_id = fields.Integer(required=True)
    name = fields.String(required=True)
class FoodSchema(ma.Schema):
    """Marshmallow schema for serialising/validating FoodModel data."""
    id = fields.Integer(dump_only=True)  # assigned by the DB, never accepted on load
    restaurant_id = fields.Integer(required=True)
    name = fields.String(required=True, validate=validate.Length(1))  # non-empty
    description = fields.String()
    creation_date = fields.DateTime()
| 35.553846 | 110 | 0.719169 | from flask import Flask
from marshmallow import Schema, fields, pre_load, validate
from flask_marshmallow import Marshmallow
from flask_sqlalchemy import SQLAlchemy
from flask_redis import FlaskRedis
ma = Marshmallow()
db = SQLAlchemy()
redis_cache = FlaskRedis()
class FoodModel(db.Model):
__tablename__ = 'foods'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
description = db.Column(db.String(250))
creation_date = db.Column(db.TIMESTAMP, server_default=db.func.current_timestamp(), nullable=False)
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
restaurant = db.relationship('RestaurantModel', backref=db.backref('foods', lazy='dynamic' ))
menu_id = db.Column(db.Integer, db.ForeignKey('menus.id', ondelete='CASCADE'), nullable=False)
menu = db.relationship('MenuModel')
def __init__(self, name, description, restaurant_id, menu_id):
self.name = name
self.description = description
self.restaurant_id = restaurant_id
self.menu_id = menu_id
class MenuModel(db.Model):
__tablename__ = 'menus'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(250), nullable=False)
restaurant_id = db.Column(db.Integer, db.ForeignKey('restaurants.id', ondelete='CASCADE'), nullable=False)
restaurant = db.relationship('RestaurantModel')
def __init__(self, name, restaurant_id):
self.name = name
self.restaurant_id = restaurant_id
class RestaurantModel(db.Model):
__tablename__ = 'restaurants'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(150), unique=True, nullable=False)
def __init__(self, name):
self.name = name
class RestaurantSchema(ma.Schema):
id = fields.Integer()
name = fields.String(required=True)
class MenuSchema(ma.Schema):
id = fields.Integer()
restaurant_id = fields.Integer(required=True)
name = fields.String(required=True)
class FoodSchema(ma.Schema):
id = fields.Integer(dump_only=True)
restaurant_id = fields.Integer(required=True)
name = fields.String(required=True, validate=validate.Length(1))
description = fields.String()
creation_date = fields.DateTime()
| true | true |
f72b36f1c01c85d1f6f16819bc764c32780c7fb6 | 22,006 | py | Python | sdk/python/pulumi_azure_native/databoxedge/v20200501preview/share.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/databoxedge/v20200501preview/share.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/databoxedge/v20200501preview/share.py | polivbr/pulumi-azure-native | 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ShareArgs', 'Share']
@pulumi.input_type
class ShareArgs:
    def __init__(__self__, *,
                 access_protocol: pulumi.Input[Union[str, 'ShareAccessProtocol']],
                 device_name: pulumi.Input[str],
                 monitoring_status: pulumi.Input[Union[str, 'MonitoringStatus']],
                 resource_group_name: pulumi.Input[str],
                 share_status: pulumi.Input[Union[str, 'ShareStatus']],
                 azure_container_info: Optional[pulumi.Input['AzureContainerInfoArgs']] = None,
                 client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]] = None,
                 data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 refresh_details: Optional[pulumi.Input['RefreshDetailsArgs']] = None,
                 user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]] = None):
        """
        The set of arguments for constructing a Share resource.
        :param pulumi.Input[Union[str, 'ShareAccessProtocol']] access_protocol: Access protocol to be used by the share.
        :param pulumi.Input[str] device_name: The device name.
        :param pulumi.Input[Union[str, 'MonitoringStatus']] monitoring_status: Current monitoring status of the share.
        :param pulumi.Input[str] resource_group_name: The resource group name.
        :param pulumi.Input[Union[str, 'ShareStatus']] share_status: Current status of the share.
        :param pulumi.Input['AzureContainerInfoArgs'] azure_container_info: Azure container mapping for the share.
        :param pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]] client_access_rights: List of IP addresses and corresponding access rights on the share (required for NFS protocol).
        :param pulumi.Input[Union[str, 'DataPolicy']] data_policy: Data policy of the share.
        :param pulumi.Input[str] description: Description for the share.
        :param pulumi.Input[str] name: The share name.
        :param pulumi.Input['RefreshDetailsArgs'] refresh_details: Details of the refresh job on this share.
        :param pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]] user_access_rights: Mapping of users and corresponding access rights on the share (required for SMB protocol).
        """
        # Required arguments are always recorded.
        pulumi.set(__self__, "access_protocol", access_protocol)
        pulumi.set(__self__, "device_name", device_name)
        pulumi.set(__self__, "monitoring_status", monitoring_status)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "share_status", share_status)
        # Optional arguments are only recorded when supplied.
        if azure_container_info is not None:
            pulumi.set(__self__, "azure_container_info", azure_container_info)
        if client_access_rights is not None:
            pulumi.set(__self__, "client_access_rights", client_access_rights)
        if data_policy is not None:
            pulumi.set(__self__, "data_policy", data_policy)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if refresh_details is not None:
            pulumi.set(__self__, "refresh_details", refresh_details)
        if user_access_rights is not None:
            pulumi.set(__self__, "user_access_rights", user_access_rights)
    @property
    @pulumi.getter(name="accessProtocol")
    def access_protocol(self) -> pulumi.Input[Union[str, 'ShareAccessProtocol']]:
        """
        Access protocol to be used by the share.
        """
        return pulumi.get(self, "access_protocol")
    @access_protocol.setter
    def access_protocol(self, value: pulumi.Input[Union[str, 'ShareAccessProtocol']]):
        pulumi.set(self, "access_protocol", value)
    @property
    @pulumi.getter(name="deviceName")
    def device_name(self) -> pulumi.Input[str]:
        """
        The device name.
        """
        return pulumi.get(self, "device_name")
    @device_name.setter
    def device_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "device_name", value)
    @property
    @pulumi.getter(name="monitoringStatus")
    def monitoring_status(self) -> pulumi.Input[Union[str, 'MonitoringStatus']]:
        """
        Current monitoring status of the share.
        """
        return pulumi.get(self, "monitoring_status")
    @monitoring_status.setter
    def monitoring_status(self, value: pulumi.Input[Union[str, 'MonitoringStatus']]):
        pulumi.set(self, "monitoring_status", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The resource group name.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="shareStatus")
    def share_status(self) -> pulumi.Input[Union[str, 'ShareStatus']]:
        """
        Current status of the share.
        """
        return pulumi.get(self, "share_status")
    @share_status.setter
    def share_status(self, value: pulumi.Input[Union[str, 'ShareStatus']]):
        pulumi.set(self, "share_status", value)
    @property
    @pulumi.getter(name="azureContainerInfo")
    def azure_container_info(self) -> Optional[pulumi.Input['AzureContainerInfoArgs']]:
        """
        Azure container mapping for the share.
        """
        return pulumi.get(self, "azure_container_info")
    @azure_container_info.setter
    def azure_container_info(self, value: Optional[pulumi.Input['AzureContainerInfoArgs']]):
        pulumi.set(self, "azure_container_info", value)
    @property
    @pulumi.getter(name="clientAccessRights")
    def client_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]:
        """
        List of IP addresses and corresponding access rights on the share (required for NFS protocol).
        """
        return pulumi.get(self, "client_access_rights")
    @client_access_rights.setter
    def client_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]):
        pulumi.set(self, "client_access_rights", value)
    @property
    @pulumi.getter(name="dataPolicy")
    def data_policy(self) -> Optional[pulumi.Input[Union[str, 'DataPolicy']]]:
        """
        Data policy of the share.
        """
        return pulumi.get(self, "data_policy")
    @data_policy.setter
    def data_policy(self, value: Optional[pulumi.Input[Union[str, 'DataPolicy']]]):
        pulumi.set(self, "data_policy", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description for the share.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The share name.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter(name="refreshDetails")
    def refresh_details(self) -> Optional[pulumi.Input['RefreshDetailsArgs']]:
        """
        Details of the refresh job on this share.
        """
        return pulumi.get(self, "refresh_details")
    @refresh_details.setter
    def refresh_details(self, value: Optional[pulumi.Input['RefreshDetailsArgs']]):
        pulumi.set(self, "refresh_details", value)
    @property
    @pulumi.getter(name="userAccessRights")
    def user_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]:
        """
        Mapping of users and corresponding access rights on the share (required for SMB protocol).
        """
        return pulumi.get(self, "user_access_rights")
    @user_access_rights.setter
    def user_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]):
        pulumi.set(self, "user_access_rights", value)
class Share(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
__props__=None):
"""
Represents a share on the Data Box Edge/Gateway device.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'ShareAccessProtocol']] access_protocol: Access protocol to be used by the share.
:param pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']] azure_container_info: Azure container mapping for the share.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]] client_access_rights: List of IP addresses and corresponding access rights on the share(required for NFS protocol).
:param pulumi.Input[Union[str, 'DataPolicy']] data_policy: Data policy of the share.
:param pulumi.Input[str] description: Description for the share.
:param pulumi.Input[str] device_name: The device name.
:param pulumi.Input[Union[str, 'MonitoringStatus']] monitoring_status: Current monitoring status of the share.
:param pulumi.Input[str] name: The share name.
:param pulumi.Input[pulumi.InputType['RefreshDetailsArgs']] refresh_details: Details of the refresh job on this share.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[Union[str, 'ShareStatus']] share_status: Current status of the share.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]] user_access_rights: Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ShareArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Represents a share on the Data Box Edge/Gateway device.
:param str resource_name: The name of the resource.
:param ShareArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ShareArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ShareArgs.__new__(ShareArgs)
if access_protocol is None and not opts.urn:
raise TypeError("Missing required property 'access_protocol'")
__props__.__dict__["access_protocol"] = access_protocol
__props__.__dict__["azure_container_info"] = azure_container_info
__props__.__dict__["client_access_rights"] = client_access_rights
__props__.__dict__["data_policy"] = data_policy
__props__.__dict__["description"] = description
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
if monitoring_status is None and not opts.urn:
raise TypeError("Missing required property 'monitoring_status'")
__props__.__dict__["monitoring_status"] = monitoring_status
__props__.__dict__["name"] = name
__props__.__dict__["refresh_details"] = refresh_details
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if share_status is None and not opts.urn:
raise TypeError("Missing required property 'share_status'")
__props__.__dict__["share_status"] = share_status
__props__.__dict__["user_access_rights"] = user_access_rights
__props__.__dict__["share_mappings"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:Share"), pulumi.Alias(type_="azure-native:databoxedge:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200901preview:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:Share")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Share, __self__).__init__(
'azure-native:databoxedge/v20200501preview:Share',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Share':
"""
Get an existing Share resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ShareArgs.__new__(ShareArgs)
__props__.__dict__["access_protocol"] = None
__props__.__dict__["azure_container_info"] = None
__props__.__dict__["client_access_rights"] = None
__props__.__dict__["data_policy"] = None
__props__.__dict__["description"] = None
__props__.__dict__["monitoring_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["refresh_details"] = None
__props__.__dict__["share_mappings"] = None
__props__.__dict__["share_status"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_access_rights"] = None
return Share(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> pulumi.Output[str]:
"""
Access protocol to be used by the share.
"""
return pulumi.get(self, "access_protocol")
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> pulumi.Output[Optional['outputs.AzureContainerInfoResponse']]:
"""
Azure container mapping for the share.
"""
return pulumi.get(self, "azure_container_info")
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.ClientAccessRightResponse']]]:
"""
List of IP addresses and corresponding access rights on the share(required for NFS protocol).
"""
return pulumi.get(self, "client_access_rights")
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> pulumi.Output[Optional[str]]:
"""
Data policy of the share.
"""
return pulumi.get(self, "data_policy")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description for the share.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Output[str]:
"""
Current monitoring status of the share.
"""
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The object name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> pulumi.Output[Optional['outputs.RefreshDetailsResponse']]:
"""
Details of the refresh job on this share.
"""
return pulumi.get(self, "refresh_details")
@property
@pulumi.getter(name="shareMappings")
def share_mappings(self) -> pulumi.Output[Sequence['outputs.MountPointMapResponse']]:
"""
Share mount point to the role.
"""
return pulumi.get(self, "share_mappings")
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> pulumi.Output[str]:
"""
Current status of the share.
"""
return pulumi.get(self, "share_status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The hierarchical type of the object.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.UserAccessRightResponse']]]:
"""
Mapping of users and corresponding access rights on the share (required for SMB protocol).
"""
return pulumi.get(self, "user_access_rights")
| 48.578366 | 1,294 | 0.668045 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['ShareArgs', 'Share']
@pulumi.input_type
class ShareArgs:
def __init__(__self__, *,
access_protocol: pulumi.Input[Union[str, 'ShareAccessProtocol']],
device_name: pulumi.Input[str],
monitoring_status: pulumi.Input[Union[str, 'MonitoringStatus']],
resource_group_name: pulumi.Input[str],
share_status: pulumi.Input[Union[str, 'ShareStatus']],
azure_container_info: Optional[pulumi.Input['AzureContainerInfoArgs']] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input['RefreshDetailsArgs']] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]] = None):
pulumi.set(__self__, "access_protocol", access_protocol)
pulumi.set(__self__, "device_name", device_name)
pulumi.set(__self__, "monitoring_status", monitoring_status)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "share_status", share_status)
if azure_container_info is not None:
pulumi.set(__self__, "azure_container_info", azure_container_info)
if client_access_rights is not None:
pulumi.set(__self__, "client_access_rights", client_access_rights)
if data_policy is not None:
pulumi.set(__self__, "data_policy", data_policy)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if refresh_details is not None:
pulumi.set(__self__, "refresh_details", refresh_details)
if user_access_rights is not None:
pulumi.set(__self__, "user_access_rights", user_access_rights)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> pulumi.Input[Union[str, 'ShareAccessProtocol']]:
return pulumi.get(self, "access_protocol")
@access_protocol.setter
def access_protocol(self, value: pulumi.Input[Union[str, 'ShareAccessProtocol']]):
pulumi.set(self, "access_protocol", value)
@property
@pulumi.getter(name="deviceName")
def device_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "device_name")
@device_name.setter
def device_name(self, value: pulumi.Input[str]):
pulumi.set(self, "device_name", value)
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Input[Union[str, 'MonitoringStatus']]:
return pulumi.get(self, "monitoring_status")
@monitoring_status.setter
def monitoring_status(self, value: pulumi.Input[Union[str, 'MonitoringStatus']]):
pulumi.set(self, "monitoring_status", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> pulumi.Input[Union[str, 'ShareStatus']]:
return pulumi.get(self, "share_status")
@share_status.setter
def share_status(self, value: pulumi.Input[Union[str, 'ShareStatus']]):
pulumi.set(self, "share_status", value)
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> Optional[pulumi.Input['AzureContainerInfoArgs']]:
return pulumi.get(self, "azure_container_info")
@azure_container_info.setter
def azure_container_info(self, value: Optional[pulumi.Input['AzureContainerInfoArgs']]):
pulumi.set(self, "azure_container_info", value)
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]:
return pulumi.get(self, "client_access_rights")
@client_access_rights.setter
def client_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ClientAccessRightArgs']]]]):
pulumi.set(self, "client_access_rights", value)
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> Optional[pulumi.Input[Union[str, 'DataPolicy']]]:
return pulumi.get(self, "data_policy")
@data_policy.setter
def data_policy(self, value: Optional[pulumi.Input[Union[str, 'DataPolicy']]]):
pulumi.set(self, "data_policy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> Optional[pulumi.Input['RefreshDetailsArgs']]:
return pulumi.get(self, "refresh_details")
@refresh_details.setter
def refresh_details(self, value: Optional[pulumi.Input['RefreshDetailsArgs']]):
pulumi.set(self, "refresh_details", value)
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]:
return pulumi.get(self, "user_access_rights")
@user_access_rights.setter
def user_access_rights(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['UserAccessRightArgs']]]]):
pulumi.set(self, "user_access_rights", value)
class Share(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: ShareArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ShareArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
access_protocol: Optional[pulumi.Input[Union[str, 'ShareAccessProtocol']]] = None,
azure_container_info: Optional[pulumi.Input[pulumi.InputType['AzureContainerInfoArgs']]] = None,
client_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['ClientAccessRightArgs']]]]] = None,
data_policy: Optional[pulumi.Input[Union[str, 'DataPolicy']]] = None,
description: Optional[pulumi.Input[str]] = None,
device_name: Optional[pulumi.Input[str]] = None,
monitoring_status: Optional[pulumi.Input[Union[str, 'MonitoringStatus']]] = None,
name: Optional[pulumi.Input[str]] = None,
refresh_details: Optional[pulumi.Input[pulumi.InputType['RefreshDetailsArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[Union[str, 'ShareStatus']]] = None,
user_access_rights: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['UserAccessRightArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ShareArgs.__new__(ShareArgs)
if access_protocol is None and not opts.urn:
raise TypeError("Missing required property 'access_protocol'")
__props__.__dict__["access_protocol"] = access_protocol
__props__.__dict__["azure_container_info"] = azure_container_info
__props__.__dict__["client_access_rights"] = client_access_rights
__props__.__dict__["data_policy"] = data_policy
__props__.__dict__["description"] = description
if device_name is None and not opts.urn:
raise TypeError("Missing required property 'device_name'")
__props__.__dict__["device_name"] = device_name
if monitoring_status is None and not opts.urn:
raise TypeError("Missing required property 'monitoring_status'")
__props__.__dict__["monitoring_status"] = monitoring_status
__props__.__dict__["name"] = name
__props__.__dict__["refresh_details"] = refresh_details
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if share_status is None and not opts.urn:
raise TypeError("Missing required property 'share_status'")
__props__.__dict__["share_status"] = share_status
__props__.__dict__["user_access_rights"] = user_access_rights
__props__.__dict__["share_mappings"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:databoxedge/v20200501preview:Share"), pulumi.Alias(type_="azure-native:databoxedge:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190301:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190701:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20190801:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200901:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20200901preview:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20200901preview:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20201201:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20201201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201:Share"), pulumi.Alias(type_="azure-native:databoxedge/v20210201preview:Share"), pulumi.Alias(type_="azure-nextgen:databoxedge/v20210201preview:Share")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Share, __self__).__init__(
'azure-native:databoxedge/v20200501preview:Share',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Share':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = ShareArgs.__new__(ShareArgs)
__props__.__dict__["access_protocol"] = None
__props__.__dict__["azure_container_info"] = None
__props__.__dict__["client_access_rights"] = None
__props__.__dict__["data_policy"] = None
__props__.__dict__["description"] = None
__props__.__dict__["monitoring_status"] = None
__props__.__dict__["name"] = None
__props__.__dict__["refresh_details"] = None
__props__.__dict__["share_mappings"] = None
__props__.__dict__["share_status"] = None
__props__.__dict__["type"] = None
__props__.__dict__["user_access_rights"] = None
return Share(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="accessProtocol")
def access_protocol(self) -> pulumi.Output[str]:
return pulumi.get(self, "access_protocol")
@property
@pulumi.getter(name="azureContainerInfo")
def azure_container_info(self) -> pulumi.Output[Optional['outputs.AzureContainerInfoResponse']]:
return pulumi.get(self, "azure_container_info")
@property
@pulumi.getter(name="clientAccessRights")
def client_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.ClientAccessRightResponse']]]:
return pulumi.get(self, "client_access_rights")
@property
@pulumi.getter(name="dataPolicy")
def data_policy(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "data_policy")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="monitoringStatus")
def monitoring_status(self) -> pulumi.Output[str]:
return pulumi.get(self, "monitoring_status")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="refreshDetails")
def refresh_details(self) -> pulumi.Output[Optional['outputs.RefreshDetailsResponse']]:
return pulumi.get(self, "refresh_details")
@property
@pulumi.getter(name="shareMappings")
def share_mappings(self) -> pulumi.Output[Sequence['outputs.MountPointMapResponse']]:
return pulumi.get(self, "share_mappings")
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> pulumi.Output[str]:
return pulumi.get(self, "share_status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
@property
@pulumi.getter(name="userAccessRights")
def user_access_rights(self) -> pulumi.Output[Optional[Sequence['outputs.UserAccessRightResponse']]]:
return pulumi.get(self, "user_access_rights")
| true | true |
f72b36f52912edb8de8bb2207281239f45df89b6 | 2,134 | py | Python | demo/orm.py | 1987539447/start-python | 06ee5eb30e7395cd8432e8e33d7209fa855f4ad9 | [
"Apache-2.0"
] | null | null | null | demo/orm.py | 1987539447/start-python | 06ee5eb30e7395cd8432e8e33d7209fa855f4ad9 | [
"Apache-2.0"
] | null | null | null | demo/orm.py | 1987539447/start-python | 06ee5eb30e7395cd8432e8e33d7209fa855f4ad9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# FileName:orm.py
# -*- coding: utf-8 -*-
""" 通过元类实现简单的ORM框剪 """
class Field(object):
def __init__(self, name, column_type):
self.name = name
self.column_type = column_type
def __str__(self):
return '<%s:%s>' % (self.__class__.__name__, self.name)
class IntegerField(Field):
def __init__(self, name):
super(IntegerField, self).__init__(name, 'bigint')
class StringField(Field):
def __init__(self, name):
super(StringField, self).__init__(name, 'varchar(100)')
class ModelMetaClass(type):
def __new__(cls, name, base, attrs):
if name == 'Model':
return type.__new__(cls, name, base, attrs)
print('Found Model: %s' % name)
mapping = dict()
for k, v in attrs.items():
if isinstance(v, Field):
print('Found mapping %s ==> %s' % (k, v))
mapping[k] = v
for k in mapping.keys():
attrs.pop(k)
attrs['__mapping__'] = mapping
attrs['__table__'] = name
return type.__new__(cls, name, base, attrs)
class Model(dict, metaclass=ModelMetaClass):
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Model' object do not has attribute %s" % key)
def __setattr__(self, key, value):
self[key] = value
def save(self):
fields = []
params = []
args = []
for k, v in self.__mapping__.items():
fields.append(v.name)
params.append('?')
args.append(getattr(self, k, None))
sql = 'insert into %s (%s) values(%s)' % (self.__table__, ','.join(fields), ','.join(params))
print('SQL: %s' % sql)
print('ARGS: %s' % str(args))
# test code
class User(Model):
id = IntegerField('id')
name = StringField('username')
email = StringField('email')
password = StringField('password')
u = User(id=123, name='Michel', email='abc@jd.com', password='pass')
u.save()
| 27.012658 | 101 | 0.56701 |
class Field(object):
def __init__(self, name, column_type):
self.name = name
self.column_type = column_type
def __str__(self):
return '<%s:%s>' % (self.__class__.__name__, self.name)
class IntegerField(Field):
def __init__(self, name):
super(IntegerField, self).__init__(name, 'bigint')
class StringField(Field):
def __init__(self, name):
super(StringField, self).__init__(name, 'varchar(100)')
class ModelMetaClass(type):
def __new__(cls, name, base, attrs):
if name == 'Model':
return type.__new__(cls, name, base, attrs)
print('Found Model: %s' % name)
mapping = dict()
for k, v in attrs.items():
if isinstance(v, Field):
print('Found mapping %s ==> %s' % (k, v))
mapping[k] = v
for k in mapping.keys():
attrs.pop(k)
attrs['__mapping__'] = mapping
attrs['__table__'] = name
return type.__new__(cls, name, base, attrs)
class Model(dict, metaclass=ModelMetaClass):
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Model' object do not has attribute %s" % key)
def __setattr__(self, key, value):
self[key] = value
def save(self):
fields = []
params = []
args = []
for k, v in self.__mapping__.items():
fields.append(v.name)
params.append('?')
args.append(getattr(self, k, None))
sql = 'insert into %s (%s) values(%s)' % (self.__table__, ','.join(fields), ','.join(params))
print('SQL: %s' % sql)
print('ARGS: %s' % str(args))
class User(Model):
id = IntegerField('id')
name = StringField('username')
email = StringField('email')
password = StringField('password')
u = User(id=123, name='Michel', email='abc@jd.com', password='pass')
u.save()
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.