# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class SwitchConfigLearnedInformation(Base):
"""NOT DEFINED
The SwitchConfigLearnedInformation class encapsulates a list of switchConfigLearnedInformation resources that are managed by the system.
A list of resources can be retrieved from the server using the SwitchConfigLearnedInformation.find() method.
"""
__slots__ = ()
_SDM_NAME = 'switchConfigLearnedInformation'
_SDM_ATT_MAP = {
'ConfigFlags': 'configFlags',
'DataPathId': 'dataPathId',
'DataPathIdAsHex': 'dataPathIdAsHex',
'ErrorCode': 'errorCode',
'ErrorType': 'errorType',
'Latency': 'latency',
'LocalIp': 'localIp',
'MissSendLength': 'missSendLength',
'NegotiatedVersion': 'negotiatedVersion',
'RemoteIp': 'remoteIp',
'ReplyState': 'replyState',
}
def __init__(self, parent):
super(SwitchConfigLearnedInformation, self).__init__(parent)
@property
def ConfigFlags(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ConfigFlags'])
@property
def DataPathId(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['DataPathId'])
@property
def DataPathIdAsHex(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['DataPathIdAsHex'])
@property
def ErrorCode(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ErrorCode'])
@property
def ErrorType(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ErrorType'])
@property
def Latency(self):
"""
Returns
-------
- number: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['Latency'])
@property
def LocalIp(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['LocalIp'])
@property
def MissSendLength(self):
"""
Returns
-------
- number: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['MissSendLength'])
@property
def NegotiatedVersion(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['NegotiatedVersion'])
@property
def RemoteIp(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['RemoteIp'])
@property
def ReplyState(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ReplyState'])
def find(self, ConfigFlags=None, DataPathId=None, DataPathIdAsHex=None, ErrorCode=None, ErrorType=None, Latency=None, LocalIp=None, MissSendLength=None, NegotiatedVersion=None, RemoteIp=None, ReplyState=None):
"""Finds and retrieves switchConfigLearnedInformation resources from the server.
All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve switchConfigLearnedInformation resources from the server.
To retrieve an exact match ensure the parameter value starts with ^ and ends with $
By default the find method takes no parameters and will retrieve all switchConfigLearnedInformation resources from the server.
Args
----
- ConfigFlags (str): NOT DEFINED
- DataPathId (str): NOT DEFINED
- DataPathIdAsHex (str): NOT DEFINED
- ErrorCode (str): NOT DEFINED
- ErrorType (str): NOT DEFINED
- Latency (number): NOT DEFINED
- LocalIp (str): NOT DEFINED
- MissSendLength (number): NOT DEFINED
- NegotiatedVersion (str): NOT DEFINED
- RemoteIp (str): NOT DEFINED
- ReplyState (str): NOT DEFINED
Returns
-------
- self: This instance with matching switchConfigLearnedInformation resources retrieved from the server available through an iterator or index
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))
def read(self, href):
"""Retrieves a single instance of switchConfigLearnedInformation data from the server.
Args
----
- href (str): An href to the instance to be retrieved
Returns
-------
- self: This instance with the switchConfigLearnedInformation resources from the server available through an iterator or index
Raises
------
- NotFoundError: The requested resource does not exist on the server
- ServerError: The server has encountered an uncategorized error condition
"""
return self._read(href)
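# Usage sketch (illustrative; not part of this generated module). The session
# bootstrap and the Vport/Protocols attribute chain below are assumptions based
# on typical ixnetwork_restpy usage; only find()'s anchored-regex matching is
# documented above.
def _example_find_switch_config(session):
    """`session` is assumed to be an ixnetwork_restpy SessionAssistant."""
    ixnetwork = session.Ixnetwork
    for vport in ixnetwork.Vport.find():
        # ^...$ anchors the server-side regex so only exact matches are returned
        learned = vport.Protocols.find().SwitchConfigLearnedInformation.find(
            RemoteIp='^10\\.0\\.0\\.1$')
        for info in learned:
            print(info.DataPathId, info.NegotiatedVersion)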
# ---- end of file: ixnetwork_restpy/testplatform/sessions/ixnetwork/vport/protocols/switchconfiglearnedinformation_e983ca5da0eadbba93d1ce1d2903a5b7.py ----
# -*- coding: utf-8 -*-
"""Cisco DNA Center Claim a Device to a Site data model.
Copyright (c) 2019-2021 Cisco Systems.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import fastjsonschema
import json
from dnacentersdk.exceptions import MalformedRequest
from builtins import *
class JSONSchemaValidator5889Fb844939A13B(object):
"""Claim a Device to a Site request schema definition."""
def __init__(self):
super(JSONSchemaValidator5889Fb844939A13B, self).__init__()
self._validator = fastjsonschema.compile(json.loads(
'''{
"properties": {
"deviceId": {
"type": [
"string",
"null"
]
},
"siteId": {
"type": [
"string",
"null"
]
},
"type": {
"enum": [
"Default",
"AccessPoint",
"StackSwitch",
"Sensor",
"MobilityExpress",
null
],
"type": [
"string",
"null"
]
}
},
"type": "object"
}'''.replace("\n" + ' ' * 16, '')
))
def validate(self, request):
try:
self._validator(request)
except fastjsonschema.exceptions.JsonSchemaException as e:
raise MalformedRequest(
'{} is invalid. Reason: {}'.format(request, e.message)
)
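# Usage sketch (hypothetical payload values; the schema itself is defined above):
# any subset of deviceId/siteId/type may be supplied and nulls are accepted,
# but `type` must be one of the enumerated strings.
if __name__ == '__main__':
    _validator = JSONSchemaValidator5889Fb844939A13B()
    _validator.validate({'deviceId': 'abc123', 'siteId': 'site-1', 'type': 'Default'})
    try:
        _validator.validate({'type': 'NotARealType'})  # rejected by the enum check
    except MalformedRequest as exc:
        print(exc)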
# ---- end of file: dnacentersdk/models/validators/v1_2_10/jsd_5889fb844939a13b.py ----
#!/usr/bin/python
import sys
import cgi
import cgitb
import sqlite3
reload(sys)
sys.setdefaultencoding('utf-8')
cgitb.enable()
# html
print("Content-type: text/html\n")
print('<meta charset="utf-8">')
print("<html><head>")
print('''<script type="text/javascript" src="https://ajax.googleapis.com/ajax/libs/jquery/3.3.1/jquery.min.js"></script>''')
print("<title>BRITE REU Candidates</title>")
print('''<link rel="stylesheet" href="https://bioed.bu.edu/students_21/group_proj/group_K/css/nav.css">
<link rel="stylesheet" href="https://bioed.bu.edu/students_21/group_proj/group_K/css/appadmin.css">
</head>''')
print("<body>")
print('''<div id="bg-image">''')
print('''<div id="topnav">
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/show_applicant_admin.py">Applicant List</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/stats_admin.py">Applicant Statistics</a>
<a href="#assign users">Assign Users</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/input_projects.py">Input Faculty Projects</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/review_summary_admin.py">View All Past Reviews</a>
<a class="active" href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/assign_candidate.py">Assign Candidates to Faculty</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/can_pref.py">Candidate Preferences</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/match.py">Match Candidates to Faculty</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/finalmatch.py">Final Matches</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/help_admin.py">Help</a>
<a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/about_admin.py">About/Contact</a>
</div>''')
print("<h3>Select Checkboxes to Assign Candidates to Faculty Members</h3>")
print("<h4>To Remove an Assignment, Uncheck the Checkbox</h4>")
#query to get candidate data for the rows
query1 = "SELECT cid, firstname, lastname FROM Applicant join Candidate on Applicant.aid=Candidate.cid;"
#query to get the faculty and project names for the table headers
query2 = 'SELECT pid, uid, fname || " " || lname || ":\n" || project_name FROM Project JOIN User using(uid) ORDER BY(lname);'
#query to get all current candidate-faculty pairs in the database
query3 = 'SELECT cid || "_" || pid, assigned_at FROM Assignment ORDER BY(cid);'
#start connection
connection = sqlite3.connect('db/BRITEREU.db')
c = connection.cursor()
try:
#execute query 1
c.execute(query1)
#get results to above standard query
results1 = c.fetchall()
except Exception:
print("<p><font color=red><b>Error Query 1</b></font></p>")
try:
#execute query 2
c.execute(query2)
#get results to above standard query
results2 = c.fetchall()
except Exception:
print("<p><font color=red><b>Error Query 2</b></font></p>")
try:
#execute query 3
c.execute(query3)
#get results to above standard query
results3 = c.fetchall()
except Exception:
print("<p><font color=red><b>Error Query 3</b></font></p>")
c.close()
connection.close()
#get all the candidate-faculty pair ids currently in the database which will be used in the section that checks and uses form data
cfids = [cf[0] for cf in results3]
#retrieve form data
form = cgi.FieldStorage()
#if form is empty, then it's possible that everything is to be deleted from the Assignment table
#if not form:
# if results3:
# truncateStatement = "DELETE FROM Assignment;"
# connection = sqlite3.connect('db/BRITEREU.db')
# c = connection.cursor()
# c.execute(truncateStatement)
# connection.commit()
#check what checkboxes are checked
#if checkbox was selected that was not previously selected - insert those pairs into the Assignment table
#if checkbox is no longer selected - delete those pairs from the Assignment table
if form:
res3 = [pair for pair in cfids]
pairlist = form.getlist("cf")
#find pairs that are in the selected list (pairlist) and not in the current database list (res3)
tobe_inserted = list(set(pairlist) - set(res3))
tobe_inserted = [tuple(i.split("_")) for i in tobe_inserted]
#find pairs that are not in the selected list(pairlist) and are in the current database list (res3)
tobe_removed = list(set(res3) - set(pairlist))
tobe_removed = [tuple(map(int, i.split("_"))) for i in tobe_removed]
if tobe_inserted or tobe_removed:
connection = sqlite3.connect('db/BRITEREU.db')
c = connection.cursor()
        for pair in tobe_inserted:
            #parameterized queries keep the form values from being interpreted as SQL
            c.execute("INSERT INTO Assignment(cid, pid) VALUES (?, ?);", pair)
            connection.commit()
        for pair in tobe_removed:
            c.execute("DELETE FROM Assignment WHERE cid = ? and pid = ?;", pair)
            connection.commit()
c.close()
connection.close()
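#Worked illustration of the reconciliation above (hypothetical ids):
#  selected in form = {"1_2", "1_3"};  currently in DB = {"1_2", "4_5"}
#  tobe_inserted = selected - db = {"1_3"} -> INSERT cid=1, pid=3
#  tobe_removed  = db - selected = {"4_5"} -> DELETE cid=4, pid=5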
#query the database again to now get all updated pairs
query4 = 'SELECT cid || "_" || pid, assigned_at FROM Assignment ORDER BY(cid);'
connection = sqlite3.connect('db/BRITEREU.db')
c = connection.cursor()
try:
#execute query 1
c.execute(query4)
#get results to above standard query
results4 = c.fetchall()
except Exception:
print("<p><font color=red><b>Error Query 4</b></font></p>")
#form action for user to submit checkboxes selections
print('''<form name="form1" id="form1" action="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/assign_candidate.py" method="post" >''')
print('<table id=Candidate class="dataframe">')
print("<tr><th>Candidate ID</th><th>Candidate Name</th>")
#gets list of faculty
#adds all the faculty who are in the database as columns
for faculty in results2:
print("<th>%s</th>") % faculty[2]
print("</tr>")
#get the Project IDs for the projects so that you concatenate to the CID to formulate a value pair
pids = [faculty[0] for faculty in results2]
#added proper URL for reference to reviewer page
#print the candidate table with a checkbox for each faculty member
for row in results1:
    print('''<tr><td><a href="https://bioed.bu.edu/cgi-bin/students_21/group_proj/group_K/reviewer.py?AID=%s">%s</a></td><td>%s %s</td>''' % (row[0], row[0], row[1], row[2]))
for f in pids:
for cf_pair in results4:
if (str(row[0])+"_"+str(f)) in cf_pair:
                print('<td><input title="%s GMT" type="checkbox" name="cf" value=%s checked="checked" />rank</td>' % (cf_pair[1], (str(row[0])+"_"+str(f))))
break
else:
            print('<td><input type="checkbox" name="cf" value=%s /></td>' % (str(row[0])+"_"+str(f)))
print("</tr>")
#close the table before the submit button so the form and table nest correctly
print("</table>")
#add submit button for assigning faculty to candidates
print('<input type="submit" value="Assign Candidates" /><br /><br />')
#end form
print("</form>")
#filtering section for the table
print('''<script src="https://bioed.bu.edu/students_21/group_proj/group_K/tablefilter/tablefilter.js"></script>''')
print('''<script data-config="">
var filtersConfig = {
base_path: 'https://bioed.bu.edu/students_21/divyas3/tablefilter/',
auto_filter: {
delay: 110 //milliseconds
},
filters_row_index: 1,
state: true,
alternate_rows: true,
rows_counter: true,
btn_reset: true,
status_bar: true,
msg_filter: 'Filtering...'
};
var tf = new TableFilter(Candidate, filtersConfig);
tf.init();
</script>''')
print("</body> </html>")
# ---- end of file: assign_candidate.py ----
"""Test the UniFi Protect switch platform."""
# pylint: disable=protected-access
from __future__ import annotations
from unittest.mock import AsyncMock, Mock
import pytest
from pyunifiprotect.data import (
Camera,
Light,
RecordingMode,
SmartDetectObjectType,
VideoMode,
)
from homeassistant.components.unifiprotect.const import DEFAULT_ATTRIBUTION
from homeassistant.components.unifiprotect.switch import (
CAMERA_SWITCHES,
LIGHT_SWITCHES,
ProtectSwitchEntityDescription,
)
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_ENTITY_ID, STATE_OFF, Platform
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_registry as er
from .conftest import (
MockEntityFixture,
assert_entity_counts,
enable_entity,
ids_from_device_description,
)
CAMERA_SWITCHES_BASIC = [
d
for d in CAMERA_SWITCHES
if d.name != "Detections: Face"
and d.name != "Detections: Package"
and d.name != "SSH Enabled"
]
CAMERA_SWITCHES_NO_EXTRA = [
d for d in CAMERA_SWITCHES_BASIC if d.name not in ("High FPS", "Privacy Mode")
]
@pytest.fixture(name="light")
async def light_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_light: Light
):
"""Fixture for a single light for testing the switch platform."""
# disable pydantic validation so mocking can happen
Light.__config__.validate_assignment = False
light_obj = mock_light.copy(deep=True)
light_obj._api = mock_entry.api
light_obj.name = "Test Light"
light_obj.is_ssh_enabled = False
light_obj.light_device_settings.is_indicator_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.lights = {
light_obj.id: light_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 2, 1)
yield light_obj
Light.__config__.validate_assignment = True
@pytest.fixture(name="camera")
async def camera_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
camera_obj.feature_flags.has_led_status = True
camera_obj.feature_flags.has_hdr = True
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT, VideoMode.HIGH_FPS]
camera_obj.feature_flags.has_privacy_mask = True
camera_obj.feature_flags.has_speaker = True
camera_obj.feature_flags.has_smart_detect = True
camera_obj.feature_flags.smart_detect_types = [
SmartDetectObjectType.PERSON,
SmartDetectObjectType.VEHICLE,
]
camera_obj.is_ssh_enabled = False
camera_obj.led_settings.is_enabled = False
camera_obj.hdr_mode = False
camera_obj.video_mode = VideoMode.DEFAULT
camera_obj.remove_privacy_zone()
camera_obj.speaker_settings.are_system_sounds_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
camera_obj.smart_detect_settings.object_types = []
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 12, 11)
yield camera_obj
Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_none")
async def camera_none_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.DETECTIONS
camera_obj.feature_flags.has_led_status = False
camera_obj.feature_flags.has_hdr = False
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
camera_obj.feature_flags.has_privacy_mask = False
camera_obj.feature_flags.has_speaker = False
camera_obj.feature_flags.has_smart_detect = False
camera_obj.is_ssh_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 5, 4)
yield camera_obj
Camera.__config__.validate_assignment = True
@pytest.fixture(name="camera_privacy")
async def camera_privacy_fixture(
hass: HomeAssistant, mock_entry: MockEntityFixture, mock_camera: Camera
):
"""Fixture for a single camera for testing the switch platform."""
# disable pydantic validation so mocking can happen
Camera.__config__.validate_assignment = False
camera_obj = mock_camera.copy(deep=True)
camera_obj._api = mock_entry.api
camera_obj.channels[0]._api = mock_entry.api
camera_obj.channels[1]._api = mock_entry.api
camera_obj.channels[2]._api = mock_entry.api
camera_obj.name = "Test Camera"
camera_obj.recording_settings.mode = RecordingMode.NEVER
camera_obj.feature_flags.has_led_status = False
camera_obj.feature_flags.has_hdr = False
camera_obj.feature_flags.video_modes = [VideoMode.DEFAULT]
camera_obj.feature_flags.has_privacy_mask = True
camera_obj.feature_flags.has_speaker = False
camera_obj.feature_flags.has_smart_detect = False
camera_obj.add_privacy_zone()
camera_obj.is_ssh_enabled = False
camera_obj.osd_settings.is_name_enabled = False
camera_obj.osd_settings.is_date_enabled = False
camera_obj.osd_settings.is_logo_enabled = False
camera_obj.osd_settings.is_debug_enabled = False
mock_entry.api.bootstrap.reset_objects()
mock_entry.api.bootstrap.cameras = {
camera_obj.id: camera_obj,
}
await hass.config_entries.async_setup(mock_entry.entry.entry_id)
await hass.async_block_till_done()
assert_entity_counts(hass, Platform.SWITCH, 6, 5)
yield camera_obj
Camera.__config__.validate_assignment = True
async def test_switch_setup_light(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
light: Light,
):
"""Test switch entity setup for light devices."""
entity_registry = er.async_get(hass)
description = LIGHT_SWITCHES[1]
unique_id, entity_id = ids_from_device_description(
Platform.SWITCH, light, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
description = LIGHT_SWITCHES[0]
unique_id = f"{light.id}_{description.key}"
entity_id = f"switch.test_light_{description.name.lower().replace(' ', '_')}"
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_all(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
camera: Camera,
):
"""Test switch entity setup for camera devices (all enabled feature flags)."""
entity_registry = er.async_get(hass)
for description in CAMERA_SWITCHES_BASIC:
unique_id, entity_id = ids_from_device_description(
Platform.SWITCH, camera, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
description = CAMERA_SWITCHES[0]
description_entity_name = (
description.name.lower().replace(":", "").replace(" ", "_")
)
unique_id = f"{camera.id}_{description.key}"
entity_id = f"switch.test_camera_{description_entity_name}"
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_setup_camera_none(
hass: HomeAssistant,
mock_entry: MockEntityFixture,
camera_none: Camera,
):
"""Test switch entity setup for camera devices (no enabled feature flags)."""
entity_registry = er.async_get(hass)
for description in CAMERA_SWITCHES_BASIC:
if description.ufp_required_field is not None:
continue
unique_id, entity_id = ids_from_device_description(
Platform.SWITCH, camera_none, description
)
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.unique_id == unique_id
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
description = CAMERA_SWITCHES[0]
description_entity_name = (
description.name.lower().replace(":", "").replace(" ", "_")
)
unique_id = f"{camera_none.id}_{description.key}"
entity_id = f"switch.test_camera_{description_entity_name}"
entity = entity_registry.async_get(entity_id)
assert entity
assert entity.disabled is True
assert entity.unique_id == unique_id
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
state = hass.states.get(entity_id)
assert state
assert state.state == STATE_OFF
assert state.attributes[ATTR_ATTRIBUTION] == DEFAULT_ATTRIBUTION
async def test_switch_light_status(hass: HomeAssistant, light: Light):
"""Tests status light switch for lights."""
description = LIGHT_SWITCHES[1]
light.__fields__["set_status_light"] = Mock()
light.set_status_light = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, light, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
light.set_status_light.assert_called_once_with(True)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
light.set_status_light.assert_called_with(False)
async def test_switch_camera_ssh(
hass: HomeAssistant, camera: Camera, mock_entry: MockEntityFixture
):
"""Tests SSH switch for cameras."""
description = CAMERA_SWITCHES[0]
camera.__fields__["set_ssh"] = Mock()
camera.set_ssh = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await enable_entity(hass, mock_entry.entry.entry_id, entity_id)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_ssh.assert_called_once_with(True)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_ssh.assert_called_with(False)
@pytest.mark.parametrize("description", CAMERA_SWITCHES_NO_EXTRA)
async def test_switch_camera_simple(
hass: HomeAssistant, camera: Camera, description: ProtectSwitchEntityDescription
):
"""Tests all simple switches for cameras."""
assert description.ufp_set_method is not None
camera.__fields__[description.ufp_set_method] = Mock()
setattr(camera, description.ufp_set_method, AsyncMock())
set_method = getattr(camera, description.ufp_set_method)
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
set_method.assert_called_once_with(True)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
set_method.assert_called_with(False)
async def test_switch_camera_highfps(hass: HomeAssistant, camera: Camera):
"""Tests High FPS switch for cameras."""
description = CAMERA_SWITCHES[3]
camera.__fields__["set_video_mode"] = Mock()
camera.set_video_mode = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_video_mode.assert_called_once_with(VideoMode.HIGH_FPS)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_video_mode.assert_called_with(VideoMode.DEFAULT)
async def test_switch_camera_privacy(hass: HomeAssistant, camera: Camera):
"""Tests Privacy Mode switch for cameras."""
description = CAMERA_SWITCHES[4]
camera.__fields__["set_privacy"] = Mock()
camera.set_privacy = AsyncMock()
_, entity_id = ids_from_device_description(Platform.SWITCH, camera, description)
await hass.services.async_call(
"switch", "turn_on", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_privacy.assert_called_once_with(True, 0, RecordingMode.NEVER)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera.set_privacy.assert_called_with(
False, camera.mic_volume, camera.recording_settings.mode
)
async def test_switch_camera_privacy_already_on(
hass: HomeAssistant, camera_privacy: Camera
):
"""Tests Privacy Mode switch for cameras with privacy mode defaulted on."""
description = CAMERA_SWITCHES[4]
camera_privacy.__fields__["set_privacy"] = Mock()
camera_privacy.set_privacy = AsyncMock()
_, entity_id = ids_from_device_description(
Platform.SWITCH, camera_privacy, description
)
await hass.services.async_call(
"switch", "turn_off", {ATTR_ENTITY_ID: entity_id}, blocking=True
)
camera_privacy.set_privacy.assert_called_once_with(False, 100, RecordingMode.ALWAYS)
# ---- end of file: tests/components/unifiprotect/test_switch.py ----
#!/usr/bin/env python3
'''
Model for Riemannian feature calculation and classification for EEG data
'''
import numpy as np
from sklearn.svm import LinearSVC, SVC
from riemannian_multiscale import RiemannianMultiscale, QuantizedRiemannianMultiscale
from filters import load_filterbank
from utilities import quantize
__author__ = "Michael Hersche, Tino Rellstab and Tibor Schneider"
__email__ = "herschmi@ethz.ch,tinor@ethz.ch"
DATA_PATH = "dataset/"
# QUANTIZED = True
# ONLY_2HZ_BANDS = True
class RiemannianModel():
""" Riemannian Model """
def __init__(self, svm_kernel='linear', svm_c=0.1, fs=250, bands=None, time_windows=None,
riem_opt='Riemann', rho=0.1, filter_type='butter', filter_order=2,
random_state=None):
""" Constructor
Parameters
----------
svm_kernel: str {'linear', 'sigmoid', 'rbf'}
kernel used for classifier
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
time_windows: list of list of ints, shape = (N, 2)
            time windows used, in seconds (default: [[2.5, 6]])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_type: str {"butter", "fir"}
Type of the filter
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
"""
# setup classifier
if svm_kernel == 'linear':
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
else:
self.classifier = SVC(C=svm_c, kernel=svm_kernel, degree=10, gamma='auto',
cache_size=10000, random_state=random_state)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype=filter_type)
# setup Time Windows
if time_windows is None:
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int)
else:
time_windows = (np.array(time_windows) * fs).astype(int)
# setup riemannian
self.riemannian = RiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True)
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
        # extract the number of features
assert len(samples.shape) == 3
no_channels = samples.shape[1]
self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
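        # e.g. with C = 22 channels: no_riem = 22 * 23 / 2 = 253 (the upper
        # triangle of one covariance matrix), so no_features = 253 * no_bands
        # * no_time_windows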
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
class QuantizedRiemannianModel():
""" QuantizedRiemannian Model """
def __init__(self, svm_c=0.1, fs=250, bands=None, riem_opt='Riemann', rho=0.1, filter_order=2,
random_state=None, num_bits=8, bitshift_scale=True):
""" Constructor
Parameters
----------
svm_c: float
regularization parameter for the classifier
fs: int
sampling rate of the data
bands: list of int
bandwidths used in filterbanks (default: [2, 4, 8, 16, 32])
riem_opt: str {"riemann", "Riemann_Euclid", "Whitened_Euclid", "No_Adaptation"}
type of riemannian used
rho: float
Normalization parameter for the covariance matrix of the riemannian
filter_order: int
Order of the filter
random_state: int or None
random seed used in the SVM
num_bits: int
Number of bits used for quantization
bitshift_scale: bool
if True, make sure that all scale factors between one part and the next is a bitshift
"""
self.num_bits = num_bits
self.bitshift_scale = bitshift_scale
# setup classifier
self.classifier = LinearSVC(C=svm_c, loss='hinge', random_state=random_state, tol=0.00001)
# setup Filterbank
if bands is None:
bandwidths = np.array([2, 4, 8, 16, 32])
else:
bandwidths = np.array(bands)
filter_bank = load_filterbank(bandwidths, fs, order=filter_order, max_freq=40, ftype="butter")
# setup Time Windows
time_windows = (np.array([[2.5, 6]]) * fs).astype(int)
# time_windows = (np.array([[2.0, 5.0]]) * fs).astype(int) # !!!!!
# setup riemannian
self.riemannian = QuantizedRiemannianMultiscale(filter_bank, time_windows, riem_opt=riem_opt,
rho=rho, vectorized=True, num_bits=num_bits,
bitshift_scale=bitshift_scale)
# prepare quantized weights and biases
self.scale_weight = 0
self.scale_bias = 0
# store dimensionality
self.no_bands = filter_bank.shape[0]
self.no_time_windows = time_windows.shape[0]
self.no_riem = None
self.no_features = None
def fit(self, samples, labels):
""" Training
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
"""
        # extract the number of features
assert len(samples.shape) == 3
no_channels = samples.shape[1]
self.no_riem = int(no_channels * (no_channels + 1) / 2) # Total number of CSP feature per band and timewindow
self.no_features = self.no_riem * self.no_bands * self.no_time_windows
# prepare scale factors
self.riemannian.prepare_quantization(samples)
# fit and extract training features from the riemannian
features = self.riemannian.fit(samples)
self.classifier.fit(features, labels)
# quantize the classifier
self.scale_weight = max(self.scale_weight, np.abs(self.classifier.coef_).max())
weights = quantize(self.classifier.coef_, self.scale_weight, self.num_bits, do_round=True)
self.classifier.coef_ = weights
# do not quantize the bias, this one will be added in 32 bit, and quantization does not
# matter here...
# self.scale_bias = max(self.scale_bias, np.abs(self.classifier.intercept_).max())
# bias = quantize(self.classifier.intercept_, self.scale_weight, self.num_bits,
# do_round=True)
# self.classifier.intercept_ = bias
def score(self, samples, labels):
""" Measure the performance, returns success rate
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
labels: np.array, size=(N)
training labels
Returns
-------
float: score of the model
"""
features = self.riemannian.features(samples)
return self.classifier.score(features, labels)
def predict(self, samples):
""" Predict some data
Parameters
----------
samples: np.array, size=(N, C, T)
training samples
Returns
-------
np.array, size=[N]: prediction
"""
features = self.riemannian.features(samples)
return self.classifier.predict(features)
def predict_with_intermediate(self, sample, verbose=True):
""" Predict some data
Parameters
----------
samples: np.array, size=(C, T)
training sample
Returns
-------
ordered dictionary including every intermediate result and the output
"""
if verbose:
print("Predict sample with intermediate matrices")
assert len(sample.shape) == 2
result = self.riemannian.onetrial_feature_with_intermediate(sample)
features = next(reversed(result.values()))
features = features.reshape(1, -1)
result["svm_result"] = self.classifier.decision_function(features)
result["prediction"] = self.classifier.predict(features)
return result
def get_data_dict(self):
""" Returns a nested dictionary containing all necessary data """
return {"num_bits": self.num_bits,
"bitshift_scale": self.bitshift_scale,
"SVM": {"weights": self.classifier.coef_,
"weight_scale": self.scale_weight,
"bias": self.classifier.intercept_},
"riemannian": self.riemannian.get_data_dict()}
# ---- end of file: multiscale_bci_python/riemannian_model.py ----
import logging
import os
import random
import time
from functools import lru_cache
import cv2
import numpy as np
import imgreco.main
from Arknights.helper import logger
from addons.activity import ActivityAddOn, get_stage_map
from addons.base import BaseAddOn, pil2cv, crop_cv_by_rect, show_img
from addons.common_cache import load_game_data
from imgreco.ocr.cnocr import ocr_and_correct
icon1 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon1.png'), cv2.IMREAD_GRAYSCALE)
icon2 = cv2.imread(os.path.join(os.path.realpath(os.path.dirname(__file__)), 'icon2.png'), cv2.IMREAD_GRAYSCALE)
@lru_cache(maxsize=1)
def get_activity_infos():
return load_game_data('activity_table')['basicInfo']
@lru_cache()
def get_available_activity(display_type=None):
activity_infos = get_activity_infos()
name_set = set()
for aid, info in activity_infos.items():
if info.get('displayType') in {'SIDESTORY', 'BRANCHLINE'}:
if info['displayType'] == 'BRANCHLINE' or info.get('isReplicate'):
raw_name = info['name'][:-3] if info.get('isReplicate') else info['name']
if display_type is None or display_type == info['displayType']:
name_set.add(raw_name)
return name_set
def get_activity_name(activity):
name = activity['name']
if activity['isReplicate']:
return name[:-3]
return name
def crop_image_only_outside(gray_img, raw_img, threshold=128, padding=3):
mask = gray_img > threshold
m, n = gray_img.shape
mask0, mask1 = mask.any(0), mask.any(1)
col_start, col_end = mask0.argmax(), n - mask0[::-1].argmax()
row_start, row_end = mask1.argmax(), m - mask1[::-1].argmax()
return raw_img[row_start - padding:row_end + padding, col_start - padding:col_end + padding]
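# Worked example (illustrative): on a 100x100 black canvas with a bright 20x20
# block at rows 40:60 and cols 30:50, the returned crop is the block plus the
# default 3-pixel padding on each side, i.e. shape (26, 26).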
class StartSpStageAddon(BaseAddOn):
def __init__(self, helper=None):
super(StartSpStageAddon, self).__init__(helper)
self.scale = self.helper.viewport[1] / 720
if self.helper.viewport != (1280, 720):
logger.warning('It may produce some weird effects when the resolution is not 1280x720.')
def apply_scale(self, value):
if self.scale == 1:
return value
return int(value * self.scale)
def run(self, stage_code: str, repeat_times: int = 1000, try_current_activity=True):
stage_code = stage_code.upper()
if try_current_activity:
try:
return ActivityAddOn(self.helper).run(stage_code, repeat_times)
except:
pass
stage_code_map, zone_linear_map = get_stage_map()
if stage_code not in stage_code_map:
            raise RuntimeError(f'Invalid stage: {stage_code}')
stage = stage_code_map[stage_code]
activity_id = stage['zoneId'].split('_')[0]
activity_infos = get_activity_infos()
activity = activity_infos[activity_id]
logger.debug(f'stage: {stage}, activity: {activity}')
self.enter_activity(activity)
stage_linear = zone_linear_map[stage['zoneId']]
self.helper.find_and_tap_stage_by_ocr(None, stage_code, stage_linear)
return self.helper.module_battle_slim(None, repeat_times)
def enter_activity(self, activity):
vh = self.vh
act_name = get_activity_name(activity)
if act_name not in get_available_activity():
            raise RuntimeError(f'Invalid activity: {act_name}')
self.open_terminal()
if activity['displayType'] == 'BRANCHLINE':
self.tap_branch_line()
else:
self.tap_side_story()
crop_flag = activity['displayType'] == 'SIDESTORY'
act_pos_map = self.get_all_act_pos(crop_flag)
if act_name not in act_pos_map:
if activity['displayType'] == 'BRANCHLINE':
                raise RuntimeError(f'Could not find the activity: {act_name}')
last_acts = act_pos_map.keys()
while True:
origin_x = random.randint(int(5.833 * vh), int(24.861 * vh))
origin_y = random.randint(int(57.222 * vh), int(77.917 * vh))
move = -random.randint(int(vh // 5), int(vh // 4))
self.helper.adb.touch_swipe2((origin_x, origin_y),
(random.randint(-20, 20), move), random.randint(900, 1200))
act_pos_map = self.get_all_act_pos(crop_flag)
if act_name in act_pos_map:
break
if last_acts == act_pos_map.keys():
                    raise RuntimeError(f'Could not find the activity: {act_name}')
last_acts = act_pos_map.keys()
logger.info(f'switch to {act_name}')
self.click(act_pos_map[act_name], 1)
self.tap_enter_activity()
def tap_back(self):
vw, vh = self.vw, self.vh
self.helper.tap_rect((2.222 * vh, 1.944 * vh, 22.361 * vh, 8.333 * vh))
time.sleep(0.5)
def get_all_act_pos(self, crop=False):
act_map = {}
screen = self.screenshot()
cv_screen = pil2cv(screen)
for icon in [icon1, icon2]:
act_map.update(self.get_act_pos_by_icon(cv_screen, icon, crop))
logger.info(act_map)
return act_map
def get_act_pos_by_icon(self, cv_screen, icon, crop=False):
vh, vw = self.vh, self.vw
raw_screen = cv_screen.copy()
if self.scale != 1:
cv_screen = cv2.resize(cv_screen, (int(self.helper.viewport[0] / self.scale), 720))
roi = crop_cv_by_rect(cv_screen, (0, 0, 10.000 * vh, 100.000 * vh))
roi = cv2.cvtColor(roi, cv2.COLOR_RGB2GRAY)
result = cv2.matchTemplate(roi, icon, cv2.TM_CCOEFF_NORMED)
loc = np.where(result >= 0.8)
tag_set = set()
tag_set2 = set()
res = {}
dbg_screen = raw_screen.copy()
available_activity = get_available_activity()
for pt in zip(*loc[::-1]):
pos_key = (pt[0] // 100, pt[1] // 100)
pos_key2 = (int(pt[0] / 100 + 0.5), int(pt[1] / 100 + 0.5))
if pos_key in tag_set or pos_key2 in tag_set2:
continue
tag_set.add(pos_key)
tag_set2.add(pos_key2)
if icon1 is icon:
x, y = (int(pt[0]) + 35, int(pt[1]) - 6)
tw, th = map(self.apply_scale, (180, 40))
else:
x, y = (int(pt[0]) + 35, int(pt[1]) - 3)
tw, th = map(self.apply_scale, (150, 30))
l, t = map(self.apply_scale, (x, y))
tag_img = raw_screen[t:t + th, l:l + tw]
if crop:
gray_tag = cv2.cvtColor(tag_img, cv2.COLOR_RGB2GRAY)
tag_img = crop_image_only_outside(gray_tag, tag_img, 160)
factor = 2.5 - self.scale
if factor > 1:
# print(factor)
tag_img = cv2.resize(tag_img, (0, 0), fx=factor, fy=factor, interpolation=cv2.INTER_LINEAR)
# show_img(tag_img)
# conv-lite-fc has better accuracy, but it is slower than densenet-lite-fc.
name = ocr_and_correct(tag_img, available_activity, model_name='densenet-lite-fc', log_level=logging.INFO)
if name:
res[name] = (int(l + 85 * self.scale), int(t + 20 * self.scale))
cv2.rectangle(dbg_screen, (l, t), (l + tw, t + th), (255, 255, 0), 2)
# show_img(dbg_screen)
return res
def tap_side_story(self):
vh, vw = self.vh, self.vw
logger.info('open side story view')
self.helper.tap_rect((44.297 * vw, 88.611 * vh, 56.406 * vw, 98.750 * vh))
time.sleep(1)
def tap_branch_line(self):
logger.info('open branch line view')
vh, vw = self.vh, self.vw
self.helper.tap_rect((29.375 * vw, 88.611 * vh, 41.719 * vw, 98.750 * vh))
time.sleep(1)
def tap_enter_activity(self):
logger.info('enter activity')
vh, vw = self.vh, self.vw
self.helper.tap_rect((100 * vw - 24.583 * vh, 69.167 * vh, 100 * vw - 8.750 * vh, 75.556 * vh))
time.sleep(1)
def open_terminal(self):
self.helper.back_to_main()
logger.info('open terminal')
self.helper.tap_quadrilateral(imgreco.main.get_ballte_corners(self.screenshot()))
time.sleep(1)
if __name__ == '__main__':
StartSpStageAddon().run('CB-10', 0, False)
# StartSpStageAddon().get_all_act_pos()
# ---- end of file: addons/start_sp_stage/__init__.py ----
import os
import sys
sys.path.append(os.path.dirname(__file__))
class AbstractSystemMeter:
"""Common system meter interface for all resource monitorings.
For each system resource to monitor, a wrapper class will be written as subclass of this one. This way we have
a common "interface" for all system resources to test.
This approach is choosen since python has no real interfaces like Java or C-Sharp.
"""
def __init__(self, resource_name):
self.resource_name = resource_name
def measure(self, func):
self._start()
func()
return self._stop()
def _start(self):
raise NotImplementedError("The method is not implemented yet.")
def _stop(self):
raise NotImplementedError("The method is not implemented yet.")
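# Illustrative subclass (an assumption, not part of this module): a wall-clock
# timer that fills in the _start/_stop hooks which measure() drives.
import time
class WallClockMeter(AbstractSystemMeter):
    def __init__(self):
        super(WallClockMeter, self).__init__("wall_clock")
        self._t0 = None
    def _start(self):
        self._t0 = time.time()
    def _stop(self):
        return time.time() - self._t0
# elapsed_seconds = WallClockMeter().measure(lambda: sum(range(10 ** 6)))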
# ---- end of file: measure/system/AbstractSystemMeter.py ----
#!/usr/bin/python
from __future__ import division
import sys
import math
import cmath
import numpy as np
from numpy import genfromtxt
import csv
from decimal import Decimal
import os
import random
from lyrics import *
# BEATLES: Bundle of Essential and Assistive Tools Library for Electronic Structure
# A tribute to the Beatles
#
# Updated June 14, 2020 by Hassan Harb
#
# / | \
# / | \
# /O O | O O\
# //|\ /|\ /|\ /|\\
# /=/ \=/ \= / \=/ \=\
# / == == == == == \
# / == == == == == \
# (The original Beatles)
# (ASCII retrieved from https://www.asciiart.eu/music/musicians/beatles )
#
#########################################################################
#
# NBasGrab: reads in a name of .fchk file
# output: -Number of basis functions
# -Charge
# -Multiplicity
# -Number of Atoms
# -Cartesian Coordinates
# -Atomic Symbols
# -SCF Energy
# -Total Energy (needs to be added)
# Section 1: Reading from gaussian formatted checkpoint file
def NBasGrab(filename):
NBasis = 0
NElem = 0
SCFEnergy = 0.0
Charge = 0
Multiplicity = 0
NAtoms = 0
temp = 1
with open(filename, 'r') as origin:
for line in origin:
if "Number of basis functions" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NBasis = NBasis*10 + int(letter)
if "Charge " in line:
words = line.split()
for i in words:
for letter in i:
if(letter=="-"):
temp = -1
if(letter.isdigit()):
Charge = Charge*10 + int(letter)
Charge = Charge*temp
if "Multiplicity" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
Multiplicity = Multiplicity*10 + int(letter)
if "Number of atoms" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NAtoms = NAtoms*10 + int(letter)
if "SCF Energy" in line:
words = line.split()
# print "SCF Energy = ", words[3], " Hartree"
SCFEnergy = float(words[3])
# print "SCF Energy (float) = ", SCFEnergy
# if "Total Energy" in line:
# words = line.split()
# TotalEnergy = float(words[3])
# print "Total Energy = ", TotalEnergy, " Hartree"
NElem = NBasis*NBasis
# print "Number of Basis Functions (subroutine) = ", NBasis, "\n"
# print "Charge (subroutine) = ", Charge, "\n"
return NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy
# GeomGet: reads in the file name, number of atoms
# Output: -One dimensional vector (NAtoms * 3) that includes the cartesian coordinates of each atom
#
def GeomGet(filename,NAtoms):
p = 0
r = 0
n = 1
NElements = NAtoms * 3
RawCart = np.zeros(NElements)
if (NElements%5 == 0):
n = 0
RawCartLines = int(NElements/5) + n
# print "Raw Cart lines = ", RawCartLines
# print "Number of Atoms =", NAtoms
# print "Number of coordinates =", NElements
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "Current cartesian coordinates" in line:
i = i + 1
pointer = i
# print "Cartesian Coordinates starts at line :", pointer
endpointer = pointer + RawCartLines - 1
# print "Cartesian Coordinates ends at line :", endpointer
for m in range(0,endpointer - pointer +1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
RawCart[r] = nextline[p]
r = r + 1
p = 0
# print "Raw Cart (subroutine) = ", RawCart
RawCart = RawCart/1.88973
# print "Raw Cart (converted to Angstroms) = ", RawCart
return RawCart
# GetAtoms: Reads in file name, number of atoms
# output: -One dimensional vector (NAtoms) that contains the atomic numbers of the atoms
#
def GetAtoms(filename1,NAtoms):
p = 0
r = 0
n = 1
AtomicNum = np.zeros(NAtoms)
if (NAtoms%6 ==0):
n = 0
AtomLines = int(NAtoms/6) + n
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Atomic numbers" in line:
i = i + 1
pointer = i
endpointer = pointer + AtomLines -1
for m in range(0, endpointer - pointer + 1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
AtomicNum[r] = nextline[p]
r = r + 1
p = 0
return AtomicNum
# MatGrab: Reads in filename, NBasis, user-defined switch
# Output: -Alpha MO Coefficients (Done)
# -Beta MO Coefficients (Done)
# -Alpha Density Matrix (Done)
# -Beta Density Matrix (Done)
# -Alpha MO Energies (Done)
# -Beta MO Energies (Done)
#
# Switch: 1 = Alpha MO Coefficients
# -1 = Beta MO Coefficients
# 2 = Alpha and Beta Density Matrices
# 3 = Alpha MO Energies
# -3 = Beta MO Energies
#
def MatGrab(filename,NBasis,switch):
if (switch == 1):
filename1 = filename
MOElements = NBasis * NBasis
MOlines = int(MOElements/5) + 1
if (NBasis%5 == 0):
MOlines = MOlines - 1
p = 0
r = 0
AOE = 0
MOrawa = np.zeros(NBasis*NBasis)
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
AOE = i
if "Alpha MO coefficients" in line:
i=i+1
AMO=i
# print "Alpha MO coefficients starts at line :", i
j=i+MOlines-1
# print "Alpha MO coefficients ends at line :", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
MOrawa[r] = nextline[p]
r = r+1
p = 0
# print "MO Raw = ", MOrawa
return MOrawa
if (switch == -1):
filename1 = filename
MOElements = NBasis * NBasis
MOlines = int(MOElements/5) + 1
if (NBasis%5 == 0):
MOlines = MOlines - 1
p = 0
r = 0
BOE = 0
BMO = 0
MOrawb = np.zeros(NBasis*NBasis)
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Beta Orbital Energies" in line:
BOE = i
if "Beta MO coefficients" in line:
i=i+1
BMO=i
j=i+MOlines-1
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
MOrawb[r] = nextline[p]
r = r+1
p = 0
# print "MO Raw = ", MOrawb
return MOrawb
if (switch == 2):
filename1 = filename
PElements = int(NBasis*(NBasis+1)/2)
Plines = int(PElements/5) + 1
TotalPraw = np.zeros(PElements)
SpinPraw = np.zeros(PElements)
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Total SCF Density" in line:
i=i+1
r = 0
p = 0
# print "Total SCF Density starts at line :", i
j=i+Plines-1
# print "Total SCF Density ends at line :", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(0,len(nextline)):
if (r != PElements):
TotalPraw[r] = nextline[p]
r = r+1
p = 0
# HH + : Bug ... :(
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Spin SCF Density" in line:
# print "Found Spin density!"
i=i+1
r = 0
p = 0
# print "Spin SCF Density starts at line: ", i
j=i+Plines-1
# print "Spin SCF Density ends at line: ", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
if (r != PElements):
SpinPraw[r] = nextline[p]
r = r+1
p = 0
# HH - : End of bug (hopefully!)
PalphaRaw = (np.add(TotalPraw,SpinPraw)) * 0.5
PbetaRaw = (np.subtract(TotalPraw,SpinPraw)) * 0.5
Palpha = symmetrize(PalphaRaw)
Pbeta = symmetrize(PbetaRaw)
return Palpha, Pbeta
if (switch == 3):
filename1 = filename
AlphaMO = np.zeros(NBasis)
AlphaMOlines = int(NBasis/5) + 1
if (NBasis % 5 == 0):
AlphaMOlines = AlphaMOlines - 1
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
i = i + 1
r = 0
p = 0
# print "Alpha MO Energies starts at line: ", i
j = i + AlphaMOlines - 1
# print "Alpha MO Energies ends at line: ", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
AlphaMO[r] = nextline[p]
r = r + 1
p = 0
# print "Alpha MO energies = ", AlphaMO
return AlphaMO
if (switch == -3):
filename1 = filename
BetaMO = np.zeros(NBasis)
BetaMOlines = int(NBasis/5) + 1
if (NBasis % 5 == 0):
BetaMOlines = BetaMOlines - 1
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Beta Orbital Energies" in line:
i = i + 1
r = 0
p = 0
# print "Beta MO Energies starts at line: ", i
j = i + BetaMOlines - 1
# print "Beta MO Energies ends at line: ", j
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
BetaMO[r] = nextline[p]
r = r + 1
p = 0
# print "Beta MO energies = ", BetaMO
return BetaMO
# sci_notation: reads in a number
# output: prints the number in the desired scientific notation. note that this function has a different output than the one found in nio.py
#
def sci_notation(n):
    return '%.8f' % Decimal(n.real)
# fchk_notation: reads in a number
# output: prints the number in the desired notation for fchk files
#
def fchk_notation(n):
    return '%.8E' % Decimal(n.real)
# AtomicSymbol: Reads in atomic number of the element
# Output: -Atomic Symbol
#
def AtomicSymbol(AtomicNumber):
p = AtomicNumber - 1
    PTlist = ['H','He','Li','Be','B','C','N','O','F','Ne','Na','Mg','Al','Si','P','S','Cl','Ar','K','Ca','Sc','Ti','V','Cr','Mn','Fe','Co','Ni','Cu','Zn','Ga','Ge','As','Se','Br','Kr','Rb','Sr','Y','Zr','Nb','Mo','Tc','Ru','Rh','Pd','Ag','Cd','In','Sn','Sb','Te','I','Xe','Cs','Ba','La','Ce','Pr','Nd','Pm','Sm','Eu','Gd','Tb','Dy','Ho','Er','Tm','Yb','Lu','Hf','Ta','W','Re','Os','Ir','Pt','Au','Hg','Tl','Pb','Bi','Po','At','Rn','Fr','Ra','Ac','Th','Pa','U','Np','Pu','Am','Cm','Bk','Cf','Es','Fm','Md','No','Lr','Rf','Db','Sg','Bh','Hs','Mt','Ds','Rg','Cn','Nh','Fl','Mc','Lv','Ts','Og']
# print "There are currently ", len(PTlist), " atoms defined"
return PTlist[p]
# Symmetrize: Reads in a packed symmetric column matrix into NBasis x NBasis square matrix
# Output: -Matrix(NBasis,NBasis)
#
def symmetrize(a):
Nbas = int((np.sqrt(8*len(a)+1)-1)/2)
b = np.zeros((Nbas,Nbas))
n = 0
for i in range(0,Nbas):
for j in range(0,i+1):
b[i,j]=a[n]
b[j,i]=a[n]
n=n+1
return b
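# Example (illustrative): the packed lower triangle a = [1, 2, 3] corresponds
# to Nbas = 2 and expands to the square matrix [[1, 2], [2, 3]].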
# Column2Square: Reads in a packed column matrix, number of basis functions.
# Output: -Matrix(NBasis,NBasis)
def column2square(A,NBasis):
C = np.zeros((NBasis,NBasis))
t=0
for i in range(0,NBasis):
for j in range(0,NBasis):
C[j,i]=float(A[t])
t=t+1
return C
# GetOverlap: Reads in packed column matrix, number of basis functions.
# Output: -Overlap Matrix (NBasis,NBasis)
def GetOverlap(A,NBasis):
C = column2square(A,NBasis)
CInv = np.linalg.inv(C)
S = np.dot(np.transpose(CInv),CInv)
return S
# PrintSI: Reads in filename, user-defined switch
# Output: -SCF Energy, Charge, Multiplicity, Geometry
#
# Switch: 1 = print to new file (filename1-SI.txt)
# -1 = print to screen
#
def PrintSI(filename1,switch):
NBasis, NElementsGrab, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename1)
AtomicNum = GetAtoms(filename1,NAtoms)
RawCart = GeomGet(filename1,NAtoms)
Cart = np.resize(RawCart,(NAtoms,3))
filename2 = os.path.splitext(filename1)[0] + "-SI.txt"
filename1 = os.path.splitext(filename1)[0]
if (switch == 1):
with open(filename2,'w') as f2:
f2.write("SI info for ")
f2.write(filename1)
f2.write("\n\n")
f2.write("SCF Energy = ")
f2.write(str(SCFEnergy))
f2.write(" Hartree")
f2.write("\n\n")
f2.write(str(Charge))
f2.write(" ")
f2.write(str(Multiplicity))
f2.write("\n")
for i in range(0,NAtoms):
h = i + 1
z = AtomicNum[i]
Atom = AtomicSymbol(int(z))
f2.write(Atom)
f2.write(" ")
for j in range(0,3):
if (Cart[i,j] >= 0):
f2.write(" ")
f2.write(str(sci_notation(Cart[i,j])))
f2.write(" ")
f2.write("\n")
f2.write(" ")
f2.write("\n\n")
return filename2
if (switch == -1):
print "SCF Energy = ", SCFEnergy, " Hartree\n"
print "Charge = ", Charge, "\n"
print "Multiplicity = ", Multiplicity, "\n"
print "Cartesian Geometry:\n"
for i in range(0,NAtoms):
h = i + 1
z = AtomicNum[i]
Atom = AtomicSymbol(int(z))
print Atom, sci_notation(Cart[i,0]), sci_notation(Cart[i,1]), sci_notation(Cart[i,2])
print "\n"
# CalcNO: Reads in filename, NBasis
# Output: Natural Orbitals eigenvalues and eigenvectors (both alpha and beta)
#
def CalcNO(filename,NBasis):
Palpha, Pbeta = MatGrab(filename,NBasis,2)
C = MatGrab(filename,NBasis,1)
S = GetOverlap(C,NBasis)
Svals, Svecs = np.linalg.eig(S)
    Sval_half = np.diag(Svals**(0.5))
    Shalf = np.dot(Svecs,np.dot(Sval_half,np.transpose(Svecs)))
NOvalsA, NOvecsA = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Palpha)))
NOvalsB, NOvecsB = np.linalg.eig(np.dot(Shalf,np.dot(Shalf,Pbeta)))
NOvalsA = NOvalsA.real
NOvalsB = NOvalsB.real
NOvecsA = NOvecsA.real
NOvecsB = NOvecsB.real
NOvecsA = np.dot(np.linalg.inv(Shalf),NOvecsA)
NOvecsB = np.dot(np.linalg.inv(Shalf),NOvecsB)
return NOvecsA, NOvecsB, NOvalsA, NOvalsB
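# Note (illustrative): Shalf.Shalf.P equals S.P, whose eigenvalues are the
# natural-orbital occupation numbers (S.P is similar to S^(1/2).P.S^(1/2));
# the eigenvectors are then mapped back to the AO basis with S^(-1/2).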
# NElec: Reads in filename
# Output: Total number of electrons, Alpha Electrons, Beta Electrons
#
def NElec(filename):
NElec = 0
NAlpha = 0
NBeta = 0
with open(filename, 'r') as origin:
for line in origin:
if "Number of electrons" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NElec = NElec*10 + int(letter)
if "Number of alpha electrons" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NAlpha = NAlpha*10 + int(letter)
if "Number of beta electrons" in line:
words = line.split()
for i in words:
for letter in i:
if(letter.isdigit()):
NBeta = NBeta*10 + int(letter)
return NElec, NAlpha, NBeta
# OrbTransform: Reads in Alpha Density Matrix, Beta Density Matrix, Overlap Matrix, n
# Output: New Density Matrices: P' = S**(1-n).P.S**(n)
#
def OrbTransform(Pa,Pb,S,n):
Svals, Svecs = np.linalg.eig(S)
Sval1 = np.diag(Svals**(n))
Sval2 = np.diag(Svals**(1-n))
Sdag1 = np.dot(Svecs,np.dot(Sval1,np.transpose(Svecs)))
Sdag2 = np.dot(Svecs,np.dot(Sval2,np.transpose(Svecs)))
PdagAlpha = np.dot(Sdag1,np.dot(Pa,Sdag2))
PdagBeta = np.dot(Sdag1,np.dot(Pb,Sdag2))
# print "OrbTransform Subroutine test:\n"
# print "PdagAlpha = ", PdagAlpha, "\n"
# print "PdagBeta = ", PdagBeta, "\n"
OvalsA, OvecsA = np.linalg.eig(PdagAlpha)
OvalsB, OvecsB = np.linalg.eig(PdagBeta)
# print "OVals A = ", OvalsA, "\n"
# print "OVecs A = ", OvecsA, "\n"
# print "OVals B = ", OvalsB, "\n"
# print "OVecs B = ", OvecsB, "\n"
return PdagAlpha, PdagBeta, OvecsA, OvecsB, OvalsA, OvalsB
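# Note (illustrative): n = 0.5 gives the symmetric (Lowdin-style) transform
# S^(1/2).P.S^(1/2), the same quantity whose eigenvalues appear in CalcNO.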
# CartoZmat: Transforms Cartesian coordinates to z-matrix form
# Input: NAtoms, RawCart, AtomicNum
# Output: z-matrix printed on the screen
#
# Note that there are three other functions here, Dist, Angle, and Torsion.
# They are used to calculate the appropriate parameters for the z-matrix
# switch = 1 : print z-matrix to screen
# switch = -1 : print z-matrix to new textfile
def DistAB(e1,e2):
R = 0.0
for i in range(len(e1)):
R = R + (e1[i]-e2[i])**(2)
R = R**(0.5)
return R
def AngleABC(e1,e2,e3):
eab_x = (e2[0] - e1[0]) / DistAB(e1,e2)
eab_y = (e2[1] - e1[1]) / DistAB(e1,e2)
eab_z = (e2[2] - e1[2]) / DistAB(e1,e2)
ebc_x = - (e3[0] - e2[0]) / DistAB(e2,e3)
ebc_y = - (e3[1] - e2[1]) / DistAB(e2,e3)
ebc_z = - (e3[2] - e2[2]) / DistAB(e2,e3)
eab = [eab_x, eab_y, eab_z]
ebc = [ebc_x, ebc_y, ebc_z]
cos_angle = np.dot(eab,ebc)
angle = np.arccos(cos_angle) / 3.1415926535 * 180
return eab, ebc, angle
def TorsionABCD(e1,e2,e3,e4):
eab_x = (e2[0] - e1[0]) / DistAB(e1,e2)
eab_y = (e2[1] - e1[1]) / DistAB(e1,e2)
eab_z = (e2[2] - e1[2]) / DistAB(e1,e2)
ebc_x = (e3[0] - e2[0]) / DistAB(e2,e3)
ebc_y = (e3[1] - e2[1]) / DistAB(e2,e3)
ebc_z = (e3[2] - e2[2]) / DistAB(e2,e3)
ecd_x = (e4[0] - e3[0]) / DistAB(e3,e4)
ecd_y = (e4[1] - e3[1]) / DistAB(e3,e4)
ecd_z = (e4[2] - e3[2]) / DistAB(e3,e4)
eab = [eab_x, eab_y, eab_z]
ebc = [ebc_x, ebc_y, ebc_z]
ecd = [ecd_x, ecd_y, ecd_z]
n1 = np.cross(eab,ebc) / (np.linalg.norm(np.cross(eab,ebc)))
n2 = np.cross(ebc,ecd) / (np.linalg.norm(np.cross(ebc,ecd)))
u1 = n2
u3 = ebc/np.linalg.norm(ebc)
u2 = np.cross(u3,u1)
cos_angle = np.dot(n1,n2)
sin_angle = np.dot(n1,u2)
angle = -math.atan2(sin_angle,cos_angle) / 3.1415926535 * 180
return angle
def CartoZmat(RawCart,NAtoms,AtomicNum,filename2,switch):
if (switch == 1):
Cart = np.resize(RawCart,(NAtoms,3))
# print "Cartesian = ", Cart
# print "Atoms list = ", AtomicNum
for i in range(len(AtomicNum)):
Symbol = AtomicSymbol(int(AtomicNum[i]))
if (i > 2):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e3 = [Cart[2,0],Cart[2,1],Cart[2,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
D = TorsionABCD(e4,e1,e2,e3)
print Symbol, 1 , R , 2, A , 3, D
elif (i > 1):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
print Symbol, 1 , R , 2, A
elif (i > 0):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
print Symbol, 1, R
elif (i == 0):
print Symbol
elif (switch == -1):
Cart = np.resize(RawCart,(NAtoms,3))
#open new file
filename = os.path.splitext(filename2)[0] + "-zmat.txt"
with open(filename,'w') as f2:
NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename2)
f2.write("Z-Matrix file for ")
f2.write(filename2)
f2.write("\n\n")
f2.write(str(Charge))
f2.write(" ")
f2.write(str(Multiplicity))
f2.write("\n")
for i in range(len(AtomicNum)):
Symbol = AtomicSymbol(int(AtomicNum[i]))
if (i > 2):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e3 = [Cart[2,0],Cart[2,1],Cart[2,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
D = TorsionABCD(e4,e1,e2,e3)
f2.write(Symbol)
f2.write(" 1 ")
f2.write(str(R))
f2.write(" 2 ")
f2.write( str(A))
f2.write(" 3 ")
f2.write(str(D))
f2.write("\n")
elif (i > 1):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e2 = [Cart[1,0],Cart[1,1],Cart[1,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
eab, ebc, A = AngleABC(e2,e1,e4)
f2.write(str(Symbol))
f2.write(" 1 ")
f2.write (str(R))
f2.write(" 2 ")
f2.write(str(A))
f2.write("\n")
elif (i > 0):
e4 = [Cart[i,0],Cart[i,1],Cart[i,2]]
e1 = [Cart[0,0],Cart[0,1],Cart[0,2]]
R = DistAB(e4,e1)
f2.write(Symbol)
f2.write(" 1 ")
f2.write(str(R))
f2.write("\n")
elif (i == 0):
f2.write(Symbol)
f2.write("\n")
# print "test test"
# Section 2: Reading from gaussian matrix files
# MatGrab2: Reads in matrices from gaussian matrix file
#
# Switch: 1 : Alpha Core Hamiltonian
# -1 : Beta Core Hamiltonian
# 2 : Alpha Fock Matrix
# -2 : Beta Fock Matrix
# 3 : Dipole matrix elements (x,y,z) [IN PROGRESS]
def MatGrab2(filename,NBasis,switch):
print "Reading from Matrix file\n"
if (switch == 1):
print "Reading Alpha Core Hamiltonian Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the core hamilonian\n"
CoreHRawa = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "CORE HAMILTONIAN ALPHA" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading Core Hamolitonian"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
CoreHRawa[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return CoreHRawa
if (switch == -1):
print "Reading Beta Core Hamiltonian Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the core hamilonian\n"
CoreHRawb = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "CORE HAMILTONIAN BETA" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading Core Hamolitonian"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
CoreHRawb[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return CoreHRawb
if (switch == 2):
print "Reading Alpha Fock Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the fock matrix\n"
FockRawA = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "ALPHA FOCK MATRIX" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading fock matrix"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
FockRawA[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return FockRawA
if (switch == -2):
print "Reading Beta Fock Matrix:\n"
NElements = int(NBasis*(NBasis + 1)/2)
print "Looking for ", NElements, " elements of the fock matrix\n"
FockRawB = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "BETA FOCK MATRIX" in line :
while (p < (NElements)):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done Reading fock matrix"
j = i+3
i = i + 4
end = j + NLines - 1
nextline = origin.next()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
FockRawB[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
return FockRawB
if (switch == 3):
# print "Reading Dipole integrals, matrix x\n"
NElements = int(NBasis*(NBasis +1)/2)
# print "Looking for ", NElements, " elements of the Dipole integrals matrix x\n"
DipX_Raw = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if " DIPOLE INTEGRALS, matrix 1" in line:
while (p < NElements):
NLines = NBasis - 5*r
if (NLines < 0):
# print "Done reading Dipole X matrix\n"
j = i+3
i = i + 4
end = j + NLines -1
nextline = origin.next()
words = nextline.split()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
DipX_Raw[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
# print "Dip X raw = ", DipX_Raw
# print "Reading Dipole integrals, matrix y\n"
NElements = int(NBasis*(NBasis +1)/2)
print "Looking for ", NElements, " elements of the Dipole integrals matrix y\n"
DipY_Raw = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if " DIPOLE INTEGRALS, matrix 2" in line:
while (p < NElements):
NLines = NBasis - 5*r
if (NLines < 0):
# print "Done reading Dipole Y matrix\n"
j = i+3
i = i + 4
end = j + NLines -1
nextline = origin.next()
words = nextline.split()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
DipY_Raw[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
# print "Dip Y raw = ", DipY_Raw
# print "Looking for ", NElements, " elements of the Dipole integrals matrix z\n"
DipZ_Raw = np.zeros(NElements)
p = 0
n = 0
r = 0
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if " DIPOLE INTEGRALS, matrix 3" in line:
while (p < NElements):
NLines = NBasis - 5*r
if (NLines < 0):
print "Done reading Dipole Z matrix\n"
j = i+3
i = i + 4
end = j + NLines -1
nextline = origin.next()
words = nextline.split()
for m in range(i,i+NLines):
nextline = origin.next()
words = nextline.split()
for j in range(1,len(words)):
DipZ_Raw[p] = float(words[j].replace('D','E'))
p = p + 1
r = r + 1
i = m - 2
# print "Dip Z raw = ", DipZ_Raw
return symmetrizeMat(DipX_Raw), symmetrizeMat(DipY_Raw), symmetrizeMat(DipZ_Raw)
# SymmetrizeMat: Reads in packed matrix (recovered from Matrix file) and prints out NBasis x NBasis matrix
# Input: Packed lower triangular A
# Output: N x N Matrix
def symmetrizeMat(a):
NBasis = int((np.sqrt(8*len(a)+1)-1)/2)
NewMat = np.zeros((NBasis,NBasis))
NElements = len(a)
t = 0
l = 0
start = 0
loop = NBasis
nBlock = int(NBasis/5)
nRem = NBasis%5
# print "nBlock = ", nBlock
# print "nRem = ", nRem
i = start
j = start
if (nBlock == 0):
nBlock =1
while (l < nBlock):
# print "retrieving block ", l
for i in range (start,loop):
for j in range(start,start+5):
if (j<=i):
# print "i,j = ",i,j
NewMat[i,j] = a[t]
NewMat[j,i] = a[t]
# print "A[t]= ", a[t]
t = t + 1
start = start + 5
l = l + 1
# print "t = ", t
# print "values of i and j after nBlock loop is over: ", i, j
j = j + 1
start = j
# print "NBasis - nRem = ", NBasis -nRem
i = NBasis - nRem
while (i < NBasis):
j = start
while (j <= i):
# print "i,j = ",i,j
NewMat[i,j] = a[t]
NewMat[j,i] = a[t]
# print "A[t]= ", a[t]
t = t + 1
j = j + 1
i = i + 1
# print "final value of t = ", t
return NewMat
# ERIRead: reads in regular 2e integrals from formatted matrix file
# Note that to get these integrals, use SCF=Conventional and int=NoRaff (saves integrals to disk and prints out regular 2e integrals)
# Input: matrix filename
# Output: 2D Matrix, two columns: Column 1 = compound index, Column 2 = integral value
#
# Two small functions are defined here: swap(a,b) and Fourindex(a,b,c,d)
def swap(a,b):
return b,a
def Fourindex(a,b,c,d):
a = int(a)
b = int(b)
c = int(c)
d = int(d)
if (a < b):
a, b = swap(a,b)
if (c < d):
c, d = swap(c,d)
e = int(a*(a+1)/2 + b)
f = int(c*(c+1)/2 + d)
if (e<f):
e,f = swap(e,f)
g = e*(e +1)/2 + f
return int(g)
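# Example (illustrative): Fourindex(2,1,2,1) gives e = f = 2*3/2 + 1 = 4 and
# g = 4*5/2 + 4 = 14, so the integral (21|21) is stored at compound index 14.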
def ERIRead(filename,NBasis):
NElements = 0
p = 0
print "Reading ERIs from Gaussian Matrix File"
print "Subroutine can only read regular 2e integrals (NO RAFINETTI)"
with open(filename,'r') as origin:
for i, line in enumerate(origin):
if "Label REGULAR 2E INTEGRALS" in line:
print "Found 2e integrals!"
words = line.split()
print "Total number of elements = ", words[9]
NElements = int(words[9])
print "NElements = ", NElements
eri_raw = np.zeros((NElements,5))
while (p < NElements):
nextline = origin.next()
words = nextline.split()
eri_raw[p,0] = words[1]
eri_raw[p,1] = words[3]
eri_raw[p,2] = words[5]
eri_raw[p,3] = words[7]
eri_raw[p,4] = float(words[9].replace('D','E'))
# print "(",int(eri_raw[p,0]),int(eri_raw[p,1]),"|",int(eri_raw[p,2]),int(eri_raw[p,3]),") = ", eri_raw[p,4]
p = p + 1
# print "ERI RAW = ", eri_raw
NTotal = Fourindex(NBasis,NBasis,NBasis,NBasis) + 1
eri_array = np.zeros(NTotal)
eri_compact = np.zeros((NElements,2))
print "Total length of sparse 1D vector =", NTotal
print "Now forming compound indices"
for i in range(0,NElements):
eri_compact[i,0] = Fourindex(eri_raw[i,0], eri_raw[i,1], eri_raw[i,2], eri_raw[i,3])
eri_compact[i,1] = eri_raw[i,4]
eri_array[int(eri_compact[i,0])] = eri_compact[i,1]
# print "mu nu lambda sigma = ", int(eri_compact[i,0]), ", int = ", eri_compact[i,1], "One D array Value =", eri_array[eri_compact[i,0]]
return eri_array
# OVParse breaks down the MO coefficient matrix (NBasis x NBasis) into an occupied (NBasis x NOcc) and a virtual (NBasis x (Nbasis-NOcc)) matrices
# Input: A: MO Coefficient (NBasis x NBasis)
# NBasis
# NOcc = number of electrons
#
# Output: A_Occ: rectangular NBasis x NOcc matrix: Columns of occupied MOs
# A_Virt: rectangular NBasis x (NBasis - NOcc) matrix: Columns of virtual MOs
## Note TO SELF: Needs to be tested more, was only tested on H2 and V jobs.
def OVParse(A,NBasis,NOcc):
A_Occ = np.zeros((NBasis,NOcc))
A_Virt = np.zeros((NBasis,NBasis-NOcc))
for i in range(0,NOcc):
A_Occ[:,i] = A[:,i]
for j in range(0,NBasis-NOcc):
A_Virt[:,j] = A[:,j+NOcc]
return A_Occ, A_Virt
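# Note (illustrative): with NumPy slicing the same split is simply
# A_Occ, A_Virt = A[:, :NOcc], A[:, NOcc:]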
# Biorthog: Calculates the overlap between two sets of MO Coefficients, prints out the final value of the overlap
# Input: A, B: MO Coefficients, can either be full or parsed (using OVParse subroutine)
# S: AO overlap matrix
#
# Output: the final value of the overlap
#
# Option: switch: 1 : print all relevant matrices
# -1 : Don't print any matrices
#
def Biorthog(A,B,S,switch): # eqn numbers based on personal notes
D = np.dot(np.transpose(B),np.dot(S,A)) # eq. 1
u, d, v = np.linalg.svd(D,full_matrices=True) # eq. 2
DtD = np.dot(np.transpose(D),D)
l, V = np.linalg.eig(DtD)
U = np.dot(D,V)
if (switch==1):
print "D = ", D
print "DtD = ", DtD
print "lambdas = ", l
print "Eig Vecs of DtD = ", V
print "Determinants = ", np.linalg.det(u), np.linalg.det(v)
print "u = ", u
print "v = ", v
overlap = np.linalg.det(u)*np.prod(d)*np.linalg.det(v)
return d, u, v, D
# PickColumn: Subroutine that selects a specific column from a two dimensional matrix (NBasis,NBasis), outputs an array (NBasis,1)
# Input: A: Two dimensional matrix
# NBasis: Number of basis functions for A
# i: the position of the column to be selected
#
# Output: One dimensional array (NBasis,1) that is the i-th column of matrix A
#
def PickColumn(A,NBasis,i):
A_Column = np.zeros((NBasis,1))
for j in range(0,NBasis):
A_Column[j,0] = A[j,i]
return A_Column
# WriteMOs: Subroutine that replaces the MO coefficients and orbital energies in a fchk file
# Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbital energies alpha, orbital energies beta, number of basis functions
#
# Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies
#
def WriteMOs(filename1,filename3,V1,V2,e1,e2,NBasis):
MOlines = int(len(V1)/5) + 1
p = 0
r = 0
AOE = 0
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
AOE = i
if "Alpha MO coefficients" in line:
i=i+1
AMO=i
j=i+MOlines-1
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
r = r+1
p = 0
if "Beta Orbital Energies" in line:
BOE = i
if "Beta MO coefficients" in line:
r = 0
i=i+1
BMO = i
j=i+MOlines-1
for m in range(0,j-i+1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
r = r+1
p = 0
pointer=0
counter=1
with open(filename1,'r') as origin:
data = origin.readlines()
if "Alpha Orbital Energies" in line:
AOE = i
BOE = AOE + int(NBasis/5) + 1
with open(filename3,'w') as f2:
print "Writing results to new output file: ", filename3, " ... "
while (pointer < AOE+1):
f2.write(data[pointer])
pointer = pointer+1
for j in range(0,NBasis):
f2.write(" ")
if (e1[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e1[j].real)))
if (counter%5 == 0):
f2.write("\n")
counter=0
counter=counter+1
counter =1
BOE = AOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BOE = BOE - 1
f2.write(data[BOE])
for j in range(0,NBasis):
f2.write(" ")
if (e2[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e2[j].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter+1
counter =1
AMO = BOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
AMO = AMO - 1
f2.write(data[AMO])
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V1[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V1[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
BMO = AMO + (int(NBasis*NBasis/5))+2
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BMO = BMO - 1
f2.write(data[BMO])
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V2[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V2[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
if (NBasis%5 != 0):
f2.write("\n")
pointer = BMO + (int(NBasis*NBasis/5))+2
while (pointer < len(data)):
f2.write(data[pointer])
pointer = pointer+1
print "Done."
# OVMerge: Does the opposite of OVParse, merges back the Occ and Virt components of the MO Coefficient matrix
# Input : A (Occ Matrix), B(Vir Matrix), Number of occupied orbitals, NBasis
#
# Output : V = Full MO Coefficient Matrix
#
# (this subroutine has the exact opposite functionality of OVParse)
#
def OVMerge(A,B,NOcc,NBasis):
V = np.zeros((NBasis,NBasis))
for i in range(0,NOcc):
V[:,i] = A[:,i]
for j in range(NOcc,NBasis):
V[:,j] = B[:,j-NOcc]
return V
# DistanceMatrix: Calculates distances between all atoms in a molecule
# Input : fchk file name
#
# Output : Returns Distance Matrix and Atomic Symbol array.
#
# Unfinished part: generate and return a distance matrix (NAtoms x NAtoms)
#
def DistanceMatrix(filename):
NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename)
Atomic_Numbers = GetAtoms(filename,NAtoms)
Atomic_Symbol = [""]*NAtoms
for i in range(0,NAtoms):
Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i]))
RawCart = GeomGet(filename,NAtoms)
Cart = np.resize(RawCart,(NAtoms,3))
Distance_Matrix = np.zeros((NAtoms,NAtoms))
for i in range(0,NAtoms):
for j in range(i+1,NAtoms):
e2 = [Cart[j,0],Cart[j,1],Cart[j,2]]
e1 = [Cart[i,0],Cart[i,1],Cart[i,2]]
Distance_Matrix[i,j] = np.around(DistAB(e1,e2),decimals=2)
Distance_Matrix[j,i] = np.around(DistAB(e1,e2),decimals=2)
return Distance_Matrix, Atomic_Symbol
# PrintLyrics: A Function made just for fun, prints out a random quote from the Beatles songs
# Input: None, but reads in the lyrics.py library file (partially complete)
#
# Output: None, prints lyrics.
#
def PrintLyrics():
n = random.randint(1,32)
LyricsLibrary(n)
# GetAtomicWeights: Grabs the "real atomic weights" from the fchk file
# Input: filename, Number of Atoms
#
# Output: One dimensional array, AtomicWeight, of dimensions NAtoms.
#
def GetAtomicWeights(filename1,NAtoms):
p = 0
r = 0
n = 1
AtomicWeight = np.zeros(NAtoms)
if (NAtoms%5 ==0):
n = 0
AtomLines = int(NAtoms/5) + n
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Real atomic weights" in line:
i = i + 1
pointer = i
endpointer = pointer + AtomLines -1
for m in range(0, endpointer - pointer + 1):
nextline = origin.next()
nextline = nextline.split()
for p in range(p,len(nextline)):
AtomicWeight[r] = nextline[p]
r = r + 1
p = 0
AtomicWeight = np.around(AtomicWeight,decimals=3)
return AtomicWeight
# WriteMOsQChem: Subroutine that replaces the MO coefficients and orbital energies in a fchk file (QChem Version)
# Input: Input filename, output filename, Orbital coefficient alpha, orbital coefficient beta, Orbital energies alpha, orbital energies beta, number of basis functions
#
# Output: None. New file will be generated (filename3) that has the new Orbital coefficients and energies
#
def WriteMOsQChem(filename1,filename3,V1,V2,e1,e2,NBasis):
MOlines = int(len(V1)/5) + 1
p = 0
r = 0
with open(filename1,'r') as origin:
for i, line in enumerate(origin):
if "Alpha Orbital Energies" in line:
AOE = i+1
AOE_header = line
if "Alpha MO coefficients" in line:
AMO = i+1
AMO_header = line
if "Beta Orbital Energies" in line:
BOE = i+1
BOE_header = line
if "Beta MO coefficients" in line:
BMO = i+1
BMO_header = line
pointer=0
counter=1
Start_point = min(AMO,BMO,AOE,BOE)
with open(filename1,'r') as origin:
data = origin.readlines()
with open(filename3,'w') as f2:
print "Writing results to new output file: ", filename3, " ... "
while (pointer < Start_point-1):
f2.write(data[pointer])
pointer = pointer+1
print "pointer at line = ", pointer
f2.write(AOE_header)
for j in range(0,NBasis):
f2.write(" ")
if (e1[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e1[j].real)))
if (counter%5 == 0):
f2.write("\n")
counter=0
counter=counter+1
counter =1
BOE = AOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BOE = BOE - 1
f2.write(BOE_header)
# f2.write("Beta Orbital Energies\n")
for j in range(0,NBasis):
f2.write(" ")
if (e2[j] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(e2[j].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter+1
counter =1
AMO = BOE + (int(NBasis/5)+2)
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
AMO = AMO - 1
# f2.write("Alpha MO coefficients\n")
f2.write(AMO_header)
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V1[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V1[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
BMO = AMO + (int(NBasis*NBasis/5))+2
if (NBasis%5 != 0):
f2.write("\n")
if (NBasis%5 == 0):
BMO = BMO - 1
# f2.write("Beta MO Coefficients\n")
f2.write(BMO_header)
# f2.write(data[BMO])
for i in range(0,NBasis):
for j in range(0,NBasis):
f2.write(" ")
if (V2[j,i] >= 0):
f2.write(" ")
f2.write(str(fchk_notation(V2[j,i].real)))
if (counter%5 ==0):
f2.write("\n")
counter=0
counter = counter + 1
counter = 1
if (NBasis%5 != 0):
f2.write("\n")
pointer = BMO + (int(NBasis*NBasis/5))+2
# while (pointer < len(data)):
# f2.write(data[pointer])
# pointer = pointer+1
print "Done."
# ContractMat: Subroutine that reads in two square matrices (NBasis x NBasis) and returns their contraction (scalar)
# Input: Matrices A and B (dimensions: NBasis x NBasis), NBasis
#
# Output: scalar m = Sum_(mu,nu) A_(mu,nu) * B_(mu,nu)
#
def ContractMat(A,B,NBasis):
value = 0.0
for i in range(0,NBasis):
for j in range(0,NBasis):
value = value + A[i,j]*B[i,j]
return value
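# Note (illustrative): for NumPy arrays this double loop is equivalent to
# np.sum(A * B), which is substantially faster for large NBasis.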
# Work in progress: Basis set reader:
def ReadBasisSet(filename):
NBasis, NElem, Charge, Multiplicity, NAtoms, SCFEnergy = NBasGrab(filename)
print "Number of Basis functions =", NBasis
print "Number of atoms =", NAtoms
Atomic_Numbers = GetAtoms(filename,NAtoms)
print "Atomic Numbers =", Atomic_Numbers
Atomic_Symbol = [""]*NAtoms
for i in range(0,NAtoms):
Atomic_Symbol[i] = AtomicSymbol(int(Atomic_Numbers[i]))
print "Atomic Symbols =", Atomic_Symbol
| BEATLES.py | 49,185 |
"""Utility functions related to file operations."""
import copy
import logging
import os
import subprocess
import sys
from argparse import Namespace
from collections import OrderedDict
from contextlib import contextmanager
from pathlib import Path
from tempfile import NamedTemporaryFile
from typing import TYPE_CHECKING, Any, Dict, Iterator, List, Optional, Set, Union
# import wcmatch
import wcmatch.pathlib
from wcmatch.wcmatch import RECURSIVE, WcMatch
from ansiblelint.config import BASE_KINDS, options
from ansiblelint.constants import FileType
if TYPE_CHECKING:
# https://github.com/PyCQA/pylint/issues/3979
BasePathLike = os.PathLike[Any] # pylint: disable=unsubscriptable-object
else:
BasePathLike = os.PathLike
_logger = logging.getLogger(__package__)
def normpath(path: Union[str, BasePathLike]) -> str:
"""
Normalize a path in order to provide a more consistent output.
Currently it generates a relative path but in the future we may want to
make this user configurable.
"""
# conversion to string in order to allow receiving non string objects
relpath = os.path.relpath(str(path))
abspath = os.path.abspath(str(path))
    # we avoid returning relative paths that end up at root level
if abspath in relpath:
return abspath
return relpath
@contextmanager
def cwd(path: Union[str, BasePathLike]) -> Iterator[None]:
"""Context manager for temporary changing current working directory."""
old_pwd = os.getcwd()
os.chdir(path)
try:
yield
finally:
os.chdir(old_pwd)
def expand_path_vars(path: str) -> str:
"""Expand the environment or ~ variables in a path string."""
# It may be possible for function to be called with a Path object
path = str(path).strip()
path = os.path.expanduser(path)
path = os.path.expandvars(path)
return path
def expand_paths_vars(paths: List[str]) -> List[str]:
"""Expand the environment or ~ variables in a list."""
paths = [expand_path_vars(p) for p in paths]
return paths
def kind_from_path(path: Path, base: bool = False) -> FileType:
"""Determine the file kind based on its name.
When called with base=True, it will return the base file type instead
of the explicit one. That is expected to return 'yaml' for any yaml files.
"""
# pathlib.Path.match patterns are very limited, they do not support *a*.yml
# glob.glob supports **/foo.yml but not multiple extensions
pathex = wcmatch.pathlib.PurePath(path.absolute().resolve())
kinds = options.kinds if not base else BASE_KINDS
for entry in kinds:
for k, v in entry.items():
if pathex.globmatch(
v,
flags=(
wcmatch.pathlib.GLOBSTAR
| wcmatch.pathlib.BRACE
| wcmatch.pathlib.DOTGLOB
),
):
return str(k) # type: ignore
if base:
# Unknown base file type is default
return ""
if path.is_dir():
return "role"
if str(path) == '/dev/stdin':
return "playbook"
# Unknown file types report a empty string (evaluated as False)
return ""
class Lintable:
"""Defines a file/folder that can be linted.
    Providing file content when creating the object allows creation of in-memory
instances that do not need files to be present on disk.
"""
def __init__(
self,
name: Union[str, Path],
content: Optional[str] = None,
kind: Optional[FileType] = None,
):
"""Create a Lintable instance."""
# Filename is effective file on disk, for stdin is a namedtempfile
self.filename: str = str(name)
self.dir: str = ""
self.kind: Optional[FileType] = None
if isinstance(name, str):
self.name = normpath(name)
self.path = Path(self.name)
else:
self.name = str(name)
self.path = name
self._content = content
# if the lintable is part of a role, we save role folder name
self.role = ""
parts = self.path.parent.parts
if 'roles' in parts:
role = self.path
while role.parent.name != "roles" and role.name:
role = role.parent
            if role.exists():
self.role = role.name
if str(self.path) in ['/dev/stdin', '-']:
# pylint: disable=consider-using-with
self.file = NamedTemporaryFile(mode="w+", suffix="playbook.yml")
self.filename = self.file.name
self._content = sys.stdin.read()
self.file.write(self._content)
self.file.flush()
self.path = Path(self.file.name)
self.name = 'stdin'
self.kind = 'playbook'
self.dir = '/'
else:
self.kind = kind or kind_from_path(self.path)
# We store absolute directory in dir
if not self.dir:
if self.kind == "role":
self.dir = str(self.path.resolve())
else:
self.dir = str(self.path.parent.resolve())
# determine base file kind (yaml, xml, ini, ...)
self.base_kind = kind_from_path(self.path, base=True)
def __getitem__(self, key: Any) -> Any:
"""Provide compatibility subscriptable support."""
if key == 'path':
return str(self.path)
if key == 'type':
return str(self.kind)
raise NotImplementedError()
def get(self, key: Any, default: Any = None) -> Any:
"""Provide compatibility subscriptable support."""
try:
return self.__getitem__(key)
except NotImplementedError:
return default
@property
def content(self) -> str:
"""Retried file content, from internal cache or disk."""
if self._content is None:
with open(self.path, mode='r', encoding='utf-8') as f:
self._content = f.read()
return self._content
def __hash__(self) -> int:
"""Return a hash value of the lintables."""
return hash((self.name, self.kind))
def __eq__(self, other: object) -> bool:
"""Identify whether the other object represents the same rule match."""
if isinstance(other, Lintable):
return bool(self.name == other.name and self.kind == other.kind)
return False
def __repr__(self) -> str:
"""Return user friendly representation of a lintable."""
return f"{self.name} ({self.kind})"
def discover_lintables(options: Namespace) -> Dict[str, Any]:
"""Find all files that we know how to lint."""
# git is preferred as it also considers .gitignore
git_command = ['git', 'ls-files', '-z']
out = None
try:
out = subprocess.check_output(
git_command, stderr=subprocess.STDOUT, universal_newlines=True
).split("\x00")[:-1]
_logger.info("Discovered files to lint using: %s", ' '.join(git_command))
except subprocess.CalledProcessError as exc:
if not (exc.returncode == 128 and 'fatal: not a git repository' in exc.output):
_logger.warning(
"Failed to discover lintable files using git: %s",
exc.output.rstrip('\n'),
)
except FileNotFoundError as exc:
if options.verbosity:
_logger.warning("Failed to locate command: %s", exc)
if out is None:
exclude_pattern = "|".join(options.exclude_paths)
_logger.info("Looking up for files, excluding %s ...", exclude_pattern)
out = WcMatch('.', exclude_pattern=exclude_pattern, flags=RECURSIVE).match()
return OrderedDict.fromkeys(sorted(out))
def guess_project_dir() -> str:
"""Return detected project dir or user home directory."""
try:
result = subprocess.run(
["git", "rev-parse", "--show-toplevel"],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
check=False,
)
except FileNotFoundError:
# if git is absent we use home directory
return str(Path.home())
if result.returncode != 0:
return str(Path.home())
return result.stdout.splitlines()[0]
def expand_dirs_in_lintables(lintables: Set[Lintable]) -> None:
"""Return all recognized lintables within given directory."""
should_expand = False
for item in lintables:
if item.path.is_dir():
should_expand = True
break
if should_expand:
# this relies on git and we do not want to call unless needed
all_files = discover_lintables(options)
for item in copy.copy(lintables):
if item.path.is_dir():
for filename in all_files:
if filename.startswith(str(item.path)):
lintables.add(Lintable(filename))
| src/ansiblelint/file_utils.py | 8,968 |
# -*- coding: utf-8 -*-
import gzip
import bz2
import numpy as np
def advanced_open(filepath, *args, **kwargs):
""" Open function interface for files with different extensions.
Parameters
----------
filepath: str
File path with extension.
args: list
Non-key arguments
kwargs: dict
Key arguments
    Returns
    -------
    file object
    The opened text-mode file handle
    """
open_fn = open
if filepath.endswith('.gz'):
open_fn = gzip.open
elif filepath.endswith('.bz2'):
open_fn = bz2.open
return open_fn(filepath, mode="rt", *args, **kwargs)
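# Illustrative usage (the file names are assumptions for the example):
#   advanced_open("triples.tsv")     -> plain text handle
#   advanced_open("triples.tsv.gz")  -> gzip text handle, same interface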
def load_kg_file(filepath, separator="\t", as_stream=False):
""" Import knowledge graph from file
Parameters
----------
filepath: str
File path
    separator: str
    File column separator
    as_stream: bool
    If True, return a generator over the triplets instead of a numpy array
    Returns
    -------
    numpy.ndarray or generator
    The knowledge graph triplets obtained from the files with size [?, 3]
    """
    if as_stream:
        return load_kg_file_as_stream(filepath, separator=separator)
    kg_triples = []
with advanced_open(filepath) as file_content:
for line in file_content:
kg_triples.append(line.strip().split(separator))
return np.array(kg_triples)
def load_kg_file_as_stream(filepath, separator="\t"):
""" Import knowledge graph from file as a stream
Parameters
----------
filepath: str
File path
separator: str
File column separator
Returns
-------
generator
The knowledge graph triplets obtained from the files with size [?, 3]
"""
with advanced_open(filepath) as file_content:
for line in file_content:
            yield line.strip().split(separator)
| benchmarking/libkge/libkge/io/base.py | 1,606 |
import os
import requests
import datetime
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse, Http404
from django.template import loader
from django.contrib.auth import login
from django.conf import settings
from django.utils import timezone
from requests import status_codes
from rest_framework.authtoken.models import Token
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated, IsAuthenticatedOrReadOnly
from rest_framework import viewsets, status
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
from constance import config
import constance.settings
from tau.twitch.models import TwitchAPIScope, TwitchEventSubSubscription
from tau.users.models import User
from .forms import ChannelNameForm, FirstRunForm
from .utils import cleanup_remote_webhooks, cleanup_webhooks, log_request, check_access_token_expired, refresh_access_token, teardown_all_acct_webhooks, teardown_webhooks
from tau.twitch.models import TwitchHelixEndpoint
@api_view(['POST'])
def irc_message_view(request):
channel_layer = get_channel_layer()
async_to_sync(channel_layer.group_send)('twitchchat', {
'type': 'twitchchat.event',
'data': request.data
})
return Response({}, status=status.HTTP_201_CREATED)
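# Sketch (assumed consumer, not shown in this file): a Channels consumer
# joined to the 'twitchchat' group receives the event above via a handler
# named after the message type ('twitchchat.event' -> twitchchat_event).
#
# from channels.generic.websocket import AsyncJsonWebsocketConsumer
#
# class TwitchChatConsumer(AsyncJsonWebsocketConsumer):
#     async def connect(self):
#         await self.channel_layer.group_add('twitchchat', self.channel_name)
#         await self.accept()
#
#     async def twitchchat_event(self, event):
#         await self.send_json(event['data'])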
@api_view(['GET', 'POST', 'PUT', 'PATCH', 'DELETE'])
def helix_view(request, helix_path=None):
if check_access_token_expired():
refresh_access_token()
try:
endpoint_instance = TwitchHelixEndpoint.objects.get(
endpoint=helix_path,
method=request.method
)
if endpoint_instance.token_type == 'OA':
token = config.TWITCH_ACCESS_TOKEN
else:
token = config.TWITCH_APP_ACCESS_TOKEN
except TwitchHelixEndpoint.DoesNotExist:
token = config.TWITCH_ACCESS_TOKEN
body = request.data
client_id = os.environ.get('TWITCH_APP_ID', None)
headers = {
'Authorization': 'Bearer {}'.format(token),
'Client-Id': client_id
}
    url = f'https://api.twitch.tv/helix/{helix_path}'
uri = request.build_absolute_uri()
url_params = ''
if uri.count('?') > 0:
url_params = uri.split('?', 1)[1]
if url_params != '':
url += f'?{url_params}'
if request.method == 'GET':
data = requests.get(
url,
headers=headers
)
elif request.method == 'POST':
data = requests.post(
url,
data=body,
headers=headers
)
elif request.method == 'PUT':
data = requests.put(
url,
data=body,
headers=headers
)
elif request.method == 'PATCH':
data = requests.patch(
url,
data=body,
headers=headers
)
elif request.method == 'DELETE':
data = requests.delete(
url,
headers=headers
)
try:
        if settings.DEBUG_TWITCH_CALLS:
log_request(data)
stream_data = data.json()
except ValueError:
stream_data = None
return Response(stream_data, status=data.status_code)
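# Sketch: exercising the proxy from a client. The '/api/twitch/helix/' route
# prefix and the token value are assumptions (check urls.py); 'users' is a
# real Helix endpoint.
#
# import requests
# requests.get(
#     'http://localhost:8000/api/twitch/helix/users?login=somechannel',
#     headers={'Authorization': 'Token <TAU_TOKEN>'},
# )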
def home_view(request):
user_count = User.objects.all().exclude(username='worker_process').count()
if user_count == 0:
return HttpResponseRedirect('/first-run/')
# elif not request.user.is_authenticated:
# return HttpResponseRedirect('/accounts/login/')
elif config.CHANNEL == '':
return HttpResponseRedirect('/set-channel/')
elif config.SCOPE_UPDATED_NEEDED:
return HttpResponseRedirect('/refresh-token-scope/')
else:
# # template = loader.get_template('home.html')
# template = loader.get_template('dashboard/index.html')
# return HttpResponse(template.render({'config': config}, request))
return HttpResponseRedirect('/dashboard')
def first_run_view(request):
user_count = User.objects.all().exclude(username='worker_process').count()
if user_count > 0: # If users already exist, it is not first run
return HttpResponseRedirect('/') # reject creating a new super-user
if request.method == 'POST':
form = FirstRunForm(request.POST)
if form.is_valid():
user = User.objects.create_user(
form.cleaned_data['username'],
password=form.cleaned_data['password1']
)
user.is_superuser=True
user.is_staff=True
user.save()
login(request, user)
return HttpResponseRedirect('/')
else:
template = loader.get_template('registration/first-run.html')
return HttpResponse(template.render({}, request))
else:
template = loader.get_template('registration/first-run.html')
return HttpResponse(template.render({}, request))
def get_channel_name_view(request):
if request.method == 'POST':
port = os.environ.get('PORT', 8000)
form = ChannelNameForm(request.POST)
if form.is_valid():
# Process the data
config.CHANNEL = form.cleaned_data['channel_name']
scope=' '.join(settings.TOKEN_SCOPES)
client_id = os.environ.get('TWITCH_APP_ID', None)
url = f'https://id.twitch.tv/oauth2/authorize?' \
f'client_id={client_id}&' \
f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \
f'response_type=code&' \
f'scope={scope}&' \
f'force_verify=true'
return HttpResponseRedirect(url)
else:
# Show some error page
pass
else:
template = loader.get_template('registration/twitch-channel-setup.html')
return HttpResponse(template.render({}, request))
def refresh_token_scope(request):
client_id = os.environ.get('TWITCH_APP_ID', None)
helix_scopes = list(
TwitchAPIScope.objects.filter(
required=True
).values_list('scope', flat=True)
)
eventsub_scopes = list(
TwitchEventSubSubscription.objects.filter(
active=True
).values_list('scope_required', flat=True)
)
scopes = list(set(settings.TOKEN_SCOPES + eventsub_scopes + helix_scopes))
scopes = list(filter(lambda x: (x is not None), scopes))
scope=' '.join(scopes)
url = f'https://id.twitch.tv/oauth2/authorize?' \
f'client_id={client_id}&' \
f'redirect_uri={settings.BASE_URL}/twitch-callback/&' \
f'response_type=code&' \
f'scope={scope}&' \
f'force_verify=true'
return HttpResponseRedirect(url)
@api_view()
def get_tau_token(request):
if not request.user.is_authenticated:
        return JsonResponse({'error': 'You must be logged in to access this endpoint.'})
else:
token = Token.objects.get(user=request.user)
return JsonResponse({'token': token.key})
@api_view(['GET'])
def get_public_url(request):
if not request.user.is_authenticated:
        return JsonResponse({'error': 'You must be logged in to access this endpoint.'})
else:
public_url = config.PUBLIC_URL
return JsonResponse({'public_url': public_url})
@api_view(['POST'])
def refresh_tau_token(request):
if not request.user.is_authenticated:
        return JsonResponse({'error': 'You must be logged in to access this endpoint.'})
else:
token = Token.objects.get(user=request.user)
token.delete()
token = Token.objects.create(user=request.user)
return JsonResponse({'token': token.key})
@api_view(['POST'])
def reset_webhooks(request):
if not request.user.is_authenticated:
        return JsonResponse({'error': 'You must be logged in to access this endpoint.'})
data = request.data
if data['type'] == 'all':
teardown_all_acct_webhooks()
elif data['type'] == 'remote':
token = Token.objects.get(user=request.user)
cleanup_remote_webhooks()
elif data['type'] == 'broken':
token = Token.objects.get(user=request.user)
cleanup_webhooks()
else:
return JsonResponse({'webhooks_reset': False, 'error': 'Proper type not found.'})
config.FORCE_WEBHOOK_REFRESH = True
return JsonResponse({'webhooks_reset': True})
def process_twitch_callback_view(request):
port = os.environ.get('PORT', 8000)
params = request.GET
auth_code = params['code']
client_id = os.environ.get('TWITCH_APP_ID', None)
client_secret = os.environ.get('TWITCH_CLIENT_SECRET', None)
auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = {
'client_id': client_id,
'client_secret': client_secret,
'code': auth_code,
'grant_type': 'authorization_code',
'redirect_uri': f'{settings.BASE_URL}/twitch-callback/'
})
response_data = auth_r.json()
    if settings.DEBUG_TWITCH_CALLS:
log_request(auth_r)
config.TWITCH_ACCESS_TOKEN = response_data['access_token']
config.TWITCH_REFRESH_TOKEN = response_data['refresh_token']
expiration = timezone.now() + datetime.timedelta(seconds=response_data['expires_in'])
config.TWITCH_ACCESS_TOKEN_EXPIRATION = expiration
scope=' '.join(settings.TOKEN_SCOPES)
app_auth_r = requests.post('https://id.twitch.tv/oauth2/token', data = {
'client_id': client_id,
'client_secret': client_secret,
'grant_type': 'client_credentials',
'scope': scope
})
    if settings.DEBUG_TWITCH_CALLS:
log_request(app_auth_r)
app_auth_data = app_auth_r.json()
config.TWITCH_APP_ACCESS_TOKEN = app_auth_data['access_token']
config.SCOPE_UPDATED_NEEDED = False
config.SCOPES_REFRESHED = True
headers = {
'Authorization': 'Bearer {}'.format(config.TWITCH_ACCESS_TOKEN),
'Client-Id': client_id
}
user_r = requests.get('https://api.twitch.tv/helix/users', headers=headers)
    if settings.DEBUG_TWITCH_CALLS:
log_request(user_r)
user_data = user_r.json()
channel_id = user_data['data'][0]['id']
config.CHANNEL_ID = channel_id
return HttpResponseRedirect('/')
class HeartbeatViewSet(viewsets.ViewSet):
permission_classes = (IsAuthenticatedOrReadOnly, )
def list(self, request, *args, **kwargs):
response = {'message': 'pong'}
return Response(response)
class TAUSettingsViewSet(viewsets.ViewSet):
permission_classes = (IsAuthenticated, )
valid_keys = ['USE_IRC']
def list(self, request, *args, **kwargs):
response = {key.lower(): getattr(config, key) for key in self.valid_keys}
return Response(response)
def retrieve(self, request, pk=None):
if pk.upper() in self.valid_keys:
return Response({pk: getattr(config, pk.upper())})
else:
raise Http404
def update(self, request, pk=None):
if pk.upper() in self.valid_keys:
data = request.data
setattr(config, pk.upper(), data['value'])
return Response({pk: data['value']})
else:
raise Http404
class ServiceStatusViewSet(viewsets.ViewSet):
permission_classes = (IsAuthenticated, )
def update(self, request, pk=None):
if pk.startswith('STATUS_') and hasattr(config, pk):
data = request.data
new_status = data['status']
setattr(config, pk, new_status)
return Response({
pk: new_status
})
elif pk == 'SET_ALL':
status_keys = filter(
lambda x: x.startswith('STATUS_'),
constance.settings.CONFIG.keys()
)
data = request.data
new_status = data['status']
for key in status_keys:
setattr(config, key, new_status)
return Response({
'reset': 'complete'
})
else:
raise Http404("Config does not exist")
 | tau/core/views.py | 12,154 |
def find_words(string, word_set):
    # Greedy left-to-right scan: emit the shortest prefix found in word_set.
    # If unmatched characters remain at the end of the string, backtrack by
    # merging the most recent word back into the unmatched tail and re-test,
    # until the tail matches or there are no more words to pop.
    if string == "" or not word_set:
        return None
    if string in word_set:  # O(1) membership test
        return [string]
    tmp = ""       # current run of unmatched characters
    out = []       # words recognised so far
    retro = False  # True while backtracking: stop consuming new characters
    i = 0
    while i < len(string):
        if not retro:
            tmp += string[i]
        if tmp in word_set:
            out.append(tmp)
            tmp = ""
        if i == len(string) - 1 and tmp != "":
            if not out:
                return None
            tmp = out.pop() + tmp  # merge the last word back into the tail
            retro = True
            i -= 1  # stay on the final index for the next pass
        i += 1
    return out
assert find_words(
"bedbathandbeyond",
set(['bed', 'bath', 'bedbath', 'and', 'beyond'])
) == ['bed', 'bath', 'and', 'beyond']
assert find_words(
"thequickbrownfox",
set(['quick', 'brown', 'the', 'fox'])
) == ['the', 'quick', 'brown', 'fox']
assert find_words(
"thequickbrownfoxa",
set(['quick', 'brown', 'the', 'fox'])
) == None
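# Sketch (not in the original file): the greedy backtracking above cannot
# re-split words it has already emitted, so e.g. find_words("abcd",
# {'a', 'ab', 'cd'}) returns None even though ['ab', 'cd'] is valid. A
# memoized recursion covers such cases:
def find_words_memo(string, word_set, _memo=None):
    if _memo is None:
        _memo = {}
    if string in _memo:
        return _memo[string]
    if string == "":
        return []
    for end in range(1, len(string) + 1):
        prefix = string[:end]
        if prefix in word_set:
            rest = find_words_memo(string[end:], word_set, _memo)
            if rest is not None:
                _memo[string] = [prefix] + rest
                return _memo[string]
    _memo[string] = None
    return None
assert find_words_memo("abcd", set(['a', 'ab', 'cd'])) == ['ab', 'cd']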
 | reconstruct-words.py | 1,041 |
import json
import yaml
from pathlib import Path
from brownie import *
from substrateinterface import Keypair
from hashlib import blake2b
import base58
def get_derivative_account(root_account, index):
seed_bytes = b'modlpy/utilisuba'
root_account_bytes = bytes.fromhex(Keypair(root_account).public_key[2:])
index_bytes = int(index).to_bytes(2, 'little')
entropy = blake2b(seed_bytes + root_account_bytes + index_bytes, digest_size=32).digest()
input_bytes = bytes([42]) + entropy
checksum = blake2b(b'SS58PRE' + input_bytes).digest()
return base58.b58encode(input_bytes + checksum[:2]).decode()
class Contracts:
user = None
proxy_admin = None
lido = None
vksm = None
oracle_master = None
wstksm = None
auth_manager = None
controller = None
ledgers = None
validators = None
def __init__(self, _user, _proxy_admin, _lido, _vksm, _oracle_master, _wstksm, _auth_manager, _controller, _ledgers, _validators):
self.user = _user
self.proxy_admin = _proxy_admin
self.lido = _lido
self.vksm = _vksm
self.oracle_master = _oracle_master
self.wstksm = _wstksm
self.auth_manager = _auth_manager
self.controller = _controller
self.ledgers = _ledgers
self.validators = _validators
NETWORK="kusama"
def load_deployments(network):
path = './deployments/' + network + '.json'
if Path(path).is_file():
with open(path) as file:
return json.load(file)
else:
return {}
def load_deployment_config(network):
with open('./deployment-config.yml') as file:
return yaml.safe_load(file)['networks'][network]
CONFIG = load_deployment_config(NETWORK)
DEPLOYMENTS = load_deployments(NETWORK)
def gen_ledger_account(index):
sovereign = CONFIG['sovereign_account']
root_index = CONFIG['root_derivative_index']
controller = get_derivative_account(sovereign, root_index)
return get_derivative_account(controller, index)
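# Sketch: ledger stashes come from two derivation hops,
# sovereign -(root_derivative_index)-> controller -(index)-> ledger.
def _example_ledger_addresses(count=3):
    return [gen_ledger_account(i) for i in range(count)]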
# From the brownie console (--network=moonbase): contracts = run('./scripts/prepare_env.py')
def main():
user = accounts.load(CONFIG['deployer'])
proxy_admin = ProxyAdminMock.at(DEPLOYMENTS['ProxyAdmin'])
lido = Lido.at(DEPLOYMENTS['Lido'])
vksm = vKSM_mock.at(CONFIG['precompiles']['vksm'])
oracle_master = OracleMaster.at(DEPLOYMENTS['OracleMaster'])
wstksm = WstKSM.at(DEPLOYMENTS['WstKSM'])
auth_manager = AuthManager.at(DEPLOYMENTS['AuthManager'])
controller = Controller.at(DEPLOYMENTS['Controller'])
ledgers = [ Ledger.at(addr) for addr in lido.getLedgerAddresses() ]
# current validators in moonbase
validator_1 = Keypair("5CX2ov8tmW6nZwy6Eouzc7VxFHcAyZioNm5QjEUYc7zjbS66").public_key
validator_2 = Keypair("5FRiNmoi9HFGFrY3K9xsSCeewRtA2pcXTZVZrwLacPCfvHum").public_key
validator_3 = Keypair("5EcdgHV81hu6YpPucSMrWbdQRBUr18XypiiGsgQ7HREYdrWG").public_key
validator_4 = Keypair("5FCEmzonc34D2SXXv2CMsDoFWCVivH2a2Mwe32t9BT1TcpAD").public_key
validator_5 = Keypair("5Ehgvgk1LERD5aTEWw6HLdKZurBqcRYbHXvrAtTgYPhUpr1R").public_key
validators = [validator_1, validator_2, validator_3, validator_4, validator_5]
# 5CxXVE7pHqzR4kzfz6nop529odm8eVemFFtStruyNQvdTopo
# 5GxgDNMhbvMhuJzXC2voX5nKUyNaNQFCZxgnoa18eGiBBZwt
# 5Cqb9WXVQQF73a1dcJEBFS2bWrukaC6dmzjeWZeJHj3NMwvB
return Contracts(user, proxy_admin, lido, vksm, oracle_master, wstksm, auth_manager, controller, ledgers, validators)
 | scripts/prepare_env.py | 3,484 |
import datetime
from dateutil.parser import parse
from decimal import Decimal
import re
import importlib
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.utils import datetime_safe
from tastypie.bundle import Bundle
from tastypie.exceptions import ApiFieldError, NotFound
from tastypie.utils import dict_strip_unicode_keys, make_aware
class NOT_PROVIDED:
def __str__(self):
return 'No default provided.'
DATE_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2}).*?$')
DATETIME_REGEX = re.compile(r'^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the ApiField variants.
class ApiField(object):
"""The base implementation of a field used by the resources."""
dehydrated_type = 'string'
help_text = ''
def __init__(self, attribute=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, unique=False, help_text=None):
"""
Sets up the field. This is generally called when the containing
``Resource`` is initialized.
Optionally accepts an ``attribute``, which should be a string of
either an instance attribute or callable off the object during the
``dehydrate`` or push data onto an object during the ``hydrate``.
Defaults to ``None``, meaning data will be manually accessed.
Optionally accepts a ``default``, which provides default data when the
object being ``dehydrated``/``hydrated`` has no data on the field.
Defaults to ``NOT_PROVIDED``.
Optionally accepts a ``null``, which indicated whether or not a
``None`` is allowable data on the field. Defaults to ``False``.
Optionally accepts a ``blank``, which indicated whether or not
data may be omitted on the field. Defaults to ``False``.
Optionally accepts a ``readonly``, which indicates whether the field
is used during the ``hydrate`` or not. Defaults to ``False``.
Optionally accepts a ``unique``, which indicates if the field is a
unique identifier for the object.
Optionally accepts ``help_text``, which lets you provide a
human-readable description of the field exposed at the schema level.
Defaults to the per-Field definition.
"""
# Track what the index thinks this field is called.
self.instance_name = None
self._resource = None
self.attribute = attribute
self._default = default
self.null = null
self.blank = blank
self.readonly = readonly
self.value = None
self.unique = unique
if help_text:
self.help_text = help_text
def contribute_to_class(self, cls, name):
# Do the least we can here so that we don't hate ourselves in the
# morning.
self.instance_name = name
self._resource = cls
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
def dehydrate(self, bundle):
"""
Takes data from the provided object and prepares it for the
resource.
"""
if self.attribute is not None:
# Check for `__` in the field for looking through the relation.
attrs = self.attribute.split('__')
current_object = bundle.obj
for attr in attrs:
previous_object = current_object
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
# Fall out of the loop, given any further attempts at
# accesses will fail miserably.
break
elif self.null:
current_object = None
# Fall out of the loop, given any further attempts at
# accesses will fail miserably.
break
else:
raise ApiFieldError("The object '%r' has an empty attribute '%s' and doesn't allow a default or null value." % (previous_object, attr))
if callable(current_object):
current_object = current_object()
return self.convert(current_object)
if self.has_default():
return self.convert(self.default)
else:
return None
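    # Note (sketch): because dehydrate() splits ``attribute`` on '__', a
    # declaration such as (hypothetical)
    #     city = CharField(attribute='profile__address__city')
    # walks bundle.obj.profile.address.city, applying the default/null
    # handling above at the first missing hop.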
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
def hydrate(self, bundle):
"""
Takes data stored in the bundle for the field and returns it. Used for
taking simple data and building a instance object.
"""
if self.readonly:
return None
if not bundle.data.has_key(self.instance_name):
is_related = getattr(self, 'is_related', False)
is_m2m = getattr(self, 'is_m2m', False)
if is_related and not is_m2m:
# We've got an FK (or alike field) & a possible parent object.
# Check for it.
if bundle.related_obj and bundle.related_name in (self.attribute, self.instance_name):
return bundle.related_obj
# Functor for safely checking if bundle.obj has a non-None property
def has_non_null_attr(obj, name):
try:
return getattr(obj, name, None) is not None
except:
if is_related:
return None
else:
raise
if self.blank:
return None
elif self.attribute and has_non_null_attr(bundle.obj, self.attribute):
return getattr(bundle.obj, self.attribute)
elif self.instance_name and has_non_null_attr(bundle.obj, self.instance_name):
return getattr(bundle.obj, self.instance_name)
elif self.has_default():
if callable(self._default):
return self._default()
return self._default
elif self.null:
return None
else:
raise ApiFieldError("The '%s' field has no data and doesn't allow a default or null value." % self.instance_name)
bundle_val = bundle.data[self.instance_name]
if bundle_val is None and not self.null:
raise ApiFieldError("The '%s' field doesn't allow a null value." % self.instance_name)
else:
return bundle_val
def set_value_on_bundle_obj(self, bundle, value):
"""
Overrideable hook for writing a value into the object on a bundle. Enables the use of
custom setters in your app code if setattr() is too raw for your fancy ORM model.
"""
try:
setattr(bundle.obj, self.attribute, value)
except Exception, e:
raise ApiFieldError("The '%s' field couldn't set value '%s': %s" %
(self.instance_name, value, e))
class CharField(ApiField):
"""
A text field of arbitrary length.
Covers both ``models.CharField`` and ``models.TextField``.
"""
dehydrated_type = 'string'
help_text = 'Unicode string data. Ex: "Hello World"'
def convert(self, value):
if value is None:
return None
return unicode(value)
class FileField(ApiField):
"""
A file-related field.
Covers both ``models.FileField`` and ``models.ImageField``.
"""
dehydrated_type = 'string'
help_text = 'A file URL as a string. Ex: "http://media.example.com/media/photos/my_photo.jpg"'
def convert(self, value):
if value is None:
return None
try:
# Try to return the URL if it's a ``File``, falling back to the string
# itself if it's been overridden or is a default.
return getattr(value, 'url', value)
except ValueError:
return None
class IntegerField(ApiField):
"""
An integer field.
Covers ``models.IntegerField``, ``models.PositiveIntegerField``,
``models.PositiveSmallIntegerField`` and ``models.SmallIntegerField``.
"""
dehydrated_type = 'integer'
help_text = 'Integer data. Ex: 2673'
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(ApiField):
"""
A floating point field.
"""
dehydrated_type = 'float'
help_text = 'Floating point numeric data. Ex: 26.73'
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(ApiField):
"""
A decimal field.
"""
dehydrated_type = 'decimal'
help_text = 'Fixed precision numeric data. Ex: 26.73'
def convert(self, value):
if value is None:
return None
return Decimal(value)
def hydrate(self, bundle):
value = super(DecimalField, self).hydrate(bundle)
if value and not isinstance(value, Decimal):
value = Decimal(value)
return value
class BooleanField(ApiField):
"""
A boolean field.
Covers both ``models.BooleanField`` and ``models.NullBooleanField``.
"""
dehydrated_type = 'boolean'
help_text = 'Boolean data. Ex: True'
def convert(self, value):
if value is None:
return None
return bool(value)
class ListField(ApiField):
"""
A list field.
"""
dehydrated_type = 'list'
help_text = "A list of data. Ex: ['abc', 26.73, 8]"
def convert(self, value):
if value is None:
return None
return list(value)
class DictField(ApiField):
"""
A dictionary field.
"""
dehydrated_type = 'dict'
help_text = "A dictionary of data. Ex: {'price': 26.73, 'name': 'Daniel'}"
def convert(self, value):
if value is None:
return None
return dict(value)
class DateField(ApiField):
"""
A date field.
"""
dehydrated_type = 'date'
help_text = 'A date as a string. Ex: "2010-11-10"'
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATE_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise ApiFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
def hydrate(self, bundle):
value = super(DateField, self).hydrate(bundle)
if value and not hasattr(value, 'year'):
try:
# Try to rip a date/datetime out of it.
value = make_aware(parse(value))
if hasattr(value, 'hour'):
value = value.date()
except ValueError:
pass
return value
class DateTimeField(ApiField):
"""
A datetime field.
"""
dehydrated_type = 'datetime'
help_text = 'A date & time as a string. Ex: "2010-11-10T03:07:43"'
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return make_aware(datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second'])))
else:
raise ApiFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
def hydrate(self, bundle):
value = super(DateTimeField, self).hydrate(bundle)
if value and not hasattr(value, 'year'):
try:
# Try to rip a date/datetime out of it.
value = make_aware(parse(value))
except ValueError:
pass
return value
class RelatedField(ApiField):
"""
Provides access to data that is related within the database.
The ``RelatedField`` base class is not intended for direct use but provides
functionality that ``ToOneField`` and ``ToManyField`` build upon.
The contents of this field actually point to another ``Resource``,
rather than the related object. This allows the field to represent its data
in different ways.
The abstractions based around this are "leaky" in that, unlike the other
fields provided by ``tastypie``, these fields don't handle arbitrary objects
very well. The subclasses use Django's ORM layer to make things go, though
there is no ORM-specific code at this level.
"""
dehydrated_type = 'related'
is_related = True
self_referential = False
help_text = 'A related resource. Can be either a URI or set of nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED, null=False, blank=False, readonly=False, full=False, unique=False, help_text=None):
"""
Builds the field and prepares it to access to related data.
The ``to`` argument should point to a ``Resource`` class, NOT
to a ``Model``. Required.
The ``attribute`` argument should specify what field/callable points to
the related data on the instance object. Required.
Optionally accepts a ``related_name`` argument. Currently unused, as
unlike Django's ORM layer, reverse relations between ``Resource``
classes are not automatically created. Defaults to ``None``.
Optionally accepts a ``null``, which indicated whether or not a
``None`` is allowable data on the field. Defaults to ``False``.
Optionally accepts a ``blank``, which indicated whether or not
data may be omitted on the field. Defaults to ``False``.
Optionally accepts a ``readonly``, which indicates whether the field
is used during the ``hydrate`` or not. Defaults to ``False``.
Optionally accepts a ``full``, which indicates how the related
``Resource`` will appear post-``dehydrate``. If ``False``, the
related ``Resource`` will appear as a URL to the endpoint of that
resource. If ``True``, the result of the sub-resource's
``dehydrate`` will be included in full.
Optionally accepts a ``unique``, which indicates if the field is a
unique identifier for the object.
Optionally accepts ``help_text``, which lets you provide a
human-readable description of the field exposed at the schema level.
Defaults to the per-Field definition.
"""
self.instance_name = None
self._resource = None
self.to = to
self.attribute = attribute
self.related_name = related_name
self._default = default
self.null = null
self.blank = blank
self.readonly = readonly
self.full = full
self.api_name = None
self.resource_name = None
self.unique = unique
self._to_class = None
if self.to == 'self':
self.self_referential = True
self._to_class = self.__class__
if help_text:
self.help_text = help_text
def contribute_to_class(self, cls, name):
super(RelatedField, self).contribute_to_class(cls, name)
# Check if we're self-referential and hook it up.
# We can't do this quite like Django because there's no ``AppCache``
# here (which I think we should avoid as long as possible).
if self.self_referential or self.to == 'self':
self._to_class = cls
def get_related_resource(self, related_instance=None):
"""
Instantiates the related resource.
"""
instance = self.to_class(api_name=self.api_name)
instance.api_name = self.api_name
return instance
@property
def to_class(self):
# We need to be lazy here, because when the metaclass constructs the
# Resources, other classes may not exist yet.
# That said, memoize this so we never have to relookup/reimport.
if self._to_class:
return self._to_class
if not isinstance(self.to, basestring):
self._to_class = self.to
return self._to_class
# It's a string. Let's figure it out.
if '.' in self.to:
# Try to import.
module_bits = self.to.split('.')
module_path, class_name = '.'.join(module_bits[:-1]), module_bits[-1]
module = importlib.import_module(module_path)
else:
# We've got a bare class name here, which won't work (No AppCache
# to rely on). Try to throw a useful error.
raise ImportError("Tastypie requires a Python-style path (<module.module.Class>) to lazy load related resources. Only given '%s'." % self.to)
self._to_class = getattr(module, class_name, None)
if self._to_class is None:
raise ImportError("Module '%s' does not appear to have a class called '%s'." % (module_path, class_name))
return self._to_class
def dehydrate_related(self, bundle, related_resource, related_instance):
"""
Based on the ``full_resource``, returns either the endpoint or the data
from ``full_dehydrate`` for the related resource.
"""
if not self.full:
# Be a good netizen.
return related_resource.get_resource_uri(bundle)
else:
# ZOMG extra data and big payloads.
bundle = related_resource.build_bundle(obj=related_instance, request=bundle.request)
return related_resource.full_dehydrate(bundle)
def resource_from_uri(self, fk_resource, uri, request=None, related_obj=None, related_name=None):
"""
Given a URI is provided, the related resource is attempted to be
loaded based on the identifiers in the URI.
"""
try:
obj = fk_resource.get_via_uri(uri, request=request)
bundle = fk_resource.build_bundle(obj=obj, request=request)
return fk_resource.full_dehydrate(bundle)
except ObjectDoesNotExist:
raise ApiFieldError("Could not find the provided object via resource URI '%s'." % uri)
def resource_from_data(self, fk_resource, data, request=None, related_obj=None, related_name=None):
"""
Given a dictionary-like structure is provided, a fresh related
resource is created using that data.
"""
# Try to hydrate the data provided.
data = dict_strip_unicode_keys(data)
fk_bundle = fk_resource.build_bundle(data=data, request=request)
if related_obj:
fk_bundle.related_obj = related_obj
fk_bundle.related_name = related_name
# We need to check to see if updates are allowed on the FK
# resource. If not, we'll just return a populated bundle instead
# of mistakenly updating something that should be read-only.
if not fk_resource.can_update():
# If the resource already exists and the client specified where to find it, we look it up.
if 'resource_uri' in data:
obj = fk_resource.get_via_uri(data['resource_uri'], request=request)
fk_bundle.install_existing_obj( obj )
return fk_bundle
# If the resource supports creation, then we can full_hydrate() and create a new instance.
elif fk_resource.can_create():
return fk_resource.full_hydrate(fk_bundle)
else:
raise ApiFieldError("Resource %s does not support being created via POST" %
fk_resource._meta.resource_name)
try:
return fk_resource.obj_update(fk_bundle, **data)
except NotFound:
try:
# Attempt lookup by primary key
lookup_kwargs = dict((k, v) for k, v in data.iteritems() if getattr(fk_resource, k).unique)
if not lookup_kwargs:
raise NotFound()
return fk_resource.obj_update(fk_bundle, **lookup_kwargs)
except NotFound:
fk_bundle = fk_resource.full_hydrate(fk_bundle)
fk_resource.is_valid(fk_bundle, request)
return fk_bundle
except MultipleObjectsReturned:
return fk_resource.full_hydrate(fk_bundle)
def resource_from_pk(self, fk_resource, obj, request=None, related_obj=None, related_name=None):
"""
Given an object with a ``pk`` attribute, the related resource
is attempted to be loaded via that PK.
"""
bundle = fk_resource.build_bundle(obj=obj, request=request)
return fk_resource.full_dehydrate(bundle)
def build_related_resource(self, value, request=None, related_obj=None, related_name=None):
"""
Returns a bundle of data built by the related resource, usually via
``hydrate`` with the data provided.
Accepts either a URI, a data dictionary (or dictionary-like structure)
or an object with a ``pk``.
"""
self.fk_resource = self.to_class(api_name=self.api_name)
kwargs = {
'request': request,
'related_obj': related_obj,
'related_name': related_name,
}
if isinstance(value, basestring):
# We got a URI. Load the object and assign it.
return self.resource_from_uri(self.fk_resource, value, **kwargs)
elif isinstance(value, Bundle):
# We got a valid bundle object, the RelatedField had full=True
return value
elif isinstance(value, dict):
# We've got a data dictionary.
# Since this leads to creation, this is the only one of these
# methods that might care about "parent" data.
return self.resource_from_data(self.fk_resource, value, **kwargs)
elif hasattr(value, 'pk'):
# We've got an object with a primary key.
return self.resource_from_pk(self.fk_resource, value, **kwargs)
else:
raise ApiFieldError("The '%s' field was given data that was not a URI, not a dictionary-alike and does not have a 'pk' attribute: %s." % (self.instance_name, value))
class ToOneField(RelatedField):
"""
Provides access to related data via foreign key.
This subclass requires Django's ORM layer to work properly.
"""
help_text = 'A single related resource. Can be either a URI or set of nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
null=False, blank=False, readonly=False, full=False,
unique=False, help_text=None):
super(ToOneField, self).__init__(
to, attribute, related_name=related_name, default=default,
null=null, blank=blank, readonly=readonly, full=full,
unique=unique, help_text=help_text
)
self.fk_resource = None
def dehydrate(self, bundle):
foreign_obj = None
if isinstance(self.attribute, basestring):
attrs = self.attribute.split('__')
foreign_obj = bundle.obj
for attr in attrs:
previous_obj = foreign_obj
try:
foreign_obj = getattr(foreign_obj, attr, None)
except ObjectDoesNotExist:
foreign_obj = None
elif callable(self.attribute):
foreign_obj = self.attribute(bundle)
if not foreign_obj:
if not self.null:
raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
return None
self.fk_resource = self.get_related_resource(foreign_obj)
fk_bundle = Bundle(obj=foreign_obj, request=bundle.request)
return self.dehydrate_related(fk_bundle, self.fk_resource, foreign_obj)
def hydrate(self, bundle):
value = super(ToOneField, self).hydrate(bundle)
if value is None:
return value
return self.build_related_resource(value, request=bundle.request)
class ForeignKey(ToOneField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class OneToOneField(ToOneField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
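# Sketch (hypothetical Note model and resources): how ToOneField is
# typically declared on a ModelResource.
#
# from tastypie.resources import ModelResource
#
# class NoteResource(ModelResource):
#     author = ToOneField('myapp.api.UserResource', 'author', full=False)
#
#     class Meta:
#         queryset = Note.objects.all()
#         resource_name = 'note'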
class ToManyField(RelatedField):
"""
Provides access to related data via a join table.
This subclass requires Django's ORM layer to work properly.
Note that the ``hydrate`` portions of this field are quite different than
any other field. ``hydrate_m2m`` actually handles the data and relations.
This is due to the way Django implements M2M relationships.
"""
is_m2m = True
help_text = 'Many related resources. Can be either a list of URIs or list of individually nested resource data.'
def __init__(self, to, attribute, related_name=None, default=NOT_PROVIDED,
null=False, blank=False, readonly=False, full=False,
unique=False, help_text=None):
super(ToManyField, self).__init__(
to, attribute, related_name=related_name, default=default,
null=null, blank=blank, readonly=readonly, full=full,
unique=unique, help_text=help_text
)
self.m2m_bundles = []
def dehydrate(self, bundle):
if not bundle.obj or not bundle.obj.pk:
if not self.null:
raise ApiFieldError("The model '%r' does not have a primary key and can not be used in a ToMany context." % bundle.obj)
return []
the_m2ms = None
previous_obj = bundle.obj
attr = self.attribute
if isinstance(self.attribute, basestring):
attrs = self.attribute.split('__')
the_m2ms = bundle.obj
for attr in attrs:
previous_obj = the_m2ms
try:
the_m2ms = getattr(the_m2ms, attr, None)
except ObjectDoesNotExist:
the_m2ms = None
if not the_m2ms:
break
elif callable(self.attribute):
the_m2ms = self.attribute(bundle)
if not the_m2ms:
if not self.null:
raise ApiFieldError("The model '%r' has an empty attribute '%s' and doesn't allow a null value." % (previous_obj, attr))
return []
self.m2m_resources = []
m2m_dehydrated = []
# TODO: Also model-specific and leaky. Relies on there being a
# ``Manager`` there.
for m2m in the_m2ms.all():
m2m_resource = self.get_related_resource(m2m)
m2m_bundle = Bundle(obj=m2m, request=bundle.request)
self.m2m_resources.append(m2m_resource)
m2m_dehydrated.append(self.dehydrate_related(m2m_bundle, m2m_resource, m2m))
return m2m_dehydrated
def hydrate(self, bundle):
pass
def hydrate_m2m(self, bundle):
if self.readonly:
return None
if bundle.data.get(self.instance_name) is None:
if self.blank:
return []
elif self.null:
return []
else:
raise ApiFieldError("The '%s' field has no data and doesn't allow a null value." % self.instance_name)
m2m_hydrated = []
for value in bundle.data.get(self.instance_name):
if value is None:
continue
kwargs = {
'request': bundle.request,
}
if self.related_name:
kwargs['related_obj'] = bundle.obj
kwargs['related_name'] = self.related_name
m2m_hydrated.append(self.build_related_resource(value, **kwargs))
return m2m_hydrated
class ManyToManyField(ToManyField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class OneToManyField(ToManyField):
"""
A convenience subclass for those who prefer to mirror ``django.db.models``.
"""
pass
class TimeField(ApiField):
dehydrated_type = 'time'
help_text = 'A time as string. Ex: "20:05:23"'
def dehydrate(self, obj):
return self.convert(super(TimeField, self).dehydrate(obj))
def convert(self, value):
if isinstance(value, basestring):
return self.to_time(value)
return value
def to_time(self, s):
try:
dt = parse(s)
except ValueError, e:
raise ApiFieldError(str(e))
else:
return datetime.time(dt.hour, dt.minute, dt.second)
def hydrate(self, bundle):
value = super(TimeField, self).hydrate(bundle)
if value and not isinstance(value, datetime.time):
value = self.to_time(value)
return value
 | tastypie/fields.py | 29,911 |
# coding: utf-8
"""
UltraCart Rest API V2
UltraCart REST API Version 2
OpenAPI spec version: 2.0.0
Contact: support@ultracart.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class CouponFreeItemAndShippingWithSubtotal(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'currency_code': 'str',
'items': 'list[str]',
'limit': 'int',
'shipping_methods': 'list[str]',
'subtotal_amount': 'float'
}
attribute_map = {
'currency_code': 'currency_code',
'items': 'items',
'limit': 'limit',
'shipping_methods': 'shipping_methods',
'subtotal_amount': 'subtotal_amount'
}
def __init__(self, currency_code=None, items=None, limit=None, shipping_methods=None, subtotal_amount=None):
"""
CouponFreeItemAndShippingWithSubtotal - a model defined in Swagger
"""
self._currency_code = None
self._items = None
self._limit = None
self._shipping_methods = None
self._subtotal_amount = None
self.discriminator = None
if currency_code is not None:
self.currency_code = currency_code
if items is not None:
self.items = items
if limit is not None:
self.limit = limit
if shipping_methods is not None:
self.shipping_methods = shipping_methods
if subtotal_amount is not None:
self.subtotal_amount = subtotal_amount
@property
def currency_code(self):
"""
Gets the currency_code of this CouponFreeItemAndShippingWithSubtotal.
The ISO-4217 three letter currency code the customer is viewing prices in
:return: The currency_code of this CouponFreeItemAndShippingWithSubtotal.
:rtype: str
"""
return self._currency_code
@currency_code.setter
def currency_code(self, currency_code):
"""
Sets the currency_code of this CouponFreeItemAndShippingWithSubtotal.
The ISO-4217 three letter currency code the customer is viewing prices in
:param currency_code: The currency_code of this CouponFreeItemAndShippingWithSubtotal.
:type: str
"""
if currency_code is not None and len(currency_code) > 3:
raise ValueError("Invalid value for `currency_code`, length must be less than or equal to `3`")
self._currency_code = currency_code
@property
def items(self):
"""
Gets the items of this CouponFreeItemAndShippingWithSubtotal.
A list of items that are eligible for this discount_price.
:return: The items of this CouponFreeItemAndShippingWithSubtotal.
:rtype: list[str]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this CouponFreeItemAndShippingWithSubtotal.
A list of items that are eligible for this discount_price.
:param items: The items of this CouponFreeItemAndShippingWithSubtotal.
:type: list[str]
"""
self._items = items
@property
def limit(self):
"""
Gets the limit of this CouponFreeItemAndShippingWithSubtotal.
The limit of free items that may be received when purchasing multiple items
:return: The limit of this CouponFreeItemAndShippingWithSubtotal.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""
Sets the limit of this CouponFreeItemAndShippingWithSubtotal.
The limit of free items that may be received when purchasing multiple items
:param limit: The limit of this CouponFreeItemAndShippingWithSubtotal.
:type: int
"""
self._limit = limit
@property
def shipping_methods(self):
"""
Gets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal.
One or more shipping methods that may be free
:return: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal.
:rtype: list[str]
"""
return self._shipping_methods
@shipping_methods.setter
def shipping_methods(self, shipping_methods):
"""
Sets the shipping_methods of this CouponFreeItemAndShippingWithSubtotal.
One or more shipping methods that may be free
:param shipping_methods: The shipping_methods of this CouponFreeItemAndShippingWithSubtotal.
:type: list[str]
"""
self._shipping_methods = shipping_methods
@property
def subtotal_amount(self):
"""
Gets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.
The amount of subtotal required to receive the discount percent
:return: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.
:rtype: float
"""
return self._subtotal_amount
@subtotal_amount.setter
def subtotal_amount(self, subtotal_amount):
"""
Sets the subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.
The amount of subtotal required to receive the discount percent
:param subtotal_amount: The subtotal_amount of this CouponFreeItemAndShippingWithSubtotal.
:type: float
"""
self._subtotal_amount = subtotal_amount
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, CouponFreeItemAndShippingWithSubtotal):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
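if __name__ == '__main__':
    # Demo sketch: building the generated model and round-tripping it
    # through to_dict(); all values are illustrative.
    coupon = CouponFreeItemAndShippingWithSubtotal(
        currency_code='USD',
        items=['ITEM-A', 'ITEM-B'],
        limit=1,
        shipping_methods=['Ground'],
        subtotal_amount=50.0,
    )
    print(coupon.to_dict())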
 | ultracart/models/coupon_free_item_and_shipping_with_subtotal.py | 7,294 |
import RPi.GPIO as GPIO
import time,sys, datetime, json, requests
from requests.exceptions import ConnectionError, Timeout, TooManyRedirects
'''
Configure raspberry
'''
GPIO.setmode(GPIO.BCM)
inpt = 13
GPIO.setup(inpt,GPIO.IN)
'''
Configure some global variables
'''
current_input = GPIO.input(inpt) # This is used to compare to the new_input later.
total_rotations = 0 # This is a counter. It gets reset after the number of seconds in rotation_downtime.
cup_movements = 200 # This is how many rotations occur as a cup of liquid passes through.
rotation_downtime = 5 # Sets the cut-off time for establishing a water-flow event.
last_movement_time = time.time() + rotation_downtime # This is used to determine if a new water-flow event should be created.
record_data = False # A flag used to trigger database insert.
data = []
print('Control C to exit')
def commit_data(data):
    '''
    Sends each buffered row to the database endpoint. Rows that post
    successfully are dropped; on a connection failure the remaining rows are
    kept and returned so they can be retried on the next water-flow event.
    '''
    url = 'http://localhost:1880/sensor'
    headers = {
        'Accepts': 'application/json'
    }
    remaining = list(data)
    while remaining:
        row = remaining[0]
        send_jsn = json.dumps({"Movements": row[1], "Cups": row[2], "Gallons": row[3], "Liters": row[4]})
        try:
            response = requests.post(url, data=send_jsn, headers=headers)
            print(response.text)
        except (ConnectionError, Timeout, TooManyRedirects) as e:
            print(e)
            break  # keep unsent rows; retry on the next event
        remaining.pop(0)
    return remaining
def prep_and_send(data,total_rotations):
'''
Calculates measurements (cups and gallons). Prepares the data into a database-friendly tuple. Appends that tuple to a list.
It then tries to connect to database. If it is not successful then it does nothing but saves the data; it will try to send
the list of data-tuples the next time there is a water-flow event.
Once the connection is successful data is emptied in commit_data().
'''
total_cups = total_rotations/cup_movements
total_gallons = total_cups/16
total_liters = total_gallons*3.78541
now = datetime.datetime.now()
print('{}: Movements: {}. \nCups: {}. \nGallons: {}. \nLiters: {}'.format(now,total_rotations,total_cups,total_gallons,total_liters))
current_data = (
now,
round(total_rotations,2),
round(total_cups,2),
round(total_gallons,2),
round(total_liters,2),
)
data.append(current_data)
print(f"datos: {data}")
data = commit_data(data)
return data
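# Check of the conversion arithmetic above (sketch): with cup_movements = 200
# pulses per cup and 16 cups per US gallon, one gallon is 3200 pulses:
#   3200 pulses / 200 = 16 cups; 16 cups / 16 = 1.0 gallon = 3.78541 liters.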
try:
    while True:
        '''
        This is what actually runs the whole time.
        It first checks to see if new_input is different from current_input. This would be the case if there was a rotation.
        Once it detects that the input is different it knows water is flowing.
        It starts tracking the total_rotations and when the last rotation occurred.
        After each rotation it refreshes the value of the last rotation time.
        It waits a few seconds (rotation_downtime) after the last rotation time to make sure the water has stopped.
        Once the water stops it passes the total_rotations to prep_and_send().
        It also passes 'data' which is any previous water-flow events that were not successfully sent at the time they were recorded.
        '''
        new_input = GPIO.input(inpt)
        if new_input != current_input:
            # A sensor edge means one rotation; count it once, whether it
            # starts a new flow event or continues the current one.
            total_rotations += 1
            record_data = True
            current_input = new_input
            last_movement_time = time.time() + rotation_downtime
        elif record_data == True and time.time() > last_movement_time:
            # No edge for rotation_downtime seconds: the flow event ended.
            data = prep_and_send(data, total_rotations)
            record_data = False
            total_rotations = 0
            last_movement_time = time.time() + rotation_downtime
            current_input = new_input
except KeyboardInterrupt:
    print('\nCTRL C - Exiting nicely')
    GPIO.cleanup()
    sys.exit()
 | software/read-sensor-python/waterFlow/waterFlowMeter.py | 4,411 |
import re
import traceback
import subprocess
from serviceDB import ServiceDB
class NeadmServiceWrapper:
_service_list_cmd = ['/opt/nedge/neadm/neadm', 'service', 'list']
# _status_cmd = ['/opt/nedge/neadm/fake-neadm-status.sh']
_service_list_header = re.compile("^.*TYPE.*NAME.*SERVERID.*STATUS.*$")
# unit_id key well be added during parsing of each line
_service_list_names = ['type', 'name', 'sid', 'status']
def __init__(self, db):
self.exit_code = 0
self.db = ServiceDB(db)
def get_exit_code(self):
return self.exit_code
def get_raw_output(self, command):
try:
output = subprocess.check_output(command, stderr=subprocess.STDOUT)
self.exit_code = 0
return output
except subprocess.CalledProcessError as ex:
self.exit_code = ex.returncode
return ex.output
        except Exception as e:
            self.exit_code = 1
            return "Failed to start {0} command. Exception: {1}".format(command, e)
def get_all_services(self):
output = self.get_raw_output(NeadmServiceWrapper._service_list_cmd)
# print(output)
result = NeadmServiceList()
# error exit code
if self.exit_code:
result.exit_code = self.exit_code
result.output = output
return result
output_array = output.split('\n')
for line in output_array:
# print(line)
if NeadmServiceWrapper._service_list_header.match(line):
continue
params = line.split()
# print(params)
# print(len(params))
if len(params) < 4:
continue
service_record = {}
for name in NeadmServiceWrapper._service_list_names:
service_record[name] = params[
NeadmServiceWrapper._service_list_names.index(name)]
# check ServiceDB for sid and unit_id already joined
# add unit_id key
db_record = self.db.find(sid=service_record['sid'],
service_name=service_record['name'])
if len(db_record) == 1:
service_record['unit_id'] = db_record[0]['unit_id']
else:
service_record['unit_id'] = ''
# print(node)
result.append(service_record)
# print(status)
return result
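    # Sketch of the table layout this parser expects (illustrative values;
    # columns follow the TYPE/NAME/SERVERID/STATUS header matched above):
    #   TYPE    NAME     SERVERID                           STATUS
    #   iscsi   cinder   3A6B...                            enabled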
def exec_cmd(self, cmd_name, cmd):
try:
print("\t{0} cmd is {1}".format(cmd_name, ' '.join(cmd)))
subprocess.check_output(cmd)
except Exception as ex:
raise Exception('in {0}\nMessage:{1}\nTrace: {2}'.format(
self.__class__.__name__, ex.message, traceback.format_exc()))
# is node included into service nodes list
def is_node_exist(self, service_name, sid):
services = self.get_all_services()
return services.is_already_in_service(service_name, sid)
# is iscsi service already created
def is_service_exist(self, service_name):
services = self.get_all_services()
return services.is_service_exist(service_name)
# create new iscsi(cinder) service by name
def create_iscsi_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'create', 'iscsi',
service_name]
if not self.is_service_exist(service_name):
self.exec_cmd('create_iscsi_service', cmd)
else:
print("create_iscsi_service: Service {} already exist!".format(
service_name))
# create new swift service by name
def create_swift_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'create', 'swift',
service_name]
if not self.is_service_exist(service_name):
self.exec_cmd('create_swift_service', cmd)
else:
print("create_swift_service: Service {} already exist!".format(
service_name))
# remove iscsi service by name
def delete_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'delete', service_name]
if self.is_service_exist(service_name):
self.exec_cmd('delete_service', cmd)
else:
print("remove_iscsi_service: {0} service does not exist".format(
service_name))
def is_service_enabled(self, service_name):
services = self.get_all_services()
return services.is_service_enabled(service_name)
    # serve command, apply swift service to cluster
def serve_service(self, service_name, cluster_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'serve', service_name,
cluster_name]
if not self.is_service_exist(service_name):
print("serve_service: Service {} does not exist".format(
service_name))
return
self.exec_cmd('serve_service', cmd)
# enable service if exist
def enable_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'enable', service_name]
if not self.is_service_exist(service_name):
print("enable_service: Service {} does not exist".format(
service_name))
return
if not self.is_service_enabled(service_name):
self.exec_cmd('enable_service', cmd)
else:
print("enable_service: Service {} already enabled".format(
service_name))
def disable_service(self, service_name):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'disable', service_name]
if not self.is_service_exist(service_name):
print("disable_service: Service {} does not exist".format(
service_name))
return
if self.is_service_enabled(service_name):
self.exec_cmd('disable_service', cmd)
else:
print("disable_service: Service {} already disabled".format(
service_name))
def add_node_to_service(self, service_name, sid, unit_id):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'add', service_name, sid]
if not self.is_node_exist(service_name, sid):
self.exec_cmd('add_node_to_service', cmd)
# add node to persistent db
# self.db.add(sid, unit_id, service_name)
else:
print("\tadd_node_to_service:"
"Node {0} already exist as service node".format(sid))
self.db.add(sid, unit_id, service_name)
def get_service_node_count(self, service_name):
services = self.get_all_services()
return len(services.get_service_nodes(service_name))
def remove_node_by_unit_id(self, unit_id):
service = self.db.find(unit_id=unit_id)
if len(service) > 0:
sid = service[0]['sid']
service_name = service[0]['service']
self.remove_node_from_service(service_name, sid, unit_id)
else:
print("Can't find service by unit_id:{}".format(unit_id))
def disable_service_by_unit_id(self, unit_id):
service = self.db.find(unit_id=unit_id)
if len(service) > 0:
service_name = service[0]['service']
print("service to disable is :{}".format(service_name))
self.disable_service(service_name)
else:
print("Can't find service by unit_id:{}".format(unit_id))
def remove_node_from_service(self, service_name, sid, unit_id):
cmd = ['/opt/nedge/neadm/neadm', 'service', 'remove', service_name,
sid]
if self.is_node_exist(service_name, sid):
self.exec_cmd('remove_node_from_service', cmd)
node_count = self.get_service_node_count(service_name)
if node_count == 0:
self.delete_service(service_name)
else:
print("\tremove_node_from_service: "
"Node {} does not exist to remove".format(sid))
# remove from persistent db
self.db.remove(sid, unit_id)
def print_services(self):
service_list = self.get_all_services()
service_list.show()
class NeadmServiceList:
def __init__(self):
# service records array
self.service_records = []
self.exit_code = 0
self.output = ""
def is_correct(self):
        return self.exit_code == 0
def get_all(self):
return self.service_records
    def get_service_nodes(self, service_name):
        # materialized as a list so callers can safely len() and index the
        # result on Python 3 as well (filter() is lazy there)
        return [s for s in self.service_records
                if s['name'] == service_name and s['sid'] != '-']
    def get_iscsi_nodes(self):
        return [s for s in self.service_records
                if s['type'] == 'iscsi' and s['sid'] != '-']
    def get_iscsi_nodes_by_service_name(self, service_name):
        return [s for s in self.service_records
                if s['type'] == 'iscsi' and s['name'] == service_name and
                s['sid'] != '-']
    def get_swift_nodes(self):
        return [s for s in self.service_records
                if s['type'] == 'swift' and s['sid'] != '-']
    def get_swift_nodes_by_service_name(self, service_name):
        return [s for s in self.service_records
                if s['type'] == 'swift' and s['name'] == service_name and
                s['sid'] != '-']
    # is node present in the whole services list
    def is_already_listed(self, sid):
        return any(s['sid'] == sid for s in self.service_records)
    # is node already part of the given service
    def is_already_in_service(self, service_name, sid):
        return any(s['sid'] == sid and s['name'] == service_name
                   for s in self.service_records)
    def is_service_exist(self, service_name):
        return any(s['name'] == service_name for s in self.service_records)
    def is_service_enabled(self, service_name):
        nodes = self.get_service_nodes(service_name)
        return len(nodes) > 0 and nodes[0]['status'] == 'enabled'
def append(self, service_record):
self.service_records.append(service_record)
    def show(self):
        # print_services() above calls this; a single format string avoids the
        # bug of concatenating a plain string to an already-formatted one
        print('TYPE\t\tNAME\t\t\tID\t\t\tSTATE\t\t\tUNIT_ID')
        for record in self.service_records:
            print("{0:<8}{1:<20}{2:<36}{3:<12}{4:<16}".format(
                record['type'],
                record['name'],
                record['sid'],
                record['status'],
                record['unit_id']))
        print("")
| nexentaedge/neadmServiceWrapper.py |
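A minimal usage sketch for the wrapper above (not from the source: the database argument, service name, SID, and unit id are illustrative, and a working /opt/nedge/neadm CLI plus a path-accepting ServiceDB are assumed):
    from neadmServiceWrapper import NeadmServiceWrapper
    wrapper = NeadmServiceWrapper("/tmp/services.db")  # hypothetical DB location
    wrapper.create_iscsi_service("cinder-sv")          # no-op if it already exists
    wrapper.add_node_to_service("cinder-sv", "SID0001", "cinder/0")
    wrapper.enable_service("cinder-sv")
    wrapper.print_services()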
#!/usr/bin/env python3
#
# Copyright (c) 2019 LG Electronics, Inc.
#
# This software contains code licensed as described in LICENSE.
#
import os
import lgsvl
import random
import time
from pathlib import Path
import json
sim = lgsvl.Simulator(os.environ.get("SIMULATOR_HOST", "127.0.0.1"), 8181)
layer_mask = 0
layer_mask |= 1 << 0 # 0 is the layer for the road (default)
if sim.current_scene == "SanFrancisco":
sim.reset()
else:
sim.load("SanFrancisco")
# if sim.current_scene == "Testbed":
# sim.reset()
# else:
# sim.load("Testbed")
spawns = sim.get_spawn()
spawns[0].position.x = 705.6
spawns[0].position.y = 10.1
spawns[0].position.z = -308.7
spawns[0].rotation.y -= 95
forward = lgsvl.utils.transform_to_forward(spawns[0])
right = lgsvl.utils.transform_to_right(spawns[0])
state = lgsvl.AgentState()
state.transform.position = spawns[0].position
state.transform.rotation = spawns[0].rotation
ego = sim.add_agent("SingleLiDAR (Autoware)", lgsvl.AgentType.EGO, state)
ego.connect_bridge(os.environ.get("BRIDGE_HOST", "127.0.0.1"), 9090)
#------- Stand vehicles -------#
# set the stand vehicles' initial positions: (x, y) = (right, forward) offsets in meters
pose_arr = [
(-3, 5),
(-3, 10),
(-3, 15),
(-3, 20),
(-5, 25),
(3, 30),
(-1, 40),
(-6, 33)
]
sv_state_arr = []
for (x, y) in pose_arr:
sv_state_arr.append(lgsvl.AgentState())
sv_state_arr[-1].transform.position = spawns[0].position + y * forward + x * right
sv_state_arr[-1].transform.rotation = spawns[0].rotation
_ = sim.add_agent("Sedan", lgsvl.AgentType.NPC, sv_state_arr[-1])
# for i in range(30):
# sv_state_arr.append(lgsvl.AgentState())
# sv_state_arr[-1].transform.position = spawns[0].position + (150 + i * 7) * forward + 3.5 * right
# sv_state_arr[-1].transform.rotation = spawns[0].rotation
# _ = sim.add_agent("Sedan", lgsvl.AgentType.NPC, sv_state_arr[-1])
# for i in range(30):
# sv_state_arr.append(lgsvl.AgentState())
# sv_state_arr[-1].transform.position = spawns[0].position + (150 + i * 7) * forward - 6 * right
# sv_state_arr[-1].transform.rotation = spawns[0].rotation
# _ = sim.add_agent("Sedan", lgsvl.AgentType.NPC, sv_state_arr[-1])
sim.run()
| autoware.ai/autoware_files/lgsvl_file/scripts/testbed_scenario/sanfrancisco.py |
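The spawn-relative placement above generalizes to any offset. A small sketch, reusing the script's sim, spawns, forward, and right variables with invented offsets, that drops one more NPC 20 m ahead and 3 m to the right of the spawn:
    npc_state = lgsvl.AgentState()
    npc_state.transform.position = spawns[0].position + 20 * forward + 3 * right
    npc_state.transform.rotation = spawns[0].rotation
    sim.add_agent("Sedan", lgsvl.AgentType.NPC, npc_state)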
"""
Space object.
Refer: https://developer.twitter.com/en/docs/twitter-api/data-dictionary/object-model/space
"""
from dataclasses import dataclass, field
from typing import List, Optional
from .base import BaseModel
@dataclass
class Space(BaseModel):
"""
A class representing the space object.
"""
id: Optional[str] = field(default=None)
state: Optional[str] = field(default=None)
created_at: Optional[str] = field(default=None, repr=False)
host_ids: Optional[List[str]] = field(default=None, repr=False)
lang: Optional[str] = field(default=None, repr=False)
is_ticketed: Optional[bool] = field(default=None, repr=False)
invited_user_ids: Optional[List[str]] = field(default=None, repr=False)
participant_count: Optional[int] = field(default=None, repr=False)
scheduled_start: Optional[str] = field(default=None, repr=False)
speaker_ids: Optional[List[str]] = field(default=None, repr=False)
started_at: Optional[str] = field(default=None, repr=False)
title: Optional[str] = field(default=None, repr=False)
updated_at: Optional[str] = field(default=None, repr=False)
| pytwitter/models/space.py |
"""Make / Download Telegram Sticker Packs without installing Third Party applications
Available Commands:
.kangsticker [Optional Emoji]
.packinfo
.getsticker"""
from telethon import events
from io import BytesIO
from PIL import Image
import asyncio
import datetime
from collections import defaultdict
import math
import os
import requests
import shutil
import zipfile
from telethon.errors.rpcerrorlist import StickersetInvalidError
from telethon.errors import MessageNotModifiedError
from telethon.tl.functions.account import UpdateNotifySettingsRequest
from telethon.tl.functions.messages import GetStickerSetRequest
from telethon.tl.types import (
DocumentAttributeFilename,
DocumentAttributeSticker,
InputMediaUploadedDocument,
InputPeerNotifySettings,
InputStickerSetID,
InputStickerSetShortName,
MessageMediaPhoto
)
from uniborg.util import admin_cmd
@borg.on(admin_cmd(pattern="kangsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
if not event.is_reply:
await event.edit("Reply to a photo to add to my personal sticker pack.")
return
reply_message = await event.get_reply_message()
sticker_emoji = "🔥"
input_str = event.pattern_match.group(1)
if input_str:
sticker_emoji = input_str
me = borg.me
userid = event.from_id
packname = f"{userid}'s @MC0917 Pack"
packshortname = f"MC_0917_{userid}" # format: Uni_Borg_userid
is_a_s = is_it_animated_sticker(reply_message)
file_ext_ns_ion = "@MC0917_Sticker.png"
file = await borg.download_file(reply_message.media)
uploaded_sticker = None
if is_a_s:
file_ext_ns_ion = "AnimatedSticker.tgs"
uploaded_sticker = await borg.upload_file(file, file_name=file_ext_ns_ion)
packname = f"{userid}'s @AnimatedStickersGroup"
packshortname = f"MC_0917_{userid}_as" # format: Uni_Borg_userid
elif not is_message_image(reply_message):
await event.edit("Invalid message type")
return
else:
with BytesIO(file) as mem_file, BytesIO() as sticker:
resize_image(mem_file, sticker)
sticker.seek(0)
uploaded_sticker = await borg.upload_file(sticker, file_name=file_ext_ns_ion)
await event.edit("Processing this sticker. Please Wait!")
async with borg.conversation("@Stickers") as bot_conv:
now = datetime.datetime.now()
dt = now + datetime.timedelta(minutes=1)
if not await stickerset_exists(bot_conv, packshortname):
await silently_send_message(bot_conv, "/cancel")
if is_a_s:
response = await silently_send_message(bot_conv, "/newanimated")
else:
response = await silently_send_message(bot_conv, "/newpack")
if "Yay!" not in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
response = await silently_send_message(bot_conv, packname)
if not response.text.startswith("Alright!"):
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
w = await bot_conv.send_file(
file=uploaded_sticker,
allow_cache=False,
force_document=True
)
response = await bot_conv.get_response()
if "Sorry" in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
await silently_send_message(bot_conv, sticker_emoji)
await silently_send_message(bot_conv, "/publish")
response = await silently_send_message(bot_conv, f"<{packname}>")
await silently_send_message(bot_conv, "/skip")
response = await silently_send_message(bot_conv, packshortname)
if response.text == "Sorry, this short name is already taken.":
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
else:
await silently_send_message(bot_conv, "/cancel")
await silently_send_message(bot_conv, "/addsticker")
await silently_send_message(bot_conv, packshortname)
await bot_conv.send_file(
file=uploaded_sticker,
allow_cache=False,
force_document=True
)
response = await bot_conv.get_response()
if "Sorry" in response.text:
await event.edit(f"**FAILED**! @Stickers replied: {response.text}")
return
await silently_send_message(bot_conv, sticker_emoji)
await silently_send_message(bot_conv, "/done")
await event.edit(f"sticker added! Your pack can be found [here](t.me/addstickers/{packshortname})")
@borg.on(admin_cmd(pattern="packinfo"))
async def _(event):
if event.fwd_from:
return
if not event.is_reply:
await event.edit("Reply to any sticker to get it's pack info.")
return
rep_msg = await event.get_reply_message()
if not rep_msg.document:
await event.edit("Reply to any sticker to get it's pack info.")
return
stickerset_attr_s = rep_msg.document.attributes
stickerset_attr = find_instance(stickerset_attr_s, DocumentAttributeSticker)
if not stickerset_attr.stickerset:
await event.edit("sticker does not belong to a pack.")
return
get_stickerset = await borg(
GetStickerSetRequest(
InputStickerSetID(
id=stickerset_attr.stickerset.id,
access_hash=stickerset_attr.stickerset.access_hash
)
)
)
pack_emojis = []
for document_sticker in get_stickerset.packs:
if document_sticker.emoticon not in pack_emojis:
pack_emojis.append(document_sticker.emoticon)
await event.edit(f"**Sticker Title:** `{get_stickerset.set.title}\n`"
f"**Sticker Short Name:** `{get_stickerset.set.short_name}`\n"
f"**Official:** `{get_stickerset.set.official}`\n"
f"**Archived:** `{get_stickerset.set.archived}`\n"
f"**Stickers In Pack:** `{len(get_stickerset.packs)}`\n"
f"**Emojis In Pack:** {' '.join(pack_emojis)}")
@borg.on(admin_cmd(pattern="getsticker ?(.*)"))
async def _(event):
if event.fwd_from:
return
input_str = event.pattern_match.group(1)
if not os.path.isdir(Config.TMP_DOWNLOAD_DIRECTORY):
os.makedirs(Config.TMP_DOWNLOAD_DIRECTORY)
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
# https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
if not reply_message.sticker:
return
sticker = reply_message.sticker
sticker_attrib = find_instance(sticker.attributes, DocumentAttributeSticker)
if not sticker_attrib.stickerset:
await event.reply("This sticker is not part of a pack")
return
is_a_s = is_it_animated_sticker(reply_message)
file_ext_ns_ion = "webp"
file_caption = "https://t.me/RoseSupportChat/33801"
if is_a_s:
file_ext_ns_ion = "tgs"
file_caption = "Forward the ZIP file to @AnimatedStickersRoBot to get lottIE JSON containing the vector information."
sticker_set = await borg(GetStickerSetRequest(sticker_attrib.stickerset))
pack_file = os.path.join(Config.TMP_DOWNLOAD_DIRECTORY, sticker_set.set.short_name, "pack.txt")
if os.path.isfile(pack_file):
os.remove(pack_file)
# Sticker emojis are retrieved as a mapping of
# <emoji>: <list of document ids that have this emoji>
# So we need to build a mapping of <document id>: <list of emoji>
# Thanks, Durov
emojis = defaultdict(str)
for pack in sticker_set.packs:
for document_id in pack.documents:
emojis[document_id] += pack.emoticon
async def download(sticker, emojis, path, file):
await borg.download_media(sticker, file=os.path.join(path, file))
with open(pack_file, "a") as f:
f.write(f"{{'image_file': '{file}','emojis':{emojis[sticker.id]}}},")
pending_tasks = [
asyncio.ensure_future(
download(document, emojis, Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name, f"{i:03d}.{file_ext_ns_ion}")
) for i, document in enumerate(sticker_set.documents)
]
await event.edit(f"Downloading {sticker_set.set.count} sticker(s) to .{Config.TMP_DOWNLOAD_DIRECTORY}{sticker_set.set.short_name}...")
num_tasks = len(pending_tasks)
while 1:
done, pending_tasks = await asyncio.wait(pending_tasks, timeout=2.5,
return_when=asyncio.FIRST_COMPLETED)
try:
await event.edit(
f"Downloaded {num_tasks - len(pending_tasks)}/{sticker_set.set.count}")
except MessageNotModifiedError:
pass
if not pending_tasks:
break
await event.edit("Downloading to my local completed")
# https://gist.github.com/udf/e4e3dbb2e831c8b580d8fddd312714f7
directory_name = Config.TMP_DOWNLOAD_DIRECTORY + sticker_set.set.short_name
zipf = zipfile.ZipFile(directory_name + ".zip", "w", zipfile.ZIP_DEFLATED)
zipdir(directory_name, zipf)
zipf.close()
await borg.send_file(
event.chat_id,
directory_name + ".zip",
caption=file_caption,
force_document=True,
allow_cache=False,
reply_to=event.message.id,
progress_callback=progress
)
        try:
            os.remove(directory_name + ".zip")
            shutil.rmtree(directory_name)  # os.remove cannot delete a directory
        except OSError:
            pass
await event.edit("task Completed")
await asyncio.sleep(3)
await event.delete()
else:
await event.edit("TODO: Not Implemented")
# Helpers
def is_it_animated_sticker(message):
try:
if message.media and message.media.document:
mime_type = message.media.document.mime_type
if "tgsticker" in mime_type:
return True
else:
return False
else:
return False
except:
return False
def is_message_image(message):
if message.media:
if isinstance(message.media, MessageMediaPhoto):
return True
if message.media.document:
if message.media.document.mime_type.split("/")[0] == "image":
return True
return False
return False
async def silently_send_message(conv, text):
await conv.send_message(text)
response = await conv.get_response()
await conv.mark_read(message=response)
return response
async def stickerset_exists(conv, setname):
try:
await borg(GetStickerSetRequest(InputStickerSetShortName(setname)))
response = await silently_send_message(conv, "/addsticker")
if response.text == "Invalid pack selected.":
await silently_send_message(conv, "/cancel")
return False
await silently_send_message(conv, "/cancel")
return True
except StickersetInvalidError:
return False
def resize_image(image, save_locaton):
""" Copyright Rhyse Simpson:
https://github.com/skittles9823/SkittBot/blob/master/tg_bot/modules/stickers.py
"""
im = Image.open(image)
maxsize = (512, 512)
    # the original `(im.width and im.height) < 512` only compared im.height;
    # both dimensions must be checked before upscaling
    if im.width < 512 and im.height < 512:
size1 = im.width
size2 = im.height
if im.width > im.height:
scale = 512 / size1
size1new = 512
size2new = size2 * scale
else:
scale = 512 / size2
size1new = size1 * scale
size2new = 512
size1new = math.floor(size1new)
size2new = math.floor(size2new)
sizenew = (size1new, size2new)
im = im.resize(sizenew)
else:
im.thumbnail(maxsize)
im.save(save_locaton, "PNG")
def progress(current, total):
logger.info("Uploaded: {} of {}\nCompleted {}".format(current, total, (current / total) * 100))
def find_instance(items, class_or_tuple):
for item in items:
if isinstance(item, class_or_tuple):
return item
return None
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
os.remove(os.path.join(root, file))
| stdplugins/stickers.py |
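The emoji bookkeeping in getsticker above inverts Telegram's <emoji> -> [document ids] packs into <document id> -> "emojis". A self-contained sketch of that inversion, with toy ids and no Telegram connection:
    from collections import defaultdict
    packs = {"🔥": [111, 222], "😀": [222]}  # emoji -> document ids
    emojis = defaultdict(str)
    for emoticon, document_ids in packs.items():
        for document_id in document_ids:
            emojis[document_id] += emoticon
    assert dict(emojis) == {111: "🔥", 222: "🔥😀"}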
#!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of unrequested blocks.
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
1. Generate one block on each node, to leave IBD.
2. Mine a new block on each tip, and deliver to each node from node's peer.
The tip should advance for node0, but node1 should skip processing due to
nMinimumChainWork.
Node1 is unused in tests 3-7:
3. Mine a block that forks from the genesis block, and deliver to test_node.
Node0 should not process this block (just accept the header), because it
is unrequested and doesn't have more or equal work to the tip.
4a,b. Send another two blocks that build on the forking block.
Node0 should process the second block but be stuck on the shorter chain,
because it's missing an intermediate block.
4c.Send 288 more blocks on the longer chain (the number of blocks ahead
we currently store).
Node0 should process all but the last block (too far ahead in height).
5. Send a duplicate of the block in #3 to Node0.
Node0 should not process the block because it is unrequested, and stay on
the shorter chain.
6. Send Node0 an inv for the height 3 block produced in #4 above.
Node0 should figure out that Node0 has the missing height 2 block and send a
getdata.
7. Send Node0 the missing block again.
Node0 should process and the tip should advance.
8. Create a fork which is invalid at a height longer than the current chain
(ie to which the node will try to reorg) but which has headers built on top
of the invalid block. Check that we get disconnected if we send more headers
on the chain the node now knows to be invalid.
9. Test Node1 is able to sync when connected to node0 (which should have sufficient
work on its chain).
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import time
from test_framework.blocktools import create_block, create_coinbase, create_transaction
class AcceptBlockTest(BitcoinTestFramework):
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "uexd"),
help="uexd binary to test")
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 2
self.extra_args = [[], ["-minimumchainwork=0x10"]]
def setup_network(self):
        # Node0 will be used to test behavior of processing unrequested blocks
        # from peers which are not whitelisted, while Node1 will be used to
        # test the interaction with nMinimumChainWork.
        self.setup_nodes()
def run_test(self):
# Setup the p2p connections and start up the network thread.
        # test_node connects to node0 (no chain-work floor)
        test_node = self.nodes[0].add_p2p_connection(P2PInterface())
        # min_work_node connects to node1 (which has -minimumchainwork set)
        min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
network_thread_start()
# Test logic begins here
test_node.wait_for_verack()
min_work_node.wait_for_verack()
# 1. Have nodes mine a block (leave IBD)
[ n.generate(1) for n in self.nodes ]
tips = [ int("0x" + n.getbestblockhash(), 0) for n in self.nodes ]
# 2. Send one block that builds on each tip.
# This should be accepted by node0
blocks_h2 = [] # the height 2 blocks on each node's chain
block_time = int(time.time()) + 1
for i in range(2):
blocks_h2.append(create_block(tips[i], create_coinbase(2), block_time))
blocks_h2[i].solve()
block_time += 1
test_node.send_message(msg_block(blocks_h2[0]))
min_work_node.send_message(msg_block(blocks_h2[1]))
for x in [test_node, min_work_node]:
x.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
assert_equal(self.nodes[1].getblockcount(), 1)
self.log.info("First height 2 block accepted by node0; correctly rejected by node1")
# 3. Send another block that builds on genesis.
block_h1f = create_block(int("0x" + self.nodes[0].getblockhash(0), 0), create_coinbase(1), block_time)
block_time += 1
block_h1f.solve()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h1f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_h1f.hash)
# 4. Send another two block that build on the fork.
block_h2f = create_block(block_h1f.sha256, create_coinbase(2), block_time)
block_time += 1
block_h2f.solve()
test_node.send_message(msg_block(block_h2f))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h2f.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
# But this block should be accepted by node since it has equal work.
self.nodes[0].getblock(block_h2f.hash)
self.log.info("Second height 2 block accepted, but not reorg'ed to")
# 4b. Now send another block that builds on the forking chain.
block_h3 = create_block(block_h2f.sha256, create_coinbase(3), block_h2f.nTime+1)
block_h3.solve()
test_node.send_message(msg_block(block_h3))
test_node.sync_with_ping()
# Since the earlier block was not processed by node, the new block
# can't be fully validated.
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_h3.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
self.nodes[0].getblock(block_h3.hash)
# But this block should be accepted by node since it has more work.
self.nodes[0].getblock(block_h3.hash)
self.log.info("Unrequested more-work block accepted")
        # 4c. Now mine 288 more blocks and deliver; all should be processed but
        # the last (height-too-high) on node (as long as it's not missing any headers)
tip = block_h3
all_blocks = []
for i in range(288):
next_block = create_block(tip.sha256, create_coinbase(i + 4), tip.nTime+1)
next_block.solve()
all_blocks.append(next_block)
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(all_blocks[0]))
test_node.send_message(headers_message)
test_node.send_message(msg_block(all_blocks[1]))
test_node.sync_with_ping()
self.nodes[0].getblock(all_blocks[1].hash)
# Now send the blocks in all_blocks
for i in range(288):
test_node.send_message(msg_block(all_blocks[i]))
test_node.sync_with_ping()
# Blocks 1-287 should be accepted, block 288 should be ignored because it's too far ahead
for x in all_blocks[:-1]:
self.nodes[0].getblock(x.hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[-1].hash)
# 5. Test handling of unrequested block on the node that didn't process
# Should still not be processed (even though it has a child that has more
# work).
# The node should have requested the blocks at some point, so
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
self.nodes[1].disconnect_p2ps()
network_thread_join()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 2)
self.log.info("Unrequested block that would complete more-work chain was ignored")
# 6. Try to get node to request the missing block.
# Poke the node with an inv for block at height 3 and see if that
# triggers a getdata on block 2 (it should if block 2 is missing).
with mininode_lock:
# Clear state so we can check the getdata request
test_node.last_message.pop("getdata", None)
test_node.send_message(msg_inv([CInv(2, block_h3.sha256)]))
test_node.sync_with_ping()
with mininode_lock:
getdata = test_node.last_message["getdata"]
# Check that the getdata includes the right block
assert_equal(getdata.inv[0].hash, block_h1f.sha256)
self.log.info("Inv at tip triggered getdata for unprocessed block")
# 7. Send the missing block for the third time (now it is requested)
test_node.send_message(msg_block(block_h1f))
test_node.sync_with_ping()
assert_equal(self.nodes[0].getblockcount(), 290)
self.nodes[0].getblock(all_blocks[286].hash)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, all_blocks[287].hash)
self.log.info("Successfully reorged to longer chain from non-whitelisted peer")
# 8. Create a chain which is invalid at a height longer than the
# current chain, but which has more blocks on top of that
block_289f = create_block(all_blocks[284].sha256, create_coinbase(289), all_blocks[284].nTime+1)
block_289f.solve()
block_290f = create_block(block_289f.sha256, create_coinbase(290), block_289f.nTime+1)
block_290f.solve()
block_291 = create_block(block_290f.sha256, create_coinbase(291), block_290f.nTime+1)
# block_291 spends a coinbase below maturity!
block_291.vtx.append(create_transaction(block_290f.vtx[0], 0, b"42", 1))
block_291.hashMerkleRoot = block_291.calc_merkle_root()
block_291.solve()
block_292 = create_block(block_291.sha256, create_coinbase(292), block_291.nTime+1)
block_292.solve()
# Now send all the headers on the chain and enough blocks to trigger reorg
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_289f))
headers_message.headers.append(CBlockHeader(block_290f))
headers_message.headers.append(CBlockHeader(block_291))
headers_message.headers.append(CBlockHeader(block_292))
test_node.send_message(headers_message)
test_node.sync_with_ping()
tip_entry_found = False
for x in self.nodes[0].getchaintips():
if x['hash'] == block_292.hash:
assert_equal(x['status'], "headers-only")
tip_entry_found = True
assert(tip_entry_found)
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, block_292.hash)
test_node.send_message(msg_block(block_289f))
test_node.send_message(msg_block(block_290f))
test_node.sync_with_ping()
self.nodes[0].getblock(block_289f.hash)
self.nodes[0].getblock(block_290f.hash)
test_node.send_message(msg_block(block_291))
# At this point we've sent an obviously-bogus block, wait for full processing
# without assuming whether we will be disconnected or not
try:
# Only wait a short while so the test doesn't take forever if we do get
# disconnected
test_node.sync_with_ping(timeout=1)
except AssertionError:
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
test_node = self.nodes[0].add_p2p_connection(P2PInterface())
network_thread_start()
test_node.wait_for_verack()
# We should have failed reorg and switched back to 290 (but have block 291)
assert_equal(self.nodes[0].getblockcount(), 290)
assert_equal(self.nodes[0].getbestblockhash(), all_blocks[286].hash)
assert_equal(self.nodes[0].getblock(block_291.hash)["confirmations"], -1)
# Now send a new header on the invalid chain, indicating we're forked off, and expect to get disconnected
block_293 = create_block(block_292.sha256, create_coinbase(293), block_292.nTime+1)
block_293.solve()
headers_message = msg_headers()
headers_message.headers.append(CBlockHeader(block_293))
test_node.send_message(headers_message)
test_node.wait_for_disconnect()
# 9. Connect node1 to node0 and ensure it is able to sync
connect_nodes(self.nodes[0], 1)
sync_blocks([self.nodes[0], self.nodes[1]])
self.log.info("Successfully synced nodes 1 and 0")
if __name__ == '__main__':
AcceptBlockTest().main()
| test/functional/p2p_unrequested_blocks.py |
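The test builds its forks by hand from the same three helpers throughout. A condensed sketch of that chaining pattern (the node handle, heights, and chain length are illustrative):
    tip = int("0x" + node.getbestblockhash(), 0)  # assumes a running test node handle
    block_time = int(time.time()) + 1
    chain = []
    for height in range(2, 5):
        block = create_block(tip, create_coinbase(height), block_time)
        block.solve()                             # grind the proof-of-work nonce
        chain.append(block)
        tip = block.sha256
        block_time += 1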
import networkx
import random
def regularize_graph(graph, d):
    """Re-wire edges of a directed graph in place until every node has
    in-degree and out-degree exactly d. Assumes the total missing
    in-degree equals the total missing out-degree."""
    regularized = True
for node_id in list(graph.nodes()):
if graph.in_degree(node_id)!=d or graph.out_degree(node_id)!=d:
regularized = False
break
while not regularized:
lost_in_degree_ids = []
full_in_degree_ids = []
for node_id in list(graph.nodes()):
if graph.in_degree(node_id)<d:
lost_in_degree_ids.append(node_id)
elif graph.in_degree(node_id)==d:
full_in_degree_ids.append(node_id)
else:
raise Exception('In degree too large')
lost_in_degree_ids = random.sample(lost_in_degree_ids, len(lost_in_degree_ids))
lost_outdegree_ids = []
full_outdegree_ids = []
for node_id in list(graph.nodes()):
if graph.out_degree(node_id)<d:
lost_outdegree_ids.append(node_id)
elif graph.out_degree(node_id)==d:
full_outdegree_ids.append(node_id)
else:
raise Exception('Out degree too large')
lost_outdegree_ids = random.sample(lost_outdegree_ids, len(lost_outdegree_ids))
if len(lost_in_degree_ids)!=len(lost_outdegree_ids):
raise Exception('Number of missing in and out degrees do not match')
for i in range(len(lost_in_degree_ids)):
full_in_degree_ids = random.sample(full_in_degree_ids, len(full_in_degree_ids))
full_outdegree_ids = random.sample(full_outdegree_ids, len(full_outdegree_ids))
lost_in_degree_id = lost_in_degree_ids[i]
lost_outdegree_id = lost_outdegree_ids[i]
# Find appropriate (full_outdegree_id, full_in_degree_id) pair
full_in_degree_id = -1
full_outdegree_id = -1
for fod_id in full_outdegree_ids:
if fod_id!=lost_in_degree_id:
suc_ids = list(graph.successors(fod_id))
for suc_id in suc_ids:
if (suc_id in full_in_degree_ids) and (suc_id!=lost_outdegree_id):
full_in_degree_id = suc_id
full_outdegree_id = fod_id
break
if full_in_degree_id!=-1 and full_outdegree_id!=-1:
break
# Patch
graph.remove_edge(full_outdegree_id, full_in_degree_id)
graph.add_edge(full_outdegree_id, lost_in_degree_id)
graph.add_edge(lost_outdegree_id, full_in_degree_id)
regularized = True
for node_id in list(graph.nodes()):
if graph.in_degree(node_id)!=d or graph.out_degree(node_id)!=d:
regularized = False
break
return graph
| exact-2-regular-k/regularize_graph.py |
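A minimal worked example for the patcher above, with d = 1: node 2 lacks an incoming edge and node 3 an outgoing one, so a single remove/re-wire pass makes the graph 1-regular (the module name is taken from the file path):
    import networkx as nx
    from regularize_graph import regularize_graph
    g = nx.DiGraph()
    g.add_edges_from([(0, 1), (1, 0), (2, 3)])
    g = regularize_graph(g, 1)
    assert all(g.in_degree(n) == 1 and g.out_degree(n) == 1 for n in g.nodes())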
#!/usr/bin/env python
# Brief: This node subscribes to /tracked_humans and publishes the predicted goal to humans based on their trajectory
# Author: Phani Teja Singamaneni
import numpy as np
import rospy
import tf
from geometry_msgs.msg import Point, PoseStamped
from human_msgs.msg import TrackedHumans, TrackedHuman, TrackedSegmentType
from human_path_prediction.msg import PredictedGoal
from scipy.stats import multivariate_normal
from std_srvs.srv import SetBool, Trigger, TriggerResponse
EPS = 1e-12
class PredictGoal(object):
def __init__(self, human_num=1):
self.human_num = human_num
# laas_adream
self.goals_x = [1.5, 7.0, 9.0, 10.5, 1.5, 10.3, 8.5]
self.goals_y = [2.0, 8.0, 12.5, 15.0, 15.0, 1.5, -4.5]
self.goal_num = 7
# maze
# self.goals_x = [1.5,1.5,1.5,1.5,1.5,7.5,25,42,42,41.5,42,37,22,15.5,28.5,37,23.5,10.5,15.5,31.5,20,25.5,7]
# self.goals_y = [45,15,30,60,87,87,81.5,81.5,66,41.5,22,3,3,12.5,12.5,20.5,21.5,28.5,39.5,47,53,59,59]
self.predicted_goal = PoseStamped()
self.last_idx = 0
self.changed = False
self.current_poses = [[] for i in range(self.human_num)]
self.prev_poses = [[] for i in range(self.human_num)]
self.mv_nd = multivariate_normal(mean=0,cov=0.1)
self.theta_phi = [[0]*self.goal_num for i in range(self.human_num)]
self.window_size = 10
self.probability_goal = [np.array([1.0/self.goal_num]*self.goal_num) for i in range(self.human_num)]
self.probability_goal_window = [np.array([[1.0/self.goal_num]*self.goal_num]*self.window_size) for i in range(self.human_num)]
self.done = False
self.itr = 0
NODE_NAME = "human_goal_predict"
rospy.init_node(NODE_NAME)
self.humans_sub_ = rospy.Subscriber("/tracked_humans",TrackedHumans,self.tracked_humansCB)
self.goal_pub_ = rospy.Publisher(NODE_NAME+"/predicted_goal",PredictedGoal, queue_size=2)
self.goal_srv_ = rospy.Service("goal_changed", Trigger, self.goal_changed)
rospy.spin()
def tracked_humansCB(self,msg):
self.prev_poses = self.current_poses
self.current_poses = [[] for i in range(self.human_num)]
for human in msg.humans:
for segment in human.segments:
if segment.type == TrackedSegmentType.TORSO:
self.current_poses[human.track_id-1].append(segment.pose.pose)
if not self.done:
self.prev_poses = self.current_poses
for i in range(0,len(self.current_poses[0])):
diff = np.linalg.norm([self.current_poses[0][i].position.x - self.prev_poses[0][i].position.x, self.current_poses[0][i].position.y - self.prev_poses[0][i].position.y])
if diff > EPS or not self.done:
dist = []
for j in range(0,len(self.goals_x)):
vec1 = np.array([self.goals_x[j],self.goals_y[j],0.0]) - np.array([self.current_poses[0][i].position.x,self.current_poses[0][i].position.y,0.0]) #Vector from current position to a goal
rotation = (self.current_poses[0][i].orientation.x,self.current_poses[0][i].orientation.y,self.current_poses[0][i].orientation.z,self.current_poses[0][i].orientation.w)
roll,pitch,yaw = tf.transformations.euler_from_quaternion(rotation)
unit_vec = np.array([np.cos(yaw), np.sin(yaw),0.0])
self.theta_phi[i][j] = (np.arccos(np.dot(vec1,unit_vec)/np.linalg.norm(vec1)))
dist.append(np.linalg.norm([self.current_poses[0][i].position.x - self.goals_x[j],self.current_poses[0][i].position.y - self.goals_y[j]]))
                self.probability_goal_window[i][self.itr] = self.mv_nd.pdf(np.array(self.theta_phi[i]))
self.probability_goal[i] = np.array([1.0]*self.goal_num)
for k in range(0,len(self.probability_goal_window[i])):
gf = np.exp((k-self.window_size)/5)
                    self.probability_goal[i] = np.power(self.probability_goal_window[i][k], gf) * np.array(self.probability_goal[i])  # recency-weighted product over the window
for ln in range(0,len(self.goals_x)):
                    self.probability_goal[i][ln] = (1 / dist[ln]) * self.probability_goal[i][ln]
self.probability_goal[i] = (self.probability_goal[i]-np.min(self.probability_goal[i]))/(np.max(self.probability_goal[i])-np.min(self.probability_goal[i]))
self.itr = self.itr + 1
if self.itr == self.window_size:
self.itr = 0
self.done = True
self.predict_goal()
def predict_goal(self):
idx = 0
max_prob = 0.0
p_goal = PredictedGoal()
for i in range(0,len(self.current_poses[0])):
for j in range(0,len(self.goals_x)):
if(max_prob<self.probability_goal[i][j]):
idx = j
max_prob = self.probability_goal[i][j]
self.predicted_goal.header.stamp = rospy.Time.now()
self.predicted_goal.header.frame_id = 'map'
self.predicted_goal.pose.position.x = self.goals_x[idx]
self.predicted_goal.pose.position.y = self.goals_y[idx]
self.predicted_goal.pose.position.z = 0.0
self.predicted_goal.pose.orientation = self.current_poses[0][i].orientation
if self.last_idx != idx:
p_goal.changed = True
self.changed = True
self.last_idx = idx
p_goal.goal = self.predicted_goal
self.goal_pub_.publish(p_goal)
def goal_changed(self,req):
if self.changed:
self.changed = False
return TriggerResponse(True,"Goal Changed")
return TriggerResponse(False, "Goal not changed")
if __name__ == '__main__':
predict_srv = PredictGoal(60)
| human_path_prediction/scripts/predict_goal.py |
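The per-goal scoring above reduces to: take the angle between the human's heading and each goal direction, score it under a zero-mean normal, and divide by distance. A stripped-down numpy sketch with invented positions:
    import numpy as np
    from scipy.stats import multivariate_normal
    nd = multivariate_normal(mean=0, cov=0.1)
    heading = np.array([1.0, 0.0])               # yaw = 0, facing +x
    goals = np.array([[5.0, 0.0], [0.0, 5.0]])   # straight ahead vs. off to the side
    vecs = goals - np.array([0.0, 0.0])          # vectors from the current position
    dists = np.linalg.norm(vecs, axis=1)
    scores = nd.pdf(np.arccos(vecs @ heading / dists)) / dists
    print(scores.argmax())                       # 0: the goal straight ahead wins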
# Copyright (c) 2016 Google Inc. (under http://www.apache.org/licenses/LICENSE-2.0)
def f1(x):
return 1
def f1(x):
return 'foo'
def f2(x):
pass
def f2(x,y):
pass
def f3(x):
return 1+x
def f3(x):
return 'asd'+x
| pytype/tools/merge_pyi/test_data/redefine.py |
# -*- coding: utf-8 -*-
"""Tests for various magic functions.
Needs to be run by nose (to make ipython session available).
"""
import io
import os
import re
import sys
import warnings
from unittest import TestCase
from importlib import invalidate_caches
from io import StringIO
import nose.tools as nt
import shlex
from IPython import get_ipython
from IPython.core import magic
from IPython.core.error import UsageError
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic,
register_line_magic, register_cell_magic)
from IPython.core.magics import execution, script, code, logging, osm
from IPython.testing import decorators as dec
from IPython.testing import tools as tt
from IPython.utils.io import capture_output
from IPython.utils.tempdir import (TemporaryDirectory,
TemporaryWorkingDirectory)
from IPython.utils.process import find_cmd
_ip = get_ipython()
@magic.magics_class
class DummyMagics(magic.Magics): pass
def test_extract_code_ranges():
instr = "1 3 5-6 7-9 10:15 17: :10 10- -13 :"
expected = [(0, 1),
(2, 3),
(4, 6),
(6, 9),
(9, 14),
(16, None),
(None, 9),
(9, None),
(None, 13),
(None, None)]
actual = list(code.extract_code_ranges(instr))
nt.assert_equal(actual, expected)
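# Note the two grammars exercised above: hyphen ranges are 1-indexed and
# inclusive ("5-6" -> (4, 6), i.e. lines 5 and 6), while colon ranges behave
# like Python slices ("10:15" -> (9, 14) stops short of line 15).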
def test_extract_symbols():
source = """import foo\na = 10\ndef b():\n return 42\n\n\nclass A: pass\n\n\n"""
symbols_args = ["a", "b", "A", "A,b", "A,a", "z"]
expected = [([], ['a']),
(["def b():\n return 42\n"], []),
(["class A: pass\n"], []),
(["class A: pass\n", "def b():\n return 42\n"], []),
(["class A: pass\n"], ['a']),
([], ['z'])]
for symbols, exp in zip(symbols_args, expected):
nt.assert_equal(code.extract_symbols(source, symbols), exp)
def test_extract_symbols_raises_exception_with_non_python_code():
source = ("=begin A Ruby program :)=end\n"
"def hello\n"
"puts 'Hello world'\n"
"end")
with nt.assert_raises(SyntaxError):
code.extract_symbols(source, "hello")
def test_magic_not_found():
# magic not found raises UsageError
with nt.assert_raises(UsageError):
_ip.magic('doesntexist')
# ensure result isn't success when a magic isn't found
result = _ip.run_cell('%doesntexist')
assert isinstance(result.error_in_exec, UsageError)
def test_cell_magic_not_found():
# magic not found raises UsageError
with nt.assert_raises(UsageError):
_ip.run_cell_magic('doesntexist', 'line', 'cell')
# ensure result isn't success when a magic isn't found
result = _ip.run_cell('%%doesntexist')
assert isinstance(result.error_in_exec, UsageError)
def test_magic_error_status():
def fail(shell):
1/0
_ip.register_magic_function(fail)
result = _ip.run_cell('%fail')
assert isinstance(result.error_in_exec, ZeroDivisionError)
def test_config():
""" test that config magic does not raise
can happen if Configurable init is moved too early into
Magics.__init__ as then a Config object will be registered as a
magic.
"""
## should not raise.
_ip.magic('config')
def test_config_available_configs():
""" test that config magic prints available configs in unique and
sorted order. """
with capture_output() as captured:
_ip.magic('config')
stdout = captured.stdout
config_classes = stdout.strip().split('\n')[1:]
nt.assert_list_equal(config_classes, sorted(set(config_classes)))
def test_config_print_class():
""" test that config with a classname prints the class's options. """
with capture_output() as captured:
_ip.magic('config TerminalInteractiveShell')
stdout = captured.stdout
if not re.match("TerminalInteractiveShell.* options", stdout.splitlines()[0]):
print(stdout)
raise AssertionError("1st line of stdout not like "
"'TerminalInteractiveShell.* options'")
def test_rehashx():
# clear up everything
_ip.alias_manager.clear_aliases()
del _ip.db['syscmdlist']
_ip.magic('rehashx')
# Practically ALL ipython development systems will have more than 10 aliases
nt.assert_true(len(_ip.alias_manager.aliases) > 10)
for name, cmd in _ip.alias_manager.aliases:
# we must strip dots from alias names
nt.assert_not_in('.', name)
# rehashx must fill up syscmdlist
scoms = _ip.db['syscmdlist']
nt.assert_true(len(scoms) > 10)
def test_magic_parse_options():
"""Test that we don't mangle paths when parsing magic options."""
ip = get_ipython()
path = 'c:\\x'
m = DummyMagics(ip)
opts = m.parse_options('-f %s' % path,'f:')[0]
# argv splitting is os-dependent
if os.name == 'posix':
expected = 'c:x'
else:
expected = path
nt.assert_equal(opts['f'], expected)
def test_magic_parse_long_options():
"""Magic.parse_options can handle --foo=bar long options"""
ip = get_ipython()
m = DummyMagics(ip)
opts, _ = m.parse_options('--foo --bar=bubble', 'a', 'foo', 'bar=')
nt.assert_in('foo', opts)
nt.assert_in('bar', opts)
nt.assert_equal(opts['bar'], "bubble")
@dec.skip_without('sqlite3')
def doctest_hist_f():
"""Test %hist -f with temporary filename.
In [9]: import tempfile
In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
In [11]: %hist -nl -f $tfile 3
In [13]: import os; os.unlink(tfile)
"""
@dec.skip_without('sqlite3')
def doctest_hist_r():
"""Test %hist -r
XXX - This test is not recording the output correctly. For some reason, in
testing mode the raw history isn't getting populated. No idea why.
Disabling the output checking for now, though at least we do run it.
In [1]: 'hist' in _ip.lsmagic()
Out[1]: True
In [2]: x=1
In [3]: %hist -rl 2
x=1 # random
%hist -r 2
"""
@dec.skip_without('sqlite3')
def doctest_hist_op():
"""Test %hist -op
In [1]: class b(float):
...: pass
...:
In [2]: class s(object):
...: def __str__(self):
...: return 's'
...:
In [3]:
In [4]: class r(b):
...: def __repr__(self):
...: return 'r'
...:
In [5]: class sr(s,r): pass
...:
In [6]:
In [7]: bb=b()
In [8]: ss=s()
In [9]: rr=r()
In [10]: ssrr=sr()
In [11]: 4.5
Out[11]: 4.5
In [12]: str(ss)
Out[12]: 's'
In [13]:
In [14]: %hist -op
>>> class b:
... pass
...
>>> class s(b):
... def __str__(self):
... return 's'
...
>>>
>>> class r(b):
... def __repr__(self):
... return 'r'
...
>>> class sr(s,r): pass
>>>
>>> bb=b()
>>> ss=s()
>>> rr=r()
>>> ssrr=sr()
>>> 4.5
4.5
>>> str(ss)
's'
>>>
"""
def test_hist_pof():
ip = get_ipython()
ip.run_cell(u"1+2", store_history=True)
#raise Exception(ip.history_manager.session_number)
#raise Exception(list(ip.history_manager._get_range_session()))
with TemporaryDirectory() as td:
tf = os.path.join(td, 'hist.py')
ip.run_line_magic('history', '-pof %s' % tf)
assert os.path.isfile(tf)
@dec.skip_without('sqlite3')
def test_macro():
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = ["a=1", "def b():\n return a**2", "print(a,b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
ip.magic("macro test 1-3")
nt.assert_equal(ip.user_ns["test"].value, "\n".join(cmds)+"\n")
# List macros
nt.assert_in("test", ip.magic("macro"))
@dec.skip_without('sqlite3')
def test_macro_run():
"""Test that we can run a multi-line macro successfully."""
ip = get_ipython()
ip.history_manager.reset()
cmds = ["a=10", "a+=1", "print(a)", "%macro test 2-3"]
for cmd in cmds:
ip.run_cell(cmd, store_history=True)
nt.assert_equal(ip.user_ns["test"].value, "a+=1\nprint(a)\n")
with tt.AssertPrints("12"):
ip.run_cell("test")
with tt.AssertPrints("13"):
ip.run_cell("test")
def test_magic_magic():
"""Test %magic"""
ip = get_ipython()
with capture_output() as captured:
ip.magic("magic")
stdout = captured.stdout
nt.assert_in('%magic', stdout)
nt.assert_in('IPython', stdout)
nt.assert_in('Available', stdout)
@dec.skipif_not_numpy
def test_numpy_reset_array_undec():
"Test '%reset array' functionality"
_ip.ex('import numpy as np')
_ip.ex('a = np.empty(2)')
nt.assert_in('a', _ip.user_ns)
_ip.magic('reset -f array')
nt.assert_not_in('a', _ip.user_ns)
def test_reset_out():
"Test '%reset out' magic"
_ip.run_cell("parrot = 'dead'", store_history=True)
# test '%reset -f out', make an Out prompt
_ip.run_cell("parrot", store_history=True)
nt.assert_true('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
_ip.magic('reset -f out')
nt.assert_false('dead' in [_ip.user_ns[x] for x in ('_','__','___')])
nt.assert_equal(len(_ip.user_ns['Out']), 0)
def test_reset_in():
"Test '%reset in' magic"
# test '%reset -f in'
_ip.run_cell("parrot", store_history=True)
nt.assert_true('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
_ip.magic('%reset -f in')
nt.assert_false('parrot' in [_ip.user_ns[x] for x in ('_i','_ii','_iii')])
nt.assert_equal(len(set(_ip.user_ns['In'])), 1)
def test_reset_dhist():
"Test '%reset dhist' magic"
_ip.run_cell("tmp = [d for d in _dh]") # copy before clearing
_ip.magic('cd ' + os.path.dirname(nt.__file__))
_ip.magic('cd -')
nt.assert_true(len(_ip.user_ns['_dh']) > 0)
_ip.magic('reset -f dhist')
nt.assert_equal(len(_ip.user_ns['_dh']), 0)
_ip.run_cell("_dh = [d for d in tmp]") #restore
def test_reset_in_length():
"Test that '%reset in' preserves In[] length"
_ip.run_cell("print 'foo'")
_ip.run_cell("reset -f in")
nt.assert_equal(len(_ip.user_ns['In']), _ip.displayhook.prompt_count+1)
def test_tb_syntaxerror():
"""test %tb after a SyntaxError"""
ip = get_ipython()
ip.run_cell("for")
# trap and validate stdout
save_stdout = sys.stdout
try:
sys.stdout = StringIO()
ip.run_cell("%tb")
out = sys.stdout.getvalue()
finally:
sys.stdout = save_stdout
# trim output, and only check the last line
last_line = out.rstrip().splitlines()[-1].strip()
nt.assert_equal(last_line, "SyntaxError: invalid syntax")
def test_time():
ip = get_ipython()
with tt.AssertPrints("Wall time: "):
ip.run_cell("%time None")
ip.run_cell("def f(kmjy):\n"
" %time print (2*kmjy)")
with tt.AssertPrints("Wall time: "):
with tt.AssertPrints("hihi", suppress=False):
ip.run_cell("f('hi')")
@dec.skip_win32
def test_time2():
ip = get_ipython()
with tt.AssertPrints("CPU times: user "):
ip.run_cell("%time None")
def test_time3():
"""Erroneous magic function calls, issue gh-3334"""
ip = get_ipython()
ip.user_ns.pop('run', None)
with tt.AssertNotPrints("not found", channel='stderr'):
ip.run_cell("%%time\n"
"run = 0\n"
"run += 1")
def test_doctest_mode():
"Toggle doctest_mode twice, it should be a no-op and run without error"
_ip.magic('doctest_mode')
_ip.magic('doctest_mode')
def test_parse_options():
"""Tests for basic options parsing in magics."""
# These are only the most minimal of tests, more should be added later. At
# the very least we check that basic text/unicode calls work OK.
m = DummyMagics(_ip)
nt.assert_equal(m.parse_options('foo', '')[1], 'foo')
nt.assert_equal(m.parse_options(u'foo', '')[1], u'foo')
def test_dirops():
"""Test various directory handling operations."""
# curpath = lambda :os.path.splitdrive(os.getcwd())[1].replace('\\','/')
curpath = os.getcwd
startdir = os.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
_ip.magic('cd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('cd -')
nt.assert_equal(curpath(), startdir)
_ip.magic('pushd "%s"' % ipdir)
nt.assert_equal(curpath(), ipdir)
_ip.magic('popd')
nt.assert_equal(curpath(), startdir)
finally:
os.chdir(startdir)
def test_cd_force_quiet():
"""Test OSMagics.cd_force_quiet option"""
_ip.config.OSMagics.cd_force_quiet = True
osmagics = osm.OSMagics(shell=_ip)
startdir = os.getcwd()
ipdir = os.path.realpath(_ip.ipython_dir)
try:
with tt.AssertNotPrints(ipdir):
osmagics.cd('"%s"' % ipdir)
with tt.AssertNotPrints(startdir):
osmagics.cd('-')
finally:
os.chdir(startdir)
def test_xmode():
# Calling xmode three times should be a no-op
xmode = _ip.InteractiveTB.mode
for i in range(4):
_ip.magic("xmode")
nt.assert_equal(_ip.InteractiveTB.mode, xmode)
def test_reset_hard():
monitor = []
class A(object):
def __del__(self):
monitor.append(1)
def __repr__(self):
return "<A instance>"
_ip.user_ns["a"] = A()
_ip.run_cell("a")
nt.assert_equal(monitor, [])
_ip.magic("reset -f")
nt.assert_equal(monitor, [1])
class TestXdel(tt.TempFileMixin):
def test_xdel(self):
"""Test that references from %run are cleared by xdel."""
src = ("class A(object):\n"
" monitor = []\n"
" def __del__(self):\n"
" self.monitor.append(1)\n"
"a = A()\n")
self.mktmp(src)
# %run creates some hidden references...
_ip.magic("run %s" % self.fname)
# ... as does the displayhook.
_ip.run_cell("a")
monitor = _ip.user_ns["A"].monitor
nt.assert_equal(monitor, [])
_ip.magic("xdel a")
# Check that a's __del__ method has been called.
nt.assert_equal(monitor, [1])
def doctest_who():
"""doctest for %who
In [1]: %reset -f
In [2]: alpha = 123
In [3]: beta = 'beta'
In [4]: %who int
alpha
In [5]: %who str
beta
In [6]: %whos
Variable Type Data/Info
----------------------------
alpha int 123
beta str beta
In [7]: %who_ls
Out[7]: ['alpha', 'beta']
"""
def test_whos():
"""Check that whos is protected against objects where repr() fails."""
class A(object):
def __repr__(self):
raise Exception()
_ip.user_ns['a'] = A()
_ip.magic("whos")
def doctest_precision():
"""doctest for %precision
In [1]: f = get_ipython().display_formatter.formatters['text/plain']
In [2]: %precision 5
Out[2]: '%.5f'
In [3]: f.float_format
Out[3]: '%.5f'
In [4]: %precision %e
Out[4]: '%e'
In [5]: f(3.1415927)
Out[5]: '3.141593e+00'
"""
def test_psearch():
with tt.AssertPrints("dict.fromkeys"):
_ip.run_cell("dict.fr*?")
def test_timeit_shlex():
"""test shlex issues with timeit (#1109)"""
_ip.ex("def f(*a,**kw): pass")
_ip.magic('timeit -n1 "this is a bug".count(" ")')
_ip.magic('timeit -r1 -n1 f(" ", 1)')
_ip.magic('timeit -r1 -n1 f(" ", 1, " ", 2, " ")')
_ip.magic('timeit -r1 -n1 ("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b")')
_ip.magic('timeit -r1 -n1 f("a " + "b ")')
def test_timeit_special_syntax():
"Test %%timeit with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('timeit', '-n1 -r1 %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('timeit', '-n1 -r1', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
def test_timeit_return():
"""
test whether timeit -o return object
"""
res = _ip.run_line_magic('timeit','-n10 -r10 -o 1')
assert(res is not None)
def test_timeit_quiet():
"""
test quiet option of timeit magic
"""
with tt.AssertNotPrints("loops"):
_ip.run_cell("%timeit -n1 -r1 -q 1")
def test_timeit_return_quiet():
with tt.AssertNotPrints("loops"):
res = _ip.run_line_magic('timeit', '-n1 -r1 -q -o 1')
assert (res is not None)
def test_timeit_invalid_return():
with nt.assert_raises_regex(SyntaxError, "outside function"):
_ip.run_line_magic('timeit', 'return')
@dec.skipif(execution.profile is None)
def test_prun_special_syntax():
"Test %%prun with IPython special syntax"
@register_line_magic
def lmagic(line):
ip = get_ipython()
ip.user_ns['lmagic_out'] = line
# line mode test
_ip.run_line_magic('prun', '-q %lmagic my line')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line')
# cell mode test
_ip.run_cell_magic('prun', '-q', '%lmagic my line2')
nt.assert_equal(_ip.user_ns['lmagic_out'], 'my line2')
@dec.skipif(execution.profile is None)
def test_prun_quotes():
"Test that prun does not clobber string escapes (GH #1302)"
_ip.magic(r"prun -q x = '\t'")
nt.assert_equal(_ip.user_ns['x'], '\t')
def test_extension():
# Debugging information for failures of this test
print('sys.path:')
for p in sys.path:
print(' ', p)
print('CWD', os.getcwd())
nt.assert_raises(ImportError, _ip.magic, "load_ext daft_extension")
daft_path = os.path.join(os.path.dirname(__file__), "daft_extension")
sys.path.insert(0, daft_path)
try:
_ip.user_ns.pop('arq', None)
invalidate_caches() # Clear import caches
_ip.magic("load_ext daft_extension")
nt.assert_equal(_ip.user_ns['arq'], 185)
_ip.magic("unload_ext daft_extension")
assert 'arq' not in _ip.user_ns
finally:
sys.path.remove(daft_path)
def test_notebook_export_json():
_ip = get_ipython()
_ip.history_manager.reset() # Clear any existing history.
cmds = [u"a=1", u"def b():\n return a**2", u"print('noël, été', b())"]
for i, cmd in enumerate(cmds, start=1):
_ip.history_manager.store_inputs(i, cmd)
with TemporaryDirectory() as td:
outfile = os.path.join(td, "nb.ipynb")
_ip.magic("notebook -e %s" % outfile)
class TestEnv(TestCase):
def test_env(self):
env = _ip.magic("env")
self.assertTrue(isinstance(env, dict))
def test_env_get_set_simple(self):
env = _ip.magic("env var val1")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val1')
self.assertEqual(_ip.magic("env var"), 'val1')
env = _ip.magic("env var=val2")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val2')
def test_env_get_set_complex(self):
env = _ip.magic("env var 'val1 '' 'val2")
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], "'val1 '' 'val2")
self.assertEqual(_ip.magic("env var"), "'val1 '' 'val2")
env = _ip.magic('env var=val2 val3="val4')
self.assertEqual(env, None)
self.assertEqual(os.environ['var'], 'val2 val3="val4')
def test_env_set_bad_input(self):
self.assertRaises(UsageError, lambda: _ip.magic("set_env var"))
def test_env_set_whitespace(self):
self.assertRaises(UsageError, lambda: _ip.magic("env var A=B"))
class CellMagicTestCase(TestCase):
def check_ident(self, magic):
# Manually called, we get the result
out = _ip.run_cell_magic(magic, 'a', 'b')
nt.assert_equal(out, ('a','b'))
# Via run_cell, it goes into the user's namespace via displayhook
_ip.run_cell('%%' + magic +' c\nd\n')
nt.assert_equal(_ip.user_ns['_'], ('c','d\n'))
def test_cell_magic_func_deco(self):
"Cell magic using simple decorator"
@register_cell_magic
def cellm(line, cell):
return line, cell
self.check_ident('cellm')
def test_cell_magic_reg(self):
"Cell magic manually registered"
def cellm(line, cell):
return line, cell
_ip.register_magic_function(cellm, 'cell', 'cellm2')
self.check_ident('cellm2')
def test_cell_magic_class(self):
"Cell magics declared via a class"
@magics_class
class MyMagics(Magics):
@cell_magic
def cellm3(self, line, cell):
return line, cell
_ip.register_magics(MyMagics)
self.check_ident('cellm3')
def test_cell_magic_class2(self):
"Cell magics declared via a class, #2"
@magics_class
class MyMagics2(Magics):
@cell_magic('cellm4')
def cellm33(self, line, cell):
return line, cell
_ip.register_magics(MyMagics2)
self.check_ident('cellm4')
# Check that nothing is registered as 'cellm33'
c33 = _ip.find_cell_magic('cellm33')
nt.assert_equal(c33, None)
def test_file():
"""Basic %%writefile"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
@dec.skip_win32
def test_file_single_quote():
"""Basic %%writefile with embedded single quotes"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, '\'file1\'')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
@dec.skip_win32
def test_file_double_quote():
"""Basic %%writefile with embedded double quotes"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, '"file1"')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_file_var_expand():
"""%%writefile $filename"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.user_ns['filename'] = fname
ip.run_cell_magic("writefile", '$filename', u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_file_unicode():
"""%%writefile with unicode cell"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file1')
ip.run_cell_magic("writefile", fname, u'\n'.join([
u'liné1',
u'liné2',
]))
with io.open(fname, encoding='utf-8') as f:
s = f.read()
nt.assert_in(u'liné1\n', s)
nt.assert_in(u'liné2', s)
def test_file_amend():
"""%%writefile -a amends files"""
ip = get_ipython()
with TemporaryDirectory() as td:
fname = os.path.join(td, 'file2')
ip.run_cell_magic("writefile", fname, u'\n'.join([
'line1',
'line2',
]))
ip.run_cell_magic("writefile", "-a %s" % fname, u'\n'.join([
'line3',
'line4',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line3\n', s)
def test_file_spaces():
"""%%file with spaces in filename"""
ip = get_ipython()
with TemporaryWorkingDirectory() as td:
fname = "file name"
ip.run_cell_magic("file", '"%s"'%fname, u'\n'.join([
'line1',
'line2',
]))
with open(fname) as f:
s = f.read()
nt.assert_in('line1\n', s)
nt.assert_in('line2', s)
def test_script_config():
ip = get_ipython()
ip.config.ScriptMagics.script_magics = ['whoda']
sm = script.ScriptMagics(shell=ip)
nt.assert_in('whoda', sm.magics['cell'])
@dec.skip_win32
def test_script_out():
ip = get_ipython()
ip.run_cell_magic("script", "--out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
@dec.skip_win32
def test_script_err():
ip = get_ipython()
ip.run_cell_magic("script", "--err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'], 'hi\n')
nt.assert_equal(ip.user_ns['error'], 'hello\n')
@dec.skip_win32
def test_script_bg_out():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output sh", "echo 'hi'")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
ip.user_ns['output'].close()
@dec.skip_win32
def test_script_bg_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --err error sh", "echo 'hello' >&2")
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
ip.user_ns['error'].close()
@dec.skip_win32
def test_script_bg_out_err():
ip = get_ipython()
ip.run_cell_magic("script", "--bg --out output --err error sh", "echo 'hi'\necho 'hello' >&2")
nt.assert_equal(ip.user_ns['output'].read(), b'hi\n')
nt.assert_equal(ip.user_ns['error'].read(), b'hello\n')
ip.user_ns['output'].close()
ip.user_ns['error'].close()
def test_script_defaults():
ip = get_ipython()
for cmd in ['sh', 'bash', 'perl', 'ruby']:
try:
find_cmd(cmd)
except Exception:
pass
else:
nt.assert_in(cmd, ip.magics_manager.magics['cell'])
@magics_class
class FooFoo(Magics):
"""class with both %foo and %%foo magics"""
@line_magic('foo')
def line_foo(self, line):
"I am line foo"
pass
@cell_magic("foo")
def cell_foo(self, line, cell):
"I am cell foo, not line foo"
pass
def test_line_cell_info():
"""%%foo and %foo magics are distinguishable to inspect"""
ip = get_ipython()
ip.magics_manager.register(FooFoo)
oinfo = ip.object_inspect('foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
oinfo = ip.object_inspect('%%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.cell_foo.__doc__)
oinfo = ip.object_inspect('%foo')
nt.assert_true(oinfo['found'])
nt.assert_true(oinfo['ismagic'])
nt.assert_equal(oinfo['docstring'], FooFoo.line_foo.__doc__)
def test_multiple_magics():
ip = get_ipython()
foo1 = FooFoo(ip)
foo2 = FooFoo(ip)
mm = ip.magics_manager
mm.register(foo1)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo1)
mm.register(foo2)
nt.assert_true(mm.magics['line']['foo'].__self__ is foo2)
def test_alias_magic():
"""Test %alias_magic."""
ip = get_ipython()
mm = ip.magics_manager
# Basic operation: both cell and line magics are created, if possible.
ip.run_line_magic('alias_magic', 'timeit_alias timeit')
nt.assert_in('timeit_alias', mm.magics['line'])
nt.assert_in('timeit_alias', mm.magics['cell'])
# --cell is specified, line magic not created.
ip.run_line_magic('alias_magic', '--cell timeit_cell_alias timeit')
nt.assert_not_in('timeit_cell_alias', mm.magics['line'])
nt.assert_in('timeit_cell_alias', mm.magics['cell'])
# Test that line alias is created successfully.
ip.run_line_magic('alias_magic', '--line env_alias env')
nt.assert_equal(ip.run_line_magic('env', ''),
ip.run_line_magic('env_alias', ''))
# Test that line alias with parameters passed in is created successfully.
ip.run_line_magic('alias_magic', '--line history_alias history --params ' + shlex.quote('3'))
nt.assert_in('history_alias', mm.magics['line'])
def test_save():
"""Test %save."""
ip = get_ipython()
ip.history_manager.reset() # Clear any existing history.
cmds = [u"a=1", u"def b():\n return a**2", u"print(a, b())"]
for i, cmd in enumerate(cmds, start=1):
ip.history_manager.store_inputs(i, cmd)
with TemporaryDirectory() as tmpdir:
file = os.path.join(tmpdir, "testsave.py")
ip.run_line_magic("save", "%s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 1)
nt.assert_in('coding: utf-8', content)
ip.run_line_magic("save", "-a %s 1-10" % file)
with open(file) as f:
content = f.read()
nt.assert_equal(content.count(cmds[0]), 2)
nt.assert_in('coding: utf-8', content)
def test_store():
"""Test %store."""
ip = get_ipython()
ip.run_line_magic('load_ext', 'storemagic')
# make sure the storage is empty
ip.run_line_magic('store', '-z')
ip.user_ns['var'] = 42
ip.run_line_magic('store', 'var')
ip.user_ns['var'] = 39
ip.run_line_magic('store', '-r')
nt.assert_equal(ip.user_ns['var'], 42)
ip.run_line_magic('store', '-d var')
ip.user_ns['var'] = 39
ip.run_line_magic('store' , '-r')
nt.assert_equal(ip.user_ns['var'], 39)
def _run_edit_test(arg_s, exp_filename=None,
exp_lineno=-1,
exp_contents=None,
exp_is_temp=None):
ip = get_ipython()
M = code.CodeMagics(ip)
last_call = ['','']
opts,args = M.parse_options(arg_s,'prxn:')
filename, lineno, is_temp = M._find_edit_target(ip, args, opts, last_call)
if exp_filename is not None:
nt.assert_equal(exp_filename, filename)
if exp_contents is not None:
with io.open(filename, 'r', encoding='utf-8') as f:
contents = f.read()
nt.assert_equal(exp_contents, contents)
if exp_lineno != -1:
nt.assert_equal(exp_lineno, lineno)
if exp_is_temp is not None:
nt.assert_equal(exp_is_temp, is_temp)
def test_edit_interactive():
"""%edit on interactively defined objects"""
ip = get_ipython()
n = ip.execution_count
ip.run_cell(u"def foo(): return 1", store_history=True)
try:
_run_edit_test("foo")
except code.InteractivelyDefined as e:
nt.assert_equal(e.index, n)
else:
raise AssertionError("Should have raised InteractivelyDefined")
def test_edit_cell():
"""%edit [cell id]"""
ip = get_ipython()
ip.run_cell(u"def foo(): return 1", store_history=True)
# test
_run_edit_test("1", exp_contents=ip.user_ns['In'][1], exp_is_temp=True)
def test_bookmark():
ip = get_ipython()
ip.run_line_magic('bookmark', 'bmname')
with tt.AssertPrints('bmname'):
ip.run_line_magic('bookmark', '-l')
ip.run_line_magic('bookmark', '-d bmname')
def test_ls_magic():
ip = get_ipython()
json_formatter = ip.display_formatter.formatters['application/json']
json_formatter.enabled = True
lsmagic = ip.magic('lsmagic')
with warnings.catch_warnings(record=True) as w:
j = json_formatter(lsmagic)
nt.assert_equal(sorted(j), ['cell', 'line'])
nt.assert_equal(w, []) # no warnings
def test_strip_initial_indent():
def sii(s):
lines = s.splitlines()
return '\n'.join(code.strip_initial_indent(lines))
nt.assert_equal(sii(" a = 1\nb = 2"), "a = 1\nb = 2")
nt.assert_equal(sii(" a\n b\nc"), "a\n b\nc")
nt.assert_equal(sii("a\n b"), "a\n b")
def test_logging_magic_quiet_from_arg():
_ip.config.LoggingMagics.quiet = False
lm = logging.LoggingMagics(shell=_ip)
with TemporaryDirectory() as td:
try:
with tt.AssertNotPrints(re.compile("Activating.*")):
lm.logstart('-q {}'.format(
os.path.join(td, "quiet_from_arg.log")))
finally:
_ip.logger.logstop()
def test_logging_magic_quiet_from_config():
_ip.config.LoggingMagics.quiet = True
lm = logging.LoggingMagics(shell=_ip)
with TemporaryDirectory() as td:
try:
with tt.AssertNotPrints(re.compile("Activating.*")):
lm.logstart(os.path.join(td, "quiet_from_config.log"))
finally:
_ip.logger.logstop()
def test_logging_magic_not_quiet():
_ip.config.LoggingMagics.quiet = False
lm = logging.LoggingMagics(shell=_ip)
with TemporaryDirectory() as td:
try:
with tt.AssertPrints(re.compile("Activating.*")):
lm.logstart(os.path.join(td, "not_quiet.log"))
finally:
_ip.logger.logstop()
def test_time_no_var_expand():
_ip.user_ns['a'] = 5
_ip.user_ns['b'] = []
_ip.magic('time b.append("{a}")')
assert _ip.user_ns['b'] == ['{a}']
# this is slow, put at the end for local testing.
def test_timeit_arguments():
"Test valid timeit arguments, should not cause SyntaxError (GH #1269)"
if sys.version_info < (3,7):
_ip.magic("timeit ('#')")
else:
# 3.7 optimize no-op statement like above out, and complain there is
# nothing in the for loop.
_ip.magic("timeit a=('#')")
| env/lib/python3.6/site-packages/IPython/core/tests/test_magic.py | 34,178 | class with both %foo and %%foo magics
I am cell foo, not line foo
Test %hist -f with temporary filename.
In [9]: import tempfile
In [10]: tfile = tempfile.mktemp('.py','tmp-ipython-')
In [11]: %hist -nl -f $tfile 3
In [13]: import os; os.unlink(tfile)
Test %hist -op
In [1]: class b(float):
...: pass
...:
In [2]: class s(object):
...: def __str__(self):
...: return 's'
...:
In [3]:
In [4]: class r(b):
...: def __repr__(self):
...: return 'r'
...:
In [5]: class sr(s,r): pass
...:
In [6]:
In [7]: bb=b()
In [8]: ss=s()
In [9]: rr=r()
In [10]: ssrr=sr()
In [11]: 4.5
Out[11]: 4.5
In [12]: str(ss)
Out[12]: 's'
In [13]:
In [14]: %hist -op
>>> class b:
... pass
...
>>> class s(b):
... def __str__(self):
... return 's'
...
>>>
>>> class r(b):
... def __repr__(self):
... return 'r'
...
>>> class sr(s,r): pass
>>>
>>> bb=b()
>>> ss=s()
>>> rr=r()
>>> ssrr=sr()
>>> 4.5
4.5
>>> str(ss)
's'
>>>
Test %hist -r
XXX - This test is not recording the output correctly. For some reason, in
testing mode the raw history isn't getting populated. No idea why.
Disabling the output checking for now, though at least we do run it.
In [1]: 'hist' in _ip.lsmagic()
Out[1]: True
In [2]: x=1
In [3]: %hist -rl 2
x=1 # random
%hist -r 2
doctest for %precision
In [1]: f = get_ipython().display_formatter.formatters['text/plain']
In [2]: %precision 5
Out[2]: '%.5f'
In [3]: f.float_format
Out[3]: '%.5f'
In [4]: %precision %e
Out[4]: '%e'
In [5]: f(3.1415927)
Out[5]: '3.141593e+00'
doctest for %who
In [1]: %reset -f
In [2]: alpha = 123
In [3]: beta = 'beta'
In [4]: %who int
alpha
In [5]: %who str
beta
In [6]: %whos
Variable Type Data/Info
----------------------------
alpha int 123
beta str beta
In [7]: %who_ls
Out[7]: ['alpha', 'beta']
I am line foo
Test %alias_magic.
Test OSMagics.cd_force_quiet option
Cell magics declared via a class
Cell magics declared via a class, #2
Cell magic using simple decorator
Cell magic manually registered
test that config magic does not raise
can happen if Configurable init is moved too early into
Magics.__init__ as then a Config object will be registered as a
magic.
test that config magic prints available configs in unique and
sorted order.
test that config with a classname prints the class's options.
Test various directory handling operations.
Toggle doctest_mode twice, it should be a no-op and run without error
%edit [cell id]
%edit on interactively defined objects
Basic %%writefile
%%writefile -a amends files
Basic %%writefile with embedded double quotes
Basic %%writefile with embedded single quotes
%%file with spaces in filename
%%writefile with unicode cell
%%writefile $filename
%%foo and %foo magics are distinguishable to inspect
Test that we can run a multi-line macro successfully.
Test %magic
Magic.parse_options can handle --foo=bar long options
Test that we don't mangle paths when parsing magic options.
Test '%reset array' functionality
Tests for basic options parsing in magics.
Test that prun does not clobber string escapes (GH #1302)
Test %%prun with IPython special syntax
Test '%reset dhist' magic
Test '%reset in' magic
Test that '%reset in' preserves In[] length
Test '%reset out' magic
Test %save.
Test %store.
test %tb after a SyntaxError
Erroneous magic function calls, issue gh-3334
Test valid timeit arguments, should not cause SyntaxError (GH #1269)
test quiet option of timeit magic
test whether timeit -o return object
test shlex issues with timeit (#1109)
Test %%timeit with IPython special syntax
Check that whos is protected against objects where repr() fails.
Test that references from %run are cleared by xdel.
Tests for various magic functions.
Needs to be run by nose (to make ipython session available).
-*- coding: utf-8 -*-
magic not found raises UsageError
ensure result isn't success when a magic isn't found
magic not found raises UsageError
ensure result isn't success when a magic isn't found
should not raise.
clear up everything
Practically ALL ipython development systems will have more than 10 aliases
we must strip dots from alias names
rehashx must fill up syscmdlist
argv splitting is os-dependent
raise Exception(ip.history_manager.session_number)
raise Exception(list(ip.history_manager._get_range_session()))
Clear any existing history.
List macros
test '%reset -f out', make an Out prompt
test '%reset -f in'
copy before clearing
restore
trap and validate stdout
trim output, and only check the last line
These are only the most minimal of tests, more should be added later. At the very least we check that basic text/unicode calls work OK.
curpath = lambda :os.path.splitdrive(os.getcwd())[1].replace('\\','/')
Calling xmode three times should be a no-op
%run creates some hidden references...
... as does the displayhook.
Check that a's __del__ method has been called.
line mode test
cell mode test
line mode test
cell mode test
Debugging information for failures of this test
Clear import caches
Clear any existing history.
Manually called, we get the result
Via run_cell, it goes into the user's namespace via displayhook
Check that nothing is registered as 'cellm33'
Basic operation: both cell and line magics are created, if possible.
--cell is specified, line magic not created.
Test that line alias is created successfully.
Test that line alias with parameters passed in is created successfully.
Clear any existing history.
make sure the storage is empty
test
no warnings
this is slow, put at the end for local testing.
3.7 optimize no-op statement like above out, and complain there is nothing in the for loop. | 5,670 | en | 0.706979 |
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Dict, Tuple
from ethtx.models.semantics_model import (
ParameterSemantics,
EventSemantics,
FunctionSemantics,
TransformationSemantics,
)
def _decode_parameters_list(raw_parameters_list: list) -> List[ParameterSemantics]:
parameters_list = []
if not raw_parameters_list:
return parameters_list
for raw_parameter_semantics in raw_parameters_list:
if "indexed" in raw_parameter_semantics:
indexed = raw_parameter_semantics["indexed"]
else:
indexed = False
if "dynamic" in raw_parameter_semantics:
dynamic = raw_parameter_semantics["dynamic"]
else:
dynamic = False
if raw_parameter_semantics["type"] == "tuple":
components = _decode_parameters_list(raw_parameter_semantics["components"])
else:
components = []
parameters_list.append(
ParameterSemantics(
raw_parameter_semantics["name"],
raw_parameter_semantics["type"],
components,
indexed,
dynamic,
)
)
return parameters_list
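# Illustrative sketch (not part of the original module): given a hypothetical raw
# ABI fragment, the function above recurses into "components" for tuple types and
# defaults the optional "indexed"/"dynamic" flags to False. All names and values
# below are assumptions for demonstration only.
#
#   raw = [
#       {"name": "owner", "type": "address", "indexed": True},
#       {"name": "details", "type": "tuple", "components": [
#           {"name": "amount", "type": "uint256"},
#           {"name": "payload", "type": "bytes", "dynamic": True},
#       ]},
#   ]
#   _decode_parameters_list(raw)
#   # -> [ParameterSemantics("owner", "address", [], True, False),
#   #     ParameterSemantics("details", "tuple", [<two nested semantics>], False, False)]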
def decode_events_and_functions(
abi: dict,
) -> Tuple[Dict[str, EventSemantics], Dict[str, FunctionSemantics]]:
events = dict()
for signature, raw_event_semantics in abi.get("events", {}).items():
parameters = _decode_parameters_list(raw_event_semantics.get("parameters"))
events[signature] = EventSemantics(
signature,
raw_event_semantics["anonymous"],
raw_event_semantics["name"],
parameters,
)
functions = dict()
for signature, raw_function_semantics in abi.get("functions", {}).items():
if raw_function_semantics:
inputs = _decode_parameters_list(raw_function_semantics.get("inputs"))
outputs = _decode_parameters_list(raw_function_semantics.get("outputs"))
name = raw_function_semantics["name"]
else:
inputs = outputs = []
name = signature
functions[signature] = FunctionSemantics(signature, name, inputs, outputs)
return events, functions
def decode_transformations(
raw_transformations: dict,
) -> Dict[str, Dict[str, TransformationSemantics]]:
transformations = dict()
if raw_transformations:
for signature, transformation in raw_transformations.items():
transformations[signature] = dict()
for parameter_name, parameter_transformation in transformation.get(
"arguments", dict()
).items():
transformations[signature][parameter_name] = TransformationSemantics(
parameter_transformation.get("name"),
parameter_transformation.get("type"),
parameter_transformation.get("value"),
)
return transformations
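# Minimal usage sketch, not part of the original module: the ABI layout below is
# an assumption inferred from the key lookups in the functions above, and the
# event signature key is a hypothetical placeholder.
if __name__ == "__main__":
    sample_abi = {
        "events": {
            "0xddf252ad": {  # hypothetical signature key
                "anonymous": False,
                "name": "Transfer",
                "parameters": [
                    {"name": "src", "type": "address", "indexed": True},
                    {"name": "dst", "type": "address", "indexed": True},
                    {"name": "wad", "type": "uint256"},
                ],
            }
        },
        "functions": {},
    }
    events, functions = decode_events_and_functions(sample_abi)
    print(events["0xddf252ad"].name)   # -> Transfer
    print(decode_transformations({}))  # empty input -> empty dict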
| ethtx/decoders/decoders/semantics.py | 3,532 | Copyright 2021 DAI Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. | 556 | en | 0.848164 |
"""
Support for Xiaomi Yeelight Wifi color bulb.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.yeelight/
"""
import logging
import voluptuous as vol
from homeassistant.util.color import (
color_temperature_mired_to_kelvin as mired_to_kelvin,
color_temperature_kelvin_to_mired as kelvin_to_mired)
from homeassistant.const import CONF_DEVICES, CONF_NAME
from homeassistant.components.light import (
ATTR_BRIGHTNESS, ATTR_HS_COLOR, ATTR_TRANSITION, ATTR_COLOR_TEMP,
ATTR_FLASH, FLASH_SHORT, FLASH_LONG, ATTR_EFFECT, SUPPORT_BRIGHTNESS,
SUPPORT_COLOR, SUPPORT_TRANSITION, SUPPORT_COLOR_TEMP, SUPPORT_FLASH,
SUPPORT_EFFECT, Light, PLATFORM_SCHEMA, ATTR_ENTITY_ID, DOMAIN)
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
REQUIREMENTS = ['yeelight==0.4.0']
_LOGGER = logging.getLogger(__name__)
LEGACY_DEVICE_TYPE_MAP = {
'color1': 'rgb',
'mono1': 'white',
'strip1': 'strip',
'bslamp1': 'bedside',
'ceiling1': 'ceiling',
}
DEFAULT_NAME = 'Yeelight'
DEFAULT_TRANSITION = 350
CONF_TRANSITION = 'transition'
CONF_SAVE_ON_CHANGE = 'save_on_change'
CONF_MODE_MUSIC = 'use_music_mode'
DATA_KEY = 'light.yeelight'
DEVICE_SCHEMA = vol.Schema({
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TRANSITION, default=DEFAULT_TRANSITION): cv.positive_int,
vol.Optional(CONF_MODE_MUSIC, default=False): cv.boolean,
vol.Optional(CONF_SAVE_ON_CHANGE, default=True): cv.boolean,
})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA}, })
SUPPORT_YEELIGHT = (SUPPORT_BRIGHTNESS |
SUPPORT_TRANSITION |
SUPPORT_FLASH)
SUPPORT_YEELIGHT_RGB = (SUPPORT_YEELIGHT |
SUPPORT_COLOR |
SUPPORT_EFFECT |
SUPPORT_COLOR_TEMP)
YEELIGHT_MIN_KELVIN = YEELIGHT_MAX_KELVIN = 2700  # non-RGB models: fixed 2700 K
YEELIGHT_RGB_MIN_KELVIN = 1700
YEELIGHT_RGB_MAX_KELVIN = 6500
EFFECT_DISCO = "Disco"
EFFECT_TEMP = "Slow Temp"
EFFECT_STROBE = "Strobe epilepsy!"
EFFECT_STROBE_COLOR = "Strobe color"
EFFECT_ALARM = "Alarm"
EFFECT_POLICE = "Police"
EFFECT_POLICE2 = "Police2"
EFFECT_CHRISTMAS = "Christmas"
EFFECT_RGB = "RGB"
EFFECT_RANDOM_LOOP = "Random Loop"
EFFECT_FAST_RANDOM_LOOP = "Fast Random Loop"
EFFECT_SLOWDOWN = "Slowdown"
EFFECT_WHATSAPP = "WhatsApp"
EFFECT_FACEBOOK = "Facebook"
EFFECT_TWITTER = "Twitter"
EFFECT_STOP = "Stop"
YEELIGHT_EFFECT_LIST = [
EFFECT_DISCO,
EFFECT_TEMP,
EFFECT_STROBE,
EFFECT_STROBE_COLOR,
EFFECT_ALARM,
EFFECT_POLICE,
EFFECT_POLICE2,
EFFECT_CHRISTMAS,
EFFECT_RGB,
EFFECT_RANDOM_LOOP,
EFFECT_FAST_RANDOM_LOOP,
EFFECT_SLOWDOWN,
EFFECT_WHATSAPP,
EFFECT_FACEBOOK,
EFFECT_TWITTER,
EFFECT_STOP]
SERVICE_SET_MODE = 'yeelight_set_mode'
ATTR_MODE = 'mode'
YEELIGHT_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
def _cmd(func):
"""Define a wrapper to catch exceptions from the bulb."""
def _wrap(self, *args, **kwargs):
import yeelight
try:
_LOGGER.debug("Calling %s with %s %s", func, args, kwargs)
return func(self, *args, **kwargs)
except yeelight.BulbException as ex:
_LOGGER.error("Error when calling %s: %s", func, ex)
return _wrap
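# Illustrative note: when applied as @_cmd to the setter methods below, the
# wrapper above logs yeelight.BulbException instead of letting it propagate,
# so one flaky bulb command does not abort the whole service call.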
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Yeelight bulbs."""
from yeelight.enums import PowerMode
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
lights = []
if discovery_info is not None:
_LOGGER.debug("Adding autodetected %s", discovery_info['hostname'])
device_type = discovery_info['device_type']
device_type = LEGACY_DEVICE_TYPE_MAP.get(device_type, device_type)
# Not using hostname, as it seems to vary.
name = "yeelight_%s_%s" % (device_type,
discovery_info['properties']['mac'])
host = discovery_info['host']
device = {'name': name, 'ipaddr': host}
light = YeelightLight(device, DEVICE_SCHEMA({}))
lights.append(light)
hass.data[DATA_KEY][host] = light
else:
for host, device_config in config[CONF_DEVICES].items():
device = {'name': device_config[CONF_NAME], 'ipaddr': host}
light = YeelightLight(device, device_config)
lights.append(light)
hass.data[DATA_KEY][host] = light
add_devices(lights, True)
def service_handler(service):
"""Dispatch service calls to target entities."""
params = {key: value for key, value in service.data.items()
if key != ATTR_ENTITY_ID}
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
target_devices = [dev for dev in hass.data[DATA_KEY].values()
if dev.entity_id in entity_ids]
else:
target_devices = hass.data[DATA_KEY].values()
for target_device in target_devices:
if service.service == SERVICE_SET_MODE:
target_device.set_mode(**params)
service_schema_set_mode = YEELIGHT_SERVICE_SCHEMA.extend({
vol.Required(ATTR_MODE):
vol.In([mode.name.lower() for mode in PowerMode])
})
hass.services.register(
DOMAIN, SERVICE_SET_MODE, service_handler,
schema=service_schema_set_mode)
class YeelightLight(Light):
"""Representation of a Yeelight light."""
def __init__(self, device, config):
"""Initialize the Yeelight light."""
self.config = config
self._name = device['name']
self._ipaddr = device['ipaddr']
self._supported_features = SUPPORT_YEELIGHT
self._available = False
self._bulb_device = None
self._brightness = None
self._color_temp = None
self._is_on = None
self._hs = None
@property
def available(self) -> bool:
"""Return if bulb is available."""
return self._available
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def effect_list(self):
"""Return the list of supported effects."""
return YEELIGHT_EFFECT_LIST
@property
def color_temp(self) -> int:
"""Return the color temperature."""
return self._color_temp
@property
def name(self) -> str:
"""Return the name of the device if any."""
return self._name
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._is_on
@property
def brightness(self) -> int:
"""Return the brightness of this light between 1..255."""
return self._brightness
@property
def min_mireds(self):
"""Return minimum supported color temperature."""
if self.supported_features & SUPPORT_COLOR_TEMP:
return kelvin_to_mired(YEELIGHT_RGB_MAX_KELVIN)
return kelvin_to_mired(YEELIGHT_MAX_KELVIN)
@property
def max_mireds(self):
"""Return maximum supported color temperature."""
if self.supported_features & SUPPORT_COLOR_TEMP:
return kelvin_to_mired(YEELIGHT_RGB_MIN_KELVIN)
return kelvin_to_mired(YEELIGHT_MIN_KELVIN)
def _get_hs_from_properties(self):
rgb = self._properties.get('rgb', None)
color_mode = self._properties.get('color_mode', None)
if not rgb or not color_mode:
return None
color_mode = int(color_mode)
if color_mode == 2: # color temperature
temp_in_k = mired_to_kelvin(self._color_temp)
return color_util.color_temperature_to_hs(temp_in_k)
if color_mode == 3: # hsv
hue = int(self._properties.get('hue'))
sat = int(self._properties.get('sat'))
return (hue / 360 * 65536, sat / 100 * 255)
rgb = int(rgb)
blue = rgb & 0xff
green = (rgb >> 8) & 0xff
red = (rgb >> 16) & 0xff
return color_util.color_RGB_to_hs(red, green, blue)
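    # Worked example of the unpacking above (illustrative, not original code):
    # rgb = 0xFF8800 = 16746496 -> blue = 0x00 = 0, green = 0x88 = 136,
    # red = 0xFF = 255; the bulb reports one packed 24-bit integer per color.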
@property
def hs_color(self) -> tuple:
"""Return the color property."""
return self._hs
@property
def _properties(self) -> dict:
return self._bulb.last_properties
@property
def _bulb(self) -> 'yeelight.Bulb':
import yeelight
if self._bulb_device is None:
try:
self._bulb_device = yeelight.Bulb(self._ipaddr)
self._bulb_device.get_properties() # force init for type
self._available = True
except yeelight.BulbException as ex:
self._available = False
_LOGGER.error("Failed to connect to bulb %s, %s: %s",
self._ipaddr, self._name, ex)
return self._bulb_device
def set_music_mode(self, mode) -> None:
"""Set the music mode on or off."""
if mode:
self._bulb.start_music()
else:
self._bulb.stop_music()
def update(self) -> None:
"""Update properties from the bulb."""
import yeelight
try:
self._bulb.get_properties()
if self._bulb_device.bulb_type == yeelight.BulbType.Color:
self._supported_features = SUPPORT_YEELIGHT_RGB
self._is_on = self._properties.get('power') == 'on'
bright = self._properties.get('bright', None)
if bright:
self._brightness = round(255 * (int(bright) / 100))
temp_in_k = self._properties.get('ct', None)
if temp_in_k:
self._color_temp = kelvin_to_mired(int(temp_in_k))
self._hs = self._get_hs_from_properties()
self._available = True
except yeelight.BulbException as ex:
if self._available: # just inform once
_LOGGER.error("Unable to update bulb status: %s", ex)
self._available = False
@_cmd
def set_brightness(self, brightness, duration) -> None:
"""Set bulb brightness."""
if brightness:
_LOGGER.debug("Setting brightness: %s", brightness)
self._bulb.set_brightness(brightness / 255 * 100,
duration=duration)
@_cmd
def set_rgb(self, rgb, duration) -> None:
"""Set bulb's color."""
if rgb and self.supported_features & SUPPORT_COLOR:
_LOGGER.debug("Setting RGB: %s", rgb)
self._bulb.set_rgb(rgb[0], rgb[1], rgb[2], duration=duration)
@_cmd
def set_colortemp(self, colortemp, duration) -> None:
"""Set bulb's color temperature."""
if colortemp and self.supported_features & SUPPORT_COLOR_TEMP:
temp_in_k = mired_to_kelvin(colortemp)
_LOGGER.debug("Setting color temp: %s K", temp_in_k)
self._bulb.set_color_temp(temp_in_k, duration=duration)
@_cmd
def set_default(self) -> None:
"""Set current options as default."""
self._bulb.set_default()
@_cmd
def set_flash(self, flash) -> None:
"""Activate flash."""
if flash:
from yeelight import (RGBTransition, SleepTransition, Flow,
BulbException)
if self._bulb.last_properties["color_mode"] != 1:
_LOGGER.error("Flash supported currently only in RGB mode.")
return
transition = int(self.config[CONF_TRANSITION])
            if flash == FLASH_LONG:
                count = 1
                duration = transition * 5
            elif flash == FLASH_SHORT:
                count = 1
                duration = transition * 2
red, green, blue = color_util.color_hs_to_RGB(*self._hs)
transitions = list()
transitions.append(
RGBTransition(255, 0, 0, brightness=10, duration=duration))
transitions.append(SleepTransition(
duration=transition))
transitions.append(
RGBTransition(red, green, blue, brightness=self.brightness,
duration=duration))
flow = Flow(count=count, transitions=transitions)
try:
self._bulb.start_flow(flow)
except BulbException as ex:
_LOGGER.error("Unable to set flash: %s", ex)
@_cmd
def set_effect(self, effect) -> None:
"""Activate effect."""
if effect:
from yeelight import (Flow, BulbException)
from yeelight.transitions import (disco, temp, strobe, pulse,
strobe_color, alarm, police,
police2, christmas, rgb,
randomloop, slowdown)
if effect == EFFECT_STOP:
self._bulb.stop_flow()
return
            if effect == EFFECT_DISCO:
                flow = Flow(count=0, transitions=disco())
            elif effect == EFFECT_TEMP:
                flow = Flow(count=0, transitions=temp())
            elif effect == EFFECT_STROBE:
                flow = Flow(count=0, transitions=strobe())
            elif effect == EFFECT_STROBE_COLOR:
                flow = Flow(count=0, transitions=strobe_color())
            elif effect == EFFECT_ALARM:
                flow = Flow(count=0, transitions=alarm())
            elif effect == EFFECT_POLICE:
                flow = Flow(count=0, transitions=police())
            elif effect == EFFECT_POLICE2:
                flow = Flow(count=0, transitions=police2())
            elif effect == EFFECT_CHRISTMAS:
                flow = Flow(count=0, transitions=christmas())
            elif effect == EFFECT_RGB:
                flow = Flow(count=0, transitions=rgb())
            elif effect == EFFECT_RANDOM_LOOP:
                flow = Flow(count=0, transitions=randomloop())
            elif effect == EFFECT_FAST_RANDOM_LOOP:
                flow = Flow(count=0, transitions=randomloop(duration=250))
            elif effect == EFFECT_SLOWDOWN:
                flow = Flow(count=0, transitions=slowdown())
            elif effect == EFFECT_WHATSAPP:
                flow = Flow(count=2, transitions=pulse(37, 211, 102))
            elif effect == EFFECT_FACEBOOK:
                flow = Flow(count=2, transitions=pulse(59, 89, 152))
            elif effect == EFFECT_TWITTER:
                flow = Flow(count=2, transitions=pulse(0, 172, 237))
            else:
                # Unknown effect: log and bail out rather than hit a NameError
                # on the unbound `flow` below.
                _LOGGER.error("Unknown effect: %s", effect)
                return
try:
self._bulb.start_flow(flow)
except BulbException as ex:
_LOGGER.error("Unable to set effect: %s", ex)
def turn_on(self, **kwargs) -> None:
"""Turn the bulb on."""
import yeelight
brightness = kwargs.get(ATTR_BRIGHTNESS)
colortemp = kwargs.get(ATTR_COLOR_TEMP)
hs_color = kwargs.get(ATTR_HS_COLOR)
rgb = color_util.color_hs_to_RGB(*hs_color) if hs_color else None
flash = kwargs.get(ATTR_FLASH)
effect = kwargs.get(ATTR_EFFECT)
duration = int(self.config[CONF_TRANSITION]) # in ms
if ATTR_TRANSITION in kwargs: # passed kwarg overrides config
duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s
try:
self._bulb.turn_on(duration=duration)
except yeelight.BulbException as ex:
_LOGGER.error("Unable to turn the bulb on: %s", ex)
return
if self.config[CONF_MODE_MUSIC] and not self._bulb.music_mode:
try:
self.set_music_mode(self.config[CONF_MODE_MUSIC])
except yeelight.BulbException as ex:
_LOGGER.error("Unable to turn on music mode,"
"consider disabling it: %s", ex)
try:
# values checked for none in methods
self.set_rgb(rgb, duration)
self.set_colortemp(colortemp, duration)
self.set_brightness(brightness, duration)
self.set_flash(flash)
self.set_effect(effect)
except yeelight.BulbException as ex:
_LOGGER.error("Unable to set bulb properties: %s", ex)
return
# save the current state if we had a manual change.
if self.config[CONF_SAVE_ON_CHANGE] and (brightness
or colortemp
or rgb):
try:
self.set_default()
except yeelight.BulbException as ex:
_LOGGER.error("Unable to set the defaults: %s", ex)
return
def turn_off(self, **kwargs) -> None:
"""Turn off."""
import yeelight
duration = int(self.config[CONF_TRANSITION]) # in ms
if ATTR_TRANSITION in kwargs: # passed kwarg overrides config
duration = int(kwargs.get(ATTR_TRANSITION) * 1000) # kwarg in s
try:
self._bulb.turn_off(duration=duration)
except yeelight.BulbException as ex:
_LOGGER.error("Unable to turn the bulb off: %s", ex)
def set_mode(self, mode: str):
"""Set a power mode."""
import yeelight
try:
self._bulb.set_power_mode(yeelight.enums.PowerMode[mode.upper()])
except yeelight.BulbException as ex:
_LOGGER.error("Unable to set the power mode: %s", ex)
| homeassistant/components/light/yeelight.py | 17,617 | Representation of a Yeelight light.
Initialize the Yeelight light.
Define a wrapper to catch exceptions from the bulb.
Return if bulb is available.
Return the brightness of this light between 1..255.
Return the color temperature.
Return the list of supported effects.
Return the color property.
Return true if device is on.
Return maximum supported color temperature.
Return minimum supported color temperature.
Return the name of the device if any.
Dispatch service calls to target entities.
Set bulb brightness.
Set bulb's color temperature.
Set current options as default.
Activate effect.
Activate flash.
Set a power mode.
Set the music mode on or off.
Set bulb's color.
Set up the Yeelight bulbs.
Flag supported features.
Turn off.
Turn the bulb on.
Update properties from the bulb.
Support for Xiaomi Yeelight Wifi color bulb.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.yeelight/
Not using hostname, as it seems to vary.
color temperature
hsv
force init for type
just inform once
in ms
passed kwarg overrides config
kwarg in s
values checked for none in methods
save the current state if we had a manual change.
in ms
passed kwarg overrides config
kwarg in s | 1,242 | en | 0.784439 |
import numpy as np
import sys
class RBF():
def __init__(self, Input, Output, Ptypes, Nclasses):
self.input = Input
self.hidden = Ptypes * Nclasses
self.output = Output
self.ptypes = Ptypes
self.nclasses = Nclasses
self.protos = 0
self.weights = 0
self.spread = 0
    def createPrototypes(self, data):
        '''Randomly sample `hidden` rows (with replacement) from the training
        data to use as RBF centers.'''
groups = np.random.randint(0, data.shape[0], size = (self.hidden))
prototypes = np.zeros((self.hidden, data.shape[1]))
i = 0
for element in groups:
prototypes[i] = data[element, :]
i += 1
self.protos = prototypes
    def sigma(self):
        '''Set the shared spread to d_max^2 / sqrt(H): the largest squared
        prototype-to-prototype distance over the root of the hidden size.'''
temp = 0
for i in range(self.hidden):
for j in range(self.hidden):
distance = np.square(np.linalg.norm(self.protos[i] - self.protos[j]))
if distance > temp:
temp = distance
self.spread = temp/np.sqrt(self.hidden)
def train(self, data, classes):
self.createPrototypes(data)
self.sigma()
        hidden_out = np.zeros(shape=(0, self.hidden))
        for sample in data:
            output = []
            for proto in self.protos:
                distance = np.square(np.linalg.norm(sample - proto))
                neuron_output = np.exp(-(distance)/(np.square(self.spread)))
                output.append(neuron_output)
            hidden_out = np.vstack([hidden_out, np.array(output)])
self.weights = np.dot(np.linalg.pinv(hidden_out), classes)
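    # Math sketch of train() above (illustrative comment, not original code):
    # each hidden unit j computes phi_j(x) = exp(-||x - c_j||^2 / sigma^2) for
    # prototype c_j; stacking these rows gives Phi (n_samples x n_hidden), and
    # the output weights solve least squares via the pseudoinverse:
    #     W = pinv(Phi) @ Y,  with Y the one-hot class matrix.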
def test(self, data, classes):
right = 0
for i in range(len(data)):
d = data[i]
output = []
for proto in self.protos:
distance = np.square(np.linalg.norm(d-proto))
neuron_output = np.exp(-(distance)/np.square(self.spread))
output.append(neuron_output)
network_output = np.dot(np.array(output),self.weights)
print ("Expected: ", classes[i].argmax(axis=0) +1)
print ("Result: ", network_output.argmax(axis=0) + 1)
print ()
if network_output.argmax(axis=0) + 1 == classes[i].argmax(axis=0) +1:
right += 1
print ("Accuracy(%): ", (right * 100) / len(data))
def read_iris(percentage):
dataset = np.loadtxt('iris.data', delimiter=',', skiprows=0)
np.random.shuffle(dataset)
q = int(dataset.shape[0] * percentage) + 2
X_training = dataset[0:q, 0:4]
Y_training = dataset[0:q, 4]
X_test = dataset[q:150, 0:4]
Y_test = dataset[q:150, 4]
return X_training, Y_training, X_test, Y_test
def process_iris_data(data):
p_data = np.zeros((data.shape[0], data.shape[1]))
max_col1 = np.amax(data[:,0])
max_col2 = np.amax(data[:,1])
max_col3 = np.amax(data[:,2])
max_col4 = np.amax(data[:,3])
for n in range(len(data)):
p_data[n, 0] = data[n,0] / max_col1
p_data[n, 1] = data[n,1] / max_col2
p_data[n, 2] = data[n,2] / max_col3
p_data[n, 3] = data[n,3] / max_col4
return p_data
def process_iris_labels(labels, operation):
if operation == 0:
p_labels = np.zeros((labels.shape[0], 3))
for n in range(len(labels)):
p_labels[n, int(labels[n])] = 1
return p_labels
else:
p_labels = np.argmax(labels, axis=1)
return p_labels
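# Example of the encoding above (illustrative): with operation 0, a class label
# 2.0 becomes the one-hot row [0, 0, 1]; any other operation inverts this, i.e.
# np.argmax recovers 2 from that row.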
if __name__ == '__main__':
    # Input params: sys.argv[1] is the training-set percentage (e.g. 0.7).
    parameters = sys.argv
    print(parameters)
x1, y1, x2, y2 = read_iris(float(parameters[1]))
xp = process_iris_data(x1)
yp = process_iris_labels(y1,0)
nn = RBF(xp.shape[1], y1.shape[0], xp.shape[1], 3)
nn.train(xp, yp)
xp = process_iris_data(x2)
yp = process_iris_labels(y2,0)
    nn.test(xp, yp)
 | Assignment 3/rbf.py | 4,072 | input params percentage | 23 | en | 0.344562 |
#-------------------------------------------------------------------------------
# Name:GUI Calculator
# Purpose:Simple calculator with GUI using tkinter
#
# Author: Daniel Campos
#
# Created: Monday Dec 1st, 2014
#-------------------------------------------------------------------------------
from tkinter import *
import math
class Calculator:
'''GUI for the calculator'''
def __init__(self, master):
self.master = master
self.stringContents = ''
self.displayStr = StringVar(self.stringContents)
self.display = Label(master, textvariable=self.displayStr, width=25, anchor=E, relief=SUNKEN).grid(row=0, columnspan=4)
self.seven = Button(master, width=3, text='7', command=lambda: self.addSymbol('7')).grid(row=1, column=0)
self.eight = Button(master, width=3, text='8', command=lambda: self.addSymbol('8')).grid(row=1, column=1)
self.nine = Button(master, width=3, text='9', command=lambda: self.addSymbol('9')).grid(row=1, column=2)
self.div = Button(master, width=3, text='/', command=lambda: self.addSymbol('/')).grid(row=1, column=3)
self.master.bind('7', self.addKeyboardSymbol)
self.master.bind('8', self.addKeyboardSymbol)
self.master.bind('9', self.addKeyboardSymbol)
self.master.bind('/', self.addKeyboardSymbol)
self.four = Button(master, width=3, text='4', command=lambda: self.addSymbol('4')).grid(row=3, column=0)
self.five = Button(master, width=3, text='5', command=lambda: self.addSymbol('5')).grid(row=3, column=1)
self.six = Button(master, width=3, text='6', command=lambda: self.addSymbol('6')).grid(row=3, column=2)
self.times = Button(master, width=3, text='*', command=lambda: self.addSymbol('*')).grid(row=3, column=3)
self.master.bind('4', self.addKeyboardSymbol)
self.master.bind('5', self.addKeyboardSymbol)
self.master.bind('6', self.addKeyboardSymbol)
self.master.bind('*', self.addKeyboardSymbol)
self.one = Button(master, width=3, text='1', command=lambda: self.addSymbol('1')).grid(row=4, column=0)
self.two = Button(master, width=3, text='2', command=lambda: self.addSymbol('2')).grid(row=4, column=1)
self.three = Button(master, width=3, text='3', command=lambda: self.addSymbol('3')).grid(row=4, column=2)
self.minus = Button(master, width=3, text='-', command=lambda: self.addSymbol('-')).grid(row=4, column=3)
self.master.bind('1', self.addKeyboardSymbol)
self.master.bind('2', self.addKeyboardSymbol)
self.master.bind('3', self.addKeyboardSymbol)
self.master.bind('-', self.addKeyboardSymbol)
self.zero = Button(master, width=3, text='0', command=lambda: self.addSymbol('0')).grid(row=5, column=0)
self.point = Button(master, width=3, text='.', command=lambda: self.addSymbol('.')).grid(row=5, column=1)
self.equals = Button(master, width=3, text='=', command=lambda: self.evaluate()).grid(row=5, column=2)
self.plus = Button(master, width=3, text='+', command=lambda: self.addSymbol('+')).grid(row=5, column=3)
self.master.bind('0', self.addKeyboardSymbol)
self.master.bind('.', self.addKeyboardSymbol)
self.master.bind('<Return>', self.evaluate)
self.master.bind('+', self.addKeyboardSymbol)
self.c = Button(master, width=3, text='C', command=lambda: self.clear()).grid(row=6, column=0)
self.d = Button(master, width=3, text='D', command=lambda: self.backSpace()).grid(row=6, column=1)
self.lparren = Button(master, width=3, text='(', command=lambda: self.addSymbol('(')).grid(row=6, column=2)
self.rparren = Button(master, width=3, text=')', command=lambda: self.addSymbol(')')).grid(row=6, column=3)
self.master.bind('C', self.clear)
self.master.bind('c', self.clear)
self.master.bind('<BackSpace>', self.backSpace)
self.master.bind('(', self.addKeyboardSymbol)
self.master.bind(')', self.addKeyboardSymbol)
def addSymbol(self, char):
'''Displays the inputted char onto the display'''
self.stringContents += char
self.displayStr.set(self.stringContents)
    def addKeyboardSymbol(self,event):
        '''Displays the inputted char onto the display'''
        # repr() yields a quoted literal such as "'7'"; slicing [1:-1] strips
        # the surrounding quotes before appending the character.
        self.stringContents += str(repr(event.char))[1:-1]
        self.displayStr.set(self.stringContents)
def evaluate(self, evt=None):
        '''Evaluates the expression'''
try:
self.displayStr.set(eval(self.stringContents))
self.stringContents = str(eval(self.stringContents))
except Exception as e:
self.displayStr.set('Error')
self.stringContents = ''
def clear(self, evt=None):
'''Clears the expression'''
self.stringContents = ''
self.displayStr.set(self.stringContents)
def backSpace(self, evt=None):
'''Backspace on expression'''
self.stringContents = self.stringContents[:-1]
self.displayStr.set(self.stringContents)
def Main():
master = Tk()
calculator = Calculator(master)
calculator.master.title('Calculator')
calculator.master.resizable(False, False)
master.mainloop()
if __name__ == '__main__':
    Main()
 | ProgrammingInPython/proj08_daniel_campos.py | 5,273 | GUI for the calculator
Displays the inputted char onto the display
Displays the inputted char onto the display
Backspace on expression
Clears the expression
Evaluates the expression
-------------------------------------------------------------------------------
Name: GUI Calculator
Purpose: Simple calculator with GUI using tkinter
Author: Daniel Campos
Created: Monday Dec 1st, 2014
------------------------------------------------------------------------------- | 461 | en | 0.41298 |
"""
URLConf for Satchmo Contacts.
"""
from django.conf.urls.defaults import patterns
from signals_ahoy.signals import collect_urls
from satchmo_store import contact
from satchmo_store.shop.satchmo_settings import get_satchmo_setting
ssl = get_satchmo_setting('SSL', default_value=False)
urlpatterns = patterns('satchmo_store.contact.views',
(r'^$', 'view', {}, 'satchmo_account_info'),
(r'^update/$', 'update', {}, 'satchmo_profile_update'),
(r'^ajax_state/$', 'ajax_get_state', {'SSL': ssl}, 'satchmo_contact_ajax_state'),
)
collect_urls.send(sender=contact, patterns=urlpatterns)
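# Illustrative note (not in the original file): the named patterns above can be
# reversed elsewhere, and collect_urls lets other apps append their own entries.
# Under this era's Django (old-style conf.urls.defaults) that would look like:
#
#     from django.core.urlresolvers import reverse
#     url = reverse('satchmo_profile_update')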
| satchmo/apps/satchmo_store/contact/urls.py | 598 | URLConf for Satchmo Contacts. | 29 | en | 0.797734 |
import sys
import requests
import argparse
import json
import os
import configparser
import arrow
from colorama import init
import traceback
def get_color(color_code):
return '\x1b[%sm' % color_code
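# Worked example (illustrative): get_color(91) -> '\x1b[91m', the ANSI escape
# for bright red text; colorama's init() below makes these codes work on Windows.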
def parse_brief(brief):
sentences = None
if args.news:
sentences = json.loads(
requests.get(
"https://corpus.vocabulary.com/api/1.0/examples.json?maxResults=5&query=" + args.word).text)[
'result']['sentences']
word = WORD_COLOR + brief['wordOut'] + ": "
    if 'relation' in brief['lemma']:
        # "%s为%s的%s" reads: "<word> is the <relation> of <lemma>".
        word += TEXT_COLOR + (
            "%s为%s的%s" % (
                brief['wordOut'], brief['lemma']['lemma'],
                brief['lemma']['relation']))
print(word)
pron = ""
    if 'usPron' in brief:
        # 美音 = American pronunciation
        pron += HINT_COLOR + " 美音 " + TEXT_COLOR + "/%s/" % brief['usPron']['ps']
    if 'ukPron' in brief:
        # 英音 = British pronunciation
        pron += HINT_COLOR + " 英音 " + TEXT_COLOR + "/%s/" % brief['ukPron']['ps']
if pron:
print(pron)
if 'chnDefinitions' in brief:
print(SECTION_COLOR + "中文释义")
for chn_def in brief['chnDefinitions']:
if 'pos' in chn_def:
print(
" " + HINT_COLOR + chn_def['pos'].ljust(8) + TEXT_COLOR +
chn_def[
'meaning'])
else:
print(" " + "".ljust(8) + TEXT_COLOR + chn_def['meaning'])
if 'engDefinitions' in brief:
print(SECTION_COLOR + "英文释义")
for eng_def in brief['engDefinitions']:
if 'pos' in eng_def:
print(
" " + HINT_COLOR + eng_def['pos'].ljust(8) + TEXT_COLOR +
eng_def[
'meaning'])
else:
print(" " + "".ljust(8) + TEXT_COLOR + eng_def['meaning'])
if sentences:
print(SECTION_COLOR + "新闻例句")
for i, sentence in enumerate(sentences):
print(TEXT_COLOR,
"".ljust(4) + (str(i + 1) + ".").ljust(3) + sentence[
'sentence'])
print(SOURCE_COLOR,
"".ljust(7) + sentence['volume']['corpus']['name'] + "".ljust(
4) +
arrow.get(sentence['volume']['dateAdded']).format(
"MMM DD, YYYY"))
def parse_source(sentence_group):
if 'source' not in sentence_group:
return "牛津高阶英汉双解词典"
else:
return sourceDict[sentence_group['source']]
def parse_detail(detail):
parse_brief(detail['wordBrief'])
if 'sentenceLists' in detail:
print(SECTION_COLOR + "双语例句")
for sentenceGroup in detail['sentenceLists']:
count = 1
print("".ljust(4) + HINT_COLOR + parse_source(sentenceGroup))
for sentence in sentenceGroup['sentences']:
print(TEXT_COLOR + "".ljust(8) + ("%s." % str(count)).ljust(3) +
sentence['eng'])
print("".ljust(8) + "".ljust(3) + sentence['chn'])
if count >= default_sent:
break
count += 1
init()
# Source identifiers mapped to Chinese dictionary names: Cambridge, Longman,
# Collins, and the Kingsoft iCIBA online dictionary.
sourceDict = {"CAMBRIDGE": "剑桥高阶英汉双解词典", "LONGMAN": "朗文当代高级英语词典",
              "COLLINS": "柯林斯英汉双解大词典", "ONLINE": "金山词霸"}
parser = argparse.ArgumentParser(description='manual to this script')
parser.add_argument('word', type=str, help="The word you want to query")
parser.add_argument('--detail', '-d', action='store', default=0, const=2,
nargs='?', type=int, dest='detail',
help="Show the detailed meaning of the word")
parser.add_argument('--brief', '-b', action='store_true', default=True,
help="Show the brief meaning of the word", )
parser.add_argument('--news', '-n', action='store_true', default=False,
help="Whether show sentence examples from news")
args = parser.parse_args()
if getattr(sys, 'frozen', False):
# we are running in a bundle
bundle_dir = os.path.split(sys.executable)[0]
else:
# we are running in a normal Python environment
bundle_dir = os.path.dirname(os.path.abspath(__file__))
config_path = os.path.join(bundle_dir, "color.ini")
config = configparser.ConfigParser()
config.read(config_path)
# fallback= keeps a sensible default when color.ini omits a section or option,
# instead of raising NoSectionError/NoOptionError.
WORD_COLOR = get_color(config.getint('COLOR', 'word_color', fallback=91))
HINT_COLOR = get_color(config.getint('COLOR', 'hint_color', fallback=92))
SECTION_COLOR = get_color(config.getint('COLOR', 'section_color', fallback=93))
TEXT_COLOR = get_color(config.getint('COLOR', 'text_color', fallback=97))
SOURCE_COLOR = get_color(config.getint('COLOR', 'source_color', fallback=90))
ENDPOINT = config.get("CONFIG", "endpoint")
detail = json.loads(
requests.get(ENDPOINT + "/word/detail?json=true&word=" + args.word).text)
default_sent = args.detail
try:
if args.detail:
parse_detail(detail)
else:
parse_brief(detail['wordBrief'])
except Exception as e:
traceback.print_exc()
print("该单词不存在")
| win_python/idict.py | 5,642 | we are running in a bundle we are running in a normal Python environment | 72 | en | 0.953497 |
# Python
import unittest
from copy import deepcopy
from unittest.mock import Mock
# ATS
from ats.topology import Device
# Genie
from genie.libs.ops.igmp.iosxe.igmp import Igmp
from genie.libs.ops.igmp.iosxe.tests.igmp_output import IgmpOutput
# Parser
from genie.libs.parser.iosxe.show_igmp import ShowIpIgmpInterface, \
ShowIpIgmpGroupsDetail, \
ShowIpIgmpSsmMapping
# iosxe show_vrf
from genie.libs.parser.iosxe.show_vrf import ShowVrfDetail
outputs = {}
outputs['show ip igmp interface'] = IgmpOutput.ShowIpIgmpInterface_default
outputs['show ip igmp vrf VRF1 interface'] = IgmpOutput.ShowIpIgmpInterface_VRF1
outputs['show ip igmp groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_default
outputs['show ip igmp vrf VRF1 groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_VRF1
outputs['show ip igmp ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_default_1
outputs['show ip igmp ssm-mapping 239.2.2.2'] = IgmpOutput.ShowIpIgmpSsmMapping_default_2
outputs['show ip igmp ssm-mapping 239.3.3.3'] = IgmpOutput.ShowIpIgmpSsmMapping_default_3
outputs['show ip igmp ssm-mapping 239.4.4.4'] = IgmpOutput.ShowIpIgmpSsmMapping_default_4
outputs['show ip igmp ssm-mapping 239.5.5.5'] = IgmpOutput.ShowIpIgmpSsmMapping_default_5
outputs['show ip igmp ssm-mapping 239.6.6.6'] = IgmpOutput.ShowIpIgmpSsmMapping_default_6
outputs['show ip igmp ssm-mapping 239.7.7.7'] = IgmpOutput.ShowIpIgmpSsmMapping_default_7
outputs['show ip igmp ssm-mapping 239.8.8.8'] = IgmpOutput.ShowIpIgmpSsmMapping_default_8
outputs['show ip igmp ssm-mapping 239.9.9.9'] = IgmpOutput.ShowIpIgmpSsmMapping_default_9
outputs['show ip igmp ssm-mapping 224.0.1.40'] = IgmpOutput.ShowIpIgmpSsmMapping_default_10
outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_1
outputs['show ip igmp vrf VRF1 ssm-mapping 239.2.2.2'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_2
outputs['show ip igmp vrf VRF1 ssm-mapping 239.3.3.3'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_3
outputs['show ip igmp vrf VRF1 ssm-mapping 239.4.4.4'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_4
outputs['show ip igmp vrf VRF1 ssm-mapping 239.5.5.5'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_5
outputs['show ip igmp vrf VRF1 ssm-mapping 239.6.6.6'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_6
outputs['show ip igmp vrf VRF1 ssm-mapping 239.7.7.7'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_7
outputs['show ip igmp vrf VRF1 ssm-mapping 239.8.8.8'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_8
outputs['show ip igmp vrf VRF1 ssm-mapping 224.0.1.40'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_10
def mapper(key):
return outputs[key]
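
# ``mapper`` is installed below as the Mock side_effect for device.execute, so a
# call like device.execute('show ip igmp interface') returns the matching canned
# output from the ``outputs`` dict above.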
class test_igmp(unittest.TestCase):
def setUp(self):
self.device = Device(name='aDevice')
self.device.os = 'iosxe'
        self.device.mapping = {}
        self.device.mapping['cli'] = 'cli'
# Give the device as a connection type
# This is done in order to call the parser on the output provided
self.device.connectionmgr.connections['cli'] = self.device
def test_complete_output(self):
self.maxDiff = None
igmp = Igmp(device=self.device)
# Get outputs
igmp.maker.outputs[ShowVrfDetail] = \
{'': IgmpOutput.ShowVrfDetail}
# Return outputs above as inputs to parser when called
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
igmp.learn()
# Verify Ops was created successfully
self.assertEqual(igmp.info, IgmpOutput.Igmp_info)
def test_empty_output(self):
self.maxDiff = None
igmp = Igmp(device=self.device)
# Get outputs
igmp.maker.outputs[ShowVrfDetail] = \
{'': {}}
# Return outputs above as inputs to parser when called
self.device.execute = Mock()
outputs['show ip igmp interface'] = ''
outputs['show ip igmp vrf VRF1 interface'] = ''
outputs['show ip igmp groups detail'] = ''
outputs['show ip igmp vrf VRF1 groups detail'] = ''
outputs['show ip igmp ssm-mapping 239.1.1.1'] = ''
outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = ''
self.device.execute.side_effect = mapper
# Learn the feature
igmp.learn()
# revert the outputs
outputs['show ip igmp interface'] = IgmpOutput.ShowIpIgmpInterface_default
outputs['show ip igmp vrf VRF1 interface'] = IgmpOutput.ShowIpIgmpInterface_VRF1
outputs['show ip igmp groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_default
outputs['show ip igmp vrf VRF1 groups detail'] = IgmpOutput.ShowIpIgmpGroupsDetail_VRF1
outputs['show ip igmp ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_default_1
outputs['show ip igmp vrf VRF1 ssm-mapping 239.1.1.1'] = IgmpOutput.ShowIpIgmpSsmMapping_VRF1_1
        # Verify that nothing was learned from the empty outputs
with self.assertRaises(AttributeError):
igmp.info['vrfs']
def test_selective_attribute(self):
self.maxDiff = None
igmp = Igmp(device=self.device)
# Get outputs
igmp.maker.outputs[ShowVrfDetail] = \
{'': IgmpOutput.ShowVrfDetail}
# Return outputs above as inputs to parser when called
self.device.execute = Mock()
self.device.execute.side_effect = mapper
# Learn the feature
igmp.learn()
# Check specific attribute values
# info - default vrf
self.assertEqual(igmp.info['vrfs']['default']['max_groups'], 20)
# info - vrf VRF1
self.assertEqual(igmp.info['vrfs']['VRF1']['interfaces']\
['GigabitEthernet2']['querier'], '20.1.2.1')
def test_incomplete_output(self):
self.maxDiff = None
igmp = Igmp(device=self.device)
# Get outputs
igmp.maker.outputs[ShowVrfDetail] = \
{'': IgmpOutput.ShowVrfDetail}
# Return outputs above as inputs to parser when called
self.device.execute = Mock()
# overwrite output with empty output
outputs['show ip igmp vrf VRF1 groups detail'] = '''\
show ip igmp vrf VRF1 groups detail
'''
self.device.execute.side_effect = mapper
# Learn the feature
igmp.learn()
# Delete missing specific attribute values
expect_dict = deepcopy(IgmpOutput.Igmp_info)
        del expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['join_group']
        del expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['static_group']
        del expect_dict['vrfs']['VRF1']['interfaces']['GigabitEthernet2']['group']
        del expect_dict['vrfs']['VRF1']['ssm_map']
# Verify Ops was created successfully
self.assertEqual(igmp.info, expect_dict)
if __name__ == '__main__':
unittest.main()
| pkgs/ops-pkg/src/genie/libs/ops/igmp/iosxe/tests/test_igmp.py | 6,950 | en | 0.841744 |
#!/usr/bin/python
#
# linearize-hashes.py: List blocks in a linear, no-fork version of the chain.
#
# Copyright (c) 2013 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import json
import struct
import re
import base64
import httplib
import sys
settings = {}
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblock(self, hash, verbose=True):
return self.rpc('getblock', [hash, verbose])
def getblockhash(self, index):
return self.rpc('getblockhash', [index])
def get_block_hashes(settings):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpassword'])
for height in xrange(settings['min_height'], settings['max_height']+1):
hash = rpc.getblockhash(height)
print(hash)
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: linearize-hashes.py CONFIG-FILE"
sys.exit(1)
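    # Example CONFIG-FILE contents (key=value lines; '#' starts a comment;
    # the credentials are placeholders):
    #
    #   host=127.0.0.1
    #   port=4242
    #   rpcuser=someuser
    #   rpcpassword=somepassword
    #   min_height=0
    #   max_height=319000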
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 4242
if 'min_height' not in settings:
settings['min_height'] = 0
if 'max_height' not in settings:
settings['max_height'] = 319000
if 'rpcuser' not in settings or 'rpcpassword' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['min_height'] = int(settings['min_height'])
settings['max_height'] = int(settings['max_height'])
get_block_hashes(settings)
| contrib/linearize/linearize-hashes.py | 2,761 | en | 0.673579 |
#!/usr/bin/env python
"""
Functions for estimating quantities from nested sampling runs.
Each estimator function should have arguments:
.. code-block:: python

    def estimator_func(ns_run, logw=None, simulate=False):
        ...
Any additional arguments required for the function should be keyword
arguments.
The ``logw`` argument allows the log weights for the points in the run to be
provided - this is useful if many estimators are being calculated from
the same run as it allows ``logw`` to only be calculated once. If it is not
specified, ``logw`` is calculated from the run when required.
The simulate argument is passed to ``ns_run_utils.get_logw``, and is only used
if the function needs to calculate ``logw``.
"""
import functools
import numpy as np
import scipy
import nestcheck.ns_run_utils
# Estimators
# ----------
def count_samples(ns_run, **kwargs):
r"""Number of samples in run.
Unlike most estimators this does not require log weights, but for
convenience will not throw an error if they are specified.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
Returns
-------
int
"""
kwargs.pop('logw', None)
kwargs.pop('simulate', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
return ns_run['logl'].shape[0]
def logz(ns_run, logw=None, simulate=False):
r"""Natural log of Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return scipy.special.logsumexp(logw)
def evidence(ns_run, logw=None, simulate=False):
r"""Bayesian evidence :math:`\log \mathcal{Z}`.
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
return np.exp(scipy.special.logsumexp(logw))
def param_mean(ns_run, logw=None, simulate=False, param_ind=0,
handle_indexerror=False):
"""Mean of a single parameter (single component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the mean should be calculated. This
corresponds to the column of ns_run['theta'] which contains the
parameter.
handle_indexerror: bool, optional
        Make the function return nan rather than raising an
IndexError if param_ind >= ndim. This is useful when applying
the same list of estimators to data sets of different dimensions.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
try:
return (np.sum(w_relative * ns_run['theta'][:, param_ind])
/ np.sum(w_relative))
except IndexError:
if handle_indexerror:
return np.nan
else:
raise
def param_cred(ns_run, logw=None, simulate=False, probability=0.5,
param_ind=0):
"""One-tailed credible interval on the value of a single parameter
(component of theta).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
param_ind: int, optional
Index of parameter for which the credible interval should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
return weighted_quantile(probability, ns_run['theta'][:, param_ind],
w_relative)
def param_squared_mean(ns_run, logw=None, simulate=False, param_ind=0):
"""Mean of the square of single parameter (second moment of its
posterior distribution).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
param_ind: int, optional
Index of parameter for which the second moment should be
calculated. This corresponds to the column of ns_run['theta']
which contains the parameter.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
w_relative /= np.sum(w_relative)
return np.sum(w_relative * (ns_run['theta'][:, param_ind] ** 2))
def r_mean(ns_run, logw=None, simulate=False):
"""Mean of the radial coordinate (magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max())
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return np.sum(w_relative * r) / np.sum(w_relative)
def r_cred(ns_run, logw=None, simulate=False, probability=0.5):
"""One-tailed credible interval on the value of the radial coordinate
(magnitude of theta vector).
Parameters
----------
ns_run: dict
Nested sampling run dict (see the data_processing module
docstring for more details).
logw: None or 1d numpy array, optional
Log weights of samples.
simulate: bool, optional
Passed to ns_run_utils.get_logw if logw needs to be
calculated.
probability: float, optional
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile. Passed to weighted_quantile.
Returns
-------
float
"""
if logw is None:
logw = nestcheck.ns_run_utils.get_logw(ns_run, simulate=simulate)
w_relative = np.exp(logw - logw.max()) # protect against overflow
r = np.sqrt(np.sum(ns_run['theta'] ** 2, axis=1))
return weighted_quantile(probability, r, w_relative)
# Helper functions
# ----------------
def get_latex_name(func_in, **kwargs):
"""
Produce a latex formatted name for each function for use in labelling
results.
Parameters
----------
func_in: function
kwargs: dict, optional
Kwargs for function.
Returns
-------
latex_name: str
Latex formatted name for the function.
"""
if isinstance(func_in, functools.partial):
func = func_in.func
assert not set(func_in.keywords) & set(kwargs), (
'kwargs={0} and func_in.keywords={1} contain repeated keys'
.format(kwargs, func_in.keywords))
kwargs.update(func_in.keywords)
else:
func = func_in
param_ind = kwargs.pop('param_ind', 0)
probability = kwargs.pop('probability', 0.5)
kwargs.pop('handle_indexerror', None)
if kwargs:
raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
ind_str = r'{\hat{' + str(param_ind + 1) + '}}'
latex_name_dict = {
'count_samples': r'samples',
'logz': r'$\mathrm{log} \mathcal{Z}$',
'evidence': r'$\mathcal{Z}$',
'r_mean': r'$\overline{|\theta|}$',
'param_mean': r'$\overline{\theta_' + ind_str + '}$',
'param_squared_mean': r'$\overline{\theta^2_' + ind_str + '}$'}
# Add credible interval names
if probability == 0.5:
cred_str = r'$\mathrm{median}('
else:
# format percent without trailing zeros
percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
cred_str = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
latex_name_dict['param_cred'] = cred_str + r'\theta_' + ind_str + ')$'
latex_name_dict['r_cred'] = cred_str + r'|\theta|)$'
try:
return latex_name_dict[func.__name__]
except KeyError as err:
err.args = err.args + ('get_latex_name not yet set up for ' +
func.__name__,)
raise
def weighted_quantile(probability, values, weights):
"""
Get quantile estimate for input probability given weighted samples using
linear interpolation.
Parameters
----------
probability: float
Quantile to estimate - must be in open interval (0, 1).
For example, use 0.5 for the median and 0.84 for the upper
84% quantile.
values: 1d numpy array
Sample values.
weights: 1d numpy array
Corresponding sample weights (same shape as values).
Returns
-------
quantile: float
"""
assert 1 > probability > 0, (
'credible interval prob= ' + str(probability) + ' not in (0, 1)')
assert values.shape == weights.shape
assert values.ndim == 1
assert weights.ndim == 1
sorted_inds = np.argsort(values)
quantiles = np.cumsum(weights[sorted_inds]) - (0.5 * weights[sorted_inds])
quantiles /= np.sum(weights)
return np.interp(probability, quantiles, values[sorted_inds])
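
# Worked sketch: for values [1, 2, 3] with weights [1, 1, 2] the interpolation
# points are [0.125, 0.375, 0.75], so the weighted median falls between 2 and 3:
#
#     weighted_quantile(0.5, np.array([1., 2., 3.]), np.array([1., 1., 2.]))
#     # -> approximately 2.33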
| nestcheck/estimators.py | 11,030 | en | 0.554408 |
"""Database schema functions and information for Toron node files.
Toron nodes are stored as individual files. The file format is
managed, internally, as a relational database. The schema for this
database is shown below as a simplified ERD (entity relationship
diagram). SQL foreign key relationships are represented with hyphen
and pipe characters ('-' and '|'). Other, more complex relationships
are represented with bullet points ('•') and these are enforced at
the application layer:
+------------------+
+---------------------+ | relation |
| edge | +------------------+
+---------------------+ | relation_id | •••• <Other Node>
| edge_id |------->| edge_id | •
| name | ••••••| other_element_id |<•••••
| type_info | • •••| element_id |<-+ +--------------+
| description | • • | proportion | | | quantity |
| user_properties | • • | mapping_level | | +--------------+
| other_uuid | • • +------------------+ | | quantity_id |
| other_filename_hint | • • | +->| _location_id |
| other_element_hash |<•• • | | | attributes |
| is_complete |<••••• +-----------------+ | | value |
+---------------------+ | | +--------------+
| |
+------------+ | +--------------+ | +---------------+
| element | | | location | | | structure |
+------------+ | +--------------+ | +---------------+
+------| element_id |--+ | _location_id |--+ | _structure_id |
| | label_a |••••>| label_a |<••••| label_a |
| | label_b |••••>| label_b |<••••| label_b |
| | label_c |••••>| label_c |<••••| label_c |
| | ... |••••>| ... |<••••| ... |
| +------------+ +--------------+ +---------------+
|
| +-------------------+ +----------+
| | element_weight | +-------------+ | property |
| +-------------------+ | weight | +----------+
| | element_weight_id | +-------------+ | key |
| | weight_id |<----| weight_id | | value |
+->| element_id |••• | name | +----------+
| value | • | type_info |
+-------------------+ • | description |
••>| is_complete |
+-------------+
"""
import itertools
import os
import re
import sqlite3
from contextlib import contextmanager
from json import loads as _loads
from urllib.parse import quote as urllib_parse_quote
from ._exceptions import ToronError
sqlite3.register_converter('TEXT_JSON', _loads)
sqlite3.register_converter('TEXT_ATTRIBUTES', _loads)
def _is_sqlite_json1_enabled():
"""Check if SQLite implementation includes JSON1 extension."""
# The inclusion of JSON functions is optional when compiling SQLite.
# In versions 3.38.0 and newer, JSON functions are included by
# default but can be disabled (opt-out policy). For older versions
# of SQLite, JSON functions are available on an opt-in basis. It is
    # necessary to test for their presence rather than referencing the
# SQLite version number.
#
# For more information, see:
# https://www.sqlite.org/json1.html#compiling_in_json_support
con = sqlite3.connect(':memory:')
try:
con.execute("SELECT json_valid('123')")
except sqlite3.OperationalError:
return False
finally:
con.close()
return True
SQLITE_JSON1_ENABLED = _is_sqlite_json1_enabled()
_schema_script = """
PRAGMA foreign_keys = ON;
CREATE TABLE edge(
edge_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
user_properties TEXT_USERPROPERTIES,
other_uuid TEXT CHECK (other_uuid LIKE '________-____-____-____-____________') NOT NULL,
other_filename_hint TEXT NOT NULL,
other_element_hash TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name, other_uuid)
);
CREATE TABLE relation(
relation_id INTEGER PRIMARY KEY,
edge_id INTEGER,
other_element_id INTEGER NOT NULL,
element_id INTEGER,
proportion REAL CHECK (0.0 < proportion AND proportion <= 1.0) NOT NULL,
mapping_level INTEGER NOT NULL,
FOREIGN KEY(edge_id) REFERENCES edge(edge_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (edge_id, other_element_id, element_id)
);
CREATE TABLE element(
element_id INTEGER PRIMARY KEY AUTOINCREMENT /* <- Must not reuse id values. */
/* label columns added programmatically */
);
CREATE TABLE location(
_location_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE structure(
_structure_id INTEGER PRIMARY KEY
/* label columns added programmatically */
);
CREATE TABLE quantity(
quantity_id INTEGER PRIMARY KEY,
_location_id INTEGER,
attributes TEXT_ATTRIBUTES NOT NULL,
value NUMERIC NOT NULL,
FOREIGN KEY(_location_id) REFERENCES location(_location_id)
);
CREATE TABLE weight(
weight_id INTEGER PRIMARY KEY,
name TEXT NOT NULL,
type_info TEXT_ATTRIBUTES NOT NULL,
description TEXT,
is_complete INTEGER CHECK (is_complete IN (0, 1)),
UNIQUE (name)
);
CREATE TABLE element_weight(
element_weight_id INTEGER PRIMARY KEY,
weight_id INTEGER,
element_id INTEGER,
value REAL NOT NULL,
FOREIGN KEY(weight_id) REFERENCES weight(weight_id) ON DELETE CASCADE,
FOREIGN KEY(element_id) REFERENCES element(element_id) DEFERRABLE INITIALLY DEFERRED,
UNIQUE (element_id, weight_id)
);
CREATE TABLE property(
key TEXT PRIMARY KEY NOT NULL,
value TEXT_JSON
);
INSERT INTO property VALUES ('schema_version', '1');
"""
def _is_wellformed_json(x):
"""Return 1 if *x* is well-formed JSON or return 0 if *x* is not
well-formed. This function should be registered with SQLite (via
the create_function() method) when the JSON1 extension is not
available.
This function mimics the JSON1 json_valid() function, see:
https://www.sqlite.org/json1.html#jvalid
"""
try:
_loads(x)
except (ValueError, TypeError):
return 0
return 1
def _make_trigger_for_json(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_JSON type columns.
The trigger will pass without error if the JSON is wellformed.
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND json_valid(NEW.{column}) = 0
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_json(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON');
END;
'''
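# For example, _make_trigger_for_json('INSERT', 'property', 'value') yields,
# modulo whitespace, the following SQL when the JSON1 extension is available:
#
#     CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_insert_property_value
#     BEFORE INSERT ON main.property FOR EACH ROW
#     WHEN NEW.value IS NOT NULL AND json_valid(NEW.value) = 0
#     BEGIN
#         SELECT RAISE(ABORT, 'property.value must be wellformed JSON');
#     END;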
def _is_wellformed_user_properties(x):
"""Check if *x* is a wellformed TEXT_USERPROPERTIES value.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object. Returns 1 if *x* is valid or 0 if
it's not.
This function should be registered as an application-defined
SQL function and used in queries when SQLite's JSON1 extension
is not enabled.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if isinstance(obj, dict):
return 1
return 0
def _make_trigger_for_user_properties(insert_or_update, table, column):
"""Return a CREATE TRIGGER statement to check TEXT_USERPROPERTIES
values. This trigger is used to check values before they are saved
in the database.
A wellformed TEXT_USERPROPERTIES value is a string containing
a JSON formatted object.
The trigger will pass without error if the value is wellformed.
"""
if SQLITE_JSON1_ENABLED:
user_properties_check = f"(json_valid(NEW.{column}) = 0 OR json_type(NEW.{column}) != 'object')"
else:
user_properties_check = f'is_wellformed_user_properties(NEW.{column}) = 0'
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN
NEW.{column} IS NOT NULL
AND {user_properties_check}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be wellformed JSON object');
END;
'''
def _is_wellformed_attributes(x):
"""Returns 1 if *x* is a wellformed TEXT_ATTRIBUTES column
value else returns 0. TEXT_ATTRIBUTES should be flat, JSON
object strings. This function should be registered with SQLite
(via the create_function() method) when the JSON1 extension
is not available.
"""
try:
obj = _loads(x)
except (ValueError, TypeError):
return 0
if not isinstance(obj, dict):
return 0
for value in obj.values():
if not isinstance(value, str):
return 0
return 1
def _make_trigger_for_attributes(insert_or_update, table, column):
"""Return a SQL statement for creating a temporary trigger. The
trigger is used to validate the contents of TEXT_ATTRIBUTES
type columns.
The trigger will pass without error if the JSON is a wellformed
"object" containing "text" values.
The trigger will raise an error if the value is:
* not wellformed JSON
* not an "object" type
* an "object" type that contains one or more "integer", "real",
"true", "false", "null", "object" or "array" types
"""
if insert_or_update.upper() not in {'INSERT', 'UPDATE'}:
msg = f"expected 'INSERT' or 'UPDATE', got {insert_or_update!r}"
raise ValueError(msg)
if SQLITE_JSON1_ENABLED:
when_clause = f"""
NEW.{column} IS NOT NULL
AND (json_valid(NEW.{column}) = 0
OR json_type(NEW.{column}) != 'object'
OR (SELECT COUNT(*)
FROM json_each(NEW.{column})
WHERE json_each.type != 'text') != 0)
""".rstrip()
else:
when_clause = f"""
NEW.{column} IS NOT NULL
AND is_wellformed_attributes(NEW.{column}) = 0
""".rstrip()
return f'''
CREATE TEMPORARY TRIGGER IF NOT EXISTS trigger_check_{insert_or_update.lower()}_{table}_{column}
BEFORE {insert_or_update.upper()} ON main.{table} FOR EACH ROW
WHEN{when_clause}
BEGIN
SELECT RAISE(ABORT, '{table}.{column} must be a JSON object with text values');
END;
'''
def _add_functions_and_triggers(connection):
"""Create triggers and application-defined functions *connection*.
Note: This function must not be executed on an empty connection.
The table schema must exist before triggers can be created.
"""
if not SQLITE_JSON1_ENABLED:
try:
connection.create_function(
'is_wellformed_json', 1, _is_wellformed_json, deterministic=True)
connection.create_function(
'is_wellformed_user_properties', 1, _is_wellformed_user_properties, deterministic=True)
connection.create_function(
'is_wellformed_attributes', 1, _is_wellformed_attributes, deterministic=True)
except TypeError:
connection.create_function('is_wellformed_json', 1, _is_wellformed_json)
connection.create_function('is_wellformed_user_properties', 1, _is_wellformed_user_properties)
connection.create_function('is_wellformed_attributes', 1, _is_wellformed_attributes)
connection.execute(_make_trigger_for_json('INSERT', 'property', 'value'))
connection.execute(_make_trigger_for_json('UPDATE', 'property', 'value'))
connection.execute(_make_trigger_for_user_properties('INSERT', 'edge', 'user_properties'))
connection.execute(_make_trigger_for_user_properties('UPDATE', 'edge', 'user_properties'))
jsonflatobj_columns = [
('edge', 'type_info'),
('quantity', 'attributes'),
('weight', 'type_info'),
]
for table, column in jsonflatobj_columns:
connection.execute(_make_trigger_for_attributes('INSERT', table, column))
connection.execute(_make_trigger_for_attributes('UPDATE', table, column))
def _path_to_sqlite_uri(path):
"""Convert a path into a SQLite compatible URI.
Unlike pathlib's URI handling, SQLite accepts relative URI paths.
For details, see:
https://www.sqlite.org/uri.html#the_uri_path
"""
if os.name == 'nt': # Windows
if re.match(r'^[a-zA-Z]:', path):
path = os.path.abspath(path) # If drive-letter, must be absolute.
drive_prefix = f'/{path[:2]}' # Must not url-quote colon after drive-letter.
path = path[2:]
else:
drive_prefix = ''
path = path.replace('\\', '/')
path = urllib_parse_quote(path)
path = f'{drive_prefix}{path}'
else:
path = urllib_parse_quote(path)
path = re.sub('/+', '/', path)
return f'file:{path}'
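
# Illustrative conversions (the paths are hypothetical):
#
#     _path_to_sqlite_uri('data/mynode.toron')      # -> 'file:data/mynode.toron'
#     _path_to_sqlite_uri(r'C:\data\mynode.toron')  # -> 'file:/C:/data/mynode.toron' (Windows)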
def connect(path, mode='rwc'):
"""Returns a sqlite3 connection to a Toron node file."""
uri_path = _path_to_sqlite_uri(path)
uri_path = f'{uri_path}?mode={mode}'
    is_new_file = not os.path.exists(path)
    try:
        con = sqlite3.connect(
            database=uri_path,
            detect_types=sqlite3.PARSE_DECLTYPES,
            isolation_level=None,
            uri=True,
        )
        if is_new_file:
            con.executescript(_schema_script)  # Create database schema.
except sqlite3.OperationalError as err:
msg = str(err).replace('database file', f'node file {path!r}')
raise ToronError(msg)
try:
_add_functions_and_triggers(con)
except (sqlite3.OperationalError, sqlite3.DatabaseError):
# Raises OperationalError when *path* is a database with an unknown
# schema and DatabaseError when *path* is a file but not a database.
con.close()
raise ToronError(f'Path is not a Toron node: {path!r}')
cur = con.execute("SELECT value FROM property WHERE key='schema_version'")
schema_version, *_ = cur.fetchone() or (None,)
cur.close()
if schema_version != 1: # When schema version is unsupported.
msg = f'Unsupported Toron node format: schema version {schema_version!r}'
raise ToronError(msg)
return con
_SAVEPOINT_NAME_GENERATOR = (f'svpnt{n}' for n in itertools.count())
class savepoint(object):
"""Context manager to wrap a block of code inside a SAVEPOINT.
    If the block exits without errors, the SAVEPOINT is released
and the changes are committed. If an error occurs, all of the
changes are rolled back:
cur = con.cursor()
with savepoint(cur):
cur.execute(...)
"""
def __init__(self, cursor):
if cursor.connection.isolation_level is not None:
isolation_level = cursor.connection.isolation_level
msg = (
f'isolation_level must be None, got: {isolation_level!r}\n'
'\n'
'For explicit transaction handling, the connection must '
'be operating in "autocommit" mode. Turn on autocommit '
'mode by setting "con.isolation_level = None".'
)
raise sqlite3.OperationalError(msg)
self.name = next(_SAVEPOINT_NAME_GENERATOR)
self.cursor = cursor
def __enter__(self):
self.cursor.execute(f'SAVEPOINT {self.name}')
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is None:
self.cursor.execute(f'RELEASE {self.name}')
else:
self.cursor.execute(f'ROLLBACK TO {self.name}')
@contextmanager
def transaction(path_or_connection, mode='rwc'):
"""A context manager that yields a cursor that runs in an
isolated transaction. If the context manager exits without
errors, the transaction is committed. If an exception is
raised, all changes are rolled-back.
"""
if isinstance(path_or_connection, sqlite3.Connection):
connection = path_or_connection
connection_close = lambda: None # Don't close already-existing cursor.
else:
connection = connect(path_or_connection, mode=mode)
connection_close = connection.close
cursor = connection.cursor()
try:
with savepoint(cursor):
yield cursor
finally:
cursor.close()
connection_close()
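
# Usage sketch ('mynode.toron' is a hypothetical node file): changes made through
# the cursor are committed when the block exits cleanly, rolled back otherwise.
#
#     with transaction('mynode.toron', mode='rw') as cur:
#         cur.execute("INSERT INTO property VALUES ('answer', '42')")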
| toron/_node_schema.py | 18,111 | en | 0.679119 |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 09:10:56 2018
@author: gtucker
"""
import numpy as np
import datetime
from grainhill import GrainFacetSimulator
from grainhill import SlopeMeasurer
import landlab
from landlab.io.native_landlab import save_grid
import os
def create_folder(directory):
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
    except OSError:
        print('Error creating directory ' + directory)
params = {
'grid_size' : (111, 81),
'report_interval' : 5.0,
'output_interval' : 1.0e99,
'disturbance_rate' : 1.0e-4,
'weathering_rate' : 0.0,
'dissolution_rate': 0.0,
'friction_coef' : 1.0,
'fault_x' : -0.01,
'cell_width' : 0.5,
'grav_accel' : 9.8,
}
# Open a file to record output:
d = datetime.datetime.today()
today_str = str(d.year) + str(d.month).zfill(2) + str(d.day).zfill(2)
results_file = open('results_v_vs_w' + today_str + '.csv', 'w')
results_file.write('Landlab version,' + landlab.__version__ + ',\n')
# Print header in file
results_file.write('Uplift interval (yr),Weathering rate '
+ 'parameter (1/yr),Gradient (m/m),'
+ 'Slope angle (deg)\n')
# Sweep through ranges of uplift interval and weathering rate parameters
for uplift_interval_exp in np.arange(2, 5.2, 0.2):
for weath_exp in np.arange(-5, -1.8, 0.2):
weath_rate = 10.0**weath_exp
uplift_interval = 10.0**uplift_interval_exp
params['uplift_interval'] = uplift_interval
params['weathering_rate'] = weath_rate
# Set run duration long enough for uplift of 150 rows
params['run_duration'] = 100 * uplift_interval
params['plot_interval'] = 10 * uplift_interval
print('Uplift interval: ' + str(params['uplift_interval']) + ' 1/y')
print('Weathering rate: ' + str(params['weathering_rate']) + ' 1/y')
opname = ('tau' + str(int(round(10 * uplift_interval_exp))) + 'w' + str(int(round(10 * weath_exp))))
create_folder(opname)
params['plot_file_name'] = opname + '/' + opname
gfs = GrainFacetSimulator(**params)
gfs.run()
sm = SlopeMeasurer(gfs)
sm.pick_rock_surface()
(m, b) = sm.fit_straight_line_to_surface()
angle = np.degrees(np.arctan(m))
results_file.write(str(uplift_interval) + ',' + str(weath_rate) + ','
+ str(m) + ',' + str(angle) + '\n')
results_file.flush()
save_grid(gfs.grid, opname + '/' + opname + '.grid', clobber=True)
results_file.close()
| ModelRunScripts/SensitivityAnalysisDandV/run_v_w.py | 2,601 | en | 0.718933 |
from Jumpscale import j
import re
# ACTIONS
## R = Regex Replace
## RI = Regex Replace case insensitive
DO = """
RI| j.application.JSBase$ | j.baseclasses.object
RI| j.data.cache. | j.core.cache.
RI| j.data.text. | j.core.text.
RI| from jumpscale import j | from Jumpscale import j
RI| j.application.jsbase_get_class() | j.baseclasses.object
RI| .base_class_config | .JSBaseClassConfig
RI| .base_class_configs | .JSBaseClassConfigs
RI| j.logging. | j.logger.
RI | Jumpscale.logging. | Jumpscale.core.logging.
RI| self._location | self.__jslocation__
RI| j.data.serializer. | j.data.serializers.
RI| self.prefab.core.file_write | j.sal.fs.writeFile
RI| self.prefab.core.run | j.sal.process.execute
RI| self.prefab.core.createDir | j.sal.fs.createDir
RI| self.prefab.core.file_download | self.prefab.core.file_download
RI| self.prefab.system.package.install | j.builders.system.package.ensure
"""
ERRORS = """
configmanager._base_class_config
"""
JSBASE = j.baseclasses.object
class FixerReplacer(j.baseclasses.object):
def __init__(self):
JSBASE.__init__(self)
self.rules = []
for rule in DO.split("\n"):
if rule.strip() == "":
continue
if rule.strip().startswith("#"):
continue
cmd, from_, to_ = rule.split("|")
if cmd.lower().strip() == "ri":
self.rules.append(ReplaceIgnoreCase(from_, to_))
elif cmd.lower().strip() == "r":
self.rules.append(ReplaceNormal(from_, to_))
else:
raise j.exceptions.Base("unknown rule:%s" % rule)
def line_process(self, line):
changed = False
# if "\t" in line:
# line = line.replace("\t"," ")
# changed = True
for rule in self.rules:
line1 = rule.replace(line)
if line1 != line:
changed = True
line = line1
return changed, line
    def file_process(self, path, write=False, root=""):
        out = ""
        nr = 0
        file_changed = False
        for line in j.sal.fs.readFile(path).split("\n"):
            nr += 1
            changed, line2 = self.line_process(line)
            if changed:
                file_changed = True
                path2 = j.sal.fs.pathRemoveDirPart(path, root)
                if path2 not in self.changes:
                    self.changes[path2] = []
                # record every changed line, not only the last one
                self.changes[path2].append({"line": nr, "from": line, "to..": line2})
                out += "%s\n" % line2
            else:
                out += "%s\n" % line
        # only rewrite this file if it actually changed (self.changes spans all files)
        if file_changed and write:
            j.sal.fs.writeFile(path, out)
def dir_process(self, path, extensions=["py", "txt", "md"], recursive=True, write=False):
path = j.sal.fs.pathNormalize(path)
self.changes = {}
for ext in extensions:
for p in j.sal.fs.listFilesInDir(path, recursive=recursive, filter="*.%s" % ext, followSymlinks=False):
self._log_debug("process file:%s" % p)
self.file_process(root=path, path=p, write=write)
print(j.data.serializers.yaml.dumps(self.changes))
class ReplaceIgnoreCase:
def __init__(self, from_, to_, prepend="", append=""):
self.from_ = from_.strip()
self.to_ = to_.strip()
self.regex = re.compile(re.escape(prepend + self.from_ + append), re.IGNORECASE | re.VERBOSE)
def replace(self, txt):
m = self.regex.search(txt)
if m:
found = m.string[m.start() : m.end()]
txt2 = txt.replace(found, self.to_)
return txt2
else:
return txt
class ReplaceNormal(ReplaceIgnoreCase):
    def __init__(self, from_, to_, prepend="", append=""):
        ReplaceIgnoreCase.__init__(self, from_, to_, prepend=prepend, append=append)
        # case-sensitive variant: recompile the pattern without re.IGNORECASE
        self.regex = re.compile(re.escape(prepend + self.from_ + append), re.VERBOSE)
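
# Usage sketch (requires a Jumpscale sandbox; '/tmp/mycode' is a placeholder):
# with write=False the replacer only reports planned changes as YAML,
# pass write=True to apply them in place.
#
#     fixer = FixerReplacer()
#     fixer.dir_process('/tmp/mycode', write=False)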
| sandbox/lib/jumpscale/JumpscaleLibs/tools/fixer/FixerReplace.py | 3,927 | en | 0.676282 |
"""Client for Triton Inference Server using REST API.
References:
-
https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest
-
https://github.com/triton-inference-server/client/tree/master/src/python/examples
-
https://github.com/triton-inference-server/client/blob/master/src/python/library/tritonclient/http/__init__.py
"""
import ast
import json
import time
import threading
import distribution
import clients.base_rest_client
import clients.utils
import tensorflow.compat.v1 as tf
import requests as r
import numpy as np
import tritonclient.http as triton_httpclient
import tritonclient.utils as triton_utils
from tensorflow.python.framework import dtypes
class TritonRest(clients.base_rest_client.BaseRestClient):
def generate_rest_request_from_dictionary(self, row_dict):
triton_request_inputs = []
for key, value in row_dict.items():
t = clients.utils.get_type(value, self._default_float_type,
self._default_int_type)
if t == np.object_:
value = clients.utils.map_multi_dimensional_list(
value, lambda s: s.encode("utf-8"))
numpy_value = np.array(value, dtype=t)
triton_request_input = triton_httpclient.InferInput(
key, list(numpy_value.shape), triton_utils.np_to_triton_dtype(t))
triton_request_input.set_data_from_numpy(
numpy_value, binary_data=True) # binary_data=True by default
triton_request_inputs.append(triton_request_input)
# https://github.com/triton-inference-server/client/blob/530bcac5f1574aa2222930076200544eb274245c/src/python/library/tritonclient/http/__init__.py#L81
# Returns tuple - request and request len to pass in Infer-Header-Content-Length header
(request, json_size) = triton_httpclient._get_inference_request(
inputs=triton_request_inputs,
request_id="",
outputs=None,
sequence_id=0,
sequence_start=0,
sequence_end=0,
priority=0,
timeout=None)
headers = {}
if json_size:
headers["Inference-Header-Content-Length"] = str(json_size)
return (request, headers)
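    # Each line of the file read below is expected to hold one Python dict
    # literal mapping input names to (possibly nested) lists, for example
    # (the input names here are hypothetical):
    #   {"input_ids": [[101, 2023, 102]], "text": ["hello world"]}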
    def get_requests_from_dictionary(self, path):
        rows = []
        with tf.gfile.GFile(path, "r") as f:
            for line in f:
                # ast.literal_eval is safer than eval() for dict-per-line files
                row_dict = ast.literal_eval(line)
                rows.append(self.generate_rest_request_from_dictionary(row_dict))
        return rows
def get_requests_from_tfrecord(self, path, count, batch_size):
raise NotImplementedError()
def get_requests_from_file(self, path):
raise NotImplementedError()
def get_uri(self):
if self._host.startswith("http"):
return self._host
else:
# https://github.com/kubeflow/kfserving/blob/master/docs/predict-api/v2/required_api.md#httprest
if self._model_version:
return f"http://{self._host}:{self._port}/v2/models/{self._model_name}/versions/{self._model_version}/infer"
else:
return f"http://{self._host}:{self._port}/v2/models/{self._model_name}/infer"
| clients/triton_rest.py | 3,001 | en | 0.537996 |
# encoding: utf-8
"""
This module defines the things that are used in setup.py for building JupyterLab
This includes:
* Functions for finding things like packages, package data, etc.
* A function for checking dependencies.
"""
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import json
import os
import pipes
import sys
import shutil
import tempfile
import os.path as osp
from os.path import join as pjoin
from distutils import log
from distutils.cmd import Command
from distutils.version import LooseVersion
from setuptools.command.egg_info import egg_info
from setuptools.command.bdist_egg import bdist_egg
from subprocess import check_call
if sys.platform == 'win32':
from subprocess import list2cmdline
else:
def list2cmdline(cmd_list):
return ' '.join(map(pipes.quote, cmd_list))
# the name of the project
name = 'jupyterlab'
here = osp.dirname(osp.abspath(__file__))
is_repo = osp.exists(pjoin(here, '.git'))
version_ns = {}
with io.open(pjoin(here, name, '_version.py'), encoding="utf8") as f:
exec(f.read(), {}, version_ns)
def run(cmd, *args, **kwargs):
"""Echo a command before running it"""
log.info('> ' + list2cmdline(cmd))
kwargs['shell'] = (sys.platform == 'win32')
return check_call(cmd, *args, **kwargs)
#---------------------------------------------------------------------------
# Find packages
#---------------------------------------------------------------------------
def find_packages():
"""
Find all of the packages.
"""
packages = []
for dir, subdirs, files in os.walk('jupyterlab'):
if 'node_modules' in subdirs:
subdirs.remove('node_modules')
package = dir.replace(osp.sep, '.')
if '__init__.py' not in files:
# not a package
continue
packages.append(package)
return packages
#---------------------------------------------------------------------------
# Find package data
#---------------------------------------------------------------------------
def find_package_data():
"""
Find package_data.
"""
theme_dirs = []
for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'themes')):
slice_len = len('jupyterlab' + os.sep)
theme_dirs.append(pjoin(dir[slice_len:], '*'))
schema_dirs = []
for dir, subdirs, files in os.walk(pjoin('jupyterlab', 'schemas')):
slice_len = len('jupyterlab' + os.sep)
schema_dirs.append(pjoin(dir[slice_len:], '*'))
return {
'jupyterlab': ['build/*', '*.js', 'package.app.json',
'yarn.lock', 'yarn.app.lock', '.yarnrc'
] + theme_dirs + schema_dirs
}
def find_data_files():
"""
Find data_files.
"""
if not os.path.exists(pjoin('jupyterlab', 'build')):
return []
files = []
static_files = os.listdir(pjoin('jupyterlab', 'build'))
files.append(('share/jupyter/lab/static',
['jupyterlab/build/%s' % f for f in static_files]))
for dir, subdirs, fnames in os.walk(pjoin('jupyterlab', 'schemas')):
dir = dir.replace(os.sep, '/')
schema_files = []
for fname in fnames:
schema_files.append('%s/%s' % (dir, fname))
slice_len = len('jupyterlab/')
files.append(('share/jupyter/lab/%s' % dir[slice_len:], schema_files))
for dir, subdirs, fnames in os.walk(pjoin('jupyterlab', 'themes')):
dir = dir.replace(os.sep, '/')
themes_files = []
for fname in fnames:
themes_files.append('%s/%s' % (dir, fname))
slice_len = len('jupyterlab/')
files.append(('share/jupyter/lab/%s' % dir[slice_len:], themes_files))
return files
def js_prerelease(command, strict=False):
"""decorator for building minified js/css prior to another command"""
class DecoratedCommand(command):
def run(self):
jsdeps = self.distribution.get_command_obj('jsdeps')
if not is_repo and all(osp.exists(t) for t in jsdeps.targets):
# sdist, nothing to do
command.run(self)
return
try:
self.distribution.run_command('jsdeps')
except Exception as e:
missing = [t for t in jsdeps.targets if not osp.exists(t)]
if strict or missing:
log.warn('js check failed')
if missing:
log.error('missing files: %s' % missing)
raise e
else:
log.warn('js check failed (not a problem)')
log.warn(str(e))
command.run(self)
return DecoratedCommand
def update_package_data(distribution):
"""update build_py options to get package_data changes"""
build_py = distribution.get_command_obj('build_py')
build_py.finalize_options()
class CheckAssets(Command):
description = 'check for required assets'
user_options = []
# Representative files that should exist after a successful build
targets = [
pjoin(here, 'jupyterlab', 'build', 'release_data.json'),
pjoin(here, 'jupyterlab', 'build', 'main.bundle.js'),
pjoin(here, 'jupyterlab', 'schemas', '@jupyterlab',
'shortcuts-extension', 'plugin.json'),
pjoin(here, 'jupyterlab', 'themes', '@jupyterlab',
'theme-light-extension',
'images', 'jupyterlab.svg')
]
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
for t in self.targets:
if not osp.exists(t):
msg = 'Missing file: %s' % t
raise ValueError(msg)
target = pjoin(here, 'jupyterlab', 'build', 'release_data.json')
with open(target) as fid:
data = json.load(fid)
if (LooseVersion(data['version']) !=
LooseVersion(version_ns['__version__'])):
msg = 'Release assets version mismatch, please run npm publish'
raise ValueError(msg)
# update package data in case this created new files
update_package_data(self.distribution)
class bdist_egg_disabled(bdist_egg):
"""Disabled version of bdist_egg
Prevents setup.py install performing setuptools' default easy_install,
which it should never ever do.
"""
def run(self):
sys.exit("Aborting implicit building of eggs. Use `pip install .` to install from source.")
class custom_egg_info(egg_info):
"""Prune JavaScript folders from egg_info to avoid locking up pip.
"""
def run(self):
folders = ['examples', 'packages', 'test', 'node_modules']
folders = [f for f in folders if os.path.exists(pjoin(here, f))]
tempdir = tempfile.mkdtemp()
for folder in folders:
shutil.move(pjoin(here, folder), tempdir)
value = egg_info.run(self)
for folder in folders:
shutil.move(pjoin(tempdir, folder), here)
shutil.rmtree(tempdir)
return value
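# Hedged usage sketch (editor's addition): how the helpers above might be
# exercised; the cmdclass wiring in the comments mirrors typical setuptools
# usage and is an assumption, not the original setup.py.
if __name__ == '__main__':
    print('packages:', find_packages())
    print('package_data keys:', list(find_package_data()))
    print('data_files entries:', len(find_data_files()))
    # In setup.py these would typically be passed along the lines of:
    # setup(..., packages=find_packages(), package_data=find_package_data(),
    #       data_files=find_data_files(),
    #       cmdclass={'build_py': js_prerelease(build_py),
    #                 'jsdeps': CheckAssets,
    #                 'egg_info': custom_egg_info,
    #                 'bdist_egg': bdist_egg_disabled})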
| setupbase.py | 7,123 | (nl_text omitted: duplicates the docstrings and comments above) | 1,241 | en | 0.683699 |
# # SPDX-License-Identifier: MIT
# from augur.augurplugin import AugurPlugin
# from augur.application import Application
# class HousekeeperPlugin(AugurPlugin):
# """
# This plugin serves as an example as to how to load plugins into Augur
# """
# def __init__(self, augur_app):
# super().__init__(augur_app)
# self.__housekeeper = self.__call__()
# def __call__(self):
# from .housekeeper import Housekeeper
# return Housekeeper(
# user=self._augur.read_config('Database', 'user', 'AUGUR_DB_USER', 'root'),
# password=self._augur.read_config('Database', 'password', 'AUGUR_DB_PASS', 'password'),
# host=self._augur.read_config('Database', 'host', 'AUGUR_DB_HOST', '127.0.0.1'),
# port=self._augur.read_config('Database', 'port', 'AUGUR_DB_PORT', '3306'),
# dbname=self._augur.read_config('Database', 'database', 'AUGUR_DB_NAME', 'msr14')
# )
# HousekeeperPlugin.augur_plugin_meta = {
# 'name': 'housekeeper',
# 'datasource': True
# }
# Application.register_plugin(HousekeeperPlugin)
# __all__ = ['HousekeeperPlugin']
| augur/housekeeper/__init__.py | 1,173 | (nl_text omitted: duplicates the commented-out code above) | 1,116 | en | 0.300772 |
""" Video Link: https://youtu.be/1s-Tj65AKZA """
from seleniumbase import __version__
from seleniumbase import BaseCase
class HackTests(BaseCase):
def test_all_your_base_are_belong_to_us(self):
# First make sure that seleniumbase 1.65.0 or newer is installed
version = __version__.split(".")
if version[0] == "1" and int(version[1]) < 65:
raise Exception(
"This test requires minimum seleniumbase version: 1.65.0"
)
self.set_window_size(1220, 740)
ayb = "ALL YOUR BASE"
abtu = "ARE BELONG TO US"
aybabtu = "%s %s" % (ayb, abtu)
sb_banner_logo = "//seleniumbase.io/cdn/img/sb_logo_10.png"
sb_dashboard_logo = "//seleniumbase.io/img/dash_pie_3.png"
yt_chip = "#chips yt-chip-cloud-chip-renderer:nth-of-type"
wiki = "https://en.wikipedia.org/wiki/All_your_base_are_belong_to_us"
self.open(wiki)
self.click_if_visible('button[aria-label="Close"]')
self.set_text_content("h1#firstHeading", aybabtu)
self.set_text_content("#ca-history a", aybabtu)
self.set_text_content('#n-mainpage-description a', "ALL")
self.set_text_content('#n-contents a', "YOUR")
self.set_text_content('#n-currentevents a', "BASE")
self.set_text_content('#n-randompage a', "ARE")
self.set_text_content('#n-aboutsite a', "BELONG")
self.set_text_content('#n-contactpage a', "TO")
self.set_text_content('#n-sitesupport a', "US")
self.set_text_content('.tocsection-1 span.toctext', "ALL")
self.set_text_content('.tocsection-2 span.toctext', "YOUR")
self.set_text_content('.tocsection-3 span.toctext', "BASE")
self.set_text_content('.tocsection-4 span.toctext', "ARE")
self.set_text_content('.tocsection-5 span.toctext', "BELONG")
self.set_text_content('.tocsection-6 span.toctext', "TO")
self.set_text_content('.tocsection-7 span.toctext', "US")
self.highlight("h1#firstHeading", loops=2, scroll=False)
self.highlight("#ca-history a", loops=2, scroll=False)
self.highlight("nav#p-navigation", loops=2, scroll=False)
self.highlight("div#toc", loops=2, scroll=False)
self.highlight('.tocsection-1 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-2 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-3 span.toctext', loops=2, scroll=False)
self.highlight('.tocsection-4 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-5 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-6 span.toctext', loops=1, scroll=False)
self.highlight('.tocsection-7 span.toctext', loops=2, scroll=False)
zoom_in = 'div.thumbinner{zoom: 1.4;-moz-transform: scale(1.4);}'
self.add_css_style(zoom_in)
self.highlight("div.thumbinner", loops=8, scroll=False)
self.open("https://www.apple.com/store")
self.set_text_content("div.rs-shop-subheader", aybabtu)
self.set_text_content('#shelf-1 a[href*="mac"]', "ALL")
self.set_text_content('#shelf-1 a[href*="iphone"]', "YOUR")
self.set_text_content('#shelf-1 a[href*="ipad"]', "BASE")
self.set_text_content('#shelf-1 a[href*="watch"]', "ARE")
self.set_text_content('#shelf-1 a[href*="airpods"]', "BELONG")
self.set_text_content('#shelf-1 a[href*="airtag"]', "TO")
self.set_text_content('#shelf-1 a[href*="tv"]', "US")
self.set_text_content('#shelf-1 a[href*="homepod"]', ".")
self.set_text_content("h2", aybabtu + ". ")
self.highlight("div.rs-shop-subheader", loops=6, scroll=False)
self.highlight("#shelf-1", loops=2, scroll=False)
self.highlight('#shelf-1 a[href*="mac"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="iphone"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="ipad"]', loops=3, scroll=False)
self.highlight('#shelf-1 a[href*="watch"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="airpods"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="airtag"]', loops=1, scroll=False)
self.highlight('#shelf-1 a[href*="tv"]', loops=3, scroll=False)
self.highlight("h2", loops=9, scroll=False)
self.open("https://google.com/ncr")
self.set_text_content('a[href*="about.google"]', ayb)
self.set_text_content('a[href*="store.google"]', abtu)
self.set_text_content('a[href*="mail.google.com"]', ayb)
self.set_text_content('a[href*="google.com/img"]', abtu)
self.set_attributes('[value="Google Search"]', "value", ayb)
self.set_attributes('[value="I\'m Feeling Lucky"]', "value", abtu)
zoom_in = 'a{zoom: 1.2;-moz-transform: scale(1.2);}'
self.add_css_style(zoom_in)
zoom_in = (
'[value="ALL YOUR BASE"]{zoom: 1.3;-moz-transform: scale(1.3);}'
'[value="ARE BELONG TO US"]{zoom: 1.3;-moz-transform: scale(1.3);}'
)
self.add_css_style(zoom_in)
self.highlight('a[href*="about.google"]', loops=3)
self.highlight('a[href*="store.google"]', loops=3)
self.highlight('a[href*="mail.google.com"]', loops=3)
self.highlight('a[href*="google.com/img"]', loops=3)
self.highlight('form[role="search"]', loops=8)
self.open("https://twitter.com/")
if not self.is_element_visible('a[href*="w/signup"] span'):
self.refresh()
if self.is_element_visible('a[href*="w/signup"] span'):
self.set_text_content('a[href*="w/signup"] span', aybabtu)
self.highlight('a[href*="w/signup"] span', loops=6, scroll=False)
self.highlight('a[href*="w/signup"]', loops=6, scroll=False)
self.open("https://www.youtube.com/")
self.set_text_content('%s(1)' % yt_chip, "ALL")
self.set_text_content('%s(2)' % yt_chip, "YOUR")
self.set_text_content('%s(3)' % yt_chip, "BASE")
self.set_text_content('%s(4)' % yt_chip, "ARE")
self.set_text_content('%s(5)' % yt_chip, "BELONG")
self.set_text_content('%s(6)' % yt_chip, "TO")
self.set_text_content('%s(7)' % yt_chip, "US")
self.set_text_content('%s(8)' % yt_chip, "!")
self.set_text_content('%s(9)' % yt_chip, "!")
self.set_text_content('%s(10)' % yt_chip, "!")
self.click_if_visible("#dismiss-button")
self.click_if_visible('button[aria-label="Close"]')
self.highlight("#scroll-container", loops=5, scroll=False)
self.highlight('%s(1)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(2)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(3)' % yt_chip, loops=3, scroll=False)
self.highlight('%s(4)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(5)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(6)' % yt_chip, loops=1, scroll=False)
self.highlight('%s(7)' % yt_chip, loops=3, scroll=False)
self.highlight("#scroll-container", loops=7, scroll=False)
self.open("https://github.com/features/actions")
self.set_text_content('a[href="/team"]', ayb)
self.set_text_content('a[href="/enterprise"]', abtu)
self.set_text_content('h1 span:nth-child(1)', ayb)
self.set_text_content('h1 span:nth-of-type(2)', "ARE")
self.set_text_content('h1 span:nth-of-type(3)', "BELONG")
self.set_text_content('h1 span:nth-of-type(4)', "TO")
self.set_text_content('h1 span:nth-of-type(5)', "US")
self.type('input[name="q"]', aybabtu.lower())
self.click("h1", scroll=False)
self.highlight("nav", loops=5, scroll=False)
self.highlight('input[name="q"]', loops=5, scroll=False)
self.highlight("h1", loops=8, scroll=False)
self.open("https://dev.to/top/infinity")
self.click_if_visible('button[aria-label="Close campaign banner"]')
self.set_text_content('nav a[data-text="Relevant"]', "ALL")
self.set_text_content('nav a[data-text="Latest"]', "YOUR")
self.set_text_content('nav a[data-text="Top"]', "BASE")
self.set_text_content('nav a[data-text="Week"]', "ARE")
self.set_text_content('nav a[data-text="Month"]', "BELONG")
self.set_text_content('nav a[data-text="Year"]', "TO")
self.set_text_content('nav a[data-text="Infinity"]', "US")
self.set_text_content('aside a[class*="tful"]', aybabtu)
self.set_text_content('aside a[aria-label="Create new account"]', ayb)
self.set_text_content('aside a[aria-label="Log in"]', abtu)
self.set_text_content('aside a[class*="tful"]:nth-child(2)', aybabtu)
self.set_text_content('aside a[class*="tful"]:nth-child(3)', aybabtu)
self.set_text_content('aside a[class*="tful"]:nth-child(4)', aybabtu)
self.set_text_content('aside a[class*="tful"]:nth-child(5)', aybabtu)
self.set_attribute("a.crayons-avatar img", "src", sb_dashboard_logo)
self.set_text_content('.profile-preview-card button', "SeleniumBase")
self.set_text_content('h2.crayons-story__title a', aybabtu)
self.type('input[name="q"]', aybabtu)
self.highlight('input[name="q"]', loops=4, scroll=False)
self.highlight('[aria-label="Primary sidebar"] div div', scroll=False)
self.highlight('nav a[data-text="Relevant"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Latest"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Top"]', loops=2, scroll=False)
self.highlight('nav a[data-text="Week"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Month"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Year"]', loops=1, scroll=False)
self.highlight('nav a[data-text="Infinity"]', loops=2, scroll=False)
self.highlight('aside[id*="sidebar"] section', loops=5, scroll=False)
self.highlight("div.crayons-story__body", loops=7, scroll=False)
self.open("https://azure.microsoft.com/en-us/services/playfab/")
self.set_text_content("h1", aybabtu)
self.set_text_content('a[aria-label*="Try PlayF"]', ayb)
self.set_text_content('a[aria-label*="Sign in to"]', abtu)
self.set_text_content('span:contains("Chat with Sales")', aybabtu)
self.highlight("h1", loops=6, scroll=False)
self.highlight('a[aria-label*="Try PlayF"]', loops=4, scroll=False)
self.highlight('a[aria-label*="Sign in to"]', loops=4, scroll=False)
self.highlight('button#live-engage-btn', loops=6, scroll=False)
self.open("https://www.snapchat.com/")
self.set_text_content("h1", ayb)
self.set_text_content("form .button-large span span", abtu)
zoom_in = 'a.button-large span{zoom: 1.2;-moz-transform: scale(1.2);}'
self.add_css_style(zoom_in)
self.highlight("h1", loops=6, scroll=False)
self.highlight("form .button-large span span", loops=8, scroll=False)
self.open("https://store.steampowered.com/")
self.set_text_content('div.content a[href*="/about/"]', " ")
self.set_text_content('div.content a[href*="help.steam"]', aybabtu)
self.set_text_content("#foryou_tab a", "ALL")
self.set_text_content("#noteworthy_tab a", "YOUR BASE")
self.set_text_content("#genre_tab a", "ARE")
self.set_text_content('span:contains("Points Shop")', "BELONG")
self.set_text_content('span:contains("News")', "TO")
self.set_text_content('span:contains("Labs")', "US")
self.set_value("input#store_nav_search_term", ayb + " . . . .")
self.highlight('div.content a[href*="help.steam"]', loops=6)
self.highlight('#store_nav_area', loops=2, scroll=False)
self.highlight("#foryou_tab a", loops=1, scroll=False)
self.highlight("#noteworthy_tab a", loops=3, scroll=False)
self.highlight("#genre_tab a", loops=1, scroll=False)
self.highlight('span:contains("BELONG")', loops=1, scroll=False)
self.highlight('span:contains("TO")', loops=1, scroll=False)
self.highlight('span:contains("US")', loops=2, scroll=False)
self.js_click('input[id*="nav_search"]')
self.highlight('input[id*="nav_search"]', loops=6, scroll=False)
self.open("https://xkcd.com/286/")
self.set_text_content('a[href="/archive"]', "ALL")
self.set_text_content('a[href*="what-if"]', "YOUR")
self.set_text_content('a[href*="//blag."]', "BASE")
self.set_text_content('a[href*="/about"]', abtu)
self.remove_element('li:contains("Feed")')
self.remove_element('li:contains("TW")')
self.remove_element('li:contains("Books")')
self.remove_element('li:contains("What")')
self.remove_element('li:contains("WI")')
self.set_attributes("#news img", "src", sb_banner_logo)
self.set_text_content('#ctitle', aybabtu)
self.set_text_content('a[rel="prev"]', "All")
self.set_text_content('a[href*="random"]', "Your")
self.set_text_content('a[rel="next"]', "Base")
self.highlight("#topLeft ul", loops=5, scroll=False)
self.highlight('a[href="/archive"]', loops=1, scroll=False)
self.highlight('a[href*="what-if"]', loops=1, scroll=False)
self.highlight('a[href*="//blag."]', loops=2, scroll=False)
self.highlight('a[href*="/about"]', loops=5, scroll=False)
self.highlight('a[rel="prev"]', loops=1, scroll=False)
self.highlight('a[href*="random"]', loops=1, scroll=False)
self.highlight('a[rel="next"]', loops=3, scroll=False)
self.highlight("#ctitle", loops=7, scroll=False)
self.open("https://www.nintendo.com/whatsnew/")
self.set_text_content('button[aria-label="Search"]', aybabtu)
self.set_text_content('button[data-section="newsevents"]', aybabtu)
self.set_text_content("h2", aybabtu)
self.highlight('div.search-flex', loops=4, scroll=False)
self.highlight('button[data-section*="news"]', loops=4, scroll=False)
self.highlight("h2", loops=6, scroll=False)
self.open("https://support.gog.com/hc/en-us?product=gog")
self.set_text_content("div.intro-title", aybabtu)
self.set_text_content("h4", aybabtu)
self.highlight("div.intro-title", loops=8, scroll=False)
self.highlight("h4", loops=8, scroll=False)
self.open("https://slack.com/help/articles/204714258-Giphy-for-Slack")
self.set_text_content("h1", aybabtu)
self.set_text_content('a[prettyslug="getting-started"]', "ALL")
self.set_text_content('a[prettyslug="using-slack"]', "YOUR")
self.set_text_content('a[prettyslug="your-profile"]', "BASE")
self.set_text_content('a[prettyslug="connect-tools"]', "ARE")
self.set_text_content('a[prettyslug="administration"]', "BELONG")
self.set_text_content('a[prettyslug="tutorials"]', "TO US")
self.highlight("h1", loops=4, scroll=False)
self.highlight("div#global_menu", loops=2, scroll=False)
self.highlight('a[prettyslug*="g-started"]', loops=1, scroll=False)
self.highlight('a[prettyslug="using-slack"]', loops=1, scroll=False)
self.highlight('a[prettyslug="your-profile"]', loops=2, scroll=False)
self.highlight('a[prettyslug="connect-tools"]', loops=1, scroll=False)
self.highlight('a[prettyslug="administration"]', loops=1, scroll=False)
self.highlight('a[prettyslug="tutorials"]', loops=2, scroll=False)
self.open("https://kubernetes.io/")
self.set_text_content('nav a[href="/docs/"]', "ALL")
self.set_text_content('nav a[href="/blog/"]', "YOUR")
self.set_text_content('nav a[href="/training/"]', "BASE")
self.set_text_content('nav a[href="/partners/"]', "ARE")
self.set_text_content('nav a[href="/community/"]', "BELONG")
self.set_text_content('nav a[href="/case-studies/"]', "TO")
self.set_text_content('nav #navbarDropdown', "US")
self.set_text_content('nav #navbarDropdownMenuLink', ".")
if self.is_element_visible("h1"):
self.set_text_content("h1", aybabtu)
self.highlight("nav ul.navbar-nav", loops=3, scroll=False)
self.highlight('nav a[href="/docs/"]', loops=1, scroll=False)
self.highlight('nav a[href="/blog/"]', loops=1, scroll=False)
self.highlight('nav a[href="/training/"]', loops=2, scroll=False)
self.highlight('nav a[href="/partners/"]', loops=1, scroll=False)
self.highlight('nav a[href="/community/"]', loops=1, scroll=False)
self.highlight('nav a[href="/case-studies/"]', loops=1, scroll=False)
self.highlight('nav #navbarDropdown', loops=2, scroll=False)
if self.is_element_visible("h1"):
self.highlight('h1', loops=6, scroll=False)
self.open("https://www.selenium.dev/")
self.set_attributes("a.dropdown-toggle", "class", "nav-link")
self.set_text_content('li a:contains("About")', "ALL")
self.set_text_content('li a:contains("Downloads")', "YOUR")
self.set_text_content('li a:contains("Documentation")', "BASE")
self.set_text_content('li a:contains("Projects")', "ARE")
self.set_text_content('li a:contains("Support")', "BELONG")
self.set_text_content('li a:contains("Blog")', "TO")
self.set_text_content('li a:contains("English")', "US")
self.set_text_content("div.lead", aybabtu)
self.set_text_content("h2", aybabtu)
zoom_in = 'div.lead{zoom: 1.25;-moz-transform: scale(1.25);}'
self.add_css_style(zoom_in)
self.highlight("div#main_navbar", loops=1, scroll=False)
self.highlight('li a:contains("ALL")', loops=1, scroll=False)
self.highlight('li a:contains("YOUR")', loops=1, scroll=False)
self.highlight('li a:contains("BASE")', loops=2, scroll=False)
self.highlight('li a:contains("ARE")', loops=1, scroll=False)
self.highlight('li a:contains("BELONG")', loops=1, scroll=False)
self.highlight('li a:contains("TO")', loops=1, scroll=False)
self.highlight('li a:contains("US")', loops=2, scroll=False)
self.highlight("div.lead", loops=6, scroll=False)
self.highlight("h2", loops=8, scroll=False)
self.open("https://www.python.org/")
self.set_text_content('a[class="donate-button"]', ayb)
self.set_text_content("#about a", "ALL")
self.set_text_content("#downloads a", "YOUR")
self.set_text_content("#documentation a", "BASE")
self.set_text_content("#community a", "ARE")
self.set_text_content("#success-stories a", "BELONG")
self.set_text_content("#news a", "TO")
self.set_text_content("#events a", "US")
self.highlight('a[class="donate-button"]', loops=4, scroll=False)
self.highlight("nav#mainnav", loops=5, scroll=False)
self.highlight("#about a", loops=1, scroll=False)
self.highlight("#downloads a", loops=1, scroll=False)
self.highlight("#documentation a", loops=2, scroll=False)
self.highlight("#community a", loops=1, scroll=False)
self.highlight("#success-stories a", loops=1, scroll=False)
self.highlight("#news a", loops=1, scroll=False)
self.highlight("#events a", loops=2, scroll=False)
self.open("https://docs.pytest.org/")
self.set_text_content("h1", "pytest: " + aybabtu)
self.highlight("h1", loops=10, scroll=False)
self.open("https://wordpress.com/")
self.set_text_content('a[title="Plans & Pricing"]', aybabtu)
self.set_text_content('a[title="Get Started"]', ayb)
self.set_text_content("p.no-widows", aybabtu)
self.set_text_content("a#lpc-button", "Automate with SeleniumBase")
self.highlight('a[title="Plans & Pricing"]', loops=6, scroll=False)
self.highlight('a[title="Get Started"]', loops=4, scroll=False)
self.highlight("p.no-widows", loops=8, scroll=False)
self.highlight("a#lpc-button", loops=4, scroll=False)
self.open("https://seleniumbase.com/")
self.set_text_content("h1", aybabtu)
self.highlight("h1", loops=10, scroll=False)
self.open("https://pypi.org/")
self.set_text_content('a[href="/sponsors/"]', aybabtu)
self.set_text_content("h1", aybabtu)
self.set_value("input#search", aybabtu, scroll=False)
self.highlight('a[href="/sponsors/"]', loops=6, scroll=False)
self.highlight("h1", loops=6, scroll=False)
self.highlight("input#search", loops=8, scroll=False)
self.open("https://www.atlassian.com/software/jira")
self.set_text_content('a[href*="jira/pricing"]', ayb)
self.set_text_content('a[href*="jira/enterprise"]', abtu)
self.set_text_content('a[href="/software/jira/features"]', "")
self.set_text_content('a[href="/software/jira/guides"]', "")
self.set_text_content("h1", ayb)
self.set_text_content('div.xs-none-bottom a[href*="free"]', abtu)
self.highlight("ul.imkt-navbar__link-list", loops=2, scroll=False)
self.highlight('a[href*="jira/pricing"]', loops=3, scroll=False)
self.highlight('a[href*="jira/enterprise"]', loops=3, scroll=False)
self.highlight("h1", loops=3, scroll=False)
self.highlight('div.xs-none-bottom a[href*="free"]', scroll=False)
self.open("https://status.iboss.com/ibcloud/app/cloudStatus.html")
self.set_text_content('div[translate*="cloudStatus"]', ayb)
self.set_text_content('div[translate*="maintenance"]', "ARE")
self.set_text_content('div[translate*="advisory"]', "BELONG")
self.set_text_content('div[translate*="incident"]', "TO US")
self.set_text_content("h1", "Cloud Status - " + aybabtu)
self.highlight("nav div.ibcloud-header-contents", loops=3)
self.highlight('div[translate*="cloudStatus"]', loops=4)
self.highlight('div[translate*="maintenance"]', loops=1)
self.highlight('div[translate*="advisory"]', loops=1)
self.highlight('div[translate*="incident"]', loops=3)
self.highlight("h1", loops=9, scroll=False)
self.open("https://git-scm.com/")
self.set_text_content("span#tagline", aybabtu)
self.set_text_content("#nav-about h3", ayb)
self.set_text_content("#nav-documentation h3", abtu)
self.highlight("span#tagline", loops=8, scroll=False)
self.highlight("#nav-about h3", loops=5, scroll=False)
self.highlight("#nav-documentation h3", loops=6, scroll=False)
self.open("https://teamtreehouse.com/")
self.set_text_content("li.nav-item-free-trial", aybabtu)
self.set_text_content("h1", aybabtu)
self.set_text_content("h2", aybabtu)
self.set_text_content("p.homepage-signup-form-banner", aybabtu)
self.highlight("li.nav-item-free-trial", loops=6, scroll=False)
self.highlight("h1", loops=6, scroll=False)
self.highlight('p[class*="signup-form"]', loops=8, scroll=False)
self.open("https://pragprog.com/")
self.set_text_content("header p", aybabtu)
zoom_in = 'header p{zoom: 1.35;-moz-transform: scale(1.35);}'
self.add_css_style(zoom_in)
self.highlight("header p", loops=10, scroll=False)
self.open("https://seleniumbase.io/")
self.set_text_content("h1", aybabtu)
self.highlight("h1", loops=10, scroll=False)
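# Hedged usage note (editor's addition): SeleniumBase tests such as this one
# are normally launched through pytest, e.g.:
#     pytest hack_the_planet.py
# with seleniumbase >= 1.65.0 installed, as the version check above enforces.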
| examples/hack_the_planet.py | 23,602 | (nl_text omitted: duplicates the comments above) | 106 | en | 0.737358 |
import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import KFold
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
# import data and preprocess it
def preprocessing(file_name: str):
# data import
fish_df = pd.read_csv(file_name)
fish_df = pd.get_dummies(fish_df, columns=['Species'], prefix='Species')
return fish_df
# train-test split by a percentage.
# input: dataframe, label column name, split ratio, and random state
# returns: x_train, x_test, y_train, y_test
def split_df(user_df: pd.DataFrame, label_name: str, split_ratio=0.8, random_value=42):
x_train = user_df.sample(frac=split_ratio, random_state=random_value)
x_test = user_df.drop(x_train.index)
return x_train.drop(label_name, axis=1), x_test.drop(label_name, axis=1), pd.DataFrame(
x_train[label_name]), pd.DataFrame(x_test[label_name])
# Create as arrays of trees in a given size and depth
def create_random_forest(forest_size: int, max_depth: int, random_state_local: int):
random_forest = []
for i in range(0, forest_size, 1):
random_forest.append(DecisionTreeRegressor(criterion='friedman_mse', max_depth=max_depth,
random_state=random_state_local))
return random_forest
# train trees in a forest by fitting each tree to the previous tree's error
# input: forest of trees, initial training guess, x and y databases, alpha coefficient.
# returns: trained forest, initial average value, r_matrix of solutions and mse_list of the results (mean square error)
def train_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame,
alpha: float = 0.1):
# initial average weight and residuals to be used in the 1st tree
predictions = np.ones(len(y_df))*initial_average_weight
residuals = np.array(y_df['Weight'])-predictions
residuals_matrix = [residuals]
# calculates the first mse value
mse_list = [(np.square(residuals)).sum()/len(predictions)]
for tree in random_forest:
# train the current stump
tree.fit(x_df, residuals)
        # predict the fitted residuals on the training data
        residuals = tree.predict(x_df)
# record residuals and calculate mse
residuals_matrix.append(residuals)
mse_list.append((np.square(residuals)).sum()/len(predictions))
# update predictions and calculate new residuals
predictions = predictions + alpha * residuals
residuals = np.array(y_df['Weight']) - predictions
return random_forest, predictions, residuals_matrix, mse_list
# predict test database by the trained random forest
# input: forest of trees, initial training guess, x and y databases.
# returns: mse_list of the forest (mean square error)
def test_forest(random_forest: list, initial_average_weight: float, x_df: pd.DataFrame, y_df: pd.DataFrame,
alpha: float = 0.1):
predictions = np.ones(len(y_df))*initial_average_weight
mse_list = [(np.square(np.array(y_df['Weight']) - predictions)).sum()/len(predictions)]
for tree in random_forest:
predictions = predictions + alpha * tree.predict(x_df)
        mse_list.append((np.square(np.array(y_df['Weight']) - predictions)).sum()/len(predictions))
return predictions, mse_list
def main():
# data import and preprocessing
fish_df = preprocessing("Fish.csv")
# splitting of the data
x_train, x_test, y_train, y_test = split_df(fish_df, 'Weight', 0.8, 42)
# setting up a random forest:
#forest_size_list = [4, 5, 6, 7, 8] # variable calibrated by KFold train-validate
forest_size = 20
# max_depth_list = [1, 2, 3, 4, 5] # variable calibrated by KFold train-validate
max_depth = 3
random_state_local = 42
random_forest = create_random_forest(forest_size, max_depth, random_state_local)
#%% Train
#alpha_list = [0.1, 0.3, 0.5, 0.7, 0.9] # variable calibrated by KFold train-validate
    alpha = 0.5  # gradient coefficient
kf = KFold(n_splits=2, shuffle=True, random_state=42)
for train_index, test_index in kf.split(x_train, y_train):
X_train, X_validate = x_train.iloc[train_index], x_train.iloc[test_index]
Y_train, Y_validate = y_train.iloc[train_index], y_train.iloc[test_index]
# first guess
initial_average_weight = np.average(Y_train['Weight'].tolist())
# train forest
random_forest, predictions_train, r_matrix, mse_list_train = train_forest(random_forest, initial_average_weight,
X_train, Y_train, alpha)
# validate
predictions_validate, mse_list_validate = test_forest(random_forest, initial_average_weight, X_validate,
Y_validate, alpha)
results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals'])
results['Train'] = mse_list_train
results['Validation'] = mse_list_validate
fig = px.scatter(results, x='tree_intervals', y=['Train', 'Validation'], size='tree_intervals')
fig.update_layout(xaxis_title="Amount of Intervals (num.)", yaxis_title="mean square error")
fig.show()
#%% Test
predictions_test, mse_list_test = test_forest(random_forest, initial_average_weight, x_test, y_test, alpha)
# %% plot success rate vs tree intervals
fig = make_subplots(rows=1, cols=3, subplot_titles=('Train', 'Validation', 'Test'),
x_title='Amount of Intervals (num.)', y_title='mean square error')
results = pd.DataFrame(data=np.arange(0, forest_size+1, 1), columns=['tree_intervals'])
results['Train'] = mse_list_train
fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Train'], name='Train'), row=1, col=1)
results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals'])
results['Validation'] = mse_list_validate
fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Validation'], name='Validation'), row=1, col=2)
results = pd.DataFrame(data=np.arange(0, forest_size + 1, 1), columns=['tree_intervals'])
results['Test'] = mse_list_test
fig.add_trace(go.Scatter(x=results['tree_intervals'], y=results['Test'], name='Test'), row=1, col=3)
fig.update_layout(title_text="Random Forest Gradient Boosting")
fig.show()
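def sklearn_reference():
    # Hedged comparison sketch (editor's addition, never called by main):
    # the hand-rolled boosting loop above approximates what scikit-learn
    # provides directly. The parameters mirror forest_size=20, max_depth=3
    # and alpha=0.5 from main() and are assumptions for illustration only.
    from sklearn.ensemble import GradientBoostingRegressor
    fish_df = preprocessing("Fish.csv")
    x_train, x_test, y_train, y_test = split_df(fish_df, 'Weight', 0.8, 42)
    gbr = GradientBoostingRegressor(n_estimators=20, max_depth=3,
                                    learning_rate=0.5, random_state=42)
    gbr.fit(x_train, y_train['Weight'])
    print('sklearn reference R^2 on test:', gbr.score(x_test, y_test['Weight']))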
if __name__ == '__main__':
main()
| gradient-boosting/main.py | 6,766 | (nl_text omitted: duplicates the comments above) | 1,359 | en | 0.806059 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
DJANGO_APPS = [ "kafka" ]
REQUIRES_HADOOP = False
MENU_INDEX = 100
NICE_NAME = "Kafka"
ICON = "kafka/art/icon_kafka_24.png"
IS_URL_NAMESPACED = True
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
METRICS_INI = os.path.join(PROJECT_ROOT, 'metrics.ini')
| kafka/src/kafka/settings.py | 1,057 | (nl_text omitted: duplicates the license header above) | 748 | en | 0.886713 |
# -*- coding: utf-8 -*-
SUCCESSFUL_TERMINAL_STATUSES = ('complete', )
UNSUCCESSFUL_TERMINAL_STATUSES = ('cancelled', 'unsuccessful')
CONTRACT_REQUIRED_FIELDS = [
'awardID', 'contractID', 'items', 'suppliers',
'value', 'dateSigned',
#'documents'
]
CONTRACT_NOT_REQUIRED_FIELDS = [
'contractNumber', 'title', 'title_en', 'title_ru',
'description', 'description_en', 'description_ru'
]
| openregistry/convoy/loki/constants.py | 403 | (nl_text omitted: duplicates the comments above) | 32 | en | 0.907091 |
"""
Plugin for Czech TV (Ceska televize).
Following channels are working:
* CT1 - https://www.ceskatelevize.cz/porady/ct1/
* CT2 - https://www.ceskatelevize.cz/porady/ct2/
* CT24 - https://ct24.ceskatelevize.cz/#live
* CT sport - https://www.ceskatelevize.cz/sport/zive-vysilani/
* CT Decko - https://decko.ceskatelevize.cz/zive
* CT Art - https://www.ceskatelevize.cz/porady/art/
Additionally, videos from iVysilani archive should work as well.
"""
import json
import logging
import re
from html import unescape as html_unescape
from urllib.parse import quote
from streamlink.plugin import Plugin, PluginError, pluginmatcher
from streamlink.plugin.api import useragents, validate
from streamlink.stream import DASHStream, HLSStream
log = logging.getLogger(__name__)
@pluginmatcher(re.compile(
r'https?://([\w-]+\.)*ceskatelevize\.cz'
))
class Ceskatelevize(Plugin):
ajax_url = 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist'
_player_re = re.compile(
r'ivysilani/embed/iFramePlayer[^"]+'
)
_hash_re = re.compile(
r'hash:"([0-9a-z]+)"'
)
_playlist_info_re = re.compile(
r'{"type":"([a-z]+)","id":"([0-9]+)"'
)
_playlist_url_schema = validate.Schema({
validate.optional("streamingProtocol"): validate.text,
"url": validate.any(
validate.url(),
"Error",
"error_region"
)
})
_playlist_schema = validate.Schema({
"playlist": [{
validate.optional("type"): validate.text,
"streamUrls": {
"main": validate.url(),
}
}]
})
def _get_streams(self):
self.session.http.headers.update({'User-Agent': useragents.IPAD})
self.session.http.verify = False
log.warning('SSL certificate verification is disabled.')
# fetch requested url and find playlist info
response = self.session.http.get(self.url)
info = self._find_playlist_info(response)
if not info:
# do next try with new API
def _fallback_api(*args, **kwargs):
self.api2 = CeskatelevizeAPI2(self.session, self.url, *args, **kwargs)
return self.api2._get_streams()
# playlist info not found, let's try to find player url
player_url = self._find_player_url(response)
if not player_url:
log.debug('Cannot find playlist info or player url, do next try with new API')
return _fallback_api(res=response)
# get player url and try to find playlist info in it
response = self.session.http.get(player_url)
info = self._find_playlist_info(response)
if not info:
log.debug('Cannot find playlist info in the player url, do next try with new API')
return _fallback_api()
log.trace('{0!r}'.format(info))
data = {
'playlist[0][type]': info['type'],
'playlist[0][id]': info['id'],
'requestUrl': '/ivysilani/embed/iFramePlayer.php',
'requestSource': 'iVysilani',
'type': 'html'
}
headers = {
'x-addr': '127.0.0.1',
}
# fetch playlist url
response = self.session.http.post(
self.ajax_url,
data=data,
headers=headers
)
json_data = self.session.http.json(response, schema=self._playlist_url_schema)
log.trace('{0!r}'.format(json_data))
if json_data['url'] in ['Error', 'error_region']:
log.error('This stream is not available')
return
# fetch playlist
response = self.session.http.post(json_data['url'])
json_data = self.session.http.json(response, schema=self._playlist_schema)
log.trace('{0!r}'.format(json_data))
playlist = json_data['playlist'][0]['streamUrls']['main']
return HLSStream.parse_variant_playlist(self.session, playlist)
@classmethod
def _find_playlist_info(cls, response):
"""
Finds playlist info (type, id) in HTTP response.
:param response: Response object.
:returns: Dictionary with type and id.
"""
values = {}
matches = cls._playlist_info_re.search(response.text)
if matches:
values['type'] = matches.group(1)
values['id'] = matches.group(2)
return values
@classmethod
def _find_player_url(cls, response):
"""
Finds embedded player url in HTTP response.
:param response: Response object.
:returns: Player url (str).
"""
url = ''
matches = cls._player_re.search(response.text)
if matches:
tmp_url = matches.group(0).replace('&', '&')
if 'hash' not in tmp_url:
# there's no hash in the URL, try to find it
matches = cls._hash_re.search(response.text)
if matches:
url = tmp_url + '&hash=' + matches.group(1)
else:
url = tmp_url
return 'http://ceskatelevize.cz/' + url
class CeskatelevizeAPI2:
_player_api = 'https://playlist.ceskatelevize.cz/'
_url_re = re.compile(r'http(s)?://([^.]*.)?ceskatelevize.cz')
_playlist_info_re = re.compile(r'{\s*"type":\s*"([a-z]+)",\s*"id":\s*"(\w+)"')
_playlist_schema = validate.Schema({
"CODE": validate.contains("OK"),
"RESULT": {
"playlist": [{
"streamUrls": {
"main": validate.url(),
}
}]
}
})
_ctcomp_re = re.compile(r'data-ctcomp="Video"\sdata-video-id="(?P<val1>[^"]*)"\sdata-ctcomp-data="(?P<val2>[^"]+)">')
_ctcomp_schema = validate.Schema(
validate.text,
validate.transform(_ctcomp_re.findall),
validate.transform(lambda vl: [{"video-id": v[0], "ctcomp-data": json.loads(html_unescape(v[1]))} for v in vl])
)
_playlist_info_schema = validate.Schema({
"type": validate.text,
"id": validate.any(validate.text, int),
"key": validate.text,
"date": validate.text,
"requestSource": validate.text,
"drm": int,
validate.optional("canBePlay"): int,
validate.optional("assetId"): validate.text,
"quality": validate.text,
validate.optional("region"): int
})
def __init__(self, session, url, res=None):
self.session = session
self.url = url
self.response = res
def _get_streams(self):
if self.response is None:
infos = self.session.http.get(self.url, schema=self._ctcomp_schema)
else:
infos = self.session.http.json(self.response, schema=self._ctcomp_schema)
if not infos:
# playlist infos not found
raise PluginError('Cannot find playlist infos!')
vod_prio = len(infos) == 2
for info in infos:
try:
pl = info['ctcomp-data']['source']['playlist'][0]
except KeyError:
raise PluginError('Cannot find playlist info!')
pl = self._playlist_info_schema.validate(pl)
if vod_prio and pl['type'] != 'VOD':
continue
log.trace('{0!r}'.format(info))
if pl['type'] == 'LIVE':
data = {
"contentType": "live",
"items": [{
"id": pl["id"],
"assetId": pl["assetId"],
"key": pl["key"],
"playerType": "dash",
"date": pl["date"],
"requestSource": pl["requestSource"],
"drm": pl["drm"],
"quality": pl["quality"],
}]
}
elif pl['type'] == 'VOD':
data = {
"contentType": "vod",
"items": [{
"id": pl["id"],
"key": pl["key"],
"playerType": "dash",
"date": pl["date"],
"requestSource": pl["requestSource"],
"drm": pl["drm"],
"canBePlay": pl["canBePlay"],
"quality": pl["quality"],
"region": pl["region"]
}]
}
headers = {
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
}
data = json.dumps(data)
response = self.session.http.post(
self._player_api,
data="data={}".format(quote(data)),
headers=headers
)
json_data = self.session.http.json(response, schema=self._playlist_schema)
log.trace('{0!r}'.format(json_data))
playlist = json_data['RESULT']['playlist'][0]['streamUrls']['main']
yield from DASHStream.parse_manifest(self.session, playlist).items()
__plugin__ = Ceskatelevize
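# Hedged usage note (editor's addition): once this file is picked up as a
# Streamlink plugin, a stream would typically be played from the CLI, e.g.:
#     streamlink "https://ct24.ceskatelevize.cz/#live" best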
| src/streamlink/plugins/ceskatelevize.py | 9,161 | (nl_text omitted: duplicates the docstrings above) | 974 | en | 0.759209 |
# basic example of dict syntax
my_dict = {'key1':'value1','key2':'value2','key3':'value3'}
print(my_dict)
print(my_dict['key3'])
# example 2
prices = {'apple':100,'banana':60,'gavava':90,'rice':50}
print(prices['rice'])
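# editor's addition (hedged): .get() avoids the KeyError that bracket access
# raises for a missing key, returning a default value instead
print(prices.get('mango', 0))  # prints 0 because 'mango' is not in prices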
| python_basics/Dictionary/dict.py | 215 | (nl_text omitted: duplicates the comments above) | 33 | en | 0.414212 |
import numpy as np
import matplotlib.pyplot as plt
# input raw samples from MCU
# in_data = 'out/data_raw.txt'
in_data = 'out/8bit.txt'
fs = 5000
in_bits = 8
# load file
raw = np.loadtxt(in_data)
# Stats
print("Max=%d Min=%d Mean=%d swing=%d %.1fbits" % \
(np.max(raw), np.min(raw), np.mean(raw),
np.max(raw) - np.min(raw), np.log2(np.max(raw) - np.min(raw))))
# generate different bit audio
data_depth = {}
print(raw)
data_depth['16bit'] = 2**(in_bits-16)*(raw / (2**(in_bits-16))).astype('int')
print(data_depth['16bit'])
data_depth['10bit'] = 2**(in_bits-10)*(raw / (2**(in_bits-10))).astype('int')
data_depth['8bit'] = 2**(in_bits-8)*(raw / (2**(in_bits-8))).astype('int')
data_depth['7bit'] = 2**(in_bits-7)*(raw / (2**(in_bits-7))).astype('int')
data_depth['6bit'] = 2**(in_bits-6)*(raw / (2**(in_bits-6))).astype('int')
data_depth['2bit'] = 2**(in_bits-2)*(raw / (2**(in_bits-2))).astype('int')
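# Editor's note (hedged): each assignment above re-quantizes by zeroing the
# (in_bits - n) least-significant bits of every sample:
#     q = 2**(in_bits - n) * int(x / 2**(in_bits - n))
# For n >= in_bits (e.g. '16bit' from 8-bit input) the scale factor is
# fractional, so integer samples pass through effectively unchanged.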
# normalize and zero mean all
for key in data_depth:
data_depth[key] = data_depth[key] - np.mean(data_depth[key])
data_depth[key] = data_depth[key] / np.max(np.abs(data_depth[key]))
# write audio files
from scipy.io.wavfile import write
for key in data_depth:
write('out/test'+key+'.wav', fs, data_depth[key])
# plot some
t = np.arange(0, len(raw)/fs, 1/fs)
fig, axs = plt.subplots(1, 1)
axs.step(t, data_depth['16bit'], label='16bit')
axs.step(t, data_depth['8bit'], label='8bit')
axs.step(t, data_depth['7bit'], label='7bit')
axs.step(t, data_depth['6bit'], label='6bit')
axs.step(t, data_depth['2bit'], label='2bit')
# axs.set_xlim(0, 6e-3)
# axs.set_ylim(-1, 1)
axs.set_xlabel('time [s]')
axs.set_ylabel('mic data')
axs.grid(True)
axs.legend()
fig.tight_layout()
plt.show()
| audio/edison/audio/bit_depth_analyze.py | 1,741 | (nl_text omitted: duplicates the comments above) | 199 | en | 0.498117 |
#!/home/pi/Documents/Codigos/API_Estacao/bin/python3
"""Simple FTDI EEPROM configurator.
"""
# Copyright (c) 2019-2020, Emmanuel Blot <emmanuel.blot@free.fr>
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause
from argparse import ArgumentParser, FileType
from io import StringIO
from logging import Formatter, StreamHandler, DEBUG, ERROR
from sys import modules, stderr
from textwrap import fill
from traceback import format_exc
from pyftdi import FtdiLogger
from pyftdi.eeprom import FtdiEeprom
from pyftdi.ftdi import Ftdi
from pyftdi.misc import add_custom_devices, hexdump
#pylint: disable-msg=too-many-locals
#pylint: disable-msg=too-many-branches
#pylint: disable-msg=too-many-statements
def main():
"""Main routine"""
debug = False
try:
argparser = ArgumentParser(description=modules[__name__].__doc__)
argparser.add_argument('device', nargs='?', default='ftdi:///?',
help='serial port device name')
argparser.add_argument('-x', '--hexdump', action='store_true',
help='dump EEPROM content as ASCII')
argparser.add_argument('-X', '--hexblock', type=int,
help='dump EEPROM as indented hexa blocks')
argparser.add_argument('-i', '--input', type=FileType('rt'),
help='input ini file to load EEPROM content')
argparser.add_argument('-l', '--load', default='all',
choices=('all', 'raw', 'values'),
help='section(s) to load from input file')
argparser.add_argument('-o', '--output', type=FileType('wt'),
help='output ini file to save EEPROM content')
argparser.add_argument('-s', '--serial-number',
help='set serial number')
argparser.add_argument('-m', '--manufacturer',
help='set manufacturer name')
argparser.add_argument('-p', '--product',
help='set product name')
argparser.add_argument('-c', '--config', action='append',
help='change/configure a property '
'as key=value pair')
argparser.add_argument('-e', '--erase', action='store_true',
help='erase the whole EEPROM content')
argparser.add_argument('-u', '--update', action='store_true',
help='perform actual update, use w/ care')
argparser.add_argument('-P', '--vidpid', action='append',
help='specify a custom VID:PID device ID, '
'may be repeated')
argparser.add_argument('-V', '--virtual', type=FileType('r'),
help='use a virtual device, specified as YaML')
argparser.add_argument('-v', '--verbose', action='count', default=0,
help='increase verbosity')
argparser.add_argument('-d', '--debug', action='store_true',
help='enable debug mode')
args = argparser.parse_args()
debug = args.debug
if not args.device:
argparser.error('Serial device not specified')
loglevel = max(DEBUG, ERROR - (10 * args.verbose))
loglevel = min(ERROR, loglevel)
if debug:
formatter = Formatter('%(asctime)s.%(msecs)03d %(name)-20s '
'%(message)s', '%H:%M:%S')
else:
formatter = Formatter('%(message)s')
FtdiLogger.set_formatter(formatter)
FtdiLogger.set_level(loglevel)
FtdiLogger.log.addHandler(StreamHandler(stderr))
if args.virtual:
#pylint: disable-msg=import-outside-toplevel
from pyftdi.usbtools import UsbTools
# Force PyUSB to use PyFtdi test framework for USB backends
UsbTools.BACKENDS = ('pyftdi.tests.backend.usbvirt', )
# Ensure the virtual backend can be found and is loaded
backend = UsbTools.find_backend()
loader = backend.create_loader()()
loader.load(args.virtual)
try:
add_custom_devices(Ftdi, args.vidpid, force_hex=True)
except ValueError as exc:
argparser.error(str(exc))
eeprom = FtdiEeprom()
eeprom.open(args.device)
if args.erase:
eeprom.erase()
if args.input:
eeprom.load_config(args.input, args.load)
if args.serial_number:
eeprom.set_serial_number(args.serial_number)
if args.manufacturer:
eeprom.set_manufacturer_name(args.manufacturer)
if args.product:
eeprom.set_product_name(args.product)
for conf in args.config or []:
if conf == '?':
helpstr = ', '.join(sorted(eeprom.properties))
print(fill(helpstr, initial_indent=' ',
subsequent_indent=' '))
exit(1)
for sep in ':=':
if sep in conf:
name, value = conf.split(sep, 1)
if not value:
argparser.error('Configuration %s without value' %
conf)
helpio = StringIO()
eeprom.set_property(name, value, helpio)
helpstr = helpio.getvalue()
if helpstr:
print(fill(helpstr, initial_indent=' ',
subsequent_indent=' '))
exit(1)
break
else:
argparser.error('Missing name:value separator in %s' % conf)
if args.hexdump:
print(hexdump(eeprom.data))
if args.hexblock is not None:
indent = ' ' * args.hexblock
for pos in range(0, len(eeprom.data), 16):
hexa = ' '.join(['%02x' % x for x in eeprom.data[pos:pos+16]])
print(indent, hexa, sep='')
if args.update:
if eeprom.commit(False):
eeprom.reset_device()
if args.verbose > 0:
eeprom.dump_config()
if args.output:
eeprom.save_config(args.output)
except (ImportError, IOError, NotImplementedError, ValueError) as exc:
print('\nError: %s' % exc, file=stderr)
if debug:
print(format_exc(chain=False), file=stderr)
exit(1)
except KeyboardInterrupt:
exit(2)
if __name__ == '__main__':
main()
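# Hedged usage note (editor's addition): typical invocations, derived from
# the argparser above; the device URL and property values are placeholders.
#     ftconf.py ftdi:///1 -x                  # dump EEPROM content as hex
#     ftconf.py ftdi:///1 -c '?'              # list configurable properties
#     ftconf.py ftdi:///1 -s SN123456 -u      # set serial number and commit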
| bin/ftconf.py | 6,686 | (nl_text omitted: duplicates the docstrings above) | 486 | en | 0.553341 |
#!/usr/bin/env python3
######################################################
## Calibrating the extrinsics between T265 and D4xx ##
## Based on this example: https://github.com/IntelRealSense/librealsense/pull/4355
## with changes and modifications.
######################################################
######################################################
#
# General steps:
# 1. Mount the two cameras rigidly
# 2. Print any one of the checkerboards from: https://markhedleyjones.com/projects/calibration-checkerboard-collection
# - The default settings in this script are for: https://markhedleyjones.com/storage/checkerboards/Checkerboard-A4-25mm-8x6.pdf
# - Measure the actual printed grid size of the squares and modify size.
# 3. Modify the script:
# - Change grid_H, grid_W and size according to the actual printed checkerboard.
# - Change the path and file_name if necessary (ex: use this script as standalone).
# 4. Run the script online:
# - python calibrate_extrinsics.py
# 5. The results include intrinsics (saved to a file) and extrinsics (terminal output)
#
######################################################
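# Hedged invocation examples (editor's addition), mirroring step 4 above;
# the serial numbers are placeholders:
#     python calibrate_extrinsics.py                      # auto-detect cameras
#     python calibrate_extrinsics.py --SN_T265 <sn> --SN_D4xx <sn>
#     python calibrate_extrinsics.py --calibrate --path calibration_results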
from __future__ import print_function
import pyrealsense2 as rs
import numpy as np
np.set_printoptions(suppress=True,precision=5)
import cv2
assert cv2.__version__[0] >= '3', 'The fisheye module requires opencv version >= 3.0.0'
import os
import shutil
import json
import argparse
import glob
from collections import OrderedDict
parser = argparse.ArgumentParser()
parser.add_argument('--SN_T265', help='serial number of T265')
parser.add_argument('--SN_D4xx', help='serial number of D4xx')
parser.add_argument('--path', default="calibration_results", help='image path')
parser.add_argument('--file_name', default="/intrinsics.json", help='intrinsics calibration file name')
parser.add_argument('--save_tmp', default=False, help='save the temporary files of this program, useful for debugging purposes')
parser.add_argument('--grid_H', default=8, type=int, help='grid height (inner corners)')
parser.add_argument('--grid_W', default=6, type=int, help='grid width (inner corners)')
parser.add_argument('--size', default=0.0282, type=float, help='grid square side length in meters')
parser.add_argument('--calibrate', default=False, help='run calibration (only)', action='store_true')
parser.add_argument('--visualize', default=True, help='with GUI', action='store_true')
args = parser.parse_args()
CHECKERBOARD = (args.grid_H, args.grid_W)
SIDE_LENGTH = args.size
tmp_folder = args.path + "/tmp"
def add_camera_calibration(intrinsics, streams = None):
cam = {}
cam['center_px'] = [intrinsics.ppx, intrinsics.ppy]
cam['focal_length_px'] = [intrinsics.fx, intrinsics.fy]
cam['distortion'] = {}
cam['distortion']['type'] = 'kannalabrandt4'
cam['distortion']['k'] = intrinsics.coeffs[:4]
if streams:
ext = streams["cam1"].get_extrinsics_to(streams["pose"]) # w.r.t.
#print(ext)
cam["extrinsics"] = {}
cam["extrinsics"]["T"] = ext.translation
#print(ext.rotation)
cam["extrinsics"]["R"] = ext.rotation
return cam
def save_intrinsics(directory, file_name, intrinsics, streams):
D = OrderedDict() # in order (cam1,cam2)
D['cameras'] = []
D['cameras'].append(add_camera_calibration(intrinsics["cam1"], streams))
D['cameras'].append(add_camera_calibration(intrinsics["cam2"]))
if not os.path.exists(directory):
os.mkdir(directory)
with open(directory + file_name, 'w') as f:
json.dump(D, f, indent=4)
print("Intrinsics output written to " + directory + file_name)
def read_calibration(cam, extrinsics = False):
#print("read_calibration")
# intrinsics
K = np.array([[cam['focal_length_px'][0], 0, cam['center_px'][0]],
[ 0, cam['focal_length_px'][1], cam['center_px'][1]],
[ 0, 0, 1]])
D = np.array(cam['distortion']['k'])
if extrinsics:
H = np.eye(4)
H[:3,:3] = np.reshape(cam["extrinsics"]["R"],(3,3))
H[:3,3] = cam["extrinsics"]["T"]
#print(H)
return (K, D, H)
return (K, D)
def load_calibration(directory, file_name):
with open(directory + file_name, 'r') as f:
D = json.load(f)
(K1, D1, H1) = read_calibration(D['cameras'][0], True)
(K2, D2) = read_calibration(D['cameras'][1])
return (K1, D1, K2, D2, H1)
def find_realsense_serial_no(type):
camera_name = ['Intel RealSense T265', 'Intel RealSense D435']
# Get realsense pipeline handle
pipe = rs.pipeline()
# Find the T265
devices = rs.context().devices
for i in range(len(devices)):
if (devices[i].get_info(rs.camera_info.name) == camera_name[type]):
print('Found one connected ' + camera_name[type] + ' with serial no:', devices[i].get_info(rs.camera_info.serial_number))
return devices[i].get_info(rs.camera_info.serial_number)
print('No ' + camera_name[type] + ' found, please check connection or input serial manually')
return None
if not args.calibrate:
# Obtain the serial number of the cameras, either automatically or from user's input
print("Trying to connect devices...")
serial_t265 = None
serial_d4xx = None
if (not args.SN_T265):
serial_t265 = find_realsense_serial_no(0)
else:
serial_t265 = args.SN_T265
if (not args.SN_D4xx):
serial_d4xx = find_realsense_serial_no(1)
else:
serial_d4xx = args.SN_D4xx
if (not serial_t265) or (not serial_d4xx):
print("Specify serial numbers --SN_T265 and --SN_D4xx (for online calibration, or --calibrate for prerecorded images with --path path to folder)")
exit()
# cam 1
pipe1 = rs.pipeline()
cfg1 = rs.config()
cfg1.enable_device(serial_t265)
pipe1.start(cfg1)
# cam 2
pipe2 = rs.pipeline()
cfg2 = rs.config()
cfg2.enable_device(serial_d4xx)
cfg2.enable_all_streams()
pipe2_profile = pipe2.start(cfg2)
sensor_depth = pipe2_profile.get_device().first_depth_sensor()
sensor_depth.set_option(rs.option.emitter_enabled, 0) # turn OFF projector
try:
        # Retrieve the stream and intrinsic properties for both cameras
profile1 = pipe1.get_active_profile()
profile2 = pipe2.get_active_profile()
        # future improvement: make both streams configurable
streams = {"cam1" : profile1.get_stream(rs.stream.fisheye, 1).as_video_stream_profile(),
"pose" : profile1.get_stream(rs.stream.pose),
"cam2" : profile2.get_stream(rs.stream.infrared, 1).as_video_stream_profile()} # IR1
#"cam2" : profile1.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()} # test
intrinsics = {"cam1" : streams["cam1"].get_intrinsics(),
"cam2" : streams["cam2"].get_intrinsics()}
#print("cam1:", intrinsics["cam1"])
#print("cam2:", intrinsics["right"])
save_intrinsics(args.path, args.file_name, intrinsics, streams)
# capture images
i = 0
print("Press 's' to save image.\nPress 'q' or 'c' to quit recording and start the calibration.")
while True:
# cam 1
frames1 = pipe1.wait_for_frames()
f_fe1 = frames1.get_fisheye_frame(1) # left fisheye
f_fe2 = frames1.get_fisheye_frame(2) # right fisheye
if not f_fe1 or not f_fe2:
continue
img_fe1 = np.asanyarray(f_fe1.get_data())
img_fe2 = np.asanyarray(f_fe2.get_data())
# cam 2
frames2 = pipe2.wait_for_frames()
f_ir1 = frames2.get_infrared_frame(1) # left infrared
f_ir2 = frames2.get_infrared_frame(2) # right infrared
f_color = frames2.get_color_frame()
if not f_ir1 or not f_ir2 or not f_color:
continue
img_ir1 = np.asanyarray(f_ir1.get_data())
img_ir2 = np.asanyarray(f_ir2.get_data())
img_color = np.asanyarray(f_color.get_data())
# TODO: configure streams
img1 = img_fe1
img2 = img_ir1
# display
cv2.imshow('cam1', img1)
cv2.imshow('cam2', img2)
# save or quit
k = cv2.waitKey(1)
if k == ord('s'):
print("'s' key pressed. Saving temp images..")
if not os.path.exists(tmp_folder):
os.mkdir(tmp_folder)
cv2.imwrite(tmp_folder + '/fe1_' + str(i) + '.png', img_fe1)
cv2.imwrite(tmp_folder + '/fe2_' + str(i) + '.png', img_fe2)
cv2.imwrite(tmp_folder + '/ir1_' + str(i) + '.png', img_ir1)
# cv2.imwrite(tmp_folder+ '/ir2_' + str(i) + '.png', img_ir2)
cv2.imwrite(tmp_folder + '/color_' + str(i) + '.png', img_color)
print("Saved temp images in temp folder " + tmp_folder)
i = i+1
if k == ord('q') or k == ord('c'):
break
finally:
pipe1.stop()
pipe2.stop()
# calibrate
print("Calibrate extrinsics now...")
# arrays to store detections
P3 = [] # w.r.t. target frame
P2_1 = [] # in image #1
P2_2 = [] # in image #2
# TODO: configure streams
images1 = glob.glob(tmp_folder + '/fe1_*')
#images2 = glob.glob(tmp_folder + '/fe2_*') # test
images2 = glob.glob(tmp_folder + '/ir1_*')
images1.sort()
images2.sort()
#print(images1)
#print(images2)
if len(images1) == 0 or len(images1) != len(images2):
    print("No images found or cam1/cam2 image counts differ. Exit.")
    exit(0)
try:
for i, fname in enumerate(images1):
img1 = cv2.imread(images1[i])
img2 = cv2.imread(images2[i])
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
# detect
ret1, corners1 = cv2.findChessboardCorners(gray1, CHECKERBOARD, None)
ret2, corners2 = cv2.findChessboardCorners(gray2, CHECKERBOARD, None)
if ret1 and ret2:
# subpixel refinement
criteria_sub = (cv2.TermCriteria_COUNT + cv2.TERM_CRITERIA_EPS, 10, 1e-1)
rt = cv2.cornerSubPix(gray1, corners1, (7, 7), (-1, -1), criteria_sub)
P2_1.append(corners1)
if args.visualize:
ret1 = cv2.drawChessboardCorners(img1, CHECKERBOARD, corners1, ret1)
cv2.imshow("img1", img1)
cv2.waitKey(200)
rt = cv2.cornerSubPix(gray2, corners2, (7, 7), (-1, -1), criteria_sub)
P2_2.append(corners2)
if args.visualize:
ret2 = cv2.drawChessboardCorners(img2, CHECKERBOARD, corners2, ret2)
cv2.imshow("img2", img2)
cv2.waitKey(200)
except cv2.error as e:
print("Error: ", e)
# calibration (stereo extrinsics)
R = np.zeros((1, 1, 3), dtype=np.float64)
T = np.zeros((1, 1, 3), dtype=np.float64)
N = len(P2_1) # number of successful detections
p3d = np.zeros( (CHECKERBOARD[0]*CHECKERBOARD[1], 1, 3) , np.float64)
p3d[:,0, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
# fisheye.stereoCalibrate needs different data structures/dimensions than cv2.stereoCalibrate, i.e. (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2/3)!
P3 = np.array([p3d]*N, dtype=np.float64)
P2_1 = np.asarray(P2_1, dtype=np.float64)
P2_2 = np.asarray(P2_2, dtype=np.float64)
P3 = np.reshape(P3, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 3))*SIDE_LENGTH
P2_1 = np.reshape(P2_1, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2))
P2_2 = np.reshape(P2_2, (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2))
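# Shapes at this point, as required by cv2.fisheye.stereoCalibrate:
#   P3:   (N, 1, H*W, 3) object points, scaled to metres by SIDE_LENGTH
#   P2_1: (N, 1, H*W, 2) detected corners in the cam1 images
#   P2_2: (N, 1, H*W, 2) detected corners in the cam2 images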
(K1, D1, K2, D2, H1) = load_calibration(args.path, args.file_name)
try:
(rms, _, _, _, _, R, T) = \
cv2.fisheye.stereoCalibrate(
P3,
P2_1,
P2_2,
K1,
D1,
K2,
D2,
(0,0), # only used to initialize intrinsics when no intrinsics provided
R,
T,
cv2.fisheye.CALIB_FIX_INTRINSIC # extrinsics only
)
except cv2.error as e:
print("Error: ", e)
print("Please make sure that the checkerboard exists in the images. See tmp images in " + tmp_folder + " to debug.")
exit()
print("RMS:", rms)
H_cam2_cam1 = np.eye(4)
H_cam2_cam1[:3,:3] = R
H_cam2_cam1[:3,3] = T.flatten()
# w.r.t. pose
H_ir1_fe1 = H_cam2_cam1 # TODO: configure
H_pose_fe1 = H1
H_pose_ir1 = H_pose_fe1.dot( np.linalg.inv(H_ir1_fe1) )
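# Frame composition: inv(H_ir1_fe1) maps ir1 coordinates to fe1, and
# H_pose_fe1 maps fe1 to the pose frame, so the product maps ir1 -> pose.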
print("H (ir1 wrt pose) =", H_pose_ir1)
fn = args.path + "/H.txt"
np.savetxt(fn, H_pose_ir1, fmt='%.9f')
print("Extrinsic output written to", fn)
if not args.save_tmp:
if os.path.isdir(tmp_folder):
shutil.rmtree(tmp_folder, ignore_errors=True)
print("Temporary files deleted. If you wish to keep the tmp files, use --save_tmp True.") | robot/src/vision_to_mavros/scripts/calibrate_extrinsics.py | 12,847 | !/usr/bin/env python3 Calibrating the extrinsics between T265 and D4xx Based on this example: https://github.com/IntelRealSense/librealsense/pull/4355 with changes and modifications. General steps: 1. Mount the two cameras rigidly 2. Print any one of the checkerboards from: https://markhedleyjones.com/projects/calibration-checkerboard-collection - The default settings in this script are for: https://markhedleyjones.com/storage/checkerboards/Checkerboard-A4-25mm-8x6.pdf - Measure the actual printed grid size of the squares and modify size. 3. Modify the script: - Change grid_H, grid_W and size according to the actual printed checkerboard. - Change the path and file_name if necessary (ex: use this script as standalone). 4. Run the script online: - python calibrate_extrinsics.py 5. The results include intrinsics (save file) and extrinsics (terminal output) w.r.t.print(ext)print(ext.rotation) in order (cam1,cam2)print("read_calibration") intrinsicsprint(H) Get realsense pipeline handle Find the T265 Obtain the serial number of the cameras, either automatically or from user's input cam 1 cam 2 turn OFF projector Retreive the stream and intrinsic properties for both cameras future improvements: make both stream configureable IR1"cam2" : profile1.get_stream(rs.stream.fisheye, 2).as_video_stream_profile()} testprint("cam1:", intrinsics["cam1"])print("cam2:", intrinsics["right"]) capture images cam 1 left fisheye right fisheye cam 2 left infrared right infrared TODO: configure streams display save or quit cv2.imwrite(tmp_folder+ '/ir2_' + str(i) + '.png', img_ir2) calibrate arrays to store detections w.r.t. target frame in image 1 in image 2 TODO: configure streamsimages2 = glob.glob(tmp_folder + '/fe2_*') testprint(images1)print(images2) detect subpixel refinement calibration (stereo extrinsics) number of successful detections fisheye.stereoCalibrate needs different data structures/dimensions than cv2.stereoCalibrate, i.e. (N, 1, CHECKERBOARD[0]*CHECKERBOARD[1], 2/3)! only used to initialize intrinsics when no intrinsics provided extrinsics only w.r.t. pose TODO: configure | 2,113 | en | 0.646712 |
"""Implementation of Rule L044."""
from typing import Optional
from sqlfluff.core.rules.analysis.select_crawler import Query, SelectCrawler
from sqlfluff.core.parser import BaseSegment
from sqlfluff.core.rules.base import BaseRule, LintResult, RuleContext
from sqlfluff.core.rules.doc_decorators import document_groups
from sqlfluff.core.rules.functional import sp
class RuleFailure(Exception):
"""Exception class for reporting lint failure inside deeply nested code."""
def __init__(self, anchor: BaseSegment):
self.anchor: BaseSegment = anchor
@document_groups
class Rule_L044(BaseRule):
"""Query produces an unknown number of result columns.
**Anti-pattern**
Querying all columns using ``*`` produces a query result where the number
or ordering of columns changes if the upstream table's schema changes.
This should generally be avoided because it can cause slow performance,
cause important schema changes to go undetected, or break production code.
For example:
* If a query does ``SELECT t.*`` and is expected to return columns ``a``, ``b``,
and ``c``, the actual columns returned will be wrong/different if columns
are added to or deleted from the input table.
* ``UNION`` and ``DIFFERENCE`` clauses require the inputs have the same number
of columns (and compatible types).
* ``JOIN`` queries may break due to new column name conflicts, e.g. the
query references a column ``c`` which initially existed in only one input
table but a column of the same name is added to another table.
* ``CREATE TABLE (<<column schema>>) AS SELECT *``
.. code-block:: sql
WITH cte AS (
SELECT * FROM foo
)
SELECT * FROM cte
UNION
SELECT a, b FROM t
**Best practice**
Somewhere along the "path" to the source data, specify columns explicitly.
.. code-block:: sql
WITH cte AS (
SELECT * FROM foo
)
SELECT a, b FROM cte
UNION
SELECT a, b FROM t
"""
groups = ("all",)
_works_on_unparsable = False
def _handle_alias(self, selectable, alias_info, query):
select_info_target = SelectCrawler.get(
query, alias_info.from_expression_element
)[0]
if isinstance(select_info_target, str):
# It's an alias to an external table whose
# number of columns could vary without our
# knowledge. Thus, warn.
self.logger.debug(
f"Query target {select_info_target} is external. Generating warning."
)
raise RuleFailure(selectable.selectable)
else:
# Handle nested SELECT.
self._analyze_result_columns(select_info_target)
def _analyze_result_columns(self, query: Query):
"""Given info on a list of SELECTs, determine whether to warn."""
# Recursively walk from the given query (select_info_list) to any
        # wildcard columns in the select targets. If every wildcard eventually
# resolves to a query without wildcards, all is well. Otherwise, warn.
if not query.selectables:
return # pragma: no cover
for selectable in query.selectables:
self.logger.debug(f"Analyzing query: {selectable.selectable.raw}")
for wildcard in selectable.get_wildcard_info():
if wildcard.tables:
for wildcard_table in wildcard.tables:
self.logger.debug(
f"Wildcard: {wildcard.segment.raw} has target "
"{wildcard_table}"
)
# Is it an alias?
alias_info = selectable.find_alias(wildcard_table)
if alias_info:
# Found the alias matching the wildcard. Recurse,
# analyzing the query associated with that alias.
self._handle_alias(selectable, alias_info, query)
else:
# Not an alias. Is it a CTE?
cte = query.lookup_cte(wildcard_table)
if cte:
# Wildcard refers to a CTE. Analyze it.
self._analyze_result_columns(cte)
else:
# Not CTE, not table alias. Presumably an
# external table. Warn.
self.logger.debug(
f"Query target {wildcard_table} is external. "
"Generating warning."
)
raise RuleFailure(selectable.selectable)
else:
# No table was specified with the wildcard. Assume we're
# querying from a nested select in FROM.
query_list = SelectCrawler.get(
query, query.selectables[0].selectable
)
for o in query_list:
if isinstance(o, Query):
self._analyze_result_columns(o)
return
self.logger.debug(
f'Query target "{query.selectables[0].selectable.raw}" has no '
"targets. Generating warning."
)
raise RuleFailure(query.selectables[0].selectable)
def _eval(self, context: RuleContext) -> Optional[LintResult]:
"""Outermost query should produce known number of columns."""
start_types = ["select_statement", "set_expression", "with_compound_statement"]
if context.segment.is_type(
*start_types
) and not context.functional.parent_stack.any(sp.is_type(*start_types)):
crawler = SelectCrawler(context.segment, context.dialect)
# Begin analysis at the outer query.
if crawler.query_tree:
try:
return self._analyze_result_columns(crawler.query_tree)
except RuleFailure as e:
return LintResult(anchor=e.anchor)
return None
| src/sqlfluff/rules/L044.py | 6,370 | Exception class for reporting lint failure inside deeply nested code.
Query produces an unknown number of result columns.
**Anti-pattern**
Querying all columns using ``*`` produces a query result where the number
or ordering of columns changes if the upstream table's schema changes.
This should generally be avoided because it can cause slow performance,
cause important schema changes to go undetected, or break production code.
For example:
* If a query does ``SELECT t.*`` and is expected to return columns ``a``, ``b``,
and ``c``, the actual columns returned will be wrong/different if columns
are added to or deleted from the input table.
* ``UNION`` and ``DIFFERENCE`` clauses require the inputs have the same number
of columns (and compatible types).
* ``JOIN`` queries may break due to new column name conflicts, e.g. the
query references a column ``c`` which initially existed in only one input
table but a column of the same name is added to another table.
* ``CREATE TABLE (<<column schema>>) AS SELECT *``
.. code-block:: sql
WITH cte AS (
SELECT * FROM foo
)
SELECT * FROM cte
UNION
SELECT a, b FROM t
**Best practice**
Somewhere along the "path" to the source data, specify columns explicitly.
.. code-block:: sql
WITH cte AS (
SELECT * FROM foo
)
SELECT a, b FROM cte
UNION
SELECT a, b FROM t
Given info on a list of SELECTs, determine whether to warn.
Outermost query should produce known number of columns.
Implementation of Rule L044.
It's an alias to an external table whose number of columns could vary without our knowledge. Thus, warn. Handle nested SELECT. Recursively walk from the given query (select_info_list) to any wildcard columns in the select targets. If every wildcard evdentually resolves to a query without wildcards, all is well. Otherwise, warn. pragma: no cover Is it an alias? Found the alias matching the wildcard. Recurse, analyzing the query associated with that alias. Not an alias. Is it a CTE? Wildcard refers to a CTE. Analyze it. Not CTE, not table alias. Presumably an external table. Warn. No table was specified with the wildcard. Assume we're querying from a nested select in FROM. Begin analysis at the outer query. | 2,248 | en | 0.824228 |
import os, paramiko, time, schedule, smtplib, ssl
from datetime import datetime
from email.message import EmailMessage
host='localhost'
port='5432'
user='postgres'
password='admin'
database='testdb'
# local backup path
local_dir = 'C:\\Users\\Kamla\\projets\\auto-backup-sqldb\\backup\\'
#local_dir = 'Path to the folder of the database to back up\\'
# remote backup path
remote_dir = '/C:/Users/vmwin10/Documents/ftpfile/'
def job():
print("Backup working...")
filestamp = time.strftime('%Y-%m-%dT%H-%M-%S.%z')
    # name of the SQL file that pg_dump will generate
database_remote = database+"_"+filestamp+".bak.sql"
PASS="set PGPASSWORD=%s" % (password)
    # run pg_dump to create a local backup
    # the files are saved in the 'backup' directory
os.system("(cd backup) && ("+PASS+") && (pg_dump -h %s -p %s -U %s -f %s -C -d %s)" % (host, port, user, database_remote, database))
print("Database dumped to "+database_remote)
    # start the SFTP transfer
ssh_client=paramiko.SSHClient()
ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # connect to the machine where the backup file will be stored
ssh_client.connect(hostname='192.168.126.2',username='vmwin10',password='vmwin10')
ftp_client=ssh_client.open_sftp()
    # upload the local file to the remote host
ftp_client.put(local_dir+database_remote,remote_dir+database_remote)
ftp_client.close()
print("Successfull Backup")
    # an email is sent after each backup
msg = EmailMessage()
msg.set_content("Un backup vient d'etre effectue")
msg["Subject"] = "Email de Backup"
msg["From"] = "ksb.cmr@gmail.com"
msg["To"] = "test@mail.com"
context=ssl.create_default_context()
with smtplib.SMTP("smtp.gmail.com", port=587) as smtp:
smtp.starttls(context=context)
smtp.login(msg["From"], "password")
smtp.send_message(msg)
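    # Note: "password" above is a placeholder - Gmail typically requires an
    # app-specific password (or OAuth2) for SMTP logins, even over STARTTLS.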
# schedule the backup (every 3 seconds here, for testing; common alternatives below)
schedule.every(3).seconds.do(job)
#schedule.every(15).minutes.do(job)
#schedule.every().hour.do(job)
#schedule.every().day.at("10:30").do(job)
#schedule.every(10).to(10).minutes.do(job)
#schedule.every().monday.do(job)
#schedule.every().wednesday.at("15:00").do(job)
#schedule.every().minute.at(":15").do(job)
while True:
schedule.run_pending()
time.sleep(1)
 | pgsqlbackup.py | 2,432 | local backup path local_dir = 'Path to the folder of the database to back up\\' remote backup path name of the SQL file that pg_dump will generate run of the pg_dump command which makes a local backup the files are saved in the 'backup' directory start of the SFTP we connect to the machine where the backup file will be stored upload of the local file to the remote host an email is sent after each backup the backup runs every 1h schedule.every(15).minutes.do(job) schedule.every().hour.do(job) schedule.every().day.at("10:30").do(job) schedule.every(10).to(10).minutes.do(job) schedule.every().monday.do(job) schedule.every().wednesday.at("15:00").do(job) schedule.every().minute.at(":15").do(job) | 767 | fr | 0.832575 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
from datetime import date
from pathlib import Path
ROOT_DIR = Path(__file__).resolve(strict=True).parent.parent
PACKAGE_DIR = ROOT_DIR / "email_service"
DOCS_DIR = ROOT_DIR / "email_service"
version_file_path = PACKAGE_DIR / "version.py"
code_obj = compile(version_file_path.read_text(), version_file_path, "exec")
__version__ = dict()
exec(code_obj, __version__)
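# Compiling and exec-ing version.py extracts __version__ without importing the
# package itself, so the docs build does not need the package's runtime
# dependencies installed.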
version = __version__["__version__"]
# -- Project information -----------------------------------------------------
project = "Email Service"
copyright = """2021, Aditya Raman"""
author = "Aditya Raman"
# The full version, including alpha/beta/rc tags
version = release = f"v{version}"
today = str(date.today())
language = "en"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx_rtd_theme",
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme" # alternate: "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# These paths are either relative to html_static_path
# or fully qualified paths (eg. https://...)
# html_css_files = []
#
# html_style = ""
master_doc = "index"
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
"papersize": "a4paper",
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
"preamble": "\\addto\\captionsenglish{\\renewcommand{\\contentsname}{Table of contents}}",
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
latex_show_urls = "footnote"
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
add_function_parentheses = False
show_authors = True
| docs/conf.py | 3,252 | Configuration file for the Sphinx documentation builder. This file only contains a selection of the most common options. For a full list see the documentation: https://www.sphinx-doc.org/en/master/usage/configuration.html -- Path setup -------------------------------------------------------------- If extensions (or modules to document with autodoc) are in another directory, add these directories to sys.path here. If the directory is relative to the documentation root, use os.path.abspath to make it absolute, like shown here. -- Project information ----------------------------------------------------- The full version, including alpha/beta/rc tags -- General configuration --------------------------------------------------- Add any Sphinx extension module names here, as strings. They can be extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. Add any paths that contain templates here, relative to this directory. List of patterns, relative to source directory, that match files and directories to ignore when looking for source files. This pattern also affects html_static_path and html_extra_path. -- Options for HTML output ------------------------------------------------- The theme to use for HTML and HTML Help pages. See the documentation for a list of builtin themes. alternate: "alabaster" Add any paths that contain custom static files (such as style sheets) here, relative to this directory. They are copied after the builtin static files, so a file named "default.css" will overwrite the builtin "default.css". These paths are either relative to html_static_path or fully qualified paths (eg. https://...) html_css_files = [] html_style = "" The paper size ('letterpaper' or 'a4paper'). The font size ('10pt', '11pt' or '12pt'). 'pointsize': '10pt', Additional stuff for the LaTeX preamble. Latex figure (float) alignment 'figure_align': 'htbp', If true, the current module name will be prepended to all description unit titles (such as .. function::). | 1,996 | en | 0.683511 |
# NOTE - Still seems to be a leak here somewhere
# gateway count doesn't hit zero. Hence the print statements!
import sys
sys.coinit_flags = 0 # Must be free-threaded!
import win32api, pythoncom, time
import pywintypes
import os
import winerror
import win32com
import win32com.client.connect
from win32com.test.util import CheckClean
from win32com.client import constants, DispatchBaseClass, CastTo, VARIANT
from win32com.test.util import RegisterPythonServer
from pywin32_testutil import str2memory
import datetime
import decimal
import win32timezone
importMsg = "**** PyCOMTest is not installed ***\n PyCOMTest is a Python test specific COM client and server.\n It is likely this server is not installed on this machine\n To install the server, you must get the win32com sources\n and build it using MS Visual C++"
error = Exception
# This test uses a Python implemented COM server - ensure correctly registered.
RegisterPythonServer(
os.path.join(os.path.dirname(__file__), "..", "servers", "test_pycomtest.py"),
"Python.Test.PyCOMTest",
)
from win32com.client import gencache
try:
gencache.EnsureModule("{6BCDCB60-5605-11D0-AE5F-CADD4C000000}", 0, 1, 1)
except pythoncom.com_error:
print("The PyCOMTest module can not be located or generated.")
print(importMsg)
raise RuntimeError(importMsg)
# We had a bug where RegisterInterfaces would fail if gencache had
# already been run - exercise that here
from win32com import universal
universal.RegisterInterfaces("{6BCDCB60-5605-11D0-AE5F-CADD4C000000}", 0, 1, 1)
verbose = 0
# convert a normal int to a long int - used to avoid, eg, '1L' for py3k
# friendliness
def ensure_long(int_val):
if sys.version_info > (3,):
# py3k - no such thing as a 'long'
return int_val
# on py2x, we just use an expression that results in a long
return 0x100000000 - 0x100000000 + int_val
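# e.g. ensure_long(1) == 1 on both Python 2 and 3; on py2 the arithmetic with
# the 0x100000000 literal (a long) promotes the result to type 'long', while
# on py3 it stays a plain int.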
def check_get_set(func, arg):
got = func(arg)
if got != arg:
raise error("%s failed - expected %r, got %r" % (func, arg, got))
def check_get_set_raises(exc, func, arg):
try:
got = func(arg)
except exc as e:
pass # what we expect!
else:
raise error(
"%s with arg %r didn't raise %s - returned %r" % (func, arg, exc, got)
)
def progress(*args):
if verbose:
for arg in args:
print(arg, end=" ")
print()
def TestApplyResult(fn, args, result):
try:
fnName = str(fn).split()[1]
except:
fnName = str(fn)
progress("Testing ", fnName)
pref = "function " + fnName
rc = fn(*args)
if rc != result:
raise error("%s failed - result not %r but %r" % (pref, result, rc))
def TestConstant(constName, pyConst):
try:
comConst = getattr(constants, constName)
except:
raise error("Constant %s missing" % (constName,))
if comConst != pyConst:
raise error(
"Constant value wrong for %s - got %s, wanted %s"
% (constName, comConst, pyConst)
)
# Simple handler class. This demo only fires one event.
class RandomEventHandler:
def _Init(self):
self.fireds = {}
def OnFire(self, no):
try:
self.fireds[no] = self.fireds[no] + 1
except KeyError:
self.fireds[no] = 0
def OnFireWithNamedParams(self, no, a_bool, out1, out2):
# This test exists mainly to help with an old bug, where named
# params would come in reverse.
Missing = pythoncom.Missing
if no is not Missing:
# We know our impl called 'OnFire' with the same ID
assert no in self.fireds
assert no + 1 == out1, "expecting 'out1' param to be ID+1"
assert no + 2 == out2, "expecting 'out2' param to be ID+2"
# The middle must be a boolean.
assert a_bool is Missing or type(a_bool) == bool, "middle param not a bool"
return out1 + 2, out2 + 2
def _DumpFireds(self):
if not self.fireds:
print("ERROR: Nothing was received!")
for firedId, no in self.fireds.items():
progress("ID %d fired %d times" % (firedId, no))
# A simple handler class that derives from object (ie, a "new style class") -
# only relevant for Python 2.x (ie, the 2 classes should be identical in 3.x)
class NewStyleRandomEventHandler(object):
def _Init(self):
self.fireds = {}
def OnFire(self, no):
try:
self.fireds[no] = self.fireds[no] + 1
except KeyError:
self.fireds[no] = 0
def OnFireWithNamedParams(self, no, a_bool, out1, out2):
# This test exists mainly to help with an old bug, where named
# params would come in reverse.
Missing = pythoncom.Missing
if no is not Missing:
# We know our impl called 'OnFire' with the same ID
assert no in self.fireds
assert no + 1 == out1, "expecting 'out1' param to be ID+1"
assert no + 2 == out2, "expecting 'out2' param to be ID+2"
# The middle must be a boolean.
assert a_bool is Missing or type(a_bool) == bool, "middle param not a bool"
return out1 + 2, out2 + 2
def _DumpFireds(self):
if not self.fireds:
print("ERROR: Nothing was received!")
for firedId, no in self.fireds.items():
progress("ID %d fired %d times" % (firedId, no))
# Test everything which can be tested using both the "dynamic" and "generated"
# COM objects (or when there are very subtle differences)
def TestCommon(o, is_generated):
progress("Getting counter")
counter = o.GetSimpleCounter()
TestCounter(counter, is_generated)
progress("Checking default args")
rc = o.TestOptionals()
if rc[:-1] != ("def", 0, 1) or abs(rc[-1] - 3.14) > 0.01:
print(rc)
raise error("Did not get the optional values correctly")
rc = o.TestOptionals("Hi", 2, 3, 1.1)
if rc[:-1] != ("Hi", 2, 3) or abs(rc[-1] - 1.1) > 0.01:
print(rc)
raise error("Did not get the specified optional values correctly")
rc = o.TestOptionals2(0)
if rc != (0, "", 1):
print(rc)
raise error("Did not get the optional2 values correctly")
rc = o.TestOptionals2(1.1, "Hi", 2)
if rc[1:] != ("Hi", 2) or abs(rc[0] - 1.1) > 0.01:
print(rc)
raise error("Did not get the specified optional2 values correctly")
progress("Checking getting/passing IUnknown")
check_get_set(o.GetSetUnknown, o)
progress("Checking getting/passing IDispatch")
# This might be called with either the interface or the CoClass - but these
# functions always return from the interface.
expected_class = o.__class__
# CoClass instances have `default_interface`
expected_class = getattr(expected_class, "default_interface", expected_class)
if not isinstance(o.GetSetDispatch(o), expected_class):
raise error("GetSetDispatch failed: %r" % (o.GetSetDispatch(o),))
progress("Checking getting/passing IDispatch of known type")
expected_class = o.__class__
expected_class = getattr(expected_class, "default_interface", expected_class)
if o.GetSetInterface(o).__class__ != expected_class:
raise error("GetSetDispatch failed")
progress("Checking misc args")
check_get_set(o.GetSetVariant, 4)
check_get_set(o.GetSetVariant, "foo")
check_get_set(o.GetSetVariant, o)
# signed/unsigned.
check_get_set(o.GetSetInt, 0)
check_get_set(o.GetSetInt, -1)
check_get_set(o.GetSetInt, 1)
check_get_set(o.GetSetUnsignedInt, 0)
check_get_set(o.GetSetUnsignedInt, 1)
check_get_set(o.GetSetUnsignedInt, 0x80000000)
if o.GetSetUnsignedInt(-1) != 0xFFFFFFFF:
# -1 is a special case - we accept a negative int (silently converting to
# unsigned) but when getting it back we convert it to a long.
raise error("unsigned -1 failed")
check_get_set(o.GetSetLong, 0)
check_get_set(o.GetSetLong, -1)
check_get_set(o.GetSetLong, 1)
check_get_set(o.GetSetUnsignedLong, 0)
check_get_set(o.GetSetUnsignedLong, 1)
check_get_set(o.GetSetUnsignedLong, 0x80000000)
# -1 is a special case - see above.
if o.GetSetUnsignedLong(-1) != 0xFFFFFFFF:
raise error("unsigned -1 failed")
# We want to explicitly test > 32 bits. py3k has no 'maxint' and
# 'maxsize+1' is no good on 64bit platforms as its 65 bits!
big = 2147483647 # sys.maxint on py2k
for l in big, big + 1, 1 << 65:
check_get_set(o.GetSetVariant, l)
progress("Checking structs")
r = o.GetStruct()
assert r.int_value == 99 and str(r.str_value) == "Hello from C++"
assert o.DoubleString("foo") == "foofoo"
progress("Checking var args")
o.SetVarArgs("Hi", "There", "From", "Python", 1)
if o.GetLastVarArgs() != ("Hi", "There", "From", "Python", 1):
raise error("VarArgs failed -" + str(o.GetLastVarArgs()))
progress("Checking arrays")
l = []
TestApplyResult(o.SetVariantSafeArray, (l,), len(l))
l = [1, 2, 3, 4]
TestApplyResult(o.SetVariantSafeArray, (l,), len(l))
TestApplyResult(
o.CheckVariantSafeArray,
(
(
1,
2,
3,
4,
),
),
1,
)
# and binary
TestApplyResult(o.SetBinSafeArray, (str2memory("foo\0bar"),), 7)
progress("Checking properties")
o.LongProp = 3
if o.LongProp != 3 or o.IntProp != 3:
raise error("Property value wrong - got %d/%d" % (o.LongProp, o.IntProp))
o.LongProp = o.IntProp = -3
if o.LongProp != -3 or o.IntProp != -3:
raise error("Property value wrong - got %d/%d" % (o.LongProp, o.IntProp))
# This number fits in an unsigned long. Attempting to set it to a normal
# long will involve overflow, which is to be expected. But we do
# expect it to work in a property explicitly a VT_UI4.
check = 3 * 10 ** 9
o.ULongProp = check
if o.ULongProp != check:
raise error(
"Property value wrong - got %d (expected %d)" % (o.ULongProp, check)
)
TestApplyResult(o.Test, ("Unused", 99), 1) # A bool function
TestApplyResult(o.Test, ("Unused", -1), 1) # A bool function
TestApplyResult(o.Test, ("Unused", 1 == 1), 1) # A bool function
TestApplyResult(o.Test, ("Unused", 0), 0)
TestApplyResult(o.Test, ("Unused", 1 == 0), 0)
assert o.DoubleString("foo") == "foofoo"
TestConstant("ULongTest1", ensure_long(0xFFFFFFFF))
TestConstant("ULongTest2", ensure_long(0x7FFFFFFF))
TestConstant("LongTest1", ensure_long(-0x7FFFFFFF))
TestConstant("LongTest2", ensure_long(0x7FFFFFFF))
TestConstant("UCharTest", 255)
TestConstant("CharTest", -1)
# 'Hello World', but the 'r' is the "Registered" sign (\xae)
TestConstant("StringTest", "Hello Wo\xaeld")
progress("Checking dates and times")
# For now *all* times passed must be tz-aware.
now = win32timezone.now()
# but conversion to and from a VARIANT loses sub-second...
now = now.replace(microsecond=0)
later = now + datetime.timedelta(seconds=1)
TestApplyResult(o.EarliestDate, (now, later), now)
# The below used to fail with `ValueError: microsecond must be in 0..999999` - see #1655
# https://planetcalc.com/7027/ says that float is: Sun, 25 Mar 1951 7:23:49 am
assert o.MakeDate(18712.308206013888) == datetime.datetime.fromisoformat(
"1951-03-25 07:23:49+00:00"
)
progress("Checking currency")
# currency.
pythoncom.__future_currency__ = 1
if o.CurrencyProp != 0:
raise error("Expecting 0, got %r" % (o.CurrencyProp,))
for val in ("1234.5678", "1234.56", "1234"):
o.CurrencyProp = decimal.Decimal(val)
if o.CurrencyProp != decimal.Decimal(val):
raise error("%s got %r" % (val, o.CurrencyProp))
v1 = decimal.Decimal("1234.5678")
TestApplyResult(o.DoubleCurrency, (v1,), v1 * 2)
v2 = decimal.Decimal("9012.3456")
TestApplyResult(o.AddCurrencies, (v1, v2), v1 + v2)
TestTrickyTypesWithVariants(o, is_generated)
progress("Checking win32com.client.VARIANT")
TestPyVariant(o, is_generated)
def TestTrickyTypesWithVariants(o, is_generated):
# Test tricky stuff with type handling and generally only works with
# "generated" support but can be worked around using VARIANT.
if is_generated:
got = o.TestByRefVariant(2)
else:
v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_VARIANT, 2)
o.TestByRefVariant(v)
got = v.value
if got != 4:
raise error("TestByRefVariant failed")
if is_generated:
got = o.TestByRefString("Foo")
else:
v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_BSTR, "Foo")
o.TestByRefString(v)
got = v.value
if got != "FooFoo":
raise error("TestByRefString failed")
# check we can pass ints as a VT_UI1
vals = [1, 2, 3, 4]
if is_generated:
arg = vals
else:
arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_UI1, vals)
TestApplyResult(o.SetBinSafeArray, (arg,), len(vals))
# safearrays of doubles and floats
vals = [0, 1.1, 2.2, 3.3]
if is_generated:
arg = vals
else:
arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R8, vals)
TestApplyResult(o.SetDoubleSafeArray, (arg,), len(vals))
if is_generated:
arg = vals
else:
arg = VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R4, vals)
TestApplyResult(o.SetFloatSafeArray, (arg,), len(vals))
vals = [1.1, 2.2, 3.3, 4.4]
expected = (1.1 * 2, 2.2 * 2, 3.3 * 2, 4.4 * 2)
if is_generated:
TestApplyResult(o.ChangeDoubleSafeArray, (vals,), expected)
else:
arg = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_ARRAY | pythoncom.VT_R8, vals)
o.ChangeDoubleSafeArray(arg)
if arg.value != expected:
raise error("ChangeDoubleSafeArray got the wrong value")
if is_generated:
got = o.DoubleInOutString("foo")
else:
v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_BSTR, "foo")
o.DoubleInOutString(v)
got = v.value
assert got == "foofoo", got
val = decimal.Decimal("1234.5678")
if is_generated:
got = o.DoubleCurrencyByVal(val)
else:
v = VARIANT(pythoncom.VT_BYREF | pythoncom.VT_CY, val)
o.DoubleCurrencyByVal(v)
got = v.value
assert got == val * 2
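# Pattern recap for the dynamic (non-generated) path above: wrap an in/out
# argument as VARIANT(pythoncom.VT_BYREF | <vt>, value), pass the VARIANT to
# the call, then read the updated result back from its .value attribute.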
def TestDynamic():
progress("Testing Dynamic")
import win32com.client.dynamic
o = win32com.client.dynamic.DumbDispatch("PyCOMTest.PyCOMTest")
TestCommon(o, False)
counter = win32com.client.dynamic.DumbDispatch("PyCOMTest.SimpleCounter")
TestCounter(counter, False)
# Dynamic doesn't know this should be an int, so we get a COM
# TypeMismatch error.
try:
check_get_set_raises(ValueError, o.GetSetInt, "foo")
raise error("no exception raised")
except pythoncom.com_error as exc:
if exc.hresult != winerror.DISP_E_TYPEMISMATCH:
raise
arg1 = VARIANT(pythoncom.VT_R4 | pythoncom.VT_BYREF, 2.0)
arg2 = VARIANT(pythoncom.VT_BOOL | pythoncom.VT_BYREF, True)
arg3 = VARIANT(pythoncom.VT_I4 | pythoncom.VT_BYREF, 4)
o.TestInOut(arg1, arg2, arg3)
assert arg1.value == 4.0, arg1
assert arg2.value == False
assert arg3.value == 8
# damn - props with params don't work for dynamic objects :(
# o.SetParamProp(0, 1)
# if o.ParamProp(0) != 1:
# raise RuntimeError, o.paramProp(0)
def TestGenerated():
# Create an instance of the server.
from win32com.client.gencache import EnsureDispatch
o = EnsureDispatch("PyCOMTest.PyCOMTest")
TestCommon(o, True)
counter = EnsureDispatch("PyCOMTest.SimpleCounter")
TestCounter(counter, True)
# This dance lets us get a CoClass even though it's not explicitly registered.
# This is `CoPyComTest`
from win32com.client.CLSIDToClass import GetClass
coclass_o = GetClass("{8EE0C520-5605-11D0-AE5F-CADD4C000000}")()
TestCommon(coclass_o, True)
# Test the regression reported in #1753
assert bool(coclass_o)
# This is `CoSimpleCounter` and the counter tests should work.
coclass = GetClass("{B88DD310-BAE8-11D0-AE86-76F2C1000000}")()
TestCounter(coclass, True)
# XXX - this is failing in dynamic tests, but should work fine.
i1, i2 = o.GetMultipleInterfaces()
if not isinstance(i1, DispatchBaseClass) or not isinstance(i2, DispatchBaseClass):
        # Yay - instances are now returned!
raise error(
"GetMultipleInterfaces did not return instances - got '%s', '%s'" % (i1, i2)
)
del i1
del i2
# Generated knows to only pass a 32bit int, so should fail.
check_get_set_raises(OverflowError, o.GetSetInt, 0x80000000)
check_get_set_raises(OverflowError, o.GetSetLong, 0x80000000)
# Generated knows this should be an int, so raises ValueError
check_get_set_raises(ValueError, o.GetSetInt, "foo")
check_get_set_raises(ValueError, o.GetSetLong, "foo")
# Pass some non-sequence objects to our array decoder, and watch it fail.
try:
o.SetVariantSafeArray("foo")
raise error("Expected a type error")
except TypeError:
pass
try:
o.SetVariantSafeArray(666)
raise error("Expected a type error")
except TypeError:
pass
o.GetSimpleSafeArray(None)
TestApplyResult(o.GetSimpleSafeArray, (None,), tuple(range(10)))
resultCheck = tuple(range(5)), tuple(range(10)), tuple(range(20))
TestApplyResult(o.GetSafeArrays, (None, None, None), resultCheck)
l = []
TestApplyResult(o.SetIntSafeArray, (l,), len(l))
l = [1, 2, 3, 4]
TestApplyResult(o.SetIntSafeArray, (l,), len(l))
ll = [1, 2, 3, 0x100000000]
TestApplyResult(o.SetLongLongSafeArray, (ll,), len(ll))
TestApplyResult(o.SetULongLongSafeArray, (ll,), len(ll))
# Tell the server to do what it does!
TestApplyResult(o.Test2, (constants.Attr2,), constants.Attr2)
TestApplyResult(o.Test3, (constants.Attr2,), constants.Attr2)
TestApplyResult(o.Test4, (constants.Attr2,), constants.Attr2)
TestApplyResult(o.Test5, (constants.Attr2,), constants.Attr2)
TestApplyResult(o.Test6, (constants.WideAttr1,), constants.WideAttr1)
TestApplyResult(o.Test6, (constants.WideAttr2,), constants.WideAttr2)
TestApplyResult(o.Test6, (constants.WideAttr3,), constants.WideAttr3)
TestApplyResult(o.Test6, (constants.WideAttr4,), constants.WideAttr4)
TestApplyResult(o.Test6, (constants.WideAttr5,), constants.WideAttr5)
TestApplyResult(o.TestInOut, (2.0, True, 4), (4.0, False, 8))
o.SetParamProp(0, 1)
if o.ParamProp(0) != 1:
        raise RuntimeError(o.ParamProp(0))
# Make sure CastTo works - even though it is only casting it to itself!
o2 = CastTo(o, "IPyCOMTest")
if o != o2:
raise error("CastTo should have returned the same object")
# Do the connection point thing...
# Create a connection object.
progress("Testing connection points")
o2 = win32com.client.DispatchWithEvents(o, RandomEventHandler)
TestEvents(o2, o2)
o2 = win32com.client.DispatchWithEvents(o, NewStyleRandomEventHandler)
TestEvents(o2, o2)
# and a plain "WithEvents".
handler = win32com.client.WithEvents(o, RandomEventHandler)
TestEvents(o, handler)
handler = win32com.client.WithEvents(o, NewStyleRandomEventHandler)
TestEvents(o, handler)
progress("Finished generated .py test.")
def TestEvents(o, handler):
sessions = []
handler._Init()
try:
for i in range(3):
session = o.Start()
sessions.append(session)
time.sleep(0.5)
finally:
# Stop the servers
for session in sessions:
o.Stop(session)
handler._DumpFireds()
handler.close()
def _TestPyVariant(o, is_generated, val, checker=None):
if is_generated:
vt, got = o.GetVariantAndType(val)
else:
# Gotta supply all 3 args with the last 2 being explicit variants to
# get the byref behaviour.
var_vt = VARIANT(pythoncom.VT_UI2 | pythoncom.VT_BYREF, 0)
var_result = VARIANT(pythoncom.VT_VARIANT | pythoncom.VT_BYREF, 0)
o.GetVariantAndType(val, var_vt, var_result)
vt = var_vt.value
got = var_result.value
if checker is not None:
checker(got)
return
# default checking.
assert vt == val.varianttype, (vt, val.varianttype)
# Handle our safe-array test - if the passed value is a list of variants,
# compare against the actual values.
if type(val.value) in (tuple, list):
check = [v.value if isinstance(v, VARIANT) else v for v in val.value]
# pythoncom always returns arrays as tuples.
got = list(got)
else:
check = val.value
assert type(check) == type(got), (type(check), type(got))
assert check == got, (check, got)
def _TestPyVariantFails(o, is_generated, val, exc):
try:
_TestPyVariant(o, is_generated, val)
raise error("Setting %r didn't raise %s" % (val, exc))
except exc:
pass
def TestPyVariant(o, is_generated):
_TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_UI1, 1))
_TestPyVariant(
o, is_generated, VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_UI4, [1, 2, 3])
)
_TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_BSTR, "hello"))
_TestPyVariant(
o,
is_generated,
VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_BSTR, ["hello", "there"]),
)
def check_dispatch(got):
assert isinstance(got._oleobj_, pythoncom.TypeIIDs[pythoncom.IID_IDispatch])
_TestPyVariant(o, is_generated, VARIANT(pythoncom.VT_DISPATCH, o), check_dispatch)
_TestPyVariant(
o, is_generated, VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_DISPATCH, [o])
)
# an array of variants each with a specific type.
v = VARIANT(
pythoncom.VT_ARRAY | pythoncom.VT_VARIANT,
[
VARIANT(pythoncom.VT_UI4, 1),
VARIANT(pythoncom.VT_UI4, 2),
VARIANT(pythoncom.VT_UI4, 3),
],
)
_TestPyVariant(o, is_generated, v)
# and failures
_TestPyVariantFails(o, is_generated, VARIANT(pythoncom.VT_UI1, "foo"), ValueError)
def TestCounter(counter, bIsGenerated):
# Test random access into container
progress("Testing counter", repr(counter))
import random
for i in range(50):
num = int(random.random() * len(counter))
try:
# XXX - this appears broken by commit 08a14d4deb374eaa06378509cf44078ad467b9dc -
# We shouldn't need to do generated differently than dynamic.
if bIsGenerated:
ret = counter.Item(num + 1)
else:
ret = counter[num]
if ret != num + 1:
raise error(
"Random access into element %d failed - return was %s"
% (num, repr(ret))
)
except IndexError:
raise error("** IndexError accessing collection element %d" % num)
num = 0
if bIsGenerated:
counter.SetTestProperty(1)
counter.TestProperty = 1 # Note this has a second, default arg.
counter.SetTestProperty(1, 2)
if counter.TestPropertyWithDef != 0:
raise error("Unexpected property set value!")
if counter.TestPropertyNoDef(1) != 1:
raise error("Unexpected property set value!")
else:
pass
# counter.TestProperty = 1
counter.LBound = 1
counter.UBound = 10
if counter.LBound != 1 or counter.UBound != 10:
print("** Error - counter did not keep its properties")
if bIsGenerated:
bounds = counter.GetBounds()
if bounds[0] != 1 or bounds[1] != 10:
raise error("** Error - counter did not give the same properties back")
counter.SetBounds(bounds[0], bounds[1])
for item in counter:
num = num + 1
if num != len(counter):
raise error("*** Length of counter and loop iterations dont match ***")
if num != 10:
raise error("*** Unexpected number of loop iterations ***")
try:
counter = iter(counter)._iter_.Clone() # Test Clone() and enum directly
except AttributeError:
# *sob* - sometimes this is a real iterator and sometimes not :/
progress("Finished testing counter (but skipped the iterator stuff")
return
counter.Reset()
num = 0
for item in counter:
num = num + 1
if num != 10:
raise error("*** Unexpected number of loop iterations - got %d ***" % num)
progress("Finished testing counter")
def TestLocalVTable(ob):
# Python doesn't fully implement this interface.
if ob.DoubleString("foo") != "foofoo":
raise error("couldn't foofoo")
###############################
##
## Some vtable tests of the interface
##
def TestVTable(clsctx=pythoncom.CLSCTX_ALL):
# Any vtable interfaces marked as dual *should* be able to be
# correctly implemented as IDispatch.
ob = win32com.client.Dispatch("Python.Test.PyCOMTest")
TestLocalVTable(ob)
# Now test it via vtable - use some C++ code to help here as Python can't do it directly yet.
tester = win32com.client.Dispatch("PyCOMTest.PyCOMTest")
testee = pythoncom.CoCreateInstance(
"Python.Test.PyCOMTest", None, clsctx, pythoncom.IID_IUnknown
)
# check we fail gracefully with None passed.
try:
tester.TestMyInterface(None)
except pythoncom.com_error as details:
pass
# and a real object.
tester.TestMyInterface(testee)
def TestVTable2():
# We once crashed creating our object with the native interface as
# the first IID specified. We must do it _after_ the tests, so that
# Python has already had the gateway registered from last run.
ob = win32com.client.Dispatch("Python.Test.PyCOMTest")
iid = pythoncom.InterfaceNames["IPyCOMTest"]
clsid = "Python.Test.PyCOMTest"
clsctx = pythoncom.CLSCTX_SERVER
try:
testee = pythoncom.CoCreateInstance(clsid, None, clsctx, iid)
except TypeError:
# Python can't actually _use_ this interface yet, so this is
# "expected". Any COM error is not.
pass
def TestVTableMI():
clsctx = pythoncom.CLSCTX_SERVER
ob = pythoncom.CoCreateInstance(
"Python.Test.PyCOMTestMI", None, clsctx, pythoncom.IID_IUnknown
)
# This inherits from IStream.
ob.QueryInterface(pythoncom.IID_IStream)
# This implements IStorage, specifying the IID as a string
ob.QueryInterface(pythoncom.IID_IStorage)
# IDispatch should always work
ob.QueryInterface(pythoncom.IID_IDispatch)
iid = pythoncom.InterfaceNames["IPyCOMTest"]
try:
ob.QueryInterface(iid)
except TypeError:
# Python can't actually _use_ this interface yet, so this is
# "expected". Any COM error is not.
pass
def TestQueryInterface(long_lived_server=0, iterations=5):
tester = win32com.client.Dispatch("PyCOMTest.PyCOMTest")
if long_lived_server:
# Create a local server
t0 = win32com.client.Dispatch(
"Python.Test.PyCOMTest", clsctx=pythoncom.CLSCTX_LOCAL_SERVER
)
# Request custom interfaces a number of times
prompt = [
"Testing QueryInterface without long-lived local-server #%d of %d...",
"Testing QueryInterface with long-lived local-server #%d of %d...",
]
for i in range(iterations):
progress(prompt[long_lived_server != 0] % (i + 1, iterations))
tester.TestQueryInterface()
class Tester(win32com.test.util.TestCase):
def testVTableInProc(self):
# We used to crash running this the second time - do it a few times
for i in range(3):
progress("Testing VTables in-process #%d..." % (i + 1))
TestVTable(pythoncom.CLSCTX_INPROC_SERVER)
def testVTableLocalServer(self):
for i in range(3):
progress("Testing VTables out-of-process #%d..." % (i + 1))
TestVTable(pythoncom.CLSCTX_LOCAL_SERVER)
def testVTable2(self):
for i in range(3):
TestVTable2()
def testVTableMI(self):
for i in range(3):
TestVTableMI()
def testMultiQueryInterface(self):
TestQueryInterface(0, 6)
# When we use the custom interface in the presence of a long-lived
# local server, i.e. a local server that is already running when
# we request an instance of our COM object, and remains afterwards,
# then after repeated requests to create an instance of our object
# the custom interface disappears -- i.e. QueryInterface fails with
# E_NOINTERFACE. Set the upper range of the following test to 2 to
# pass this test, i.e. TestQueryInterface(1,2)
TestQueryInterface(1, 6)
def testDynamic(self):
TestDynamic()
def testGenerated(self):
TestGenerated()
if __name__ == "__main__":
# XXX - todo - Complete hack to crank threading support.
# Should NOT be necessary
def NullThreadFunc():
pass
import _thread
_thread.start_new(NullThreadFunc, ())
if "-v" in sys.argv:
verbose = 1
win32com.test.util.testmain()
| env/Lib/site-packages/win32com/test/testPyComTest.py | 29,446 | NOTE - Still seems to be a leak here somewhere gateway count doesnt hit zero. Hence the print statements! Must be free-threaded! This test uses a Python implemented COM server - ensure correctly registered. We had a bg where RegisterInterfaces would fail if gencache had already been run - exercise that here convert a normal int to a long int - used to avoid, eg, '1L' for py3k friendliness py3k - no such thing as a 'long' on py2x, we just use an expression that results in a long what we expect! Simple handler class. This demo only fires one event. This test exists mainly to help with an old bug, where named params would come in reverse. We know our impl called 'OnFire' with the same ID The middle must be a boolean. A simple handler class that derives from object (ie, a "new style class") - only relevant for Python 2.x (ie, the 2 classes should be identical in 3.x) This test exists mainly to help with an old bug, where named params would come in reverse. We know our impl called 'OnFire' with the same ID The middle must be a boolean. Test everything which can be tested using both the "dynamic" and "generated" COM objects (or when there are very subtle differences) This might be called with either the interface or the CoClass - but these functions always return from the interface. CoClass instances have `default_interface` signed/unsigned. -1 is a special case - we accept a negative int (silently converting to unsigned) but when getting it back we convert it to a long. -1 is a special case - see above. We want to explicitly test > 32 bits. py3k has no 'maxint' and 'maxsize+1' is no good on 64bit platforms as its 65 bits! sys.maxint on py2k and binary This number fits in an unsigned long. Attempting to set it to a normal long will involve overflow, which is to be expected. But we do expect it to work in a property explicitly a VT_UI4. A bool function A bool function A bool function 'Hello World', but the 'r' is the "Registered" sign (\xae) For now *all* times passed must be tz-aware. but conversion to and from a VARIANT loses sub-second... The below used to fail with `ValueError: microsecond must be in 0..999999` - see 1655 https://planetcalc.com/7027/ says that float is: Sun, 25 Mar 1951 7:23:49 am currency. Test tricky stuff with type handling and generally only works with "generated" support but can be worked around using VARIANT. check we can pass ints as a VT_UI1 safearrays of doubles and floats Dynamic doesn't know this should be an int, so we get a COM TypeMismatch error. damn - props with params don't work for dynamic objects :( o.SetParamProp(0, 1) if o.ParamProp(0) != 1: raise RuntimeError, o.paramProp(0) Create an instance of the server. This dance lets us get a CoClass even though it's not explicitly registered. This is `CoPyComTest` Test the regression reported in 1753 This is `CoSimpleCounter` and the counter tests should work. XXX - this is failing in dynamic tests, but should work fine. Yay - is now an instance returned! Generated knows to only pass a 32bit int, so should fail. Generated knows this should be an int, so raises ValueError Pass some non-sequence objects to our array decoder, and watch it fail. Tell the server to do what it does! Make sure CastTo works - even though it is only casting it to itself! Do the connection point thing... Create a connection object. and a plain "WithEvents". Stop the servers Gotta supply all 3 args with the last 2 being explicit variants to get the byref behaviour. 
default checking. Handle our safe-array test - if the passed value is a list of variants, compare against the actual values. pythoncom always returns arrays as tuples. an array of variants each with a specific type. and failures Test random access into container XXX - this appears broken by commit 08a14d4deb374eaa06378509cf44078ad467b9dc - We shouldn't need to do generated differently than dynamic. Note this has a second, default arg. counter.TestProperty = 1 Test Clone() and enum directly *sob* - sometimes this is a real iterator and sometimes not :/ Python doesn't fully implement this interface. Some vtable tests of the interface Any vtable interfaces marked as dual *should* be able to be correctly implemented as IDispatch. Now test it via vtable - use some C++ code to help here as Python can't do it directly yet. check we fail gracefully with None passed. and a real object. We once crashed creating our object with the native interface as the first IID specified. We must do it _after_ the tests, so that Python has already had the gateway registered from last run. Python can't actually _use_ this interface yet, so this is "expected". Any COM error is not. This inherits from IStream. This implements IStorage, specifying the IID as a string IDispatch should always work Python can't actually _use_ this interface yet, so this is "expected". Any COM error is not. Create a local server Request custom interfaces a number of times We used to crash running this the second time - do it a few times When we use the custom interface in the presence of a long-lived local server, i.e. a local server that is already running when we request an instance of our COM object, and remains afterwards, then after repeated requests to create an instance of our object the custom interface disappears -- i.e. QueryInterface fails with E_NOINTERFACE. Set the upper range of the following test to 2 to pass this test, i.e. TestQueryInterface(1,2) XXX - todo - Complete hack to crank threading support. Should NOT be necessary | 5,515 | en | 0.885883 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name
# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements
"""Read individual image files and perform augmentations."""
from __future__ import absolute_import, print_function
import os
import random
import logging
import json
import warnings
import numpy as np
try:
import cv2
except ImportError:
cv2 = None
from ..base import numeric_types
from .. import ndarray as nd
from ..ndarray import _internal
from ..ndarray._internal import _cvimresize as imresize
from ..ndarray._internal import _cvcopyMakeBorder as copyMakeBorder
from .. import io
from .. import recordio
def imread(filename, *args, **kwargs):
"""Read and decode an image to an NDArray.
Note: `imread` uses OpenCV (not the CV2 Python library).
    MXNet must have been built with USE_OPENCV=1 for `imread` to work.
Parameters
----------
filename : str
Name of the image file to be loaded.
flag : {0, 1}, default 1
1 for three channel color output. 0 for grayscale output.
to_rgb : bool, default True
True for RGB formatted output (MXNet default).
False for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> mx.img.imread("flower.jpg")
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> mx.img.imread("flower.jpg", flag=0)
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> mx.img.imread("flower.jpg", to_rgb=0)
<NDArray 224x224x3 @cpu(0)>
"""
return _internal._cvimread(filename, *args, **kwargs)
def imdecode(buf, *args, **kwargs):
"""Decode an image to an NDArray.
Note: `imdecode` uses OpenCV (not the CV2 Python library).
MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.
Parameters
----------
buf : str/bytes or numpy.ndarray
Binary image data as string or numpy ndarray.
flag : int, optional, default=1
1 for three channel color output. 0 for grayscale output.
to_rgb : int, optional, default=1
1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).
out : NDArray, optional
Output buffer. Use `None` for automatic allocation.
Returns
-------
NDArray
An `NDArray` containing the image.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 224x224x3 @cpu(0)>
Set `flag` parameter to 0 to get grayscale output
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, flag=0)
>>> image
<NDArray 224x224x1 @cpu(0)>
Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image, to_rgb=0)
>>> image
<NDArray 224x224x3 @cpu(0)>
"""
if not isinstance(buf, nd.NDArray):
buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)
return _internal._cvimdecode(buf, *args, **kwargs)
def scale_down(src_size, size):
"""Scales down crop size if it's larger than image size.
If width/height of the crop is larger than the width/height of the image,
sets the width/height to the width/height of the image.
Parameters
----------
src_size : tuple of int
Size of the image in (width, height) format.
size : tuple of int
Size of the crop in (width, height) format.
Returns
-------
tuple of int
A tuple containing the scaled crop size in (width, height) format.
Example
--------
>>> src_size = (640,480)
>>> size = (720,120)
>>> new_size = mx.img.scale_down(src_size, size)
>>> new_size
(640,106)
"""
w, h = size
sw, sh = src_size
if sh < h:
w, h = float(w * sh) / h, sh
if sw < w:
w, h = sw, float(h * sw) / w
return int(w), int(h)
def _get_interp_method(interp, sizes=()):
"""Get the interpolation method for resize functions.
The main purpose of this function is to wrap random interpolation-method
selection and an auto-estimation method.
Parameters
----------
interp : int
interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
sizes : tuple of int
(old_height, old_width, new_height, new_width); if not provided, auto (9)
falls back to Area (2).
Returns
-------
int
interp method from 0 to 4
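Example
-------
A minimal sketch of the auto mode (illustrative sizes): shrinking a
100x100 image to 50x50 selects the area-based method, per the documented
behavior.
>>> _get_interp_method(9, (100, 100, 50, 50))
2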
"""
if interp == 9:
if sizes:
assert len(sizes) == 4
oh, ow, nh, nw = sizes
if nh > oh and nw > ow:
return 3  # enlarging: bicubic, matching the documented "cubic for enlarge"
elif nh < oh and nw < ow:
return 2  # shrinking: area-based, matching the documented "area for shrink"
else:
return 1  # mixed case: bilinear
else:
return 2
if interp == 10:
return random.randint(0, 4)
if interp not in (0, 1, 2, 3, 4):
raise ValueError('Unknown interp method %d' % interp)
return interp
def resize_short(src, size, interp=2):
"""Resizes shorter edge to size.
Note: `resize_short` uses OpenCV (not the CV2 Python library).
MXNet must have been built with OpenCV for `resize_short` to work.
Resizes the original image by setting the shorter edge to size
and setting the longer edge accordingly.
Resizing function is called from OpenCV.
Parameters
----------
src : NDArray
The original image.
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method used for resizing the image.
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
More details can be found in the documentation of OpenCV, please refer to
http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.
Returns
-------
NDArray
An `NDArray` containing the resized image.
Example
-------
>>> with open("flower.jpeg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.img.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> size = 640
>>> new_image = mx.img.resize_short(image, size)
>>> new_image
<NDArray 640x960x3 @cpu(0)>
"""
h, w, _ = src.shape
if h > w:
new_h, new_w = size * h // w, size
else:
new_h, new_w = size, size * w // h
return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))
def fixed_crop(src, x0, y0, w, h, size=None, interp=2):
"""Crop src at fixed location, and (optionally) resize it to size.
Parameters
----------
src : NDArray
Input image
x0 : int
Left boundary of the cropping area
y0 : int
Top boundary of the cropping area
w : int
Width of the cropping area
h : int
Height of the cropping area
size : tuple of (w, h)
Optional, resize to new size after cropping
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
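Example
-------
A minimal sketch (illustrative shapes; the optional resize requires an
OpenCV-enabled build): crop a 40x30 patch at (10, 20), then resize to 64x48.
>>> im = mx.nd.zeros((100, 100, 3), dtype='uint8')
>>> out = mx.image.fixed_crop(im, 10, 20, 40, 30, size=(64, 48))
>>> out.shape
(48, 64, 3)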
"""
out = nd.crop(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))
if size is not None and (w, h) != size:
sizes = (h, w, size[1], size[0])
out = imresize(out, *size, interp=_get_interp_method(interp, sizes))
return out
def random_crop(src, size, interp=2):
"""Randomly crop `src` with `size` (width, height).
Upsample result if `src` is smaller than `size`.
Parameters
----------
src: Source image `NDArray`
size: Size of the crop formatted as (width, height). If the `size` is larger
than the image, then the source image is upsampled to `size` and returned.
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
Example
-------
>>> im = mx.nd.array(cv2.imread("flower.jpg"))
>>> cropped_im, rect = mx.image.random_crop(im, (100, 100))
>>> print(cropped_im)
<NDArray 100x100x3 @cpu(0)>
>>> print(rect)
(20, 21, 100, 100)
"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def center_crop(src, size, interp=2):
"""Crops the image `src` to the given `size` by trimming on all four
sides and preserving the center of the image. Upsamples if `src` is smaller
than `size`.
.. note:: This requires MXNet to be compiled with USE_OPENCV.
Parameters
----------
src : NDArray
Binary source image data.
size : list or tuple of int
The desired output image size.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
The cropped image.
Tuple
(x, y, width, height) where x, y are the positions of the crop in the
original image and width, height the dimensions of the crop.
Example
-------
>>> with open("flower.jpg", 'rb') as fp:
... str_image = fp.read()
...
>>> image = mx.image.imdecode(str_image)
>>> image
<NDArray 2321x3482x3 @cpu(0)>
>>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))
>>> cropped_image
<NDArray 500x1000x3 @cpu(0)>
>>> x, y, width, height
(1241, 910, 1000, 500)
"""
h, w, _ = src.shape
new_w, new_h = scale_down((w, h), size)
x0 = int((w - new_w) / 2)
y0 = int((h - new_h) / 2)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
def color_normalize(src, mean, std=None):
"""Normalize src with mean and std.
Parameters
----------
src : NDArray
Input image
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
Returns
-------
NDArray
An `NDArray` containing the normalized image.
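Example
-------
A minimal sketch assuming a float32 image; the mean/std values below are
the common ImageNet statistics and are illustrative only. Note that `src`
is modified in place.
>>> im = mx.nd.ones((2, 2, 3)) * 128
>>> out = mx.image.color_normalize(im,
...                                mean=mx.nd.array([123.68, 116.28, 103.53]),
...                                std=mx.nd.array([58.395, 57.12, 57.375]))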
"""
if mean is not None:
src -= mean
if std is not None:
src /= std
return src
def random_size_crop(src, size, area, ratio, interp=2, **kwargs):
"""Randomly crop src with size. Randomize area and aspect ratio.
Parameters
----------
src : NDArray
Input image
size : tuple of (int, int)
Size of the crop formatted as (width, height).
area : float in (0, 1] or tuple of (float, float)
If tuple, minimum area and maximum area to be maintained after cropping
If float, minimum area to be maintained after cropping, maximum area is set to 1.0
ratio : tuple of (float, float)
Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
interp: int, optional, default=2
Interpolation method. See resize_short for details.
Returns
-------
NDArray
An `NDArray` containing the cropped image.
Tuple
A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the
original image and (width, height) are the dimensions of the cropped image.
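Example
-------
A minimal sketch (illustrative shapes; requires an OpenCV-enabled build):
keep 50%-100% of the area with aspect ratio in [3/4, 4/3], output 224x224.
>>> im = mx.nd.zeros((480, 640, 3), dtype='uint8')
>>> out, rect = mx.image.random_size_crop(im, (224, 224), area=(0.5, 1.0),
...                                       ratio=(3.0 / 4.0, 4.0 / 3.0))
>>> out.shape
(224, 224, 3)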
"""
h, w, _ = src.shape
src_area = h * w
if 'min_area' in kwargs:
warnings.warn('`min_area` is deprecated. Please use `area` instead.',
DeprecationWarning)
area = kwargs.pop('min_area')
assert not kwargs, "unexpected keyword arguments for `random_size_crop`."
if isinstance(area, numeric_types):
area = (area, 1.0)
for _ in range(10):
target_area = random.uniform(area[0], area[1]) * src_area
new_ratio = random.uniform(*ratio)
new_w = int(round(np.sqrt(target_area * new_ratio)))
new_h = int(round(np.sqrt(target_area / new_ratio)))
if random.random() < 0.5:
new_h, new_w = new_w, new_h
if new_w <= w and new_h <= h:
x0 = random.randint(0, w - new_w)
y0 = random.randint(0, h - new_h)
out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)
return out, (x0, y0, new_w, new_h)
# fall back to center_crop
return center_crop(src, size, interp)
class Augmenter(object):
"""Image Augmenter base class"""
def __init__(self, **kwargs):
self._kwargs = kwargs
for k, v in self._kwargs.items():
if isinstance(v, nd.NDArray):
v = v.asnumpy()
if isinstance(v, np.ndarray):
v = v.tolist()
self._kwargs[k] = v
def dumps(self):
"""Saves the Augmenter to string
Returns
-------
str
JSON formatted string that describes the Augmenter.
"""
return json.dumps([self.__class__.__name__.lower(), self._kwargs])
def __call__(self, src):
"""Abstract implementation body"""
raise NotImplementedError("Must override implementation.")
class SequentialAug(Augmenter):
"""Composing a sequential augmenter list.
Parameters
----------
ts : list of augmenters
A series of augmenters to be applied in sequential order.
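Example
-------
A minimal sketch: resize the shorter edge, then center-crop, in order.
>>> aug = mx.image.SequentialAug([mx.image.ResizeAug(256),
...                               mx.image.CenterCropAug((224, 224))])
>>> # out = aug(image)  # applies both augmenters to `image` in sequence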
"""
def __init__(self, ts):
super(SequentialAug, self).__init__()
self.ts = ts
def dumps(self):
"""Override the default to avoid duplicate dump."""
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]
def __call__(self, src):
"""Augmenter body"""
for aug in self.ts:
src = aug(src)
return src
class ResizeAug(Augmenter):
"""Make resize shorter edge to size augmenter.
Parameters
----------
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(ResizeAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
return resize_short(src, self.size, self.interp)
class ForceResizeAug(Augmenter):
"""Force resize to size regardless of aspect ratio
Parameters
----------
size : tuple of (int, int)
The desired size as in (width, height)
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(ForceResizeAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
sizes = (src.shape[0], src.shape[1], self.size[1], self.size[0])
return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes))
class RandomCropAug(Augmenter):
"""Make random crop augmenter
Parameters
----------
size : int
The length to be set for the shorter edge.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(RandomCropAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
return random_crop(src, self.size, self.interp)[0]
class RandomSizedCropAug(Augmenter):
"""Make random crop with random resizing and random aspect ratio jitter augmenter.
Parameters
----------
size : tuple of (int, int)
Size of the crop formatted as (width, height).
area : float in (0, 1] or tuple of (float, float)
If tuple, minimum area and maximum area to be maintained after cropping
If float, minimum area to be maintained after cropping, maximum area is set to 1.0
ratio : tuple of (float, float)
Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)
interp: int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, area, ratio, interp=2, **kwargs):
super(RandomSizedCropAug, self).__init__(size=size, area=area,
ratio=ratio, interp=interp)
self.size = size
if 'min_area' in kwargs:
warnings.warn('`min_area` is deprecated. Please use `area` instead.',
DeprecationWarning)
self.area = kwargs.pop('min_area')
else:
self.area = area
self.ratio = ratio
self.interp = interp
assert not kwargs, "unexpected keyword arguments for `RandomSizedCropAug`."
def __call__(self, src):
"""Augmenter body"""
return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0]
class CenterCropAug(Augmenter):
"""Make center crop augmenter.
Parameters
----------
size : list or tuple of int
The desired output image size.
interp : int, optional, default=2
Interpolation method. See resize_short for details.
"""
def __init__(self, size, interp=2):
super(CenterCropAug, self).__init__(size=size, interp=interp)
self.size = size
self.interp = interp
def __call__(self, src):
"""Augmenter body"""
return center_crop(src, self.size, self.interp)[0]
class RandomOrderAug(Augmenter):
"""Apply list of augmenters in random order
Parameters
----------
ts : list of augmenters
A series of augmenters to be applied in random order
"""
def __init__(self, ts):
super(RandomOrderAug, self).__init__()
self.ts = ts
def dumps(self):
"""Override the default to avoid duplicate dump."""
return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]
def __call__(self, src):
"""Augmenter body"""
random.shuffle(self.ts)
for t in self.ts:
src = t(src)
return src
class BrightnessJitterAug(Augmenter):
"""Random brightness jitter augmentation.
Parameters
----------
brightness : float
The brightness jitter ratio range, [0, 1]
"""
def __init__(self, brightness):
super(BrightnessJitterAug, self).__init__(brightness=brightness)
self.brightness = brightness
def __call__(self, src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-self.brightness, self.brightness)
src *= alpha
return src
class ContrastJitterAug(Augmenter):
"""Random contrast jitter augmentation.
Parameters
----------
contrast : float
The contrast jitter ratio range, [0, 1]
"""
def __init__(self, contrast):
super(ContrastJitterAug, self).__init__(contrast=contrast)
self.contrast = contrast
self.coef = nd.array([[[0.299, 0.587, 0.114]]])
def __call__(self, src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-self.contrast, self.contrast)
gray = src * self.coef
gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)
src *= alpha
src += gray
return src
class SaturationJitterAug(Augmenter):
"""Random saturation jitter augmentation.
Parameters
----------
saturation : float
The saturation jitter ratio range, [0, 1]
"""
def __init__(self, saturation):
super(SaturationJitterAug, self).__init__(saturation=saturation)
self.saturation = saturation
self.coef = nd.array([[[0.299, 0.587, 0.114]]])
def __call__(self, src):
"""Augmenter body"""
alpha = 1.0 + random.uniform(-self.saturation, self.saturation)
gray = src * self.coef
gray = nd.sum(gray, axis=2, keepdims=True)
gray *= (1.0 - alpha)
src *= alpha
src += gray
return src
class HueJitterAug(Augmenter):
"""Random hue jitter augmentation.
Parameters
----------
hue : float
The hue jitter ratio range, [0, 1]
"""
def __init__(self, hue):
super(HueJitterAug, self).__init__(hue=hue)
self.hue = hue
self.tyiq = np.array([[0.299, 0.587, 0.114],
[0.596, -0.274, -0.321],
[0.211, -0.523, 0.311]])
self.ityiq = np.array([[1.0, 0.956, 0.621],
[1.0, -0.272, -0.647],
[1.0, -1.107, 1.705]])
def __call__(self, src):
"""Augmenter body.
Uses the approximate linear transformation described in:
https://beesbuzz.biz/code/hsv_color_transforms.php
"""
alpha = random.uniform(-self.hue, self.hue)
u = np.cos(alpha * np.pi)
w = np.sin(alpha * np.pi)
bt = np.array([[1.0, 0.0, 0.0],
[0.0, u, -w],
[0.0, w, u]])
t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T
src = nd.dot(src, nd.array(t))
return src
class ColorJitterAug(RandomOrderAug):
"""Apply random brightness, contrast and saturation jitter in random order.
Parameters
----------
brightness : float
The brightness jitter ratio range, [0, 1]
contrast : float
The contrast jitter ratio range, [0, 1]
saturation : float
The saturation jitter ratio range, [0, 1]
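Example
-------
A minimal sketch: jitter brightness, contrast and saturation by up to 40%.
>>> aug = mx.image.ColorJitterAug(0.4, 0.4, 0.4)
>>> # jittered = aug(image.astype('float32'))  # expects a float image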
"""
def __init__(self, brightness, contrast, saturation):
ts = []
if brightness > 0:
ts.append(BrightnessJitterAug(brightness))
if contrast > 0:
ts.append(ContrastJitterAug(contrast))
if saturation > 0:
ts.append(SaturationJitterAug(saturation))
super(ColorJitterAug, self).__init__(ts)
class LightingAug(Augmenter):
"""Add PCA based noise.
Parameters
----------
alphastd : float
Noise level
eigval : 3x1 np.array
Eigen values
eigvec : 3x3 np.array
Eigen vectors
"""
def __init__(self, alphastd, eigval, eigvec):
super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)
self.alphastd = alphastd
self.eigval = eigval
self.eigvec = eigvec
def __call__(self, src):
"""Augmenter body"""
alpha = np.random.normal(0, self.alphastd, size=(3,))
rgb = np.dot(self.eigvec * alpha, self.eigval)
src += nd.array(rgb)
return src
class ColorNormalizeAug(Augmenter):
"""Mean and std normalization.
Parameters
----------
mean : NDArray
RGB mean to be subtracted
std : NDArray
RGB standard deviation to be divided
"""
def __init__(self, mean, std):
super(ColorNormalizeAug, self).__init__(mean=mean, std=std)
self.mean = mean if mean is None or isinstance(mean, nd.NDArray) else nd.array(mean)
self.std = std if std is None or isinstance(std, nd.NDArray) else nd.array(std)
def __call__(self, src):
"""Augmenter body"""
return color_normalize(src, self.mean, self.std)
class RandomGrayAug(Augmenter):
"""Randomly convert to gray image.
Parameters
----------
p : float
Probability to convert to grayscale
"""
def __init__(self, p):
super(RandomGrayAug, self).__init__(p=p)
self.p = p
self.mat = nd.array([[0.21, 0.21, 0.21],
[0.72, 0.72, 0.72],
[0.07, 0.07, 0.07]])
def __call__(self, src):
"""Augmenter body"""
if random.random() < self.p:
src = nd.dot(src, self.mat)
return src
class HorizontalFlipAug(Augmenter):
"""Random horizontal flip.
Parameters
----------
p : float
Probability to flip image horizontally
"""
def __init__(self, p):
super(HorizontalFlipAug, self).__init__(p=p)
self.p = p
def __call__(self, src):
"""Augmenter body"""
if random.random() < self.p:
src = nd.flip(src, axis=1)
return src
class CastAug(Augmenter):
"""Cast to float32"""
def __init__(self, typ='float32'):
super(CastAug, self).__init__(type=typ)
self.typ = typ
def __call__(self, src):
"""Augmenter body"""
src = src.astype(self.typ)
return src
def CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,
mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,
pca_noise=0, rand_gray=0, inter_method=2):
"""Creates an augmenter list.
Parameters
----------
data_shape : tuple of int
Shape for output data
resize : int
Resize the shorter edge at the beginning if larger than 0
rand_crop : bool
Whether to enable random cropping other than center crop
rand_resize : bool
Whether to enable random sized cropping; requires rand_crop to be enabled
rand_gray : float
[0, 1], probability to convert to grayscale for all channels, the number
of channels will not be reduced to 1
rand_mirror : bool
Whether to apply horizontal flip to image with probability 0.5
mean : np.ndarray or None
Mean pixel values for [r, g, b]
std : np.ndarray or None
Standard deviations for [r, g, b]
brightness : float
Brightness jittering range (percent)
contrast : float
Contrast jittering range (percent)
saturation : float
Saturation jittering range (percent)
hue : float
Hue jittering range (percent)
pca_noise : float
Pca noise level (percent)
inter_method : int, default=2 (Area-based)
Interpolation method for all resizing operations
Possible values:
0: Nearest Neighbors Interpolation.
1: Bilinear interpolation.
2: Area-based (resampling using pixel area relation). It may be a
preferred method for image decimation, as it gives moire-free
results. But when the image is zoomed, it is similar to the Nearest
Neighbors method. (used by default).
3: Bicubic interpolation over 4x4 pixel neighborhood.
4: Lanczos interpolation over 8x8 pixel neighborhood.
9: Cubic for enlarge, area for shrink, bilinear for others
10: Randomly select from the interpolation methods mentioned above.
Note:
When shrinking an image, it will generally look best with AREA-based
interpolation, whereas, when enlarging an image, it will generally look best
with Bicubic (slow) or Bilinear (faster but still looks OK).
Examples
--------
>>> # An example of creating multiple augmenters
>>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,
... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,
... saturation=0.125, pca_noise=0.05, inter_method=10)
>>> # dump the details
>>> for aug in augs:
... aug.dumps()
"""
auglist = []
if resize > 0:
auglist.append(ResizeAug(resize, inter_method))
crop_size = (data_shape[2], data_shape[1])
if rand_resize:
assert rand_crop
auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))
elif rand_crop:
auglist.append(RandomCropAug(crop_size, inter_method))
else:
auglist.append(CenterCropAug(crop_size, inter_method))
if rand_mirror:
auglist.append(HorizontalFlipAug(0.5))
auglist.append(CastAug())
if brightness or contrast or saturation:
auglist.append(ColorJitterAug(brightness, contrast, saturation))
if hue:
auglist.append(HueJitterAug(hue))
if pca_noise > 0:
eigval = np.array([55.46, 4.794, 1.148])
eigvec = np.array([[-0.5675, 0.7192, 0.4009],
[-0.5808, -0.0045, -0.8140],
[-0.5836, -0.6948, 0.4203]])
auglist.append(LightingAug(pca_noise, eigval, eigvec))
if rand_gray > 0:
auglist.append(RandomGrayAug(rand_gray))
if mean is True:
mean = nd.array([123.68, 116.28, 103.53])
elif mean is not None:
assert isinstance(mean, (np.ndarray, nd.NDArray)) and mean.shape[0] in [1, 3]
if std is True:
std = nd.array([58.395, 57.12, 57.375])
elif std is not None:
assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]
if mean is not None or std is not None:
auglist.append(ColorNormalizeAug(mean, std))
return auglist
class ImageIter(io.DataIter):
"""Image data iterator with a large number of augmentation choices.
This iterator supports reading from both .rec files and raw image files.
To load input images from .rec files, use the `path_imgrec` parameter; to load from raw
image files, use the `path_imglist` and `path_root` parameters.
To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.
Parameters
----------
batch_size : int
Number of examples per batch.
data_shape : tuple
Data shape in (channels, height, width) format.
For now, only RGB images with 3 channels are supported.
label_width : int, optional
Number of labels per example. The default label width is 1.
path_imgrec : str
Path to image record file (.rec).
Created with tools/im2rec.py or bin/im2rec.
path_imglist : str
Path to image list (.lst).
Created with tools/im2rec.py or with custom script.
Format: Tab separated record of index, one or more labels and relative_path_from_root.
imglist : list
A list of images with the label(s).
Each item is a list [imagelabel: float or list of float, imgpath].
path_root : str
Root folder of image files.
path_imgidx : str
Path to image index file. Needed for partition and shuffling when using .rec source.
shuffle : bool
Whether to shuffle all images at the start of each iteration or not.
Can be slow for HDD.
part_index : int
Partition index.
num_parts : int
Total number of partitions.
data_name : str
Data name for provided symbols.
label_name : str
Label name for provided symbols.
dtype : str
Label data type. Default: float32. Other options: int32, int64, float64
last_batch_handle : str, optional
How to handle the last batch.
This parameter can be 'pad'(default), 'discard' or 'roll_over'.
If 'pad', the last batch will be padded with data starting from the beginning
If 'discard', the last batch will be discarded
If 'roll_over', the remaining elements will be rolled over to the next iteration
kwargs : ...
More arguments for creating augmenter. See mx.image.CreateAugmenter.
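Examples
--------
A minimal sketch; 'data.rec' and 'data.idx' are illustrative paths created
with tools/im2rec.py.
>>> data_iter = mx.image.ImageIter(batch_size=4, data_shape=(3, 224, 224),
...                                path_imgrec='data.rec',
...                                path_imgidx='data.idx',
...                                shuffle=True, rand_crop=True,
...                                rand_mirror=True)
>>> for batch in data_iter:
...     data = batch.data[0]  # NDArray of shape (4, 3, 224, 224)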
"""
def __init__(self, batch_size, data_shape, label_width=1,
path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,
shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,
data_name='data', label_name='softmax_label', dtype='float32',
last_batch_handle='pad', **kwargs):
super(ImageIter, self).__init__()
assert path_imgrec or path_imglist or (isinstance(imglist, list))
assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'
num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)
logging.info('Using %s threads for decoding...', str(num_threads))
logging.info('Set the environment variable MXNET_CPU_WORKER_NTHREADS to a'
' larger number to use more threads.')
class_name = self.__class__.__name__
if path_imgrec:
logging.info('%s: loading recordio %s...',
class_name, path_imgrec)
if path_imgidx:
self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = list(self.imgrec.keys)
else:
self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type
self.imgidx = None
else:
self.imgrec = None
if path_imglist:
logging.info('%s: loading image list %s...', class_name, path_imglist)
with open(path_imglist) as fin:
imglist = {}
imgkeys = []
for line in iter(fin.readline, ''):
line = line.strip().split('\t')
label = nd.array(line[1:-1], dtype=dtype)
key = int(line[0])
imglist[key] = (label, line[-1])
imgkeys.append(key)
self.imglist = imglist
elif isinstance(imglist, list):
logging.info('%s: loading image list...', class_name)
result = {}
imgkeys = []
index = 1
for img in imglist:
key = str(index) # pylint: disable=redefined-variable-type
index += 1
if len(img) > 2:
label = nd.array(img[:-1], dtype=dtype)
elif isinstance(img[0], numeric_types):
label = nd.array([img[0]], dtype=dtype)
else:
label = nd.array(img[0], dtype=dtype)
result[key] = (label, img[-1])
imgkeys.append(str(key))
self.imglist = result
else:
self.imglist = None
self.path_root = path_root
self.check_data_shape(data_shape)
self.provide_data = [(data_name, (batch_size,) + data_shape)]
if label_width > 1:
self.provide_label = [(label_name, (batch_size, label_width))]
else:
self.provide_label = [(label_name, (batch_size,))]
self.batch_size = batch_size
self.data_shape = data_shape
self.label_width = label_width
self.shuffle = shuffle
if self.imgrec is None:
self.seq = imgkeys
elif shuffle or num_parts > 1:
assert self.imgidx is not None
self.seq = self.imgidx
else:
self.seq = None
if num_parts > 1:
assert part_index < num_parts
N = len(self.seq)
C = N // num_parts
self.seq = self.seq[part_index * C:(part_index + 1) * C]
if aug_list is None:
self.auglist = CreateAugmenter(data_shape, **kwargs)
else:
self.auglist = aug_list
self.cur = 0
self._allow_read = True
self.last_batch_handle = last_batch_handle
self.num_image = len(self.seq) if self.seq is not None else None
self._cache_data = None
self._cache_label = None
self._cache_idx = None
self.reset()
def reset(self):
"""Resets the iterator to the beginning of the data."""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.last_batch_handle != 'roll_over' or \
self._cache_data is None:
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
if self._allow_read is False:
self._allow_read = True
def hard_reset(self):
"""Resets the iterator and ignore roll over data"""
if self.seq is not None and self.shuffle:
random.shuffle(self.seq)
if self.imgrec is not None:
self.imgrec.reset()
self.cur = 0
self._allow_read = True
self._cache_data = None
self._cache_label = None
self._cache_idx = None
def next_sample(self):
"""Helper function for reading in next sample."""
if self._allow_read is False:
raise StopIteration
if self.seq is not None:
if self.cur < self.num_image:
idx = self.seq[self.cur]
else:
if self.last_batch_handle != 'discard':
self.cur = 0
raise StopIteration
self.cur += 1
if self.imgrec is not None:
s = self.imgrec.read_idx(idx)
header, img = recordio.unpack(s)
if self.imglist is None:
return header.label, img
else:
return self.imglist[idx][0], img
else:
label, fname = self.imglist[idx]
return label, self.read_image(fname)
else:
s = self.imgrec.read()
if s is None:
if self.last_batch_handle != 'discard':
self.imgrec.reset()
raise StopIteration
header, img = recordio.unpack(s)
return header.label, img
def _batchify(self, batch_data, batch_label, start=0):
"""Helper function for batchifying data"""
i = start
batch_size = self.batch_size
try:
while i < batch_size:
label, s = self.next_sample()
data = self.imdecode(s)
try:
self.check_valid_image(data)
except RuntimeError as e:
logging.debug('Invalid image, skipping: %s', str(e))
continue
data = self.augmentation_transform(data)
assert i < batch_size, 'Batch size must be a multiple of the augmenter output length'
batch_data[i] = self.postprocess_data(data)
batch_label[i] = label
i += 1
except StopIteration:
if not i:
raise StopIteration
return i
def next(self):
"""Returns the next batch of data."""
batch_size = self.batch_size
c, h, w = self.data_shape
# if last batch data is rolled over
if self._cache_data is not None:
# check both the data and label have values
assert self._cache_label is not None, "_cache_label didn't have values"
assert self._cache_idx is not None, "_cache_idx didn't have values"
batch_data = self._cache_data
batch_label = self._cache_label
i = self._cache_idx
# clear the cache data
else:
batch_data = nd.empty((batch_size, c, h, w))
batch_label = nd.empty(self.provide_label[0][1])
i = self._batchify(batch_data, batch_label)
# calculate the padding
pad = batch_size - i
# handle padding for the last batch
if pad != 0:
if self.last_batch_handle == 'discard':
raise StopIteration
# if the option is 'roll_over', throw StopIteration and cache the data
elif self.last_batch_handle == 'roll_over' and \
self._cache_data is None:
self._cache_data = batch_data
self._cache_label = batch_label
self._cache_idx = i
raise StopIteration
else:
_ = self._batchify(batch_data, batch_label, i)
if self.last_batch_handle == 'pad':
self._allow_read = False
else:
self._cache_data = None
self._cache_label = None
self._cache_idx = None
return io.DataBatch([batch_data], [batch_label], pad=pad)
def check_data_shape(self, data_shape):
"""Checks if the input data shape is valid"""
if not len(data_shape) == 3:
raise ValueError('data_shape should have length 3, with dimensions CxHxW')
if not data_shape[0] == 3:
raise ValueError('This iterator expects inputs to have 3 channels.')
def check_valid_image(self, data):
"""Checks if the input data is valid"""
if len(data[0].shape) == 0:
raise RuntimeError('Data shape is wrong')
def imdecode(self, s):
"""Decodes a string or byte string to an NDArray.
See mx.img.imdecode for more details."""
def locate():
"""Locate the image file/index if decode fails."""
if self.seq is not None:
idx = self.seq[(self.cur % self.num_image) - 1]
else:
idx = (self.cur % self.num_image) - 1
if self.imglist is not None:
_, fname = self.imglist[idx]
msg = "filename: {}".format(fname)
else:
msg = "index: {}".format(idx)
return "Broken image " + msg
try:
img = imdecode(s)
except Exception as e:
raise RuntimeError("{}, {}".format(locate(), e))
return img
def read_image(self, fname):
"""Reads an input image `fname` and returns the decoded raw bytes.
Example usage:
----------
>>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.
"""
with open(os.path.join(self.path_root, fname), 'rb') as fin:
img = fin.read()
return img
def augmentation_transform(self, data):
"""Transforms input data with specified augmentation."""
for aug in self.auglist:
data = aug(data)
return data
def postprocess_data(self, datum):
"""Final postprocessing step before image is loaded into the batch."""
return nd.transpose(datum, axes=(2, 0, 1))
# coding: utf-8
"""
OpenShift API (with Kubernetes)
OpenShift provides builds, application lifecycle, image content management, and administrative policy on top of Kubernetes. The API allows consistent management of those objects. All API operations are authenticated via an Authorization bearer token that is provided for service accounts as a generated secret (in JWT form) or via the native OAuth endpoint located at /oauth/authorize. Core infrastructure components may use openshift.client certificates that require no authentication. All API operations return a 'resourceVersion' string that represents the version of the object in the underlying storage. The standard LIST operation performs a snapshot read of the underlying objects, returning a resourceVersion representing a consistent version of the listed objects. The WATCH operation allows all updates to a set of objects after the provided resourceVersion to be observed by a openshift.client. By listing and beginning a watch from the returned resourceVersion, openshift.clients may observe a consistent view of the state of one or more objects. Note that WATCH always returns the update after the provided resourceVersion. Watch may be extended a limited time in the past - using etcd 2 the watch window is 1000 events (which on a large cluster may only be a few tens of seconds) so openshift.clients must explicitly handle the \"watch to old error\" by re-listing. Objects are divided into two rough categories - those that have a lifecycle and must reflect the state of the cluster, and those that have no state. Objects with lifecycle typically have three main sections: * 'metadata' common to all objects * a 'spec' that represents the desired state * a 'status' that represents how much of the desired state is reflected on the cluster at the current time Objects that have no state have 'metadata' but may lack a 'spec' or 'status' section. Objects are divided into those that are namespace scoped (only exist inside of a namespace) and those that are cluster scoped (exist outside of a namespace). A namespace scoped resource will be deleted when the namespace is deleted and cannot be created if the namespace has not yet been created or is in the process of deletion. Cluster scoped resources are typically only accessible to admins - resources like nodes, persistent volumes, and cluster policy. All objects have a schema that is a combination of the 'kind' and 'apiVersion' fields. This schema is additive only for any given version - no backwards incompatible changes are allowed without incrementing the apiVersion. The server will return and accept a number of standard responses that share a common schema - for instance, the common error type is 'unversioned.Status' (described below) and will be returned on any error from the API server. The API is available in multiple serialization formats - the default is JSON (Accept: application/json and Content-Type: application/json) but openshift.clients may also use YAML (application/yaml) or the native Protobuf schema (application/vnd.kubernetes.protobuf). Note that the format of the WATCH API call is slightly different - for JSON it returns newline delimited objects while for Protobuf it returns length-delimited frames (4 bytes in network-order) that contain a 'versioned.Watch' Protobuf object. See the OpenShift documentation at https://docs.openshift.org for more information.
OpenAPI spec version: v3.6.0-alpha.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import openshift.client
from kubernetes.client.rest import ApiException
from openshift.client.models.v1beta1_cpu_target_utilization import V1beta1CPUTargetUtilization
class TestV1beta1CPUTargetUtilization(unittest.TestCase):
""" V1beta1CPUTargetUtilization unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1CPUTargetUtilization(self):
"""
Test V1beta1CPUTargetUtilization
"""
model = openshift.client.models.v1beta1_cpu_target_utilization.V1beta1CPUTargetUtilization()
if __name__ == '__main__':
unittest.main()
| openshift/test/test_v1beta1_cpu_target_utilization.py | 4,258 |
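A minimal sketch of the list-then-watch pattern described in the docstring above, re-listing when the requested resourceVersion has aged out of the etcd window. The client object, its list/watch helpers, handle(), and WatchTooOldError are hypothetical stand-ins, not a real library API.
def watch_loop(client, resource):
    # Hypothetical client: list() returns (items, resourceVersion) from a
    # snapshot read; watch() yields updates strictly after resource_version.
    items, rv = client.list(resource)
    while True:
        try:
            for event in client.watch(resource, resource_version=rv):
                rv = event.resource_version  # remember progress
                handle(event)  # hypothetical application callback
        except WatchTooOldError:
            # rv fell outside the ~1000-event etcd 2 window; re-list to
            # obtain a fresh consistent snapshot and resume watching.
            items, rv = client.list(resource)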
import warnings
import mmcv
import numpy as np
import torch
from torch.nn.modules.utils import _pair
from mmdet.core.anchor.builder import ANCHOR_GENERATORS
from mmdet.core.anchor import AnchorGenerator
@ANCHOR_GENERATORS.register_module(force=True)
class SSDAnchorGenerator(AnchorGenerator):
"""Anchor generator for SSD
Args:
strides (list[int] | list[tuple[int, int]]): Strides of anchors
in multiple feature levels.
ratios (list[float]): The list of ratios between the height and width
of anchors in a single level.
basesize_ratio_range (tuple(float)): Ratio range of anchors.
input_size (int): Size of feature map, 300 for SSD300,
512 for SSD512.
scale_major (bool): Whether to multiply scales first when generating
base anchors. If true, the anchors in the same row will have the
            same scales. It defaults to True here.
"""
def __init__(self,
strides,
ratios,
basesize_ratio_range,
input_size=300,
scale_major=True):
assert len(strides) == len(ratios)
assert mmcv.is_tuple_of(basesize_ratio_range, float)
self.strides = [_pair(stride) for stride in strides]
self.input_size = max(input_size) if isinstance(input_size, (list,tuple)) else input_size
self.centers = [(stride[0] / 2., stride[1] / 2.)
for stride in self.strides]
self.basesize_ratio_range = basesize_ratio_range
# calculate anchor ratios and sizes
min_ratio, max_ratio = basesize_ratio_range
min_ratio = int(min_ratio * 100)
max_ratio = int(max_ratio * 100)
step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
min_sizes = []
max_sizes = []
for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
min_sizes.append(int(self.input_size * ratio / 100))
max_sizes.append(int(self.input_size * (ratio + step) / 100))
if self.input_size == 300:
if basesize_ratio_range[0] == 0.15: # SSD300 COCO
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
elif basesize_ratio_range[0] == 0.2: # SSD300 VOC
min_sizes.insert(0, int(self.input_size * 10 / 100))
max_sizes.insert(0, int(self.input_size * 20 / 100))
else:
min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
warnings.warn(
                    'according to original SSD, basesize_ratio_range[0] should be either 0.15 '
'or 0.2 when input_size is 300, got '
f'{basesize_ratio_range[0]}.')
elif self.input_size == 512:
if basesize_ratio_range[0] == 0.1: # SSD512 COCO
min_sizes.insert(0, int(self.input_size * 4 / 100))
max_sizes.insert(0, int(self.input_size * 10 / 100))
elif basesize_ratio_range[0] == 0.15: # SSD512 VOC
min_sizes.insert(0, int(self.input_size * 7 / 100))
max_sizes.insert(0, int(self.input_size * 15 / 100))
else:
min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
                warnings.warn('according to original SSD, basesize_ratio_range[0] should be either 0.1 '
'or 0.15 when input_size is 512, got'
f' {basesize_ratio_range[0]}.')
else:
            if basesize_ratio_range[0] == 0.1:  # COCO-style default for other input sizes
min_sizes.insert(0, int(self.input_size * 4 / 100))
max_sizes.insert(0, int(self.input_size * 10 / 100))
else:
min_sizes.insert(0, int(self.input_size * basesize_ratio_range[0] * 0.4))
max_sizes.insert(0, int(self.input_size * basesize_ratio_range[0]))
anchor_ratios = []
anchor_scales = []
for k in range(len(self.strides)):
scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
anchor_ratio = [1.]
for r in ratios[k]:
anchor_ratio += [1 / r, r] # 4 or 6 ratio
anchor_ratios.append(torch.Tensor(anchor_ratio))
anchor_scales.append(torch.Tensor(scales))
self.base_sizes = min_sizes
self.scales = anchor_scales
self.ratios = anchor_ratios
self.scale_major = scale_major
self.center_offset = 0
self.base_anchors = self.gen_base_anchors()
# added for proto export
self.min_sizes = min_sizes
self.max_sizes = max_sizes
def gen_base_anchors(self):
"""Generate base anchors.
Returns:
list(torch.Tensor): Base anchors of a feature grid in multiple \
feature levels.
"""
multi_level_base_anchors = []
for i, base_size in enumerate(self.base_sizes):
base_anchors = self.gen_single_level_base_anchors(
base_size,
scales=self.scales[i],
ratios=self.ratios[i],
center=self.centers[i])
indices = list(range(len(self.ratios[i])))
indices.insert(1, len(indices))
base_anchors = torch.index_select(base_anchors, 0,
torch.LongTensor(indices))
multi_level_base_anchors.append(base_anchors)
return multi_level_base_anchors
def __repr__(self):
"""str: a string that describes the module"""
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}strides={self.strides},\n'
repr_str += f'{indent_str}scales={self.scales},\n'
repr_str += f'{indent_str}scale_major={self.scale_major},\n'
repr_str += f'{indent_str}input_size={self.input_size},\n'
repr_str += f'{indent_str}ratios={self.ratios},\n'
repr_str += f'{indent_str}num_levels={self.num_levels},\n'
repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
repr_str += f'{indent_str}basesize_ratio_range='
repr_str += f'{self.basesize_ratio_range})'
return repr_str
| xmmdet/core/anchor/anchor_generator.py | 6,591 |
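A small standalone check of the min/max size arithmetic above for the SSD300 VOC configuration (basesize_ratio_range=(0.2, 0.9) over six feature levels); plain numpy, no mmdet needed, and the expected values in the comments follow directly from the formulas in __init__.
import numpy as np

input_size, num_levels = 300, 6
min_ratio, max_ratio = int(0.2 * 100), int(0.9 * 100)
step = int(np.floor(max_ratio - min_ratio) / (num_levels - 2))  # 17
ratios = range(min_ratio, max_ratio + 1, step)                  # 20, 37, 54, 71, 88
min_sizes = [int(input_size * r / 100) for r in ratios]
max_sizes = [int(input_size * (r + step) / 100) for r in ratios]
min_sizes.insert(0, int(input_size * 10 / 100))  # SSD300 VOC branch above
max_sizes.insert(0, int(input_size * 20 / 100))
print(min_sizes)  # [30, 60, 111, 162, 213, 264]
print(max_sizes)  # [60, 111, 162, 213, 264, 315]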
from __future__ import absolute_import
"""This module offers a display and interaction frontend with Qt.
It will try importing PySide first, and if that fails PyQt. The code will
constantly be tested with both bindings."""
from .displaywidgets import DisplayWidget, NewDisplayWidget
from .control import ControlWidget
#from .mainwin import ZasimMainWindow
display_objects = []
class ZasimDisplay(object):
simulator = None
"""The `Simulator` object for this display."""
display = None
"""The `BaseDisplayWidget` in use."""
window = None
"""The `ZasimMainWindow` instance in use."""
control = None
"""The `ControlWidget` in use."""
def __init__(self, simulator):
"""Instantiate a Display (thas is: a window with a display widget and
simulation controls) from a simulator.
:param simulator: The simulator to use."""
self.simulator = simulator
if not self.display:
if 'tiles' in self.simulator.palette_info:
self.display = NewDisplayWidget(self.simulator)
else:
self.display = DisplayWidget(self.simulator)
if self.control is None:
self.control = ControlWidget(self.simulator)
from .mainwin import ZasimMainWindow
self.window = ZasimMainWindow(self.simulator, self.display, self.control)
display_objects.append(self.window)
self.window.show()
def set_scale(self, scale):
"""Sets the scale of the display component."""
self.display.set_scale(scale)
| zasim/gui/display.py | 1,565 |
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer.utils import type_check
class ResizeImages3D(function_node.FunctionNode):
def __init__(self, output_shape):
self.out_H = output_shape[0]
self.out_W = output_shape[1]
self.out_D = output_shape[2]
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.char == 'f',
x_type.ndim == 5
)
def forward(self, inputs):
x, = inputs
xp = cuda.get_array_module(x)
B, C, H, W, D = x.shape
u_1d = xp.linspace(0, W - 1, num=self.out_W)
v_1d = xp.linspace(0, H - 1, num=self.out_H)
t_1d = xp.linspace(0, D - 1, num=self.out_D)
grid = xp.meshgrid(u_1d, v_1d, t_1d)
u = grid[0].ravel()
v = grid[1].ravel()
t = grid[2].ravel()
u0 = xp.floor(u).astype(numpy.int32)
u0 = u0.clip(0, W - 2)
u1 = u0 + 1
v0 = xp.floor(v).astype(numpy.int32)
v0 = v0.clip(0, H - 2)
v1 = v0 + 1
t0 = xp.floor(t).astype(numpy.int32)
t0 = t0.clip(0, D - 2)
t1 = t0 + 1
# weights
w1 = (u1 - u) * (v1 - v) * (t1 - t)
w2 = (u - u0) * (v1 - v) * (t1 - t)
w3 = (u1 - u) * (v - v0) * (t1 - t)
w4 = (u - u0) * (v - v0) * (t1 - t)
w5 = (u1 - u) * (v1 - v) * (t - t0)
w6 = (u - u0) * (v1 - v) * (t - t0)
w7 = (u1 - u) * (v - v0) * (t - t0)
w8 = (u - u0) * (v - v0) * (t - t0)
w1 = w1.astype(x.dtype)
w2 = w2.astype(x.dtype)
w3 = w3.astype(x.dtype)
w4 = w4.astype(x.dtype)
w5 = w5.astype(x.dtype)
w6 = w6.astype(x.dtype)
w7 = w7.astype(x.dtype)
w8 = w8.astype(x.dtype)
y = (w1[None, None, :] * x[:, :, v0, u0, t0] +
w2[None, None, :] * x[:, :, v0, u1, t0] +
w3[None, None, :] * x[:, :, v1, u0, t0] +
w4[None, None, :] * x[:, :, v1, u1, t0] +
w5[None, None, :] * x[:, :, v0, u0, t1] +
w6[None, None, :] * x[:, :, v0, u1, t1] +
w7[None, None, :] * x[:, :, v1, u0, t1] +
w8[None, None, :] * x[:, :, v1, u1, t1])
y = y.reshape(B, C, self.out_H, self.out_W, self.out_D)
return y,
def backward(self, indexes, grad_outputs):
return ResizeImagesGrad3D(
self.inputs[0].shape,
(self.out_H, self.out_W, self.out_D)).apply(grad_outputs)
class ResizeImagesGrad3D(function_node.FunctionNode):
def __init__(self, input_shape, output_shape):
self.out_H = output_shape[0]
self.out_W = output_shape[1]
self.out_D = output_shape[2]
self.input_shape = input_shape
def check_type_forward(self, in_types):
n_in = in_types.size()
type_check.expect(n_in == 1)
x_type = in_types[0]
type_check.expect(
x_type.dtype.char == 'f',
x_type.ndim == 5
)
def forward(self, inputs):
xp = cuda.get_array_module(*inputs)
gy, = inputs
B, C, H, W, D = self.input_shape
u_1d = xp.linspace(0, W - 1, num=self.out_W)
v_1d = xp.linspace(0, H - 1, num=self.out_H)
t_1d = xp.linspace(0, D - 1, num=self.out_D)
grid = xp.meshgrid(u_1d, v_1d, t_1d)
u = grid[0].ravel()
v = grid[1].ravel()
t = grid[2].ravel()
u0 = xp.floor(u).astype(numpy.int32)
u0 = u0.clip(0, W - 2)
u1 = u0 + 1
v0 = xp.floor(v).astype(numpy.int32)
v0 = v0.clip(0, H - 2)
v1 = v0 + 1
t0 = xp.floor(t).astype(numpy.int32)
t0 = t0.clip(0, D - 2)
t1 = t0 + 1
# weights
wu0 = u - u0
wu1 = u1 - u
wv0 = v - v0
wv1 = v1 - v
wt0 = t - t0
wt1 = t1 - t
wu0 = wu0.astype(gy.dtype)
wu1 = wu1.astype(gy.dtype)
wv0 = wv0.astype(gy.dtype)
wv1 = wv1.astype(gy.dtype)
wt0 = wt0.astype(gy.dtype)
wt1 = wt1.astype(gy.dtype)
# --- gx
if xp is numpy:
scatter_add = numpy.add.at
else:
scatter_add = cuda.cupyx.scatter_add
gx = xp.zeros(self.input_shape, dtype=gy.dtype)
gy = gy.reshape(B, C, -1)
scatter_add(gx, (slice(None), slice(None), v0, u0, t0),
gy * wu1 * wv1 * wt1)
scatter_add(gx, (slice(None), slice(None), v0, u1, t0),
gy * wu0 * wv1 * wt1)
scatter_add(gx, (slice(None), slice(None), v1, u0, t0),
gy * wu1 * wv0 * wt1)
scatter_add(gx, (slice(None), slice(None), v1, u1, t0),
gy * wu0 * wv0 * wt1)
scatter_add(gx, (slice(None), slice(None), v0, u0, t1),
gy * wu1 * wv1 * wt0)
scatter_add(gx, (slice(None), slice(None), v0, u1, t1),
gy * wu0 * wv1 * wt0)
scatter_add(gx, (slice(None), slice(None), v1, u0, t1),
gy * wu1 * wv0 * wt0)
scatter_add(gx, (slice(None), slice(None), v1, u1, t1),
gy * wu0 * wv0 * wt0)
return gx,
def backward(self, indexes, grad_outputs):
return ResizeImages3D(
(self.out_H, self.out_W, self.out_D)).apply(grad_outputs)
def resize_images_3d(x, output_shape):
"""Resize images to the given shape.
This function resizes 3D data to :obj:`output_shape`.
    Currently, only trilinear interpolation is supported as the sampling method.
    Notation: the following symbols are used for dimensionalities.
    - :math:`n` is the batch size.
    - :math:`c_I` is the number of input channels.
    - :math:`h`, :math:`w` and :math:`d` are the height, width and depth of the
      input image, respectively.
    - :math:`h_O`, :math:`w_O` and :math:`d_O` are the height, width and depth
      of the output image.
Args:
x (~chainer.Variable):
Input variable of shape :math:`(n, c_I, h, w, d)`.
output_shape (tuple):
This is a tuple of length 3 whose values are :obj:`(h_O, w_O, d_O)`.
Returns:
~chainer.Variable: Resized image whose shape is \
:math:`(n, c_I, h_O, w_O, d_O)`.
"""
return ResizeImages3D(output_shape).apply((x,))[0]
| src/links/model/resize_images_3d.py | 6,408 |
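Assuming the module above is importable (the import path below is an assumption), a quick shape check of the resize on random data:
import numpy as np
from resize_images_3d import resize_images_3d  # hypothetical import path

x = np.random.rand(2, 3, 8, 8, 8).astype(np.float32)  # (n, c_I, h, w, d)
y = resize_images_3d(x, (16, 16, 16))
print(y.shape)  # (2, 3, 16, 16, 16)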
# coding=utf-8
'''
author: ShiLei Miao
analyses and builds models about NBA
'''
import numpy as np
from numpy import *
import pandas as pd
from pandas import *
import os
from sklearn.ensemble import RandomForestClassifier
from sklearn.cross_validation import KFold
from sklearn import metrics
os.chdir(r'E:\PycharmProjects\Rong360\dta')
def loadDataSetT(path):
data = pd.read_csv(path)
dataSet = data.values[0:,2:]
dataLabel = data.values[0:,1:2]
return dataSet,dataLabel
def transLabel(Mat_Labels):
labels = []
for item in Mat_Labels:
labels.append(item[0])
labels = array(labels)
return labels
def P_YYYY(N_train, target_train, N_test, target_test):
clf = RandomForestClassifier(n_estimators=300, random_state=520341, max_depth=9,\
min_samples_split=3, class_weight='balanced_subsample')
clf = clf.fit(N_train, target_train)
pred = clf.predict_proba(N_test)
pred = DataFrame(pred)[0].values
N_auc = metrics.roc_auc_score(target_test, 1 - pred)
print N_auc
print '\n'
return N_auc, clf
def preds_calculate(Mat_Train,Mat_Labels):
kf = KFold(len(Mat_Train), n_folds=10)
NN_auc = []
for train_index, test_index in kf:
X_train, X_test = Mat_Train[train_index], Mat_Train[test_index]
y_train, y_test = Mat_Labels[train_index], Mat_Labels[test_index]
N_auc, clf = P_YYYY(X_train, y_train, X_test, y_test)
NN_auc.append(N_auc)
mean_auc = mean(NN_auc)
    print 'Mean AUC:', mean_auc
return mean_auc, clf
# Training set
S_train_user_info = pd.read_csv(r'Generate_dta\S_train_user_info.csv')
N_train_user_info = pd.read_csv(r'Generate_dta\N_train_user_info.csv').drop(['lable'],axis=1)
relation1_train = pd.read_csv(r'Generate_dta\0909relation1_train.csv')
relation2_train = pd.read_csv(r'Generate_dta\0909relation2_train.csv')
N_train_consumption1 = pd.read_csv(r'Generate_dta\N_train_consumption1.csv').drop(['lable'],axis=1)
t_consumption = pd.read_csv(r'Generate_dta\t_consumption.csv')
# rong_tag is not used [the features below are one-hot encoded]
rong_tag_train = pd.read_csv(r'Generate_dta\N_rong_tag_train.csv').drop(['lable'],axis=1)
N_rong_tag_train_var = pd.read_excel(r'Stat_importance_var.xls')
N_rong_tag_train_var = N_rong_tag_train_var[N_rong_tag_train_var['Importance']>10]
N_rong_tag_train = rong_tag_train.reindex(columns = N_rong_tag_train_var['Feature'].values)
N_rong_tag_train['user_id'] = rong_tag_train['user_id']
N_rong_tag_train = N_rong_tag_train.replace([None], [-1])
train = merge(S_train_user_info,N_train_user_info,how="left", left_on='user_id', right_on='user_id')
train = merge(train,relation1_train,how="left", left_on='user_id', right_on='user_id')
train = merge(train,relation2_train,how="left", left_on='user_id', right_on='user_id')
train = merge(train,N_train_consumption1,how="left", left_on='user_id', right_on='user_id')
train = merge(train,t_consumption,how="left", left_on='user_id', right_on='user_id')
train = train.replace([None], [-1])
train['category_null'] = (train<0).sum(axis=1)
## After comparing missing values between train and test, drop users whose missing-feature count is 187 [almost all have product_id=2]
train = train[train['category_null'] < 187]
train = DataFrame(train.values,columns=train.columns)
train = merge(train,N_rong_tag_train,how="left", left_on='user_id', right_on='user_id')
Mat_Train = train.drop(['user_id','lable','category_null'],axis=1)
Mat_Train = array(Mat_Train)
Mat_Label = train['lable'].astype(int)
mean_auc, clf = preds_calculate(Mat_Train,Mat_Label)
| Procedure/2_M1/train/m2-cv-rf.py | 3,732 | coding=utf-8 训练集rong_tag 没有使用 【下面的数据是one-hot后的特征】 在统计的train跟test缺失的情况后,选择剔除用户的特征缺失个数为187的【基本都是product_id=2】 | 107 | zh | 0.980853 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib.auth.models import User
from django.db import models
class Post(models.Model):
status_ITEMS = (
(1, '上线'),
(2, '草稿'),
(3, '删除'),
)
title = models.CharField(max_length=50, verbose_name='标题')
desc = models.CharField(max_length=255, blank=True, verbose_name='摘要')
category = models.ForeignKey('Category', verbose_name='分类')
tags = models.ManyToManyField('Tag', related_name="posts", verbose_name='标签')
content = models.TextField(verbose_name='内容', help_text='注:目前仅支持Markdown格式')
status = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')
owner = models.ForeignKey(User, verbose_name='作者')
created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
lasted_update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')
def status_show(self):
return '当前状态:%s'%(self.status)
status_show.short_description = '展示站台'
def __unicode__(self):
return self.title
class Meta:
verbose_name = verbose_name_plural = '文章'
class Category(models.Model):
status_ITEMS = (
(1, '可用'),
(2, '删除'),
)
name = models.CharField(max_length=50,verbose_name='名称')
status = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')
owner = models.ForeignKey(User, verbose_name='作者')
is_nav = models.BooleanField(default=False, verbose_name="是否为导航")
created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
# parent = models.ForeignKey('Category', verbose_name='分类')
def __unicode__(self):
return self.name
class Meta:
verbose_name = verbose_name_plural = '分类'
class Tag(models.Model):
status_ITEMS= (
(1, '正常'),
(2, '删除'),
)
name = models.CharField(max_length=50,verbose_name='名称')
status = models.PositiveIntegerField(default=1, choices=status_ITEMS, verbose_name='状态')
owner = models.ForeignKey(User, verbose_name='作者')
created_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
def __unicode__(self):
return self.name
class Meta:
verbose_name = verbose_name_plural = '标签'
| typeidea/blog/models.py | 2,307 | -*- coding: utf-8 -*- parent = models.ForeignKey('Category', verbose_name='分类') | 82 | en | 0.426176 |
#!/usr/bin/env python2.7
# Advanced Multi-Mission Operations System (AMMOS) Instrument Toolkit (AIT)
# Bespoke Link to Instruments and Small Satellites (BLISS)
#
# Copyright 2016, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any
# commercial use must be negotiated with the Office of Technology Transfer
# at the California Institute of Technology.
#
# This software may be subject to U.S. export control laws. By accepting
# this software, the user agrees to comply with all applicable U.S. export
# laws and regulations. User has the responsibility to obtain export licenses,
# or other export authority as may be required before exporting such
# information to foreign countries or providing access to foreign persons.
import time
import datetime
import mock
import os
import os.path
import nose
import nose.tools
import ait.core
from ait.core import dmc
LEAPSECOND_DATA_RESPONSE = '''#
# Updated through IERS Bulletin C55
# File expires on: 28 December 2018
#
#@ 3754944000
#
2272060800 10 # 1 Jan 1972
2287785600 11 # 1 Jul 1972
2303683200 12 # 1 Jan 1973
2335219200 13 # 1 Jan 1974
2366755200 14 # 1 Jan 1975
2398291200 15 # 1 Jan 1976
2429913600 16 # 1 Jan 1977
2461449600 17 # 1 Jan 1978
2492985600 18 # 1 Jan 1979
2524521600 19 # 1 Jan 1980
2571782400 20 # 1 Jul 1981
2603318400 21 # 1 Jul 1982
2634854400 22 # 1 Jul 1983
2698012800 23 # 1 Jul 1985
2776982400 24 # 1 Jan 1988
2840140800 25 # 1 Jan 1990
2871676800 26 # 1 Jan 1991
2918937600 27 # 1 Jul 1992
2950473600 28 # 1 Jul 1993
2982009600 29 # 1 Jul 1994
3029443200 30 # 1 Jan 1996
3076704000 31 # 1 Jul 1997
'''
class MockResponse:
def __init__(self, text, status_code):
self.text = text
self.status_code = status_code
def test_getTimestampUTC():
expected = time.strftime('%Y-%j', time.gmtime())
actual = time.strftime('%Y-%j', time.gmtime(dmc.getTimestampUTC()[0]))
assert actual == expected
def test_getUTCDatetimeDOY_w_days():
days = 1
t = datetime.datetime.utcnow() + datetime.timedelta(days=days)
timestamp = t.timetuple()
exp_year = timestamp.tm_year
exp_day = '%03d' % timestamp.tm_yday
dtime = dmc.getUTCDatetimeDOY(days=days).split('T')[0].split('-')
assert str(exp_year) == dtime[0]
assert str(exp_day) == dtime[1]
def test_leap_second_attrs():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat"
)
ls = dmc.LeapSeconds
ls._load_leap_second_data()
assert ls.leapseconds == ls._data['leapseconds']
assert ls.valid_date == ls._data['valid']
assert ls.get_current_GPS_offset() == ls.leapseconds[-1][-1]
@nose.tools.raises(ValueError)
def test_leap_second_by_date_invalid_gps_date():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat"
)
dmc.LeapSeconds._load_leap_second_data()
dmc.LeapSeconds.get_GPS_offset_for_date(datetime.datetime(1980, 1, 1))
def test_leap_second_by_date():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat"
)
ls = dmc.LeapSeconds
ls._load_leap_second_data()
assert ls.get_GPS_offset_for_date(datetime.datetime(1981, 1, 1)) == 0
assert ls.get_GPS_offset_for_date(datetime.datetime(1981, 7, 1)) == 1
assert ls.get_GPS_offset_for_date(datetime.datetime(1982, 7, 1)) == 2
assert ls.get_GPS_offset_for_date(datetime.datetime(1983, 7, 1)) == 3
assert ls.get_GPS_offset_for_date(datetime.datetime(1985, 7, 1)) == 4
assert ls.get_GPS_offset_for_date(datetime.datetime(1988, 1, 1)) == 5
assert ls.get_GPS_offset_for_date(datetime.datetime(1990, 1, 1)) == 6
assert ls.get_GPS_offset_for_date(datetime.datetime(1991, 1, 1)) == 7
assert ls.get_GPS_offset_for_date(datetime.datetime(1992, 7, 1)) == 8
assert ls.get_GPS_offset_for_date(datetime.datetime(1993, 7, 1)) == 9
assert ls.get_GPS_offset_for_date(datetime.datetime(1994, 7, 1)) == 10
assert ls.get_GPS_offset_for_date(datetime.datetime(1996, 1, 1)) == 11
assert ls.get_GPS_offset_for_date(datetime.datetime(1997, 7, 1)) == 12
assert ls.get_GPS_offset_for_date(datetime.datetime(1999, 1, 1)) == 13
assert ls.get_GPS_offset_for_date(datetime.datetime(2006, 1, 1)) == 14
assert ls.get_GPS_offset_for_date(datetime.datetime(2009, 1, 1)) == 15
assert ls.get_GPS_offset_for_date(datetime.datetime(2012, 7, 1)) == 16
assert ls.get_GPS_offset_for_date(datetime.datetime(2015, 7, 1)) == 17
assert ls.get_GPS_offset_for_date(datetime.datetime(2017, 1, 1)) == 18
# Make sure not supplying a date returns the offset for the current date
assert (ls.get_GPS_offset_for_date(datetime.datetime.utcnow()) ==
ls.get_GPS_offset_for_date())
def test_leap_second_data_load():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "leapseconds.dat"
)
assert type(dmc.LeapSeconds.leapseconds) == type([])
assert dmc.LeapSeconds.leapseconds[0] == (datetime.datetime(1981, 7, 1), 1)
assert type(dmc.LeapSeconds.valid_date) == type(datetime.datetime.now())
@nose.tools.raises(ValueError)
@mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 400)))
def test_failed_leapsecond_load_and_update():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "invalidpath", "leapseconds.dat"
)
dmc.LeapSeconds._data = None
dmc.LeapSeconds._load_leap_second_data()
@mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 200)))
def test_update_leap_second_data():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "tmp_leapseconds.out"
)
dmc.LeapSeconds._data = None
dmc.LeapSeconds._update_leap_second_data()
assert type(dmc.LeapSeconds.leapseconds) == type([])
assert dmc.LeapSeconds.leapseconds[0] == (datetime.datetime(1981, 7, 1), 1)
assert type(dmc.LeapSeconds.valid_date) == type(datetime.datetime.now())
assert os.path.isfile(ait.config.leapseconds.filename)
os.remove(ait.config.leapseconds.filename)
@nose.tools.raises(ValueError)
@mock.patch('requests.get', mock.MagicMock(return_value=MockResponse(LEAPSECOND_DATA_RESPONSE, 400)))
def test_unable_to_pull_leapsecond_data():
ait.config.leapseconds._config['filename'] = os.path.join(
os.path.dirname(__file__), "testdata", "dmc", "tmp_leapseconds.out"
)
dmc.LeapSeconds._data = None
dmc.LeapSeconds._update_leap_second_data()
if __name__ == '__main__':
nose.main()
| ait/core/test/test_dmc.py | 6,860 |
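The first column of the leap second data above counts seconds since the NTP epoch (1900-01-01); a quick standalone check that 2272060800 really lands on 1 Jan 1972:
import datetime
ntp_epoch = datetime.datetime(1900, 1, 1)
print(ntp_epoch + datetime.timedelta(seconds=2272060800))  # 1972-01-01 00:00:00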
# Copyright 2018 the GPflow authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy.random import randn
import tensorflow as tf
import pytest
import gpflow
from gpflow import logdensities, settings
from gpflow.test_util import session_tf
from scipy.stats import multivariate_normal as mvn
from numpy.testing import assert_allclose
rng = np.random.RandomState(1)
@pytest.mark.parametrize("x", [randn(4,10), randn(4,1)])
@pytest.mark.parametrize("mu", [randn(4,10), randn(4,1)])
@pytest.mark.parametrize("cov_sqrt", [randn(4,4), np.eye(4)])
def test_multivariate_normal(session_tf, x, mu, cov_sqrt):
cov = np.dot(cov_sqrt, cov_sqrt.T)
L = np.linalg.cholesky(cov)
x_tf = tf.placeholder(settings.float_type)
mu_tf = tf.placeholder(settings.float_type)
gp_result = logdensities.multivariate_normal(
x_tf, mu_tf, tf.convert_to_tensor(L))
gp_result = session_tf.run(gp_result, feed_dict={x_tf: x, mu_tf: mu})
if mu.shape[1] > 1:
if x.shape[1] > 1:
sp_result = [mvn.logpdf(x[:,i], mu[:,i], cov) for i in range(mu.shape[1])]
else:
sp_result = [mvn.logpdf(x.ravel(), mu[:, i], cov) for i in range(mu.shape[1])]
else:
sp_result = mvn.logpdf(x.T, mu.ravel(), cov)
assert_allclose(gp_result, sp_result)
def test_shape_asserts(session_tf):
A = np.random.randn(5)
B = np.random.randn(5)
L = np.tril(np.random.randn(5, 5))
# Static shape check:
with pytest.raises(ValueError):
tA = tf.identity(A)
tB = tf.identity(B)
tL = tf.identity(L)
res = logdensities.multivariate_normal(tA, tB, tL)
# Dynamic shape check:
# the following results in a segfault before PR#964
with pytest.raises(tf.errors.InvalidArgumentError):
vA = tf.placeholder(tf.float64)
vB = tf.placeholder(tf.float64)
vL = tf.placeholder(tf.float64)
res = logdensities.multivariate_normal(vA, vB, vL)
session_tf.run(res, {vA: A, vB: B, vL: L})
| tests/test_logdensities.py | 2,521 | Copyright 2018 the GPflow authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Static shape check: Dynamic shape check: the following results in a segfault before PR964 | 642 | en | 0.857075 |
# -*- coding: utf-8 -*-
"""
wsproto/handshake
~~~~~~~~~~~~~~~~~~
An implementation of WebSocket handshakes.
"""
from collections import deque
from typing import Deque, Dict, Generator, List, Optional, Union
import h11
from .connection import Connection, ConnectionState, ConnectionType
from .events import AcceptConnection, Event, RejectConnection, RejectData, Request
from .extensions import Extension
from .typing import Headers
from .utilities import (
generate_accept_token,
generate_nonce,
LocalProtocolError,
normed_header_dict,
RemoteProtocolError,
split_comma_header,
)
# RFC6455, Section 4.2.1/6 - Reading the Client's Opening Handshake
WEBSOCKET_VERSION = b"13"
class H11Handshake:
"""A Handshake implementation for HTTP/1.1 connections."""
def __init__(self, connection_type: ConnectionType) -> None:
self.client = connection_type is ConnectionType.CLIENT
self._state = ConnectionState.CONNECTING
if self.client:
self._h11_connection = h11.Connection(h11.CLIENT)
else:
self._h11_connection = h11.Connection(h11.SERVER)
self._connection: Optional[Connection] = None
self._events: Deque[Event] = deque()
self._initiating_request: Optional[Request] = None
self._nonce: Optional[bytes] = None
@property
def state(self) -> ConnectionState:
return self._state
@property
def connection(self) -> Optional[Connection]:
"""Return the established connection.
This will either return the connection or raise a
LocalProtocolError if the connection has not yet been
established.
:rtype: h11.Connection
"""
return self._connection
def initiate_upgrade_connection(self, headers: Headers, path: str) -> None:
"""Initiate an upgrade connection.
This should be used if the request has already be received and
parsed.
:param list headers: HTTP headers represented as a list of 2-tuples.
:param str path: A URL path.
"""
if self.client:
raise LocalProtocolError(
"Cannot initiate an upgrade connection when acting as the client"
)
upgrade_request = h11.Request(method=b"GET", target=path, headers=headers)
h11_client = h11.Connection(h11.CLIENT)
self.receive_data(h11_client.send(upgrade_request))
def send(self, event: Event) -> bytes:
"""Send an event to the remote.
This will return the bytes to send based on the event or raise
a LocalProtocolError if the event is not valid given the
state.
:returns: Data to send to the WebSocket peer.
:rtype: bytes
"""
data = b""
if isinstance(event, Request):
data += self._initiate_connection(event)
elif isinstance(event, AcceptConnection):
data += self._accept(event)
elif isinstance(event, RejectConnection):
data += self._reject(event)
elif isinstance(event, RejectData):
data += self._send_reject_data(event)
else:
raise LocalProtocolError(
"Event {} cannot be sent during the handshake".format(event)
)
return data
def receive_data(self, data: bytes) -> None:
"""Receive data from the remote.
A list of events that the remote peer triggered by sending
this data can be retrieved with :meth:`events`.
:param bytes data: Data received from the WebSocket peer.
"""
self._h11_connection.receive_data(data)
while True:
try:
event = self._h11_connection.next_event()
except h11.RemoteProtocolError:
raise RemoteProtocolError(
"Bad HTTP message", event_hint=RejectConnection()
)
if (
isinstance(event, h11.ConnectionClosed)
or event is h11.NEED_DATA
or event is h11.PAUSED
):
break
if self.client:
if isinstance(event, h11.InformationalResponse):
if event.status_code == 101:
self._events.append(self._establish_client_connection(event))
else:
self._events.append(
RejectConnection(
headers=event.headers,
status_code=event.status_code,
has_body=False,
)
)
self._state = ConnectionState.CLOSED
elif isinstance(event, h11.Response):
self._state = ConnectionState.REJECTING
self._events.append(
RejectConnection(
headers=event.headers,
status_code=event.status_code,
has_body=True,
)
)
elif isinstance(event, h11.Data):
self._events.append(
RejectData(data=event.data, body_finished=False)
)
elif isinstance(event, h11.EndOfMessage):
self._events.append(RejectData(data=b"", body_finished=True))
self._state = ConnectionState.CLOSED
else:
if isinstance(event, h11.Request):
self._events.append(self._process_connection_request(event))
def events(self) -> Generator[Event, None, None]:
"""Return a generator that provides any events that have been generated
by protocol activity.
:returns: a generator that yields H11 events.
"""
while self._events:
yield self._events.popleft()
############ Server mode methods
def _process_connection_request(self, event: h11.Request) -> Request:
if event.method != b"GET":
raise RemoteProtocolError(
"Request method must be GET", event_hint=RejectConnection()
)
connection_tokens = None
extensions: List[str] = []
host = None
key = None
subprotocols: List[str] = []
upgrade = b""
version = None
headers: Headers = []
for name, value in event.headers:
name = name.lower()
if name == b"connection":
connection_tokens = split_comma_header(value)
elif name == b"host":
host = value.decode("ascii")
continue # Skip appending to headers
elif name == b"sec-websocket-extensions":
extensions = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-key":
key = value
elif name == b"sec-websocket-protocol":
subprotocols = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-version":
version = value
elif name == b"upgrade":
upgrade = value
headers.append((name, value))
if connection_tokens is None or not any(
token.lower() == "upgrade" for token in connection_tokens
):
raise RemoteProtocolError(
"Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
)
if version != WEBSOCKET_VERSION:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Version'",
event_hint=RejectConnection(
headers=[(b"Sec-WebSocket-Version", WEBSOCKET_VERSION)],
status_code=426,
),
)
if key is None:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Key'", event_hint=RejectConnection()
)
if upgrade.lower() != b"websocket":
raise RemoteProtocolError(
"Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
)
if version is None:
raise RemoteProtocolError(
"Missing header, 'Sec-WebSocket-Version'", event_hint=RejectConnection()
)
self._initiating_request = Request(
extensions=extensions,
extra_headers=headers,
host=host,
subprotocols=subprotocols,
target=event.target.decode("ascii"),
)
return self._initiating_request
def _accept(self, event: AcceptConnection) -> bytes:
request_headers = normed_header_dict(self._initiating_request.extra_headers)
nonce = request_headers[b"sec-websocket-key"]
accept_token = generate_accept_token(nonce)
headers = [
(b"Upgrade", b"WebSocket"),
(b"Connection", b"Upgrade"),
(b"Sec-WebSocket-Accept", accept_token),
]
if event.subprotocol is not None:
if event.subprotocol not in self._initiating_request.subprotocols:
raise LocalProtocolError(
"unexpected subprotocol {}".format(event.subprotocol)
)
headers.append(
(b"Sec-WebSocket-Protocol", event.subprotocol.encode("ascii"))
)
if event.extensions:
accepts = server_extensions_handshake( # type: ignore
self._initiating_request.extensions, event.extensions
)
if accepts:
headers.append((b"Sec-WebSocket-Extensions", accepts))
response = h11.InformationalResponse(
status_code=101, headers=headers + event.extra_headers
)
self._connection = Connection(
ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
event.extensions,
)
self._state = ConnectionState.OPEN
return self._h11_connection.send(response)
def _reject(self, event: RejectConnection) -> bytes:
if self.state != ConnectionState.CONNECTING:
raise LocalProtocolError(
"Connection cannot be rejected in state %s" % self.state
)
headers = event.headers
if not event.has_body:
headers.append((b"content-length", b"0"))
response = h11.Response(status_code=event.status_code, headers=headers)
data = self._h11_connection.send(response)
self._state = ConnectionState.REJECTING
if not event.has_body:
data += self._h11_connection.send(h11.EndOfMessage())
self._state = ConnectionState.CLOSED
return data
def _send_reject_data(self, event: RejectData) -> bytes:
if self.state != ConnectionState.REJECTING:
raise LocalProtocolError(
"Cannot send rejection data in state {}".format(self.state)
)
data = self._h11_connection.send(h11.Data(data=event.data))
if event.body_finished:
data += self._h11_connection.send(h11.EndOfMessage())
self._state = ConnectionState.CLOSED
return data
############ Client mode methods
def _initiate_connection(self, request: Request) -> bytes:
self._initiating_request = request
self._nonce = generate_nonce()
headers = [
(b"Host", request.host.encode("ascii")),
(b"Upgrade", b"WebSocket"),
(b"Connection", b"Upgrade"),
(b"Sec-WebSocket-Key", self._nonce),
(b"Sec-WebSocket-Version", WEBSOCKET_VERSION),
]
if request.subprotocols:
headers.append(
(
b"Sec-WebSocket-Protocol",
(", ".join(request.subprotocols)).encode("ascii"),
)
)
if request.extensions:
offers = {e.name: e.offer() for e in request.extensions} # type: ignore
extensions = []
for name, params in offers.items():
name = name.encode("ascii")
if params is True:
extensions.append(name)
elif params:
extensions.append(
b"%s; %s" % (name, params.encode("ascii")) # type: ignore
)
if extensions:
headers.append((b"Sec-WebSocket-Extensions", b", ".join(extensions)))
upgrade = h11.Request(
method=b"GET",
target=request.target.encode("ascii"),
headers=headers + request.extra_headers,
)
return self._h11_connection.send(upgrade)
def _establish_client_connection(
self, event: h11.InformationalResponse
) -> AcceptConnection: # noqa: MC0001
accept = None
connection_tokens = None
accepts: List[str] = []
subprotocol = None
upgrade = b""
headers: Headers = []
for name, value in event.headers:
name = name.lower()
if name == b"connection":
connection_tokens = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-extensions":
accepts = split_comma_header(value)
continue # Skip appending to headers
elif name == b"sec-websocket-accept":
accept = value
continue # Skip appending to headers
elif name == b"sec-websocket-protocol":
subprotocol = value
continue # Skip appending to headers
elif name == b"upgrade":
upgrade = value
continue # Skip appending to headers
headers.append((name, value))
if connection_tokens is None or not any(
token.lower() == "upgrade" for token in connection_tokens
):
raise RemoteProtocolError(
"Missing header, 'Connection: Upgrade'", event_hint=RejectConnection()
)
if upgrade.lower() != b"websocket":
raise RemoteProtocolError(
"Missing header, 'Upgrade: WebSocket'", event_hint=RejectConnection()
)
accept_token = generate_accept_token(self._nonce)
if accept != accept_token:
raise RemoteProtocolError("Bad accept token", event_hint=RejectConnection())
if subprotocol is not None:
subprotocol = subprotocol.decode("ascii")
if subprotocol not in self._initiating_request.subprotocols:
raise RemoteProtocolError(
"unrecognized subprotocol {}".format(subprotocol),
event_hint=RejectConnection(),
)
extensions = client_extensions_handshake( # type: ignore
accepts, self._initiating_request.extensions
)
self._connection = Connection(
ConnectionType.CLIENT if self.client else ConnectionType.SERVER,
extensions,
self._h11_connection.trailing_data[0],
)
self._state = ConnectionState.OPEN
return AcceptConnection(
extensions=extensions, extra_headers=headers, subprotocol=subprotocol
)
def __repr__(self) -> str:
return "{}(client={}, state={})".format(
self.__class__.__name__, self.client, self.state
)
def server_extensions_handshake(
requested: List[str], supported: List[Extension]
) -> Optional[bytes]:
"""Agree on the extensions to use returning an appropriate header value.
This returns None if there are no agreed extensions
"""
accepts: Dict[str, Union[bool, bytes]] = {}
for offer in requested:
name = offer.split(";", 1)[0].strip()
for extension in supported:
if extension.name == name:
accept = extension.accept(offer)
if accept is True:
accepts[extension.name] = True
elif accept is not False and accept is not None:
accepts[extension.name] = accept.encode("ascii") # type: ignore
if accepts:
extensions: List[bytes] = []
for name, params in accepts.items():
name = name.encode("ascii") # type: ignore
if params is True:
extensions.append(name) # type: ignore
else:
if params == b"":
extensions.append(b"%s" % (name))
else:
extensions.append(b"%s; %s" % (name, params))
return b", ".join(extensions)
return None
def client_extensions_handshake(
accepted: List[str], supported: List[Extension]
) -> List[Extension]:
    # This raises RemoteProtocolError if the accepted extension is not
    # supported.
extensions = []
for accept in accepted:
name = accept.split(";", 1)[0].strip()
for extension in supported:
if extension.name == name:
extension.finalize(accept)
extensions.append(extension)
break
else:
raise RemoteProtocolError(
"unrecognized extension {}".format(name), event_hint=RejectConnection()
)
return extensions
| wsproto/handshake.py | 17,527 |
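A minimal client-side sketch of driving the H11Handshake API defined above; the socket plumbing is assumed and elided.
from wsproto.handshake import H11Handshake
from wsproto.connection import ConnectionType
from wsproto.events import Request

client = H11Handshake(ConnectionType.CLIENT)
# Bytes to write to the transport to start the upgrade request.
data = client.send(Request(host="server.example", target="/chat"))
# ... write `data` to a socket, read the server's reply into `reply` ...
# client.receive_data(reply)
# for event in client.events():
#     ...  # AcceptConnection on success, RejectConnection/RejectData otherwise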
import numpy as np
from astropy.io import fits
from scipy.interpolate import interp1d
# Fitting Spline3
def fit_spline3(y, x, order=3, nsum=3):
y_resampled = [np.median(y[i:i + nsum]) for i in range(0, len(y) - len(y) % nsum, nsum)]
x_resampled = np.linspace(0, len(y), len(y_resampled))
# Fitting
f = interp1d(x_resampled, y_resampled, kind=order, bounds_error=True)
# Return function to be constructed with any other x array
return f
# Local Minima and Maxima
def local_minmax(data, nmin=2, nmax=2):
# Identifying indices of local minima-maxima points
id_min = (np.gradient(np.sign(np.gradient(data))) > 0).nonzero()[0] # index of local min
id_max = (np.gradient(np.sign(np.gradient(data))) < 0).nonzero()[0] # index of local max
# Taking values at min/max points
list_min, list_max = data[id_min], data[id_max]
# Sorting minima-maxima values (bigger --> lower)
list_min, id_min = (list(p) for p in zip(*sorted(zip(list_min, id_min), reverse=False)))
list_max, id_max = (list(p) for p in zip(*sorted(zip(list_max, id_max), reverse=True)))
# Taking the desired number of local minima-maxima points
list_min, list_max, id_min, id_max = list_min[0:nmin], list_max[0:nmax], id_min[0:nmin], id_max[0:nmax]
return list_min, list_max, id_min, id_max
def trim_slitedge(flat, plot=True):
# Getting input data
ccddata = fits.getdata(flat, ignore_missing_end=True)
# Collapse flat in the dispersion direction
flat_collapsed = fits.getdata(flat, ignore_missing_end=True).sum(axis=1) / ccddata.shape[1]
lines = np.arange(0, flat_collapsed.size, 1)
# Excluding first pixels in the spatial direction
cut = 3
c_flat = flat_collapsed[cut:-cut]
c_lines = np.arange(0, c_flat.size, 1)
    # Fitting cubic spline. It works very well with order=5, nsum=2
func_splin3 = fit_spline3(c_flat, c_lines, order=5, nsum=2)
smooth_flat = func_splin3(c_lines)
    # Compute 1st and 2nd derivatives of the smoothed flat
dy = np.gradient(smooth_flat)
dy2 = np.gradient(dy)
    # Regions to compute local minima-maxima
    # Region one: the first 40 percent of all data
    # Region two: the last 40 percent
    pixa, pixb = int(len(c_flat) * 0.4), int(len(c_flat) * 0.6)
dy2_one, dy2_two = dy2[0:pixa], dy2[pixb:]
    # Compute local min/max of the 2nd derivative in each region
list_min_1, list_max_1, id_min_1, id_max_1 = local_minmax(dy2_one, nmin=1, nmax=1)
list_min_2, list_max_2, id_min_2, id_max_2 = local_minmax(dy2_two, nmin=1, nmax=1)
    # Indices have to be shifted back to the original indexing of dy2
    id_min_2 = np.array(id_min_2) + pixb
    # Slit edges are local minima 1 and 2 [accounting for the cut pixels]
slit_1, slit_2 = int(np.array(id_min_1) + cut), int(np.array(id_min_2) + cut)
print slit_1, slit_2
if plot is True:
import matplotlib.pyplot as plt
c_lines += cut
plt.plot(lines, flat_collapsed, 'k-', label='Flat Collapsed')
        plt.plot(lines[slit_1:slit_2], flat_collapsed[slit_1:slit_2], 'r-', label='Trimmed Flat')
plt.plot(c_lines, dy, 'g-', label="Dy/dx")
plt.plot(c_lines, dy2, 'y-', label="Dy2/dx")
plt.plot(slit_1, list_min_1, 'bo', label='Slit Edge 1 ')
plt.plot(slit_2, list_min_2, 'ro', label='Slit Edge 2')
plt.xlim(lines.min() - 50, lines.max() + 50)
plt.legend(loc='best')
plt.show()
return slit_1, slit_2
flat = '/home/davidsanm/PyCharmProjects/GoodmanDataReduction/2016-03-20/RED/master_flat_600.fits'
trim_slitedge(flat, plot = True)
| trim_slitedge.py | 3,601 |
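With the helpers above in scope, local_minmax can be exercised on synthetic data (a pure-numpy sanity check, independent of any FITS input):
import numpy as np
y = np.sin(np.linspace(0, 4 * np.pi, 200))
# Two deepest minima and two highest maxima of the sine curve:
list_min, list_max, id_min, id_max = local_minmax(y, nmin=2, nmax=2)
print(id_min, id_max)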
# -*- encoding: utf-8 -*-
# $Id: __init__.py,v 1.8.2.2 2007/05/22 21:06:52 customdesigned Exp $
#
# This file is part of the pydns project.
# Homepage: http://pydns.sourceforge.net
#
# This code is covered by the standard Python License.
#
# __init__.py for DNS class.
__version__ = '2.3.1'
import Type,Opcode,Status,Class
from Base import DnsRequest, DNSError
from Lib import DnsResult
from Base import *
from Lib import *
Error=DNSError
from lazy import *
Request = DnsRequest
Result = DnsResult
from Serialization import Serialize,DeSerialize
#
# $Log: __init__.py,v $
# Revision 1.8.2.2 2007/05/22 21:06:52 customdesigned
# utf-8 in __init__.py
#
# Revision 1.8.2.1 2007/05/22 20:39:20 customdesigned
# Release 2.3.1
#
# Revision 1.8 2002/05/06 06:17:49 anthonybaxter
# found that the old README file called itself release 2.2. So make
# this one 2.3...
#
# Revision 1.7 2002/05/06 06:16:15 anthonybaxter
# make some sort of reasonable version string. releasewards ho!
#
# Revision 1.6 2002/03/19 13:05:02 anthonybaxter
# converted to class based exceptions (there goes the python1.4 compatibility :)
#
# removed a quite gross use of 'eval()'.
#
# Revision 1.5 2002/03/19 12:41:33 anthonybaxter
# tabnannied and reindented everything. 4 space indent, no tabs.
# yay.
#
# Revision 1.4 2001/11/26 17:57:51 stroeder
# Added __version__
#
# Revision 1.3 2001/08/09 09:08:55 anthonybaxter
# added identifying header to top of each file
#
# Revision 1.2 2001/07/19 06:57:07 anthony
# cvs keywords added
#
#
| tools/hipdnsproxy/DNS/__init__.py | 1,527 |
import os
import sys
import setuptools
# To prevent importing about and thereby breaking the coverage info we use this
# exec hack
about = {}
with open('python_utils/__about__.py') as fp:
exec(fp.read(), about)
if os.path.isfile('README.rst'):
long_description = open('README.rst').read()
else:
long_description = 'See http://pypi.python.org/pypi/python-utils/'
needs_pytest = set(['ptr', 'pytest', 'test']).intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
if __name__ == '__main__':
setuptools.setup(
name='python-utils',
version=about['__version__'],
author=about['__author__'],
author_email=about['__author_email__'],
description=about['__description__'],
url=about['__url__'],
license='BSD',
packages=setuptools.find_packages(),
long_description=long_description,
install_requires=['six'],
tests_require=['pytest'],
setup_requires=[] + pytest_runner,
classifiers=['License :: OSI Approved :: BSD License'],
)
| setup.py | 1,077 |
import logging as log
import cv2
import sys
import numpy as np
class LandmarksDetectionModel:
'''
Class for the Face Landmarks Detection Model.
Load and configure inference plugins for the specified target devices,
    and runs inference in either synchronous or asynchronous mode for the
    specified infer requests.
'''
def __init__(self, model_name, device='CPU', extensions=None, async_infer=True):
'''
Set instance variables.
'''
self.plugin = None
self.network = None
self.exec_network = None
self.infer_request_handle = None
self.input_blob = None
self.input_shape = None
self.output_blob = None
self.output_shape = None
self.model_name = model_name
self.device = device
self.extensions = extensions
self.async_infer = async_infer
def load_model(self, plugin):
'''
This method is for loading the model (in IR format) to the device specified by the user.
Default device is CPU.
'''
# Get model
model_structure = self.model_name + '.xml'
model_weights = self.model_name + '.bin'
# Initialize the plugin - load the inference engine API
# Plugin is the one already created for the Face Detection model
self.plugin = plugin
# Add a CPU extension, if applicable
if self.extensions and 'CPU' in self.device:
self.plugin.add_extension(self.extensions, self.device)
# Read the IR as IENetwork
try:
self.network = self.plugin.read_network(model=model_structure, weights=model_weights)
except:
raise ValueError("Could not initialise the network. Have you entered the correct model path?")
# Check if model and CPU plugin are supported
if self.device == 'CPU':
self.check_model()
# Load the IENetwork into the plugin
self.exec_network = self.plugin.load_network(network=self.network, device_name=self.device, num_requests=1)
# Get the input and output layers
self.input_blob = next(iter(self.network.inputs))
self.input_shape = self.network.inputs[self.input_blob].shape
self.output_blob = next(iter(self.network.outputs))
self.output_shape = self.network.outputs[self.output_blob].shape
return
def predict(self, image):
'''
This method is meant for running predictions on the input image.
'''
if np.all(np.array(image.shape)):
# Create input image to feed into the network
net_input = {self.input_blob: self.preprocess_input(image)}
# Start inference. Infer mode (async/sync) is input by user
if self.async_infer:
self.infer_request_handle = self.exec_network.start_async(request_id=0, inputs=net_input)
# Wait for the result of the inference
if self.exec_network.requests[0].wait(-1) == 0:
# Get result of the inference request
outputs = self.infer_request_handle.outputs[self.output_blob]
eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image)
else:
self.infer_request_handle = self.exec_network.infer(inputs=net_input)
# Get result of the inference request
outputs = self.infer_request_handle[self.output_blob]
eyes_coords, crop_left, crop_right = self.preprocess_output(outputs, image)
else:
eyes_coords = []
crop_left = []
crop_right = []
return eyes_coords, crop_left, crop_right
def check_model(self):
'''
        This method checks whether the model (along with the plugin) is supported on the CPU device.
        If anything is missing (such as a CPU extension), let the user know and exit the program.
'''
supported_layers = self.plugin.query_network(network=self.network, device_name='CPU')
unsupported_layers = [l for l in self.network.layers.keys() if l not in supported_layers]
if len(unsupported_layers) != 0:
log.error("Unsupported layers found: {}".format(unsupported_layers))
if self.extensions:
log.error("The extensions specified do not support some layers. Please specify a new extension.")
else:
log.error(
"Please try to specify an extension library path by using the --extensions command line argument.")
sys.exit(1)
return
def preprocess_input(self, image):
'''
Method to process inputs before feeding them into the model for inference.
'''
image = cv2.resize(image, (self.input_shape[3], self.input_shape[2]))
image = image.transpose((2, 0, 1))
image = image.reshape(1, *image.shape)
return image
def preprocess_output(self, outputs, image):
'''
Method to process outputs before feeding them into the next model for
inference or for the last step of the app.
'''
w = image.shape[1]
h = image.shape[0]
outputs = outputs[0]
xl, yl = int(outputs[0][0][0] * w), int(outputs[1][0][0] * h)
xr, yr = int(outputs[2][0][0] * w), int(outputs[3][0][0] * h)
eyes_coords = [xl, yl, xr, yr]
# Using the fact that eyes take 1/5 of your face width
# define bounding boxes around the eyes according to this
square_size = int(w / 10)
left_eye_box = [xl - square_size, yl - square_size, xl + square_size, yl + square_size]
right_eye_box = [xr - square_size, yr - square_size, xr + square_size, yr + square_size]
crop_left = image[left_eye_box[1]:left_eye_box[3], left_eye_box[0]:left_eye_box[2]]
crop_right = image[right_eye_box[1]:right_eye_box[3], right_eye_box[0]:right_eye_box[2]]
return eyes_coords, crop_left, crop_right
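# Hedged usage sketch of the class above (the model path, image file and
# OpenVINO IECore plugin are assumptions for illustration only):
#   from openvino.inference_engine import IECore
#   plugin = IECore()
#   model = LandmarksDetectionModel('models/landmarks-regression', async_infer=False)
#   model.load_model(plugin)
#   frame = cv2.imread('face.jpg')
#   eyes_coords, crop_left, crop_right = model.predict(frame)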
| src/facial_landmarks_detection.py | 6,044 | Class for the Face Landmarks Detection Model.
Load and configure inference plugins for the specified target devices,
and performs either synchronous or asynchronous modes for the
specified infer requests.
Set instance variables.
This method check whether the model (along with the plugin) is support on the CPU device.
If anything is missing (such as a CPU extension), let the user know and exit the programm.
This method is for loading the model (in IR format) to the device specified by the user.
Default device is CPU.
This method is meant for running predictions on the input image.
Method to process inputs before feeding them into the model for inference.
Method to process outputs before feeding them into the next model for
inference or for the last step of the app.
Get model Initialize the plugin - load the inference engine API Plugin is the one already created for the Face Detection model Add a CPU extension, if applicable Read the IR as IENetwork Check if model and CPU plugin are supported Load the IENetwork into the plugin Get the input and output layers Create input image to feed into the network Start inference. Infer mode (async/sync) is input by user Wait for the result of the inference Get result of the inference request Get result of the inference request Using the fact that eyes take 1/5 of your face width define bounding boxes around the eyes according to this | 1,395 | en | 0.849101 |
"""
ASGI config for FYP project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'FYP.settings')
application = get_asgi_application()
| src/FYP/FYP/asgi.py | 383 | ASGI config for FYP project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ | 209 | en | 0.7294 |
# -*- coding: utf-8 -*-
from __future__ import print_function
from IPython import get_ipython
from IPython.display import (
display,
Javascript,
)
from IPython.core import magic_arguments
from IPython.core.magic import (
Magics,
magics_class,
cell_magic,
)
from IPython.utils.importstring import import_item
import yaml
__version__ = "0.2.0"
@magics_class
class YAMLMagics(Magics):
"""
Write and load YAML in the IPython Notebook. Uses SafeLoader by default.
Example:
%%yaml x -lyaml.Loader
foo:
bar: baz
"""
def __init__(self, shell):
super(YAMLMagics, self).__init__(shell)
@cell_magic
@magic_arguments.magic_arguments()
@magic_arguments.argument(
"var_name",
default=None,
nargs="?",
help="""Name of local variable to set to parsed value"""
)
@magic_arguments.argument(
"-l", "--loader",
default="yaml.SafeLoader",
help="""Dotted-notation class to use for loading"""
)
def yaml(self, line, cell):
line = line.strip()
args = magic_arguments.parse_argstring(self.yaml, line)
display(Javascript(
"""
require(
[
"notebook/js/codecell",
"codemirror/mode/yaml/yaml"
],
function(cc){
cc.CodeCell.options_default.highlight_modes.magic_yaml = {
reg: ["^%%yaml"]
}
}
);
"""))
loader = get_ipython().user_global_ns.get(args.loader, None)
if loader is None:
loader = import_item(args.loader)
try:
val = yaml.load(cell, Loader=loader)
except yaml.YAMLError as err:
print(err)
return
if args.var_name is not None:
get_ipython().user_ns[args.var_name] = val
else:
return val
def load_ipython_extension(ip):
    ip.register_magics(YAMLMagics)
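# Hedged notebook usage sketch (the variable name `config` is illustrative):
#   In [1]: %load_ext yamlmagic
#   In [2]: %%yaml config
#      ...: foo:
#      ...:     bar: baz
# after which the local variable `config` holds {'foo': {'bar': 'baz'}}.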
| yamlmagic.py | 2,090 | Write and load YAML in the IPython Notebook. Uses SafeLoader by default.
Example:
%%yaml x -lyaml.Loader
foo:
bar: baz
-*- coding: utf-8 -*- | 160 | en | 0.575212 |
import h5py
import pickle
import numpy as np
# import read_affect_data as r
# from tqdm import tqdm
import random
from PIL import Image, ImageOps, ImageEnhance
import colorsys
# def read_h5_data_set(path):
# f = h5py.File(path, 'r')
# time_stamps = list(f[list(f.keys())[0]].keys())
# d = {time : dict() for time in time_stamps}
# for feature in list(f.keys()):
# if hasattr(f[feature], 'keys'):
# for time in tqdm(list(f[feature].keys())):
# k = list(f[feature][time].keys())[0]
# d[time][feature] = np.array(f[feature][time][k])
# return d
# def read_pkl_data_set(path):
# f = r.load_pickle(path)
# time_stamps = list(f[list(f.keys())[0]].keys())
# d = {time : dict() for time in time_stamps}
# for feature in list(f.keys()):
# if hasattr(f[feature], 'keys'):
# for time in tqdm(list(f[feature].keys())):
# if hasattr(f[feature][time], 'keys'):
# for k in list(f[feature][time].keys()):
# d[time][feature] = np.array(f[feature][time][k])
# return d
##############################################################################
# Visual
def visual_robustness(tests, noise_level=0.3, gray=True, contrast=True, s_and_p=True, gaus=True, rot=True, crop=True):
noises = []
if gray:
noises.append(grayscale)
if contrast:
noises.append(low_contrast)
if s_and_p:
noises.append(salt_and_pepper)
if gaus:
noises.append(gaussian)
if rot:
noises.append(rotate)
if crop:
noises.append(random_crop)
robustness_tests = []
for i in range(len(tests)):
img = Image.fromarray(tests[i])
for noise in noises:
img = noise(img, noise_level)
robustness_tests.append(np.array(img))
return robustness_tests
def grayscale(img, p):
if np.random.sample() <= p:
return ImageOps.grayscale(img)
else:
return img
def low_contrast(img, factor):
    if np.random.sample() <= factor:
enhancer = ImageEnhance.Contrast(img)
return enhancer.enhance(factor)
else:
return img
def inversion(img, p):
if np.random.sample() <= p:
return ImageOps.invert(img)
else:
return img
def WB(img, p):
if np.random.sample() <= p:
kelvin_table = {1000: (255, 56, 0), 1500: (255, 109, 0), 2000: (255, 137, 18), 2500: (255, 161, 72), 3000: (255, 180, 107), 3500: (255, 196, 137), 4000: (255, 209, 163), 4500: (255, 219, 186), 5000: (255, 228, 206), 5500: (
255, 236, 224), 6000: (255, 243, 239), 6500: (255, 249, 253), 7000: (245, 243, 255), 7500: (235, 238, 255), 8000: (227, 233, 255), 8500: (220, 229, 255), 9000: (214, 225, 255), 9500: (208, 222, 255), 10000: (204, 219, 255)}
        temp = np.random.choice(list(kelvin_table.keys()))
r, g, b = kelvin_table[temp]
matrix = (r / 255.0, 0.0, 0.0, 0.0,
0.0, g / 255.0, 0.0, 0.0,
0.0, 0.0, b / 255.0, 0.0)
return img.convert('RGB', matrix)
else:
return img
def colorize(img, p):
if np.random.sample() <= p:
color = np.random.choice(['red', 'blue', 'green'])
layer = Image.new('RGB', img.size, color)
return Image.blend(img, layer, 0.3)
else:
return img
def salt_and_pepper(img, p):
if np.random.sample() <= p:
output = np.copy(np.array(img))
nb_salt = np.ceil(p*output.size*0.5)
coords = [np.random.randint(0, i-1, int(nb_salt))
for i in output.shape]
for i in coords:
output[i] = 1
nb_pepper = np.ceil(p*output.size*0.5)
coords = [np.random.randint(0, i-1, int(nb_pepper))
for i in output.shape]
for i in coords:
output[i] = 0
return Image.fromarray(output)
else:
return img
def gaussian(img, p):
if np.random.sample() <= p:
        # Use the full array shape so both grayscale and RGB inputs work.
        gauss = np.random.normal(0, p, np.array(img).shape)
        return Image.fromarray((np.array(img) + gauss).astype('uint8'))
else:
return img
def rotate(img, p):
if np.random.sample() <= p:
angle = np.random.random_sample()*40-20
return img.rotate(angle, Image.BILINEAR)
else:
return img
def horizontal_flip(img, p):
if np.random.sample() <= p:
return img.transpose(Image.FLIP_LEFT_RIGHT)
else:
return img
def random_crop(img, p):
if np.random.sample() <= p:
dim = np.array(img).shape
height = dim[0]
width = dim[1]
cropped_height = height / 5
cropped_width = width / 5
init_height = np.random.random_sample() * cropped_height
init_width = np.random.random_sample() * cropped_width
end_height = height - cropped_height + init_height
end_width = width - cropped_width + init_width
        return img.crop((init_width, init_height, end_width, end_height)).resize((width, height))
else:
return img
def periodic(img, periodic_noise_filename="periodic_noise"):
height = img.height
width = img.width
output = []
for i in range(6):
noise = Image.open("{}_{}.png".format(
periodic_noise_filename, i+1)).convert("RGBA")
        # The rotate/random_crop helpers above take probabilities, so use PIL
        # directly here: enlarge, rotate by a random angle, then crop to size.
        noise = noise.resize((width*2, height*2)).rotate(
            np.random.random_sample()*360, fillcolor='white')
        noise = noise.crop((0, 0, width, height))
output.append(Image.blend(img.convert("RGBA"), noise, 0.3))
return output
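# Hedged example of the visual pipeline above (the image array is a
# placeholder; each noise fires independently with probability noise_level,
# so outputs vary between runs):
#   imgs = [np.zeros((64, 64, 3), dtype=np.uint8)]
#   noisy = visual_robustness(imgs, noise_level=0.3)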
##############################################################################
# Text
def text_robustness(tests, noise_level=0.3, swap=True, rand_mid=True, typo=True, sticky=True, omit=True):
noises = []
if swap:
noises.append(swap_letter)
if rand_mid:
noises.append(random_mid)
if typo:
noises.append(qwerty_typo)
if sticky:
noises.append(sticky_keys)
if omit:
noises.append(omission)
robustness_tests = []
for i in range(len(tests)):
newtext = []
text = tests[i].lower().split()
for word in text:
if len(word) > 3 and np.random.sample() <= noise_level:
mode = np.random.randint(len(noises))
newtext.append(noises[mode](word))
else:
newtext.append(word)
robustness_tests.append(' '.join(newtext))
return np.array(robustness_tests)
def last_char(word):
for i in range(len(word)):
if word[len(word)-1-i].isalpha():
return len(word) - 1 - i
def swap_letter(word):
# swap two random adjacent letters
last = last_char(word)
pos = np.random.randint(last-2) + 1
return word[:pos] + word[pos+1] + word[pos] + word[pos+2:]
def random_mid(word):
# randomly permute the middle chunk of a word (all letters except the first and last letter)
last = last_char(word)
mid = [char for char in word[1:last]]
np.random.shuffle(mid)
return word[0]+''.join(mid)+word[last:]
def qwerty_typo(word, num_typo=1):
# randomly replace num_typo number of letters of a word to a one adjacent to it on qwerty keyboard
qwerty = {'q': ['w'], 'w': ['q', 'e', 's'], 'e': ['w', 'r', 'd'], 'r': ['e', 't', 'f'], 't': ['r', 'g', 'y'], 'y': ['t', 'u', 'h'], 'u': ['y', 'i', 'j'], 'i': ['u', 'o', 'k'], 'o': ['i', 'p', 'l'], 'p': ['o'], 'a': ['q', 's', 'z'], 's': ['a', 'w', 'd', 'x', 'z'], 'd': ['s', 'e', 'f', 'x', 'c'], 'f': ['d', 'r', 'g', 'c', 'v'], 'g': [
'f', 't', 'h', 'v', 'b'], 'h': ['g', 'y', 'j', 'b', 'n'], 'j': ['h', 'u', 'k', 'n', 'm'], 'k': ['j', 'i', 'l', 'm'], 'l': ['k', 'o'], 'z': ['a', 's', 'x'], 'x': ['z', 's', 'd', 'c'], 'c': ['x', 'd', 'f', 'v'], 'v': ['c', 'f', 'g', 'b'], 'b': ['v', 'g', 'h', 'n'], 'n': ['b', 'h', 'm', 'j'], 'm': ['n', 'j', 'k']}
last = last_char(word)
typos = np.arange(last+1)
np.random.shuffle(typos)
for i in range(num_typo):
typo = qwerty[word[typos[i]]]
key = typo[np.random.randint(len(typo))]
word = word[:typos[i]] + key + word[typos[i]+1:]
return word
def sticky_keys(word, num_sticky=1):
# randomly repeat num_sticky number of letters of a word
last = last_char(word)
sticky = np.arange(last+1)
np.random.shuffle(sticky)
for i in range(num_sticky):
word = word[:sticky[i]] + word[sticky[i]] + word[sticky[i]:]
return word
def omission(word, num_omit=1):
# randomly omit num_omit number of letters of a word
last = last_char(word)
for i in range(num_omit):
omit = np.random.randint(last-1) + 1
word = word[:omit] + word[omit+1:]
last -= 1
return word
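# Hedged example of the text pipeline above (the sentence is illustrative;
# only words longer than 3 characters are perturbed, each with probability
# noise_level):
#   tests = ['the quick brown fox jumps over the lazy dog']
#   noisy = text_robustness(tests, noise_level=1.0)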
##############################################################################
# Audio
def audio_robustness(tests, noise_level=0.3, noises=None):
    if noises is None:
noises = [additive_white_gaussian_noise,
audio_random_dropout, audio_structured_dropout]
robustness_tests = np.zeros(tests.shape)
for i in range(len(tests)):
if np.random.sample() <= noise_level:
mode = np.random.randint(len(noises))
robustness_tests[i] = noises[mode](tests[i], noise_level)
return robustness_tests
def additive_white_gaussian_noise(signal, noise_level):
# SNR = 10 * log((RMS of signal)^2 / (RMS of noise)^2)
# RMS_s = np.sqrt(np.mean(signal*signal))
# RMS_n = np.sqrt(RMS_s*RMS_s / (np.power(10, SNR/10)))
noise = np.random.normal(0, noise_level, signal.shape[0])
return signal + noise
def audio_structured_dropout(sig, p, step=10):
# each consecutive time steps are chosen with probability p to be dropped
res = [sig[i] for i in range(len(sig))]
for i in range(len(res)-step+1):
if (res[i] != 0) and np.random.random_sample() < p:
for j in range(step):
res[i+j] = 0
return res
def audio_random_dropout(sig, p):
    return audio_structured_dropout(sig, p, step=1)
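# Hedged example of the audio helpers above (the random 1-D signal is a
# placeholder for real waveform data):
#   sig = np.random.normal(0, 1, 16000)
#   noisy = audio_robustness(np.array([sig]), noise_level=0.3)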
##############################################################################
# Time-Series
def timeseries_robustness(tests, noise_level=0.3, noise=True, rand_drop=True, struct_drop=True, modality_map=None):
robust_tests = np.array(tests)
if noise:
robust_tests = white_noise(robust_tests, noise_level)
if rand_drop:
robust_tests = random_drop(robust_tests, noise_level)
if struct_drop:
robust_tests = structured_drop(robust_tests, noise_level, modality_map)
return robust_tests
# add noise sampled from zero-mean Gaussian with standard deviation p at every time step
def white_noise(data, p):
for i in range(len(data)):
for time in range(len(data[i])):
data[i][time] += np.random.normal(0, p)
return data
# each entry is dropped independently with probability p
def random_drop(data, p):
for i in range(len(data)):
for time in range(len(data[i])):
for feature in range(len(data[i][time])):
if np.random.random_sample() < p:
data[i][time][feature] = 0
# else:
# result = dict()
# for time in data:
# for feature in data[time]:
# if np.random.random_sample() < p:
# result[time][feature] = np.zeros(data[time][feature].shape)
# else:
# result[time][feature] = data[time][feature]
return data
# independently for each modality, each time step is chosen with probability p
# at which all feature dimensions are dropped
def structured_drop(data, p, modality_map):
for i in range(len(data)):
for time in range(len(data[i])):
if np.random.random_sample() < p:
data[i][time] = np.zeros(data[i][time].shape)
# else:
# result = dict()
# for time in data:
# for modality in modality_map.keys():
# if np.random.random_sample() < p:
# for feature in modality_map[modality]:
# result[time][feature] = np.zeros(data[time][feature].shape)
# else:
# for feature in modality_map[modality]:
# result[time][feature] = data[time][feature]
return data
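# Hedged example of the time-series helpers above (shape is illustrative:
# 2 samples, 10 time steps, 4 features per step):
#   data = np.random.normal(0, 1, (2, 10, 4))
#   noisy = timeseries_robustness(data, noise_level=0.1)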
##############################################################################
# Tabular
def add_tabular_noise(tests, noise_level=0.3, drop=True, swap=True):
robust_tests = np.array(tests)
if drop:
robust_tests = drop_entry(robust_tests, noise_level)
if swap:
robust_tests = swap_entry(robust_tests, noise_level)
return robust_tests
def drop_entry(data, p):
for i in range(len(data)):
for j in range(len(data[i])):
if np.random.random_sample() < p:
data[i][j] = 0
return data
def swap_entry(data, p):
for i in range(len(data)):
for j in range(1, len(data[i])):
if np.random.random_sample() < p:
                # Tuple assignment so the two entries are actually exchanged.
                data[i][j], data[i][j-1] = data[i][j-1], data[i][j]
return data
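# Hedged example of the tabular helpers above (the values are illustrative):
#   data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
#   noisy = add_tabular_noise(data, noise_level=0.5)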
if __name__ == '__main__':
print('='*5 + 'Multi Affect' + '='*5)
print('1. CMU-MOSI, Aligned')
print('2. CMU-MOSI, Unaligned')
print('3. CMU-MOSEI, Aligned')
print('4. CMU-MOSEI, Unaligned')
print('5. CMU-POM, Aligned')
print('6. CMU-POM, Unaligned')
print('7. UR-Funny')
print('8. Sarcasm')
print('9. Deception')
opt = int(input('Input option: '))
print('='*22)
if opt == 1:
data = read_h5_data_set('./mosi/mosi.hdf5')
modality_map = {'vision': ['FACET_4.2', 'OpenFace_1'], 'text': [
'words'], 'vocal': ['COVAREP', 'OpenSmile_emobase2010']}
elif opt == 2:
print("To be implemented!")
# data = read_h5_data_set('./mosi/mosi_unalign.hdf5')
elif opt == 3:
data = read_h5_data_set('./mosei/mosei.hdf5')
modality_map = {'vision': ['OpenFace_2'],
'text': ['words'], 'vocal': ['COVAREP']}
elif opt == 4:
print("To be implemented!")
# data = read_h5_data_set('./mosei/mosei_unalign.hdf5')
elif opt == 5:
data = read_h5_data_set('./pom/pom.hdf5')
modality_map = {'vision': ['FACET_4.2', 'OpenFace2'], 'text': [
'words'], 'vocal': ['COVAREP']}
elif opt == 6:
print("To be implemented!")
# data = read_h5_data_set('./pom/pom_unalign.hdf5')
elif opt == 7:
data = read_pkl_data_set('./urfunny/urfunny.pkl')
# time = data[list(data.keys())[0]]
# k = data[list(data[time].keys())[0]]
elif opt == 8:
print("To be implemented!")
# display_sarcasm_data_set('./sarcasm/sarcasm.pkl')
elif opt == 9:
print("To be implemented!")
# display_pkl_data_set('./deception/deception.pkl')
else:
print('Wrong Input!')
| deprecated/robustness_tests_draft.py | 14,949 | import read_affect_data as r from tqdm import tqdm def read_h5_data_set(path): f = h5py.File(path, 'r') time_stamps = list(f[list(f.keys())[0]].keys()) d = {time : dict() for time in time_stamps} for feature in list(f.keys()): if hasattr(f[feature], 'keys'): for time in tqdm(list(f[feature].keys())): k = list(f[feature][time].keys())[0] d[time][feature] = np.array(f[feature][time][k]) return d def read_pkl_data_set(path): f = r.load_pickle(path) time_stamps = list(f[list(f.keys())[0]].keys()) d = {time : dict() for time in time_stamps} for feature in list(f.keys()): if hasattr(f[feature], 'keys'): for time in tqdm(list(f[feature].keys())): if hasattr(f[feature][time], 'keys'): for k in list(f[feature][time].keys()): d[time][feature] = np.array(f[feature][time][k]) return d Visual Text swap two random adjacent letters randomly permute the middle chunk of a word (all letters except the first and last letter) randomly replace num_typo number of letters of a word to a one adjacent to it on qwerty keyboard randomly repeat num_sticky number of letters of a word randomly omit num_omit number of letters of a word Audio SNR = 10 * log((RMS of signal)^2 / (RMS of noise)^2) RMS_s = np.sqrt(np.mean(signal*signal)) RMS_n = np.sqrt(RMS_s*RMS_s / (np.power(10, SNR/10))) each consecutive time steps are chosen with probability p to be dropped Time-Series add noise sampled from zero-mean Gaussian with standard deviation p at every time step each entry is dropped independently with probability p else: result = dict() for time in data: for feature in data[time]: if np.random.random_sample() < p: result[time][feature] = np.zeros(data[time][feature].shape) else: result[time][feature] = data[time][feature] independently for each modality, each time step is chosen with probability p at which all feature dimensions are dropped else: result = dict() for time in data: for modality in modality_map.keys(): if np.random.random_sample() < p: for feature in modality_map[modality]: result[time][feature] = np.zeros(data[time][feature].shape) else: for feature in modality_map[modality]: result[time][feature] = data[time][feature] Tabular data = read_h5_data_set('./mosi/mosi_unalign.hdf5') data = read_h5_data_set('./mosei/mosei_unalign.hdf5') data = read_h5_data_set('./pom/pom_unalign.hdf5') time = data[list(data.keys())[0]] k = data[list(data[time].keys())[0]] display_sarcasm_data_set('./sarcasm/sarcasm.pkl') display_pkl_data_set('./deception/deception.pkl') | 2,823 | en | 0.59783 |
import os, sys
import ROOT
from ROOT import TH1F,TH2F,TFile,TTree,TCanvas, TProfile, TNtuple, gErrorIgnoreLevel, kInfo, kWarning
from tqdm import tqdm
from particle import Particle, PDGID
tqdm_disable = False
ROOT.gErrorIgnoreLevel = kWarning
File = TFile("/home/kshi/Zprime/Zp_data_Ntuple/WmTo3l_ZpM45.root","READ")
tree = File.Get("Ana/passedEvents")
nEntries = tree.GetEntries()
W, p, none, other = 0, 0, 0, 0
others = []
for i in tqdm(range(0, nEntries)):
tree.GetEntry(i)
#for j in range(0,tree.lep_matchedR03_MomMomId.size()):
# if abs(tree.lep_matchedR03_MomMomId[j])>=11 and abs(tree.lep_matchedR03_MomMomId[j])<=18:
# print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " MomMomid is: " + lepton#str(tree.lep_matchedR03_MomMomId[j])
#for j in range(0,tree.lep_matchedR03_PdgId.size()):
# if (abs(tree.lep_matchedR03_PdgId[j])<11 or abs(tree.lep_matchedR03_PdgId[j]>18)) and tree.lep_matchedR03_PdgId[j]!=0:
# print "Event:" + str(tree.Event) + " has lepton id of " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name
#for j in range(0,tree.GENlep_id.size()):
# if PDGID(tree.GENlep_id[j]).is_valid==False:
# print "Invalid lep id " + str(tree.GENlep_id[j])
# if PDGID(tree.GENlep_MomId[j]).is_valid==False:
# print "Invalid lep mom id " + str(tree.GENlep_MomId[j])
# if PDGID(tree.GENlep_MomMomId[j]).is_valid==False:
# print "Invalid lep mom mom id " + str(tree.GENlep_MomMomId[j])
# else:
# print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.GENlep_id[j]).name + " that came from a " + Particle.from_pdgid(tree.GENlep_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.GENlep_MomMomId[j]).name
for j in range(0,tree.lep_matchedR03_PdgId.size()):
#if PDGID(tree.lep_matchedR03_PdgId[j]).is_valid==False:
# print "Invalid lep id " + str(tree.lep_matchedR03_PdgId[j])
#if PDGID(tree.lep_matchedR03_MomId[j]).is_valid==False:
# print "Invalid lep mom id " + str(tree.lep_matchedR03_MomId[j])
#if PDGID(tree.lep_matchedR03_MomMomId[j]).is_valid==False:
# print "Invalid lep mom mom id " + str(tree.lep_matchedR03_MomMomId[j])
##if tree.lep_matchedR03_PdgId[j]!=999888 and tree.lep_matchedR03_MomId!=999888 and tree.lep_matchedR03_MomMomId[j]!=999888:
## print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name
#elif tree.lep_matchedR03_MomId[j]==999888:
# print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + str(tree.lep_matchedR03_MomId[j]) + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name
if tree.lep_matchedR03_MomId[j]==999888:
if abs(tree.lep_matchedR03_MomMomId[j])==24:
W+=1
elif abs(tree.lep_matchedR03_MomMomId[j])==2212:
p+=1
elif abs(tree.lep_matchedR03_MomMomId[j])==0:
none+=1
else:
other+=1
others.append(tree.lep_matchedR03_MomMomId[j])
print "Sources of Z':"
print "W = " + str(W) + ", p = " + str(p) + ", none = " + str(none) + ", other = " + str(other)
for i in range(0, len(others)):
print "Other MomMomId: " + str(others[i])
| Wto3l/mom_counting.py | 3,474 | for j in range(0,tree.lep_matchedR03_MomMomId.size()): if abs(tree.lep_matchedR03_MomMomId[j])>=11 and abs(tree.lep_matchedR03_MomMomId[j])<=18: print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " MomMomid is: " + leptonstr(tree.lep_matchedR03_MomMomId[j])for j in range(0,tree.lep_matchedR03_PdgId.size()): if (abs(tree.lep_matchedR03_PdgId[j])<11 or abs(tree.lep_matchedR03_PdgId[j]>18)) and tree.lep_matchedR03_PdgId[j]!=0: print "Event:" + str(tree.Event) + " has lepton id of " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).namefor j in range(0,tree.GENlep_id.size()): if PDGID(tree.GENlep_id[j]).is_valid==False: print "Invalid lep id " + str(tree.GENlep_id[j]) if PDGID(tree.GENlep_MomId[j]).is_valid==False: print "Invalid lep mom id " + str(tree.GENlep_MomId[j]) if PDGID(tree.GENlep_MomMomId[j]).is_valid==False: print "Invalid lep mom mom id " + str(tree.GENlep_MomMomId[j]) else: print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.GENlep_id[j]).name + " that came from a " + Particle.from_pdgid(tree.GENlep_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.GENlep_MomMomId[j]).nameif PDGID(tree.lep_matchedR03_PdgId[j]).is_valid==False: print "Invalid lep id " + str(tree.lep_matchedR03_PdgId[j])if PDGID(tree.lep_matchedR03_MomId[j]).is_valid==False: print "Invalid lep mom id " + str(tree.lep_matchedR03_MomId[j])if PDGID(tree.lep_matchedR03_MomMomId[j]).is_valid==False: print "Invalid lep mom mom id " + str(tree.lep_matchedR03_MomMomId[j])if tree.lep_matchedR03_PdgId[j]!=999888 and tree.lep_matchedR03_MomId!=999888 and tree.lep_matchedR03_MomMomId[j]!=999888: print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomId[j]).name + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).nameelif tree.lep_matchedR03_MomId[j]==999888: print "Event:" + str(tree.Event) + ", Lepton " + str(j) + " is a " + Particle.from_pdgid(tree.lep_matchedR03_PdgId[j]).name + " that came from a " + str(tree.lep_matchedR03_MomId[j]) + " which came from a " + Particle.from_pdgid(tree.lep_matchedR03_MomMomId[j]).name | 2,277 | en | 0.619532 |
import logging
from korbit.client.korbit_client import KorbitClient
logging.basicConfig(level=logging.INFO)
properties_sandbox_file = '../properties_sandbox_test.json'
context_sandbox_file = '../context_sandbox.json'
kbclient = KorbitClient(properties_sandbox_file, context_sandbox_file)
print(kbclient.getUserInfo())
# Buy
# print( kbclient.buy(price=300000, coin_amount=1) )
# Sell
# print( kbclient.sell(price=300000, coin_amount=1) )
print( kbclient.getOpenOrders() )
# Wallet Test
wallet = kbclient.getWallet()
balance = wallet['balance']
pending_orders = wallet['pendingOrders']
available = wallet['available']
print(balance)
print(pending_orders)
print(available) | test/korbit/client/korbit_client_tests.py | 694 | 매수 Buy print( kbclient.buy(price=300000, coin_amount=1) ) 매도 Sell print( kbclient.sell(price=300000, coin_amount=1) ) Wallet Test | 130 | en | 0.472198 |
import asyncio
import socket
from stor.server.server import StorServer
from stor.types.peer_info import PeerInfo
def start_reconnect_task(server: StorServer, peer_info_arg: PeerInfo, log, auth: bool):
"""
Start a background task that checks connection and reconnects periodically to a peer.
"""
# If peer_info_arg is already an address, use it, otherwise resolve it here.
if peer_info_arg.is_valid():
peer_info = peer_info_arg
else:
peer_info = PeerInfo(socket.gethostbyname(peer_info_arg.host), peer_info_arg.port)
async def connection_check():
while True:
peer_retry = True
for _, connection in server.all_connections.items():
if connection.get_peer_info() == peer_info or connection.get_peer_info() == peer_info_arg:
peer_retry = False
if peer_retry:
log.info(f"Reconnecting to peer {peer_info}")
try:
await server.start_client(peer_info, None, auth=auth)
except Exception as e:
log.info(f"Failed to connect to {peer_info} {e}")
await asyncio.sleep(3)
return asyncio.create_task(connection_check())
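# Hedged usage sketch (the server, logger and peer address are assumptions):
#   peer = PeerInfo("127.0.0.1", 8444)
#   task = start_reconnect_task(server, peer, log, auth=False)
#   ...
#   task.cancel()  # stop the background checker when shutting down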
| stor/server/reconnect_task.py | 1,235 | Start a background task that checks connection and reconnects periodically to a peer.
If peer_info_arg is already an address, use it, otherwise resolve it here. | 162 | en | 0.854277 |
"""
Generates code metrics for a given project. Whereas code_metrics.py operates
on a single stream of source code input, this program walks a project tree and
generates reports based on all of the source code found.
TODO: project config should be supplied as input, not imported
"""
import os, shutil
import code_metrics, metrics_formatter, stats, config
def find_available_filename(filename):
    if not os.path.exists(filename):
        return filename
    base = filename
    attempts = 1
    filename = base + str(attempts)
    while os.path.exists(filename):
        attempts += 1
        if attempts > 999:
            print('error: could not find available filename', filename)
            exit()
        # Rebuild from the original base so multi-digit suffixes don't stack.
        filename = base + str(attempts)
    return filename
def is_code_file(path):
filename, file_ext = os.path.splitext(path)
return file_ext in config.code_filename_extensions
def find_files(root_path, filter):
result = []
for root, dirs, files in os.walk(root_path):
for file_name in files:
if not filter(file_name):
continue
path = os.path.join(root, file_name)
result.append(path)
return result
def add_project_totals(project_report, file_reports):
project_report['file_count'] = len(file_reports)
project_report['function_count'] = 0
project_report['line_count'] = 0
project_report['lines_ending_in_whitespace_count'] = 0
project_report['line_length_distribution'] = {}
project_report['line_indent_distribution'] = {}
for filename, file_report in file_reports.items():
if file_report == {}:
continue
project_report['function_count'] += len(file_report['functions'])
project_report['line_count'] += file_report['line_count']
# TODO: figure out how to aggregate project stats like this
#project_report['lines_ending_in_whitespace_count'] += file_report['lines_ending_in_whitespace_count']
#stats.merge_into_distribution(project_report['line_length_distribution'], file_report['line_length_distribution'])
#stats.merge_into_distribution(project_report['line_indent_distribution'], file_report['line_indent_distribution'])
def report(project_root):
file_reports = {}
for path in find_files(project_root, is_code_file):
target_lang = code_metrics.file_ext_lang(path)
with open(path, 'r') as input_file:
try:
file_reports[path] = code_metrics.report(path, input_file.read(), target_lang)
except IOError:
continue
project_report = {
'source_path': project_root,
'files': file_reports
}
add_project_totals(project_report, file_reports)
return project_report
def write_report_file(report, path, target_dir):
if report == {}:
return
filename = metrics_formatter.convert_path_to_report_filename(path)
out_file_path = target_dir + '/' + filename
out_file_path = find_available_filename(out_file_path)
with open(out_file_path, 'w') as output_file:
metrics_formatter.write_report(report, 'html', output_file)
def write_report(project_report, target_dir):
if os.path.exists(target_dir):
print('error: cannot create output dir', target_dir)
exit()
os.mkdir(target_dir)
with open(target_dir + '/' + 'index.html', 'w') as output_file:
metrics_formatter.write_project_index(project_report, 'html', output_file)
for path, report in project_report['files'].items():
write_report_file(report, path, target_dir)
if __name__ == '__main__':
# TODO: make output format configurable
output_dir = config.project_report_output_dir # TODO: also accept command line flag
output_dir = find_available_filename(output_dir)
write_report(report(config.project_root), output_dir)
shutil.copy('Chart.min.js', output_dir)
| project_metrics.py | 3,550 | Generates code metrics for a given project. Whereas code_metrics.py operates
on a single stream of source code input, this program walks a project tree and
generates reports based on all of the source code found.
TODO: project config should be supplied as input, not imported
TODO: figure out how to aggregate project stats like thisproject_report['lines_ending_in_whitespace_count'] += file_report['lines_ending_in_whitespace_count']stats.merge_into_distribution(project_report['line_length_distribution'], file_report['line_length_distribution'])stats.merge_into_distribution(project_report['line_indent_distribution'], file_report['line_indent_distribution']) TODO: make output format configurable TODO: also accept command line flag | 739 | en | 0.591556 |
"""
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
import sqlalchemy as sqla
from .base import ModelDict
from .models import (
Model,
Switch,
DISABLED, SELECTIVE, GLOBAL, INHERIT,
INCLUDE, EXCLUDE,
)
from .proxy import SwitchProxy
from .settings import settings, Settings
from .store import SQLAlchemyStore
log = logging.getLogger(__name__)
# These are (mostly) read-only module variables since we want it shared among
# any and all threads. The only exception to read-only is when they are
# populated on Switchboard startup (i.e., operator.register()).
registry = {}
registry_by_namespace = {}
def nested_config(config):
cfg = {}
token = 'switchboard.'
for k, v in config.iteritems():
if k.startswith(token):
cfg[k.replace(token, '')] = v
return cfg
def configure(config={}, nested=False, cache=None):
"""Useful for when you need to control Switchboard's setup."""
if nested:
config = nested_config(config)
# Re-read settings to make sure we have everything.
Settings.init(cache=cache, **config)
operator.cache = cache
# Establish the connection to the database.
timeout = getattr(settings, 'SWITCHBOARD_TIMEOUT', 10)
dburl = settings.SWITCHBOARD_DBURL
if dburl:
engine = sqla.create_engine(
dburl, connect_args={'connect_timeout': timeout})
Switch.store = SQLAlchemyStore(engine, settings.SWITCHBOARD_DBTABLE)
# Register the builtins.
__import__('switchboard.builtins')
class SwitchManager(ModelDict):
DISABLED = DISABLED
SELECTIVE = SELECTIVE
GLOBAL = GLOBAL
INHERIT = INHERIT
INCLUDE = INCLUDE
EXCLUDE = EXCLUDE
def __init__(self, *args, **kwargs):
# Inject args and kwargs that are known quantities; the SwitchManager
# will always deal with the Switch model and so on.
new_args = [Switch]
new_args.extend(args)
kwargs['key'] = 'key'
kwargs['value'] = 'value'
self.result_cache = None
self.context = {}
super(SwitchManager, self).__init__(*new_args, **kwargs)
def __unicode__(self):
return "<%s: %s (%s)>" % (self.__class__.__name__,
getattr(self, 'model', ''),
registry.values())
def __getitem__(self, key):
"""
        Returns a SwitchProxy rather than a Switch. This allows us to
        easily extend a switch's methods and automatically include our
        manager instance.
"""
return SwitchProxy(self, super(SwitchManager, self).__getitem__(key))
def with_result_cache(func):
"""
Decorator specifically for is_active. If self.result_cache is set to a {}
the is_active results will be cached for each set of params.
"""
def inner(self, *args, **kwargs):
dic = self.result_cache
cache_key = None
if dic is not None:
cache_key = (args, tuple(kwargs.items()))
try:
result = dic.get(cache_key)
except TypeError as e: # not hashable
log.debug('Switchboard result cache not active for this "%s" check due to: %s within args: %s',
args[0], e, repr(cache_key)[:200])
cache_key = None
else:
if result is not None:
return result
result = func(self, *args, **kwargs)
if cache_key is not None:
dic[cache_key] = result
return result
return inner
@with_result_cache
def is_active(self, key, *instances, **kwargs):
"""
Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP
"""
try:
default = kwargs.pop('default', False)
# Check all parents for a disabled state
parts = key.split(':')
if len(parts) > 1:
child_kwargs = kwargs.copy()
child_kwargs['default'] = None
result = self.is_active(':'.join(parts[:-1]), *instances,
**child_kwargs)
if result is False:
return result
elif result is True:
default = result
try:
switch = self[key]
except KeyError:
# switch is not defined, defer to parent
return default
if switch.status == GLOBAL:
return True
elif switch.status == DISABLED:
return False
elif switch.status == INHERIT:
return default
conditions = switch.value
# If no conditions are set, we inherit from parents
if not conditions:
return default
instances = list(instances) if instances else []
instances.extend(self.context.values())
# check each switch to see if it can execute
return_value = False
for namespace, condition in conditions.iteritems():
condition_set = registry_by_namespace.get(namespace)
if not condition_set:
continue
result = condition_set.has_active_condition(condition,
instances)
if result is False:
return False
elif result is True:
return_value = True
except:
log.exception('Error checking if switch "%s" is active', key)
return_value = False
# there were no matching conditions, so it must not be enabled
return return_value
def register(self, condition_set):
"""
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry[condition_set.get_id()] = condition_set
registry_by_namespace[condition_set.get_namespace()] = condition_set
def unregister(self, condition_set):
"""
Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP
"""
if callable(condition_set):
condition_set = condition_set()
registry.pop(condition_set.get_id(), None)
registry_by_namespace.pop(condition_set.get_namespace(), None)
def get_condition_set_by_id(self, switch_id):
"""
Given the identifier of a condition set (described in
ConditionSet.get_id()), returns the registered instance.
"""
return registry[switch_id]
def get_condition_sets(self):
"""
Returns a generator yielding all currently registered
ConditionSet instances.
"""
return registry.itervalues()
def get_all_conditions(self):
"""
Returns a generator which yields groups of lists of conditions.
>>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
>>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
"""
cs = self.get_condition_sets()
for condition_set in sorted(cs, key=lambda x: x.get_group_label()):
group = unicode(condition_set.get_group_label())
for field in condition_set.fields.itervalues():
yield condition_set.get_id(), group, field
def as_request(self, user=None, ip_address=None):
from .helpers import MockRequest
return MockRequest(user, ip_address)
auto_create = getattr(settings, 'SWITCHBOARD_AUTO_CREATE', True)
operator = SwitchManager(auto_create=auto_create)
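# Hedged usage sketch of the result cache handled by with_result_cache
# (the switch key is illustrative):
#   operator.result_cache = {}        # enable per-request memoisation
#   operator.is_active('my_feature')  # computed and stored in the cache
#   operator.is_active('my_feature')  # served straight from result_cache
#   operator.result_cache = None      # disable caching again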
| switchboard/manager.py | 8,168 | Returns a SwitchProxy, rather than a Switch. It allows us to
easily extend the Switches method and automatically include our
manager instance.
Useful for when you need to control Switchboard's setup.
Returns a generator which yields groups of lists of conditions.
>>> for set_id, label, field in operator.get_all_conditions(): #doctest: +SKIP
>>> print "%(label)s: %(field)s" % (label, field.label) #doctest: +SKIP
Given the identifier of a condition set (described in
ConditionSet.get_id()), returns the registered instance.
Returns a generator yielding all currently registered
ConditionSet instances.
Returns ``True`` if any of ``instances`` match an active switch.
Otherwise returns ``False``.
>>> operator.is_active('my_feature', request) #doctest: +SKIP
Registers a condition set with the manager.
>>> condition_set = MyConditionSet() #doctest: +SKIP
>>> operator.register(condition_set) #doctest: +SKIP
Unregisters a condition set with the manager.
>>> operator.unregister(condition_set) #doctest: +SKIP
Decorator specifically for is_active. If self.result_cache is set to a {}
the is_active results will be cached for each set of params.
switchboard.manager
~~~~~~~~~~~~~~~~
:copyright: (c) 2015 Kyle Adams.
:license: Apache License 2.0, see LICENSE for more details.
These are (mostly) read-only module variables since we want it shared among any and all threads. The only exception to read-only is when they are populated on Switchboard startup (i.e., operator.register()). Re-read settings to make sure we have everything. Establish the connection to the database. Register the builtins. Inject args and kwargs that are known quantities; the SwitchManager will always deal with the Switch model and so on. not hashable Check all parents for a disabled state switch is not defined, defer to parent If no conditions are set, we inherit from parents check each switch to see if it can execute there were no matching conditions, so it must not be enabled | 1,973 | en | 0.824767 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# Created By Rodrigo Wilkens
# Last update 27/March/2022
# version ='1.0'
# ---------------------------------------------------------------------------
def join_institution(institution):
if len(institution)==0:
return None
if len(institution)==1:
return institution[0]
res = ", ".join(institution[:-1])
res += " and " + institution[-1]
return res
def get_user(or_id,client_acl, force_institution=False):
c = None
try:
c = client_acl.get_profile(or_id)
except:
print("\nERROR: or_id not found", or_id)
return {"first_name":or_id, "last_name":or_id,"name":or_id, "username":or_id, "emails":or_id, "institution":"NA"}, True
try:
if or_id[0] == "~":
emails = client_acl.search_profiles(ids=[or_id])
assert len(emails) >= 1
else:
emails = client_acl.search_profiles(ids=[c.id])
assert len(emails) >= 1
# emails = [or_id]
except:
print("\nERROR: or_id not associated to an email", or_id)
return {"first_name":or_id, "last_name":or_id,"name":or_id, "username":or_id, "emails":or_id, "institution":"NA"}, True
# try:
if True:
c = c.content
namePrefered = None
for name in c["names"]:
if namePrefered==None or ('preferred' in name and name['preferred']):
namePrefered = name
name = " ".join([namePrefered['first'] if type(namePrefered['first'])==str else '',
namePrefered['middle'] if namePrefered['middle']!=None else '',
                         namePrefered['last'] if namePrefered['last']!=None else '' ]).replace("  ", " ")
first_name = namePrefered['first'].strip() if type(namePrefered['first'])==str else ''
middle_name = namePrefered['middle'].strip() if namePrefered['middle']!=None else ''
last_name = namePrefered['last'].strip() if namePrefered['last']!=None else ''
username = namePrefered['username'].strip()
if len(first_name)>2:
first_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in first_name.split(" ")])
if len(middle_name)>2:
middle_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in middle_name.split(" ")])
if len(last_name)>2:
last_name = " ".join([n[0].upper() + n[1:].lower() if (n==n.upper() or n==n.lower()) else n for n in last_name.split(" ")])
if 'preferredEmail' in emails[0].content:
emails = emails[0].content['preferredEmail']
else:
emails = emails[0].content['emails'][0]
emails = emails.replace("_","\\_")
institution = []
if 'history' in c:
for h in c['history']:
if 'end' not in h or h['end'] == None:
institution.append(h['institution']["name"])
ret = {"first_name":first_name, "last_name":last_name,"name":name, "username":username, "emails":emails}
institution = join_institution(institution)
if institution:
ret["institution"] = institution
else:
if force_institution:
ret["institution"] = "NA"
if len(middle_name)>0:
ret["middle_name"]=middle_name
if "gscholar" in c:
ret["google_scholar_id"] = c["gscholar"]
if 'dblp' in c:
ret['dblp_id'] = c['dblp']
if 'homepage' in c:
ret['homepage'] = c['homepage']
if 'orcid'in c:
ret['orcid'] = c['orcid']
if 'semanticScholar' in c:
ret["semantic_scholar_id"] = c['semanticScholar']
return ret, False
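# Hedged usage sketch of get_user (the credentials and profile id below are
# placeholders, not real accounts):
#   import openreview
#   client_acl = openreview.Client(baseurl='https://api.openreview.net',
#                                  username='user@example.com', password='...')
#   info, failed = get_user('~Some_Author1', client_acl, force_institution=True)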
| openreview/util.py | 3,883 | !/usr/bin/env python -*- coding: utf-8 -*----------------------------------------------------------------------------- Created By Rodrigo Wilkens Last update 27/March/2022 version ='1.0' --------------------------------------------------------------------------- emails = [or_id] try: | 284 | en | 0.358508 |
from pdf_reports import ReportWriter
# DEFINE A WRITER WITH DEFAULT TEMPLATE AND VALUES
report_writer = ReportWriter(
default_stylesheets=["style.css"],
default_template="template.pug",
title="My default title",
version="0.1.2"
)
# THEN LATER IN YOUR CODE:
html = report_writer.pug_to_html(my_name="Zulko", my_organization="EGF")
report_writer.write_report(html, "example_reportwriter.pdf") | examples/example_reportwriter/example_reportwriter.py | 408 | DEFINE A WRITER WITH DEFAULT TEMPLATE AND VALUES THEN LATER IN YOUR CODE: | 73 | en | 0.87841 |
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = [
'Foo',
]
@pulumi.input_type
class Foo:
def __init__(__self__, *,
a: Optional[bool] = None):
if a is not None:
pulumi.set(__self__, "a", a)
@property
@pulumi.getter
def a(self) -> Optional[bool]:
return pulumi.get(self, "a")
@a.setter
def a(self, value: Optional[bool]):
pulumi.set(self, "a", value)
| pkg/codegen/testing/test/testdata/plain-schema-gh6957/python/pulumi_xyz/_inputs.py | 693 | coding=utf-8 *** WARNING: this file was generated by test. *** *** Do not edit by hand unless you're certain you know what you are doing! *** | 141 | en | 0.974495 |
import os
import os.path as osp
import numpy as np
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
#
# Training options
#
__C.TRAIN = edict()
# Online hard negative mining
__C.TRAIN.HARD_POSITIVE_MINING = True
__C.TRAIN.HARD_NEGATIVE_MINING = True
__C.TRAIN.BG_THRESH_LOW = 0.0
__C.TRAIN.ORIG_SIZE = False
# Initial learning rate
__C.TRAIN.LEARNING_RATE = 0.001
# Momentum
__C.TRAIN.MOMENTUM = 0.9
# Weight decay, for regularization
__C.TRAIN.WEIGHT_DECAY = 0.0005
# Factor for reducing the learning rate
__C.TRAIN.GAMMA = 0.1
# Step size for reducing the learning rate, currently only support one step
__C.TRAIN.STEPSIZE = [30000]
# Iteration intervals for showing the loss during training, on command line interface
__C.TRAIN.DISPLAY = 50
# Iteration intervals for save check point
__C.TRAIN.CHECKPOINT = 500
# Whether to double the learning rate for bias
__C.TRAIN.DOUBLE_BIAS = True
# Whether to initialize the weights with truncated normal distribution
__C.TRAIN.TRUNCATED = False
# Whether to have weight decay on bias as well
__C.TRAIN.BIAS_DECAY = False
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Whether to use aspect-ratio grouping of training images, introduced merely for saving
# GPU memory
__C.TRAIN.ASPECT_GROUPING = False
# The number of snapshots kept, older ones are deleted to save space
__C.TRAIN.SNAPSHOT_KEPT = 3
# The time interval for saving tensorflow summaries
__C.TRAIN.SUMMARY_INTERVAL = 180
# Scale to use during training (can list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TRAIN.SCALES = (600,800)
# Max pixel size of the longest side of a scaled input image
__C.TRAIN.MAX_SIZE = 1200
# Trim size for input images to create minibatch
__C.TRAIN.TRIM_HEIGHT = 600
__C.TRAIN.TRIM_WIDTH = 600
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 1
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 256
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.0
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
# Overlap required between a ROI and ground-truth box in order for that ROI to
# be used as a bounding-box regression training example
__C.TRAIN.BBOX_THRESH = 0.5
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_ITERS = 5000
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'res101_faster_rcnn'
# __C.TRAIN.SNAPSHOT_INFIX = ''
# Use a prefetch thread in roi_data_layer.layer
# So far I haven't found this useful; likely more engineering work is required
# __C.TRAIN.USE_PREFETCH = False
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# Train using these proposals
__C.TRAIN.PROPOSAL_METHOD = 'gt'
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.ANCHOR_POSITIVE_OVERLAP = 0.5
# IOU < thresh: negative example
__C.TRAIN.ANCHOR_NEGATIVE_OVERLAP = 0.3
# If an anchor statisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.25
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 384
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 4
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Whether to use all ground truth bounding boxes for training,
# For COCO, setting USE_ALL_GT to False will exclude boxes that are flagged as ''iscrowd''
__C.TRAIN.USE_ALL_GT = True
# Whether to tune the batch normalization parameters during training
__C.TRAIN.BN_TRAIN = False
#
# Testing options
#
__C.TEST = edict()
# Scale to use during testing (can NOT list multiple scales)
# The scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (1200,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1600
__C.TEST.ORIG_SIZE = False
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Experimental: treat the (K+1) units in the cls_score layer as linear
# predictors (trained, eg, with one-vs-rest SVMs).
__C.TEST.SVM = False
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
# Propose boxes
__C.TEST.HAS_RPN = False
# Test using these proposals
__C.TEST.PROPOSAL_METHOD = 'gt'
## NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.3
## Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
## Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
# Testing mode, default to be 'nms', 'top' is slower but better
# See report for details
__C.TEST.MODE = 'nms'
# Only useful when TEST.MODE is 'top', specifies the number of top proposals to select
__C.TEST.RPN_TOP_N = 5000
#
# ResNet options
#
__C.RESNET = edict()
# Option to set if max-pooling is appended after crop_and_resize.
# if true, the region will be resized to a square of 2xPOOLING_SIZE,
# then 2x2 max-pooling is applied; otherwise the region will be directly
# resized to a square of POOLING_SIZE
__C.RESNET.MAX_POOL = False
# Number of fixed blocks during training, by default the first of all 4 blocks is fixed
# Range: 0 (none) to 3 (all)
__C.RESNET.FIXED_BLOCKS = 1
#
# MobileNet options
#
__C.MOBILENET = edict()
# Whether to regularize the depth-wise filters during training
__C.MOBILENET.REGU_DEPTH = False
# Number of fixed layers during training, by default the first of all 14 layers is fixed
# Range: 0 (none) to 12 (all)
__C.MOBILENET.FIXED_LAYERS = 5
# Weight decay for the mobilenet weights
__C.MOBILENET.WEIGHT_DECAY = 0.00004
# Depth multiplier
__C.MOBILENET.DEPTH_MULTIPLIER = 1.
#
# MISC
#
# The mapping from image coordinates to feature map coordinates might cause
# some boxes that are distinct in image space to become identical in feature
# coordinates. If DEDUP_BOXES > 0, then DEDUP_BOXES is used as the scale factor
# for identifying duplicate boxes.
# 1/16 is correct for {Alex,Caffe}Net, VGG_CNN_M_1024, and VGG16
__C.DEDUP_BOXES = 1. / 16.
# Pixel mean values (BGR order) as a (1, 1, 3) array
# We use the same pixel mean for all networks even though it's not exactly what
# they were trained with
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Data directory
__C.DATA_DIR = osp.abspath(osp.join(__C.ROOT_DIR, 'data'))
# Name (or path to) the matlab executable
__C.MATLAB = 'matlab'
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Use GPU implementation of non-maximum suppression
__C.USE_GPU_NMS = True
# Default GPU device id
__C.GPU_ID = 0
__C.POOLING_MODE = 'crop'
# Size of the pooled region after RoI pooling
__C.POOLING_SIZE = 7
# Maximum number of ground-truth RoIs per image during training
__C.MAX_NUM_GT_BOXES = 20
# Anchor scales for RPN
__C.ANCHOR_SCALES = [8, 16, 32]
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = [0.5, 1, 2]
# Feature stride for RPN
__C.FEAT_STRIDE = [16, ]
__C.CUDA = False
__C.CROP_RESIZE_WITH_MAX_POOL = True
def get_output_dir(imdb_name, net_name=None, output_dir='output'):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(cfg.ROOT_DIR, output_dir, cfg.EXP_DIR, imdb_name))
if net_name is not None:
outdir = osp.join(outdir, net_name)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def get_output_tb_dir(imdb, weights_filename):
"""Return the directory where tensorflow summaries are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
outdir = osp.abspath(osp.join(__C.ROOT_DIR, 'tensorboard', __C.EXP_DIR, imdb.name))
if weights_filename is None:
weights_filename = 'default'
outdir = osp.join(outdir, weights_filename)
if not os.path.exists(outdir):
os.makedirs(outdir)
return outdir
def _merge_a_into_b(a, b):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
if type(a) is not edict:
return
for k, v in a.items():
# a must specify keys that are in b
if k not in b:
raise KeyError('{} is not a valid config key'.format(k))
# the types must match, too
old_type = type(b[k])
if old_type is not type(v):
if isinstance(b[k], np.ndarray):
v = np.array(v, dtype=b[k].dtype)
else:
raise ValueError(('Type mismatch ({} vs. {}) '
'for config key: {}').format(type(b[k]),
type(v), k))
# recursively merge dicts
if type(v) is edict:
try:
_merge_a_into_b(a[k], b[k])
except:
print(('Error under config key: {}'.format(k)))
raise
else:
b[k] = v
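# Example (illustrative, not executed): merging an override dict into the
# defaults above only touches the keys it names, after checking that each key
# exists in __C and that the value types match:
#
#     _merge_a_into_b(edict({'TEST': edict({'NMS': 0.5})}), __C)
#     assert __C.TEST.NMS == 0.5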
def cfg_from_file(filename):
"""Load a config file and merge it into the default options."""
import yaml
    with open(filename, 'r') as f:
        yaml_cfg = edict(yaml.safe_load(f))  # safe_load: safe and warning-free on PyYAML >= 5
_merge_a_into_b(yaml_cfg, __C)
def cfg_from_list(cfg_list):
"""Set config keys via list (e.g., from command line)."""
from ast import literal_eval
assert len(cfg_list) % 2 == 0
for k, v in zip(cfg_list[0::2], cfg_list[1::2]):
key_list = k.split('.')
d = __C
for subkey in key_list[:-1]:
assert subkey in d
d = d[subkey]
subkey = key_list[-1]
assert subkey in d
        try:
            value = literal_eval(v)
        except (ValueError, SyntaxError):
            # handle the case when v is a string literal
            value = v
assert type(value) == type(d[subkey]), \
'type {} does not match original type {}'.format(
type(value), type(d[subkey]))
d[subkey] = value
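# --- Illustrative usage (a minimal sketch, not part of the original file). ---
# Shows how the override helpers above are typically driven from a training
# script's command line; the key/value pairs below are arbitrary examples.
if __name__ == '__main__':
    cfg_from_list(['TEST.NMS', '0.5', 'USE_GPU_NMS', 'False'])
    print(__C.TEST.NMS, __C.USE_GPU_NMS)  # -> 0.5 False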
| model/utils/config.py | 12,253 | … | 6,463 | en | 0.8614 |
# Code in this file is copied and adapted from
# https://github.com/berkeleydeeprlcourse
"""
Some simple logging functionality, inspired by rllab's logging.
Assumes that each diagnostic gets logged each iteration
Call logz.configure_output_dir() to start logging to a
tab-separated-values file (some_folder_name/log.txt)
"""
import json
import os.path as osp, time, atexit, os
color2num = dict(
gray=30,
red=31,
green=32,
yellow=33,
blue=34,
magenta=35,
cyan=36,
white=37,
crimson=38
)
def colorize(string, color, bold=False, highlight=False):
attr = []
num = color2num[color]
if highlight: num += 10
attr.append(str(num))
if bold: attr.append('1')
return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)
class G(object):
output_dir = None
output_file = None
first_row = True
log_headers = []
log_current_row = {}
def configure_output_dir(d=None):
"""
Set output directory to d, or to /tmp/somerandomnumber if d is None
"""
G.first_row = True
G.log_headers = []
G.log_current_row = {}
G.output_dir = d or "/tmp/experiments/%i"%int(time.time())
if not osp.exists(G.output_dir):
os.makedirs(G.output_dir)
G.output_file = open(osp.join(G.output_dir, "log.txt"), 'w')
atexit.register(G.output_file.close)
print(colorize("Logging data to %s"%G.output_file.name, 'green', bold=True))
def log_tabular(key, val):
"""
Log a value of some diagnostic
Call this once for each diagnostic quantity, each iteration
"""
if G.first_row:
G.log_headers.append(key)
else:
assert key in G.log_headers, "Trying to introduce a new key %s that you didn't include in the first iteration"%key
assert key not in G.log_current_row, "You already set %s this iteration. Maybe you forgot to call dump_tabular()"%key
G.log_current_row[key] = val
def save_params(params):
with open(osp.join(G.output_dir, "params.json"), 'w') as out:
out.write(json.dumps(params, separators=(',\n','\t:\t'), sort_keys=True))
def dump_tabular():
"""
Write all of the diagnostics from the current iteration
"""
vals = []
key_lens = [len(key) for key in G.log_headers]
max_key_len = max(15,max(key_lens))
keystr = '%'+'%d'%max_key_len
fmt = "| " + keystr + "s | %15s |"
n_slashes = 22 + max_key_len
print("-"*n_slashes)
for key in G.log_headers:
val = G.log_current_row.get(key, "")
if hasattr(val, "__float__"): valstr = "%8.3g"%val
else: valstr = val
print(fmt%(key, valstr))
vals.append(val)
print("-"*n_slashes)
if G.output_file is not None:
if G.first_row:
G.output_file.write("\t".join(G.log_headers))
G.output_file.write("\n")
G.output_file.write("\t".join(map(str,vals)))
G.output_file.write("\n")
G.output_file.flush()
G.log_current_row.clear()
    G.first_row = False
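# --- Illustrative usage (a minimal sketch, not part of the original file). ---
# Demonstrates the expected call pattern: configure once, then log the same
# diagnostics every iteration and dump them. The output directory is arbitrary.
if __name__ == '__main__':
    configure_output_dir('/tmp/logz_demo')
    save_params({'lr': 1e-3, 'seed': 0})
    for itr in range(3):
        log_tabular('Iteration', itr)
        log_tabular('AverageReturn', 100.0 + itr)
        dump_tabular()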
| ADMCode/snuz/ars/logz.py | 3,011 | … | 302 | en | 0.858333 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'wtq'
LOG_PATH = "monitor_logging.log"
REDIS_HOST = "127.0.0.1"
REDIS_PORT = 6379
# Interval and pause duration for metric collection
MONITOR_INTERVAL = 1
MONITOR_PEROID = 3
# Network interface whose read/write throughput is monitored
NET_NAME = 'eth0'
# Names of the machines in the system, used to compute the system's average load
SYSTEM_MACHINE_NAME = ["storage1", "storage2"]
# Machine used to count client connections, usually the master
CLIENT_LINK_MACNHIE = ["storage1"]
DISK_ALL_SPACE = 100
CPU_KERNEL_NUMS = 32
MEM_ALL_SPACE = 100
FASTDFSPORT = '8000'
REDIS_SYSTEM_KEY = 'system'
FASTDFS_PEROID = 3
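# --- Illustrative usage (a minimal sketch, not part of the original file). ---
# A collector would typically read these constants when opening its Redis
# connection; redis-py is assumed to be installed.
if __name__ == '__main__':
    import redis
    r = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)
    r.lpush(REDIS_SYSTEM_KEY, 'demo-sample')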
| config/config.py | 631 | … | 115 | zh | 0.930899 |
#!/usr/bin/env python
# (works in both Python 2 and Python 3)
# Offline HTML Indexer v1.32 (c) 2013-15,2020 Silas S. Brown.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is a Python program for creating large indices of
# HTML text which can be queried using simple Javascript
# that works on many mobile phone browsers without needing
# an Internet connection or a Web server. This is useful if
# you want to load a dictionary or other reference onto your
# phone (or computer) for use when connectivity is not
# available.
# The input HTML should be interspersed with anchors like
# this: <a name="xyz"></a> where xyz is the index heading
# for the following text. There should be one such anchor
# before each entry and an extra anchor at the end of the
# text; everything before the first anchor is counted as the
# "header" and everything after the last as the "footer". If
# these are empty, a default "mobile friendly" HTML header
# and footer specifying UTF-8 encoding will be
# added. Anchors may be linked from other entries; these
# links are changed as necessary.
# Opening any of the resulting HTML files should display a
# textbox that lets you type the first few letters of the
# word you wish to look up; the browser will then jump to
# whatever heading is alphabetically nearest to the typed-in
# text.
# Configuration
# -------------
infile = None # None = standard input, or set a "filename"
outdir = "." # current directory by default
alphabet = "abcdefghijklmnopqrstuvwxyz" # set to None for all characters and case-sensitive
ignore_text_in_parentheses = True # or False, for parentheses in index headings
more_sensible_punctuation_sort_order = True
remove_utf8_diacritics = True # or False, for removing diacritics in index headings (not in main text);
# assumes UTF-8. (Letters with diacritics will be treated as though they did not have any.)
max_filesize = 64*1024 # of each HTML file
# (max_filesize can be exceeded by 1 very large entry)
# Where to find history:
# on GitHub at https://github.com/ssb22/indexer
# and on GitLab at https://gitlab.com/ssb22/indexer
# and on BitBucket https://bitbucket.org/ssb22/indexer
# and at https://gitlab.developers.cam.ac.uk/ssb22/indexer
# and in China: https://gitee.com/ssb22/indexer
# ---------------------------------------------------------------
import re,sys,os,time
if type("")==type(u""): izip = zip # Python 3
else: from itertools import izip # Python 2
if infile:
sys.stderr.write("Reading from "+infile+"... ")
infile = open(infile)
else:
sys.stderr.write("Reading from standard input... ")
infile = sys.stdin
fragments = re.split(r'<a name="([^"]*)"></a>',infile.read())
# odd indices should be the tag names, even should be the HTML in between
assert len(fragments)>3, "Couldn't find 2 or more hash tags (were they formatted correctly?)"
assert len(fragments)%2, "re.split not returning groups??"
header,footer = fragments[0],fragments[-1]
if not header.strip(): header="""<html><head><meta name="mobileoptimized" content="0"><meta name="viewport" content="width=device-width"><meta http-equiv="Content-Type" content="text/html; charset=utf-8"></head><body>"""
if not footer.strip(): footer = "</body></html>"
fragments = fragments[1:-1]
sys.stderr.write("%d entries\n" % len(fragments))
def alphaOnly(x):
if ignore_text_in_parentheses: x=re.sub(r"\([^)]*\)[;, ]*","",x)
if alphabet: x=''.join(c for c in x.lower() if c in alphabet)
return re.sub(r"^[@,;]*","",x) # see ohi_latex.py
if more_sensible_punctuation_sort_order:
_ao1 = alphaOnly
alphaOnly = lambda x: _ao1(re.sub('([;,]);+',r'\1',x.replace('-',' ').replace(',','~COM~').replace(';',',').replace('~COM~',';').replace(' ',';'))) # gives ; < , == space (useful if ; is used to separate definitions and , is used before extra words to be added at the start; better set space EQUAL to comma, not higher, or will end up in wrong place if user inputs something forgetting the comma)
if alphabet:
for c in '@,;':
if not c in alphabet: alphabet += c
if remove_utf8_diacritics:
_ao = alphaOnly ; import unicodedata
def S(s):
if type(u"")==type(""): return s # Python 3
else: return s.encode('utf-8') # Python 2
def U(s):
if type(s)==type(u""): return s
return s.decode('utf-8')
alphaOnly = lambda x: _ao(S(u''.join((c for c in unicodedata.normalize('NFD',U(x)) if not unicodedata.category(c).startswith('M')))))
fragments = list(zip(map(alphaOnly,fragments[::2]), fragments[1::2]))
fragments.sort()
class ChangedLetters:
def __init__(self): self.lastText = ""
def __call__(self,text):
"Find shortest prefix of text that differentiates it from previous item (empty string if no difference)"
assert text >= self.lastText, "input must have been properly sorted"
i = 0
for c1,c2 in izip(self.lastText+chr(0),text):
i += 1
if not c1==c2:
self.lastText = text
return text[:i]
assert text==self.lastText, repr(text)+"!="+repr(self.lastText)
return "" # no difference from lastText
changedLetters = ChangedLetters() ; f2 = []
fragments.reverse()
sys.stderr.write("Minimizing prefixes... ")
while fragments:
x,y = fragments.pop()
x = changedLetters(x)
if f2 and not x: f2[-1] = (f2[-1][0], f2[-1][1]+y) # combine effectively-identical ones
else: f2.append((x,y))
sys.stderr.write("done\n")
fragments = f2
def tag(n):
if n: return '<a name="%s"></a>' % n
else: return ''
def old_javascript_array(array):
"in case the browser doesn't support JSON, and to save some separator bytes"
array = list(array) # in case it was an iterator
sepChar = ord(' ')
chars_used = set(''.join(array))
assert '"' not in chars_used and '\\' not in chars_used and '<' not in chars_used and '&' not in chars_used, "Can't use special chars (unless you change this code to escape them)"
while True:
if chr(sepChar) not in chars_used and not chr(sepChar) in r'\"<&': break
sepChar += 1
assert sepChar < 127, "can't find a suitable separator char (hard-code the array instead?)"
return '"'+chr(sepChar).join(array)+'".split("'+chr(sepChar)+'")'
js_binchop = """function(a,i) {
function inner(a,i,lo,hi) {
var mid=lo+Math.floor((hi-lo)/2);
if(mid==lo || a[mid]==i) return a[mid];
if(a[mid] > i) return inner(a,i,lo,mid);
return inner(a,i,mid,hi);
} return inner(a,i,0,a.length);
}"""
js_binchop_dx = js_binchop.replace("return a[mid]","return mid")
def js_hashjump(hashtags): return """<script><!--
var h=location.hash; if(h.length > 1) { if(h!='#_h' && h!='#_f') { var n="#"+%s(%s,h.slice(1)); if (h!=n) location.hash=n; } } else location.href="index.html"
//--></script>""" % (js_binchop,old_javascript_array(hashtags)) # (the h!=n test is needed to avoid loop on some browsers e.g. PocketIE7)
# #_h and #_f are special hashes for header and footer, used for "Next page" and "Previous page" links
# (HTML5 defaults type to text/javascript, as do all pre-HTML5 browsers including NN2's 'script language="javascript"' thing, so we might as well save a few bytes)
__lastStartEnd = None
def htmlDoc(start,end,docNo):
"Returns an HTML document containing fragments[start:end]. docNo is used to generate previous/next page links as appropriate. Caches its return value in case called again with same start,end (in which case docNo is ignored on second call)."
global __lastStartEnd,__lastDoc
if not (start,end) == __lastStartEnd:
__lastStartEnd = (start,end)
__lastDoc = header+js_hashjump(x for x,y in fragments[start:end] if x)
if start:
assert docNo, "Document 0 should start at 0"
__lastDoc += '<p><a name="_h" href="%d.html#_f">Previous page</a></p>' % (docNo-1,)
__lastDoc += ''.join(tag(x)+y for x,y in fragments[start:end])
if end<len(fragments): __lastDoc += '<p><a name="_f" href="%d.html#_h">Next page</a></p>' % (docNo+1,)
__lastDoc += footer
return linkSub(__lastDoc)
def linkSub(txt): return re.sub(r'(?i)<a href=("?)#',r'<a href=\1index.html#',txt) # (do link to index.html#whatever rather than directly, so link still works if docs change)
def findEnd(start,docNo):
"Given 'start' (an index into 'fragments'), find an 'end' that produces the largest possible htmlDoc less than max_filesize. docNo is used to generate previous/next page links as appropriate."
eTry = len(fragments)-start
assert eTry, "must start before the end"
sLen = len(htmlDoc(start,start+eTry,docNo))
if sLen > max_filesize:
eTry = int(eTry / int(sLen / max_filesize)) # rough start point
while eTry > 1 and len(htmlDoc(start,start+eTry,docNo)) > max_filesize:
eTry = int(eTry/2)
if eTry < 1: eTry = 1
while eTry < len(fragments)-start and len(htmlDoc(start,start+eTry,docNo)) < max_filesize: eTry += 1
return start + max(1,eTry-1)
def allRanges():
start = docNo = 0
while start < len(fragments):
end = findEnd(start,docNo)
sys.stderr.write("\rSegmenting (%d/%d)" % (end,len(fragments)))
yield start,end
start = end ; docNo += 1
sys.stderr.write("Segmenting")
startsList = []
for start,end in allRanges():
open(("%s%s%d.html" % (outdir,os.sep,len(startsList))),"w").write(htmlDoc(start,end,len(startsList)))
startsList.append(start)
if alphabet:
assert not '"' in alphabet and not '\\' in alphabet and not '&' in alphabet and not '<' in alphabet, "Can't use special characters in alphabet (unless js_alphabet is modified to quote them)"
js_alphabet = """var a=val.toLowerCase(),i; val="";
for(i=0; i < a.length; i++) { var c=a.charAt(i); if("%s".indexOf(c)>-1) val += c }
""" % alphabet # TODO: what if user types letters with diacritics, when remove_utf8_diacritics is set?
else: js_alphabet = ""
if more_sensible_punctuation_sort_order: js_alphabet = "val = val.replace(/-/g,' ').replace(/,/g,'~COM~').replace(/;/g,',').replace(/~COM~/g,';').replace(/ /g,';').replace(/([;,]);+/g,'$1');" + js_alphabet
def hashReload(footer):
# If a footer refers to index.html#example, need to
# make sure the hash script runs when clicking there
# from the index page itself.
strToFind = '<a href="index.html#'
# TODO: what if it's quoted differently and/or has extra attributes? (ohi.html does specify using " quoting though)
while True:
i = footer.lower().find(strToFind)
if i==-1: return footer
footer = footer[:i]+'<a onclick="document.forms[0].q.value=\''+footer[i+len(strToFind):footer.index('"',i+len(strToFind))]+'\';jump()" href="index.html#'+footer[i+len(strToFind):]
open(outdir+os.sep+"index.html","w").write("""%s<script><!--
function jump() {
var val=document.forms[0].q.value; %s
location.href=%s(%s,val)+".html#"+val
}
if(navigator.userAgent.indexOf("Opera/9.50" /* sometimes found on WM6.1 phones from 2008 */) >= 0) document.write("<p><b>WARNING:</"+"b> Your version of Opera may have trouble jumping to anchors; please try Opera 10 or above.</"+"p>")
//--></script><noscript><p><b>ERROR:</b> Javascript needs to be switched on for this form to work.</p></noscript>
<form action="#" onSubmit="jump();return false">Lookup: <input type="text" name="q"><input type="submit" value="ok"></form><script><!--
if(location.hash.length > 1) { document.forms[0].q.value = location.hash.slice(1).replace(/(\+|%%20)/g,' '); jump(); } else document.forms[0].q.focus();
//--></script>%s""" % (hashReload(linkSub(header)),js_alphabet,js_binchop_dx,old_javascript_array(fragments[s][0] for s in startsList),hashReload(linkSub(footer))))
sys.stderr.write(" %d files\n" % (len(startsList)+1))
| ohi.py | 12,229 | … | 4,366 | en | 0.851421 |
import environ
from pathlib import Path
env = environ.Env(
# Sets debug to False if it cannot find .env
DEBUG=(bool, False)
)
environ.Env.read_env()
# GENERAL
# ------------------------------------------------------------------------------
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-SECRET_KEY
SECRET_KEY = env.str('SECRET_KEY')
# https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = env.bool('DEBUG')
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = tuple(env.list('ALLOWED_HOSTS'))
# APPS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'whitenoise.runserver_nostatic',
'django.contrib.staticfiles',
'django.contrib.sites',
# Third-party
'allauth',
'allauth.account',
'crispy_forms',
'debug_toolbar',
# Local
'accounts',
'pages',
'snacks',
]
# MIDDLEWARE
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#middleware
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# URLS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = "config.urls"
# https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = "config.wsgi.application"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# DATABASES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# PASSWORDS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# INTERNATIONALIZATION
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/topics/i18n/
# https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'UTC'
# https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-USE_I18N
USE_I18N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
# STATIC
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = str(BASE_DIR.joinpath('staticfiles'))
# https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = [str(BASE_DIR.joinpath('static'))]
# http://whitenoise.evans.io/en/stable/django.html#add-compression-and-caching-support
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# DJANGO-CRISPY-FORMS CONFIGS
# ------------------------------------------------------------------------------
# https://django-crispy-forms.readthedocs.io/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = "bootstrap4"
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#email-backend
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# DJANGO-DEBUG-TOOLBAR CONFIGS
# ------------------------------------------------------------------------------
# https://django-debug-toolbar.readthedocs.io/en/latest/installation.html
# https://docs.djangoproject.com/en/dev/ref/settings/#internal-ips
INTERNAL_IPS = ['127.0.0.1']
# CUSTOM USER MODEL CONFIGS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/topics/auth/customizing/#substituting-a-custom-user-model
AUTH_USER_MODEL = 'accounts.CustomUser'
# DJANGO-ALLAUTH CONFIGS
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# https://docs.djangoproject.com/en/dev/ref/settings/#login-redirect-url
LOGIN_REDIRECT_URL = 'home'
# https://django-allauth.readthedocs.io/en/latest/views.html#logout-account-logout
ACCOUNT_LOGOUT_REDIRECT_URL = 'home'
# https://django-allauth.readthedocs.io/en/latest/installation.html?highlight=backends
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# https://django-allauth.readthedocs.io/en/latest/configuration.html
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
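# --- Illustrative .env (a minimal sketch; placeholder values, not part of the
# original file). environ.Env.read_env() above reads a file like this:
#
#   SECRET_KEY=replace-me-with-a-real-secret
#   DEBUG=True
#   ALLOWED_HOSTS=localhost,127.0.0.1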
| config/settings.py | 6,857 | … | 3,486 | en | 0.415933 |
# model settings
model = dict(
type='CenterNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
add_summay_every_n_step=200,
style='pytorch'),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(64, 128, 256, 512),
head_conv=128,
wh_conv=64,
use_deconv=False,
norm_after_upsample=False,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=False,
num_classes=81,
use_exp_wh=False,
wh_offset_base=16,
wh_area_process='norm',
shortcut_cfg=(1, 2, 3),
shortcut_attention=(False, False, False),
norm_cfg=dict(type='BN'),
norm_wh=False,
avg_wh_weightv3=False,
center_ratio=0.2,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.05,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0003,
paramwise_options=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 5,
step=[18, 22])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=18)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x'
load_from = None
resume_from = None
workflow = [('train', 1)]
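# --- Illustrative usage (a sketch; assumes the mmcv/mmdetection tooling this
# config style targets). A training script would typically load it as:
#
#     from mmcv import Config
#     cfg = Config.fromfile('configs/centernext/eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x.py')
#     print(cfg.model.bbox_head.num_classes)  # -> 81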
| configs/centernext/eft18_o16_v1norm_3lr_alpha2_wd3e4_s123_nos_2x.py | 4,096 | … | 129 | en | 0.778863 |
import multiprocessing as mp
import itertools
import traceback
import pickle
import numpy as np
from numba import cuda
from numba.cuda.testing import (skip_on_cudasim, skip_under_cuda_memcheck,
ContextResettingTestCase, ForeignArray)
import unittest
def core_ipc_handle_test(the_work, result_queue):
try:
arr = the_work()
# Catch anything going wrong in the worker function
except: # noqa: E722
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
def base_ipc_handle_test(handle, size, result_queue):
def the_work():
dtype = np.dtype(np.intp)
with cuda.open_ipc_array(handle, shape=size // dtype.itemsize,
dtype=dtype) as darr:
# copy the data to host
return darr.copy_to_host()
core_ipc_handle_test(the_work, result_queue)
def serialize_ipc_handle_test(handle, result_queue):
def the_work():
dtype = np.dtype(np.intp)
darr = handle.open_array(cuda.current_context(),
shape=handle.size // dtype.itemsize,
dtype=dtype)
# copy the data to host
arr = darr.copy_to_host()
handle.close()
return arr
core_ipc_handle_test(the_work, result_queue)
def ipc_array_test(ipcarr, result_queue):
try:
with ipcarr as darr:
arr = darr.copy_to_host()
try:
# should fail to reopen
with ipcarr:
pass
except ValueError as e:
if str(e) != 'IpcHandle is already opened':
raise AssertionError('invalid exception message')
else:
raise AssertionError('did not raise on reopen')
# Catch any exception so we can propagate it
except: # noqa: E722
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
@skip_under_cuda_memcheck('Hangs cuda-memcheck')
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcMemory(ContextResettingTestCase):
def test_ipc_handle(self):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# manually prepare for serialization as bytes
handle_bytes = bytes(ipch.handle)
size = ipch.size
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (handle_bytes, size, result_queue)
proc = ctx.Process(target=base_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
proc.join(3)
def variants(self):
# Test with no slicing and various different slices
indices = (None, slice(3, None), slice(3, 8), slice(None, 8))
# Test with a Numba DeviceNDArray, or an array from elsewhere through
# the CUDA Array Interface
foreigns = (False, True)
return itertools.product(indices, foreigns)
def check_ipc_handle_serialization(self, index_arg=None, foreign=False):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
if index_arg is not None:
devarr = devarr[index_arg]
if foreign:
devarr = cuda.as_cuda_array(ForeignArray(devarr))
expect = devarr.copy_to_host()
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# pickle
buf = pickle.dumps(ipch)
ipch_recon = pickle.loads(buf)
self.assertIs(ipch_recon.base, None)
self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
self.assertEqual(ipch_recon.size, ipch.size)
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, result_queue)
proc = ctx.Process(target=serialize_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(expect, out)
proc.join(3)
def test_ipc_handle_serialization(self):
        for index, foreign in self.variants():
with self.subTest(index=index, foreign=foreign):
self.check_ipc_handle_serialization(index, foreign)
def check_ipc_array(self, index_arg=None, foreign=False):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# Slice
if index_arg is not None:
devarr = devarr[index_arg]
if foreign:
devarr = cuda.as_cuda_array(ForeignArray(devarr))
expect = devarr.copy_to_host()
ipch = devarr.get_ipc_handle()
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, result_queue)
proc = ctx.Process(target=ipc_array_test, args=args)
proc.start()
succ, out = result_queue.get()
if not succ:
self.fail(out)
else:
np.testing.assert_equal(expect, out)
proc.join(3)
def test_ipc_array(self):
        for index, foreign in self.variants():
with self.subTest(index=index, foreign=foreign):
self.check_ipc_array(index, foreign)
def staged_ipc_handle_test(handle, device_num, result_queue):
def the_work():
with cuda.gpus[device_num]:
this_ctx = cuda.devices.get_context()
deviceptr = handle.open_staged(this_ctx)
arrsize = handle.size // np.dtype(np.intp).itemsize
hostarray = np.zeros(arrsize, dtype=np.intp)
cuda.driver.device_to_host(
hostarray, deviceptr, size=handle.size,
)
handle.close()
return hostarray
core_ipc_handle_test(the_work, result_queue)
def staged_ipc_array_test(ipcarr, device_num, result_queue):
try:
with cuda.gpus[device_num]:
with ipcarr as darr:
arr = darr.copy_to_host()
try:
# should fail to reopen
with ipcarr:
pass
except ValueError as e:
if str(e) != 'IpcHandle is already opened':
raise AssertionError('invalid exception message')
else:
raise AssertionError('did not raise on reopen')
# Catch any exception so we can propagate it
except: # noqa: E722
# FAILED. propagate the exception as a string
succ = False
out = traceback.format_exc()
else:
# OK. send the ndarray back
succ = True
out = arr
result_queue.put((succ, out))
@skip_under_cuda_memcheck('Hangs cuda-memcheck')
@skip_on_cudasim('Ipc not available in CUDASIM')
class TestIpcStaged(ContextResettingTestCase):
def test_staged(self):
# prepare data for IPC
arr = np.arange(10, dtype=np.intp)
devarr = cuda.to_device(arr)
# spawn new process for testing
mpctx = mp.get_context('spawn')
result_queue = mpctx.Queue()
# create IPC handle
ctx = cuda.current_context()
ipch = ctx.get_ipc_handle(devarr.gpu_data)
# pickle
buf = pickle.dumps(ipch)
ipch_recon = pickle.loads(buf)
self.assertIs(ipch_recon.base, None)
self.assertEqual(tuple(ipch_recon.handle), tuple(ipch.handle))
self.assertEqual(ipch_recon.size, ipch.size)
# Test on every CUDA devices
for device_num in range(len(cuda.gpus)):
args = (ipch, device_num, result_queue)
proc = mpctx.Process(target=staged_ipc_handle_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
def test_ipc_array(self):
for device_num in range(len(cuda.gpus)):
# prepare data for IPC
arr = np.random.random(10)
devarr = cuda.to_device(arr)
ipch = devarr.get_ipc_handle()
# spawn new process for testing
ctx = mp.get_context('spawn')
result_queue = ctx.Queue()
args = (ipch, device_num, result_queue)
proc = ctx.Process(target=staged_ipc_array_test, args=args)
proc.start()
succ, out = result_queue.get()
proc.join(3)
if not succ:
self.fail(out)
else:
np.testing.assert_equal(arr, out)
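# Minimal sketch of the IPC flow these tests exercise (illustrative only):
# a producer exports a device allocation, the pickled handle crosses a
# spawned-process boundary, and the consumer maps it into its own context:
#
#     devarr = cuda.to_device(np.arange(10))   # producer process
#     ipch = devarr.get_ipc_handle()           # picklable handle
#     with ipch as darr:                       # consumer process
#         host = darr.copy_to_host()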
if __name__ == '__main__':
unittest.main()
| numba/cuda/tests/cudapy/test_ipc.py | 9,385 | … | 1,009 | en | 0.799655 |
#!/usr/bin/env python2
# Copyright 2016 Vimal Manohar
# 2016 Johns Hopkins University (author: Daniel Povey)
# Apache 2.0
from __future__ import print_function
import argparse
import logging
import sys
from collections import defaultdict
"""
This script reads and writes the 'ctm-edits' file that is
produced by get_ctm_edits.py.
It modifies the ctm-edits so that non-scored words
are not counted as errors: for instance, if there are things like
[COUGH] and [NOISE] in the transcript, deletions, insertions and
substitutions involving them are allowed, and we modify the reference
to correspond to the hypothesis.
If you supply the <lang> directory (the one that corresponds to
how you decoded the data) to this script, it assumes that the <lang>
directory contains phones/align_lexicon.int, and it uses this to work
out a reasonable guess of the non-scored phones, based on which have
a single-word pronunciation that maps to a silence phone.
It then uses the words.txt to work out the written form of those words.
Alternatively, you may specify a file containing the non-scored words one
per line, with the --non-scored-words option.
Non-scored words that were deleted (i.e. they were in the ref but not the
hyp) are simply removed from the ctm. For non-scored words that
were inserted or substituted, we change the reference word to match the
hyp word, but instead of marking the operation as 'cor' (correct), we
mark it as 'fix' (fixed), so that it will not be positively counted as a correct
word for purposes of finding the optimal segment boundaries.
e.g.
<file-id> <channel> <start-time> <duration> <hyp-word> <conf> <ref-word> <edit-type>
[note: the <channel> will always be 1].
AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
AJJacobs_2007P-0001605-0003029 1 0.24 0.25 thought 1.0 thought cor
AJJacobs_2007P-0001605-0003029 1 0.49 0.14 i'd 1.0 i'd cor
AJJacobs_2007P-0001605-0003029 1 0.63 0.22 tell 1.0 tell cor
AJJacobs_2007P-0001605-0003029 1 0.85 0.11 you 1.0 you cor
AJJacobs_2007P-0001605-0003029 1 0.96 0.05 a 1.0 a cor
AJJacobs_2007P-0001605-0003029 1 1.01 0.24 little 1.0 little cor
AJJacobs_2007P-0001605-0003029 1 1.25 0.5 about 1.0 about cor
AJJacobs_2007P-0001605-0003029 1 1.75 0.48 [UH] 1.0 [UH] cor
"""
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)s - '
'%(funcName)s - %(levelname)s ] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
parser = argparse.ArgumentParser(
description = "This program modifies the reference in the ctm-edits which "
"is output by steps/cleanup/internal/get_ctm_edits.py, to allow insertions, deletions and "
"substitutions of non-scored words, and [if --allow-repetitions=true], "
"duplications of single words or pairs of scored words (to account for dysfluencies "
"that were not transcribed). Note: deletions and substitutions of non-scored words "
"after the reference is corrected, will be marked as operation 'fix' rather than "
"'cor' (correct) so that the downstream processing knows that this was not in "
"the original reference. Also by defaults tags non-scored words as such when "
"they are correct; see the --tag-non-scored option.")
parser.add_argument("--verbose", type = int, default = 1,
choices=[0,1,2,3],
help = "Verbose level, higher = more verbose output")
parser.add_argument("--allow-repetitions", type = str, default = 'true',
choices=['true','false'],
help = "If true, allow repetitions in the transcript of one or "
"two-word sequences: for instance if the ref says 'i' but "
"the hyp says 'i i', or the ref says 'but then' and the hyp says "
"'but then but then', fix the reference accordingly. Intervening "
"non-scored words are allowed between the repetitions. These "
"fixes will be marked as 'cor', not as 'fix', since there is "
"generally no way to tell which repetition was the 'real' one "
"(and since we're generally confident that such things were "
"actually uttered).")
parser.add_argument("non_scored_words_in", metavar = "<non-scored-words-file>",
help="Filename of file containing a list of non-scored words, "
"one per line. See steps/cleanup/get_nonscored_words.py.")
parser.add_argument("ctm_edits_in", metavar = "<ctm-edits-in>",
help = "Filename of input ctm-edits file. "
"Use /dev/stdin for standard input.")
parser.add_argument("ctm_edits_out", metavar = "<ctm-edits-out>",
help = "Filename of output ctm-edits file. "
"Use /dev/stdout for standard output.")
args = parser.parse_args()
def ReadNonScoredWords(non_scored_words_file):
global non_scored_words
try:
f = open(non_scored_words_file)
except:
sys.exit("modify_ctm_edits.py: error opening file: "
"--non-scored-words=" + non_scored_words_file)
for line in f.readlines():
a = line.split()
        if len(a) != 1:
sys.exit("modify_ctm_edits.py: bad line in non-scored-words "
"file {0}: {1}".format(non_scored_words_file, line))
non_scored_words.add(a[0])
f.close()
# The ctm-edits file format is as follows [note: file-id is really utterance-id
# in this context].
# <file-id> <channel> <start-time> <duration> <hyp-word> <conf> <ref-word> <edit>
# e.g.:
# AJJacobs_2007P-0001605-0003029 1 0 0.09 <eps> 1.0 <eps> sil
# AJJacobs_2007P-0001605-0003029 1 0.09 0.15 i 1.0 i cor
# ...
# This function processes a single line of ctm-edits input for fixing
# "non-scored" words. The input 'a' is the split line as an array of fields.
# It modifies the object 'a'. This function returns the modified array,
# and please note that it is destructive of its input 'a'.
# If it returns the empty array, then the line is to be deleted.
def ProcessLineForNonScoredWords(a):
global num_lines, num_correct_lines, ref_change_stats
try:
assert len(a) == 8
num_lines += 1
# we could do:
# [ file, channel, start, duration, hyp_word, confidence, ref_word, edit_type ] = a
duration = a[3]
hyp_word = a[4]
ref_word = a[6]
edit_type = a[7]
if edit_type == 'ins':
assert ref_word == '<eps>'
if hyp_word in non_scored_words:
# insert this non-scored word into the reference.
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
ref_word = hyp_word
edit_type = 'fix'
elif edit_type == 'del':
assert hyp_word == '<eps>' and float(duration) == 0.0
if ref_word in non_scored_words:
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
return []
elif edit_type == 'sub':
assert hyp_word != '<eps>'
if hyp_word in non_scored_words and ref_word in non_scored_words:
# we also allow replacing one non-scored word with another.
ref_change_stats[ref_word + ' -> ' + hyp_word] += 1
ref_word = hyp_word
edit_type = 'fix'
else:
assert edit_type == 'cor' or edit_type == 'sil'
num_correct_lines += 1
a[4] = hyp_word
a[6] = ref_word
a[7] = edit_type
return a
except Exception:
logger.error("bad line in ctm-edits input: "
"{0}".format(a))
raise RuntimeError
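# Worked example (a sketch, not executed; assumes non_scored_words contains
# '[UH]'): an insertion of a non-scored word is folded into the reference and
# re-labeled 'fix', e.g.
#   ProcessLineForNonScoredWords(
#       ['AJJacobs_2007P-0001605-0003029', '1', '1.75', '0.48', '[UH]', '1.0', '<eps>', 'ins'])
#   -> ['AJJacobs_2007P-0001605-0003029', '1', '1.75', '0.48', '[UH]', '1.0', '[UH]', 'fix']
# while a deletion of a non-scored word ('del' with zero duration) returns []
# and the line is dropped from the output.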
# This function processes the split lines of one utterance (as a
# list of lists of fields), to allow repetitions of words, so if the
# reference says 'i' but the hyp says 'i i', or the ref says
# 'you know' and the hyp says 'you know you know', we change the
# ref to match.
# It returns the modified list-of-lists [but note that the input
# is actually modified].
def ProcessUtteranceForRepetitions(split_lines_of_utt):
global non_scored_words, repetition_stats
    # The array 'selected_line_indexes' will contain the indexes of selected
    # elements of 'split_lines_of_utt'. Consider split_line =
    # split_lines_of_utt[i]. If the hyp and ref words in split_line are both
    # either '<eps>' or non-scored words, we discard the index.
    # Otherwise we put it into selected_line_indexes.
selected_line_indexes = []
# selected_edits will contain, for each element of selected_line_indexes, the
# corresponding edit_type from the original utterance previous to
# this function call ('cor', 'ins', etc.).
#
# As a special case, if there was a substitution ('sub') where the
# reference word was a non-scored word and the hyp word was a real word,
# we mark it in this array as 'ins', because for purposes of this algorithm
# it behaves the same as an insertion.
#
# Whenever we do any operation that will change the reference, we change
# all the selected_edits in the array to None so that they won't match
# any further operations.
selected_edits = []
# selected_hyp_words will contain, for each element of selected_line_indexes, the
# corresponding hyp_word.
selected_hyp_words = []
for i in range(len(split_lines_of_utt)):
split_line = split_lines_of_utt[i]
hyp_word = split_line[4]
ref_word = split_line[6]
# keep_this_line will be True if we are going to keep this line in the
# 'selected lines' for further processing of repetitions. We only
# eliminate lines involving non-scored words or epsilon in both hyp
# and reference position
# [note: epsilon in hyp position for non-empty segments indicates
# optional-silence, and it does make sense to make this 'invisible',
# just like non-scored words, for the purposes of this code.]
keep_this_line = True
if (hyp_word == '<eps>' or hyp_word in non_scored_words) and \
(ref_word == '<eps>' or ref_word in non_scored_words):
keep_this_line = False
if keep_this_line:
selected_line_indexes.append(i)
edit_type = split_line[7]
if edit_type == 'sub' and ref_word in non_scored_words:
                assert hyp_word not in non_scored_words
                # For purposes of this algorithm, substitution of, say,
                # '[COUGH]' by 'hello' behaves like an insertion of 'hello',
                # since we're willing to remove the '[COUGH]' from the
                # transcript.
edit_type = 'ins'
selected_edits.append(edit_type)
selected_hyp_words.append(hyp_word)
# indexes_to_fix will be a list of indexes into 'selected_indexes' where we
# plan to fix the ref to match the hyp.
indexes_to_fix = []
# This loop scans for, and fixes, two-word insertions that follow,
# or precede, the corresponding correct words.
for i in range(0, len(selected_line_indexes) - 3):
this_indexes = selected_line_indexes[i:i+4]
this_hyp_words = selected_hyp_words[i:i+4]
if this_hyp_words[0] == this_hyp_words[2] and \
this_hyp_words[1] == this_hyp_words[3] and \
this_hyp_words[0] != this_hyp_words[1]:
# if the hyp words were of the form [ 'a', 'b', 'a', 'b' ]...
this_edits = selected_edits[i:i+4]
if this_edits == [ 'cor', 'cor', 'ins', 'ins' ] or \
this_edits == [ 'ins', 'ins', 'cor', 'cor' ]:
if this_edits[0] == 'cor':
indexes_to_fix += [ i+2, i+3 ]
else:
indexes_to_fix += [ i, i+1 ]
                word_pair = this_hyp_words[0] + ' ' + this_hyp_words[1]
                # e.g. word_pair = 'hi there'
                # add 2 because these stats are of words.
                repetition_stats[word_pair] += 2
                # the next line prevents this region of the text being used
                # in any further edits.
                selected_edits[i:i+4] = [ None, None, None, None ]
# This loop scans for, and fixes, one-word insertions that follow,
# or precede, the corresponding correct words.
for i in range(0, len(selected_line_indexes) - 1):
this_indexes = selected_line_indexes[i:i+2]
this_hyp_words = selected_hyp_words[i:i+2]
if this_hyp_words[0] == this_hyp_words[1]:
# if the hyp words were of the form [ 'a', 'a' ]...
this_edits = selected_edits[i:i+2]
if this_edits == [ 'cor', 'ins' ] or this_edits == [ 'ins', 'cor' ]:
if this_edits[0] == 'cor':
indexes_to_fix.append(i+1)
else:
indexes_to_fix.append(i)
repetition_stats[this_hyp_words[0]] += 1
# the next line prevents this region of the text being used
# in any further edits.
selected_edits[i:i+2] = [ None, None ]
for i in indexes_to_fix:
j = selected_line_indexes[i]
split_line = split_lines_of_utt[j]
ref_word = split_line[6]
hyp_word = split_line[4]
assert ref_word == '<eps>' or ref_word in non_scored_words
# we replace reference with the decoded word, which will be a
# repetition.
split_line[6] = hyp_word
split_line[7] = 'cor'
return split_lines_of_utt
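# Worked example (a sketch, not executed): if the ref had 'i' where the hyp
# said 'i i', the second line arrives as an insertion and leaves as a repeat:
#   in:  [..., 'i', '1.0', 'i',     'cor']
#        [..., 'i', '1.0', '<eps>', 'ins']
#   out: [..., 'i', '1.0', 'i', 'cor']
#        [..., 'i', '1.0', 'i', 'cor']
# and repetition_stats['i'] is incremented by one.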
# note: split_lines_of_utt is a list of lists, one per line, each containing the
# sequence of fields.
# Returns the same format of data after processing.
def ProcessUtterance(split_lines_of_utt):
new_split_lines_of_utt = []
for split_line in split_lines_of_utt:
new_split_line = ProcessLineForNonScoredWords(split_line)
if new_split_line != []:
new_split_lines_of_utt.append(new_split_line)
if args.allow_repetitions == 'true':
new_split_lines_of_utt = ProcessUtteranceForRepetitions(new_split_lines_of_utt)
return new_split_lines_of_utt
def ProcessData():
try:
f_in = open(args.ctm_edits_in)
    except IOError:
sys.exit("modify_ctm_edits.py: error opening ctm-edits input "
"file {0}".format(args.ctm_edits_in))
try:
f_out = open(args.ctm_edits_out, 'w')
    except IOError:
sys.exit("modify_ctm_edits.py: error opening ctm-edits output "
"file {0}".format(args.ctm_edits_out))
num_lines_processed = 0
# Most of what we're doing in the lines below is splitting the input lines
# and grouping them per utterance, before giving them to ProcessUtterance()
# and then printing the modified lines.
first_line = f_in.readline()
if first_line == '':
sys.exit("modify_ctm_edits.py: empty input")
split_pending_line = first_line.split()
if len(split_pending_line) == 0:
sys.exit("modify_ctm_edits.py: bad input line " + first_line)
cur_utterance = split_pending_line[0]
split_lines_of_cur_utterance = []
while True:
if len(split_pending_line) == 0 or split_pending_line[0] != cur_utterance:
split_lines_of_cur_utterance = ProcessUtterance(split_lines_of_cur_utterance)
for split_line in split_lines_of_cur_utterance:
print(' '.join(split_line), file = f_out)
split_lines_of_cur_utterance = []
if len(split_pending_line) == 0:
break
else:
cur_utterance = split_pending_line[0]
split_lines_of_cur_utterance.append(split_pending_line)
next_line = f_in.readline()
split_pending_line = next_line.split()
if len(split_pending_line) == 0:
if next_line != '':
sys.exit("modify_ctm_edits.py: got an empty or whitespace input line")
try:
f_out.close()
    except IOError:
sys.exit("modify_ctm_edits.py: error closing ctm-edits output "
"(broken pipe or full disk?)")
def PrintNonScoredStats():
if args.verbose < 1:
return
    if num_lines == 0:
        print("modify_ctm_edits.py: processed no input.", file = sys.stderr)
        return
num_lines_modified = sum(ref_change_stats.values())
num_incorrect_lines = num_lines - num_correct_lines
    percent_lines_incorrect = '%.2f' % (num_incorrect_lines * 100.0 / num_lines)
    percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines)
if num_incorrect_lines > 0:
percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 /
num_incorrect_lines)
else:
percent_of_incorrect_modified = float('nan')
print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), "
"of which {2} were changed fixing the reference for non-scored words "
"({3}% of lines, or {4}% of incorrect lines)".format(
num_lines, percent_lines_incorrect, num_lines_modified,
percent_modified, percent_of_incorrect_modified),
file = sys.stderr)
keys = sorted(ref_change_stats.keys(), reverse=True,
key = lambda x: ref_change_stats[x])
num_keys_to_print = 40 if args.verbose >= 2 else 10
print("modify_ctm_edits.py: most common edits (as percentages "
"of all such edits) are:\n" +
('\n'.join([ '%s [%.2f%%]' % (k, ref_change_stats[k]*100.0/num_lines_modified)
for k in keys[0:num_keys_to_print]]))
+ '\n...'if num_keys_to_print < len(keys) else '',
file = sys.stderr)
def PrintRepetitionStats():
if args.verbose < 1 or sum(repetition_stats.values()) == 0:
return
num_lines_modified = sum(repetition_stats.values())
num_incorrect_lines = num_lines - num_correct_lines
    percent_lines_incorrect = '%.2f' % (num_incorrect_lines * 100.0 / num_lines)
    percent_modified = '%.2f' % (num_lines_modified * 100.0 / num_lines)
if num_incorrect_lines > 0:
percent_of_incorrect_modified = '%.2f' % (num_lines_modified * 100.0 /
num_incorrect_lines)
else:
percent_of_incorrect_modified = float('nan')
print("modify_ctm_edits.py: processed {0} lines of ctm ({1}% of which incorrect), "
"of which {2} were changed fixing the reference for repetitions ({3}% of "
"lines, or {4}% of incorrect lines)".format(
num_lines, percent_lines_incorrect, num_lines_modified,
percent_modified, percent_of_incorrect_modified),
file = sys.stderr)
keys = sorted(repetition_stats.keys(), reverse=True,
key = lambda x: repetition_stats[x])
num_keys_to_print = 40 if args.verbose >= 2 else 10
print("modify_ctm_edits.py: most common repetitions inserted into reference (as percentages "
"of all words fixed in this way) are:\n" +
('\n'.join([ '%s [%.2f%%]' % (k, repetition_stats[k]*100.0/num_lines_modified)
for k in keys[0:num_keys_to_print]]))
+ '\n...' if num_keys_to_print < len(keys) else '',
file = sys.stderr)
non_scored_words = set()
ReadNonScoredWords(args.non_scored_words_in)
num_lines = 0
num_correct_lines = 0
# ref_change_stats will be a map from a string like
# 'foo -> bar' to an integer count; it keeps track of how much we changed
# the reference.
ref_change_stats = defaultdict(int)
# repetition_stats will be a map from strings like
# 'a', or 'a b' (the repeated strings), to an integer count; like
# ref_change_stats, it keeps track of how many changes we made
# in allowing repetitions.
repetition_stats = defaultdict(int)
ProcessData()
PrintNonScoredStats()
PrintRepetitionStats()
| egs/wsj/s5/steps/cleanup/internal/modify_ctm_edits.py | 20,279 |
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""An extension to the Core and Topology package that models information on the electrical characteristics of Transmission and Distribution networks. This package is used by network applications such as State Estimation, Load Flow and Optimal Power Flow.
"""
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Fuse import Fuse
from CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergyConsumer import EnergyConsumer
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Switch import Switch
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Disconnector import Disconnector
from CIM15.CDPSM.Connectivity.IEC61970.Wires.ACLineSegment import ACLineSegment
from CIM15.CDPSM.Connectivity.IEC61970.Wires.SynchronousMachine import SynchronousMachine
from CIM15.CDPSM.Connectivity.IEC61970.Wires.BusbarSection import BusbarSection
from CIM15.CDPSM.Connectivity.IEC61970.Wires.LoadBreakSwitch import LoadBreakSwitch
from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTank import TransformerTank
from CIM15.CDPSM.Connectivity.IEC61970.Wires.GroundDisconnector import GroundDisconnector
from CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformerEnd import PowerTransformerEnd
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Junction import Junction
from CIM15.CDPSM.Connectivity.IEC61970.Wires.SeriesCompensator import SeriesCompensator
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Breaker import Breaker
from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerTankEnd import TransformerTankEnd
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Sectionaliser import Sectionaliser
from CIM15.CDPSM.Connectivity.IEC61970.Wires.DCLineSegment import DCLineSegment
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Line import Line
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Conductor import Conductor
from CIM15.CDPSM.Connectivity.IEC61970.Wires.PowerTransformer import PowerTransformer
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Ground import Ground
from CIM15.CDPSM.Connectivity.IEC61970.Wires.TransformerEnd import TransformerEnd
from CIM15.CDPSM.Connectivity.IEC61970.Wires.ShuntCompensator import ShuntCompensator
from CIM15.CDPSM.Connectivity.IEC61970.Wires.EnergySource import EnergySource
from CIM15.CDPSM.Connectivity.IEC61970.Wires.Jumper import Jumper
nsURI = "http://iec.ch/TC57/2010/CIM-schema-cim15?profile=http://iec.ch/TC57/2011/iec61968-13/CDPSM/Connectivity#Wires"
nsPrefix = "cimWires"
| CIM15/CDPSM/Connectivity/IEC61970/Wires/__init__.py | 3,466 |
from django.apps import AppConfig
class BooksConfig(AppConfig):
name = 'bookstudio.books'
verbose_name = 'books'
def ready(self):
"""Override this to put in:
Users system checks
Users signal registration
"""
pass
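        # A minimal sketch of the signal-registration pattern mentioned above,
        # assuming a hypothetical bookstudio.books.signals module:
        #
        #     def ready(self):
        #         from bookstudio.books import signals  # noqa: F401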
| bookstudio/books/apps.py | 276 |
# pylint: disable=too-few-public-methods, no-member
"""API for scheduling learning rate."""
from .. import symbol as sym
class LRScheduler(object):
"""Base class of a learning rate scheduler.
A scheduler returns a new learning rate based on the number of updates that have
been performed.
Parameters
----------
base_lr : float, optional
The initial learning rate.
"""
def __init__(self, base_lr=0.01, name='LRScheduler'):
self.name = name
self.base_lr = base_lr
def __call__(self, num_update):
"""Return a new learning rate based on number of updates.
Parameters
----------
num_update: nnvm Symbol
the number of updates applied to weight.
"""
raise NotImplementedError("__call__ method must be overridden.")
class FactorScheduler(LRScheduler):
"""Reduce the learning rate by a factor for every *n* steps.
It returns a new learning rate by::
base_lr * pow(factor, num_update/step)
Parameters
----------
step : int
Changes the learning rate for every n updates.
factor : float, optional
The factor to change the learning rate.
stop_factor_lr : float, optional
Stop updating the learning rate if it is less than this value.
"""
def __init__(self, step, factor=1, stop_factor_lr=1e-8, name='FactorScheduler', **kwargs):
super(FactorScheduler, self).__init__(name=name, **kwargs)
        if step < 1:
            raise ValueError("Schedule step must be greater than or equal to 1")
        if factor > 1.0:
            raise ValueError("Factor must be no more than 1, so that the learning rate decreases")
self.step = step
self.factor = factor
self.stop_factor_lr = stop_factor_lr
def __call__(self, num_update):
updated_lr = self.base_lr * self.factor ** (num_update / self.step)
return sym.clip(updated_lr, a_min=self.stop_factor_lr, a_max=self.base_lr)
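# Example usage (a minimal sketch): the scheduler is called with an nnvm
# symbol for the update count and returns a symbolic, clipped learning rate.
#
#   num_update = sym.Variable('num_update')
#   scheduler = FactorScheduler(step=100, factor=0.9, base_lr=0.1)
#   lr = scheduler(num_update)   # base_lr * 0.9 ** (num_update / 100), clipped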
| nnvm/python/nnvm/compiler/lr_scheduler.py | 1,985 |
import sys
import os
import re
import tempfile
import auto_editor
import auto_editor.vanparse as vanparse
from auto_editor.utils.log import Log
from auto_editor.ffwrapper import FFmpeg
def grep_options(parser):
parser.add_argument('--no-filename', action='store_true',
help='Never print filenames with output lines.')
parser.add_argument('--max-count', '-m', type=int, default=None,
help='Stop reading a file after NUM matching lines.')
parser.add_argument('--count', '-c', action='store_true',
help='Suppress normal output; instead print count of matching lines for each file.')
parser.add_argument('--ignore-case', '-i', action='store_true',
help='Ignore case distinctions for the PATTERN.')
parser.add_argument('--timecode', action='store_true',
help="Print the match's timecode.")
parser.add_argument('--time', action='store_true',
help="Print when the match happens. (Ignore ending).")
parser.add_argument('--ffmpeg-location', default=None,
help='Point to your custom ffmpeg file.')
parser.add_argument('--my-ffmpeg', action='store_true',
help='Use the ffmpeg on your PATH instead of the one packaged.')
parser.add_argument('--help', '-h', action='store_true',
help='Print info about the program or an option and exit.')
parser.add_required('input', nargs='*', help='The path to a file you want inspected.')
return parser
# stackoverflow.com/questions/9662346/python-code-to-remove-html-tags-from-a-string
def cleanhtml(raw_html: str) -> str:
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
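# e.g. (a sketch):
#   >>> cleanhtml('<i>hello</i> world')
#   'hello world'
# This regex strip is only meant for the simple inline tags found in subtitles.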
def grep_core(
media_file: str, add_prefix: bool, ffmpeg: FFmpeg, args, log: Log, TEMP: str
) -> None:
"""
We're using the WEBVTT subtitle format. It's better than srt
because it doesn't emit line numbers and the time code is in
(hh:mm:ss.sss) instead of (dd:hh:mm:ss,sss)
"""
out_file = os.path.join(TEMP, 'media.vtt')
ffmpeg.run(['-i', media_file, out_file])
count = 0
flags = 0
if args.ignore_case:
flags = re.IGNORECASE
prefix = ''
if add_prefix:
prefix = '{}:'.format(os.path.splitext(os.path.basename(media_file))[0])
if args.max_count is None:
args.max_count = float('inf')
timecode = ''
line_number = -1
with open(out_file, 'r') as file:
while True:
line = file.readline()
line_number += 1
if line_number == 0:
continue
if not line or count >= args.max_count:
break
if line.strip() == '':
continue
            if re.match(r'\d*:\d\d\.\d*\s-->\s\d*:\d\d\.\d*', line):
if args.time:
timecode = line.split('-->')[0].strip() + ' '
else:
timecode = line.strip() + '; '
continue
line = cleanhtml(line)
match = re.search(args.input[0], line, flags)
line = line.strip()
if match:
count += 1
if not args.count:
if args.timecode or args.time:
print(prefix + timecode + line)
else:
print(prefix + line)
if args.count:
print(prefix + str(count))
def main(sys_args=sys.argv[1:]):
parser = vanparse.ArgumentParser('grep', auto_editor.version,
description='Read and match subtitle tracks in media files.',
)
parser = grep_options(parser)
TEMP = tempfile.mkdtemp()
log = Log(temp=TEMP)
try:
args = parser.parse_args(sys_args)
except vanparse.ParserError as e:
log.error(str(e))
ffmpeg = FFmpeg(args.ffmpeg_location, args.my_ffmpeg, debug=False)
    media_files = args.input[1:]
    if not media_files:
        log.error('grep needs at least one media file to search')
    add_prefix = (len(media_files) > 1 or os.path.isdir(media_files[0])) and not args.no_filename
for media_file in media_files:
if not os.path.exists(media_file):
log.error(f'{media_file}: File does not exist.')
if os.path.isdir(media_file):
for _, _, files in os.walk(media_file):
for file in files:
if file == '.DS_Store':
continue
grep_core(os.path.join(media_file, file), add_prefix, ffmpeg, args,
log, TEMP)
else:
grep_core(media_file, add_prefix, ffmpeg, args, log, TEMP)
log.cleanup()
if __name__ == '__main__':
main()
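# Example invocation (a sketch; the first positional argument is the pattern,
# the rest are media files, matching grep_options above):
#
#   python -m auto_editor.subcommands.grep --ignore-case "hello" video.mp4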
| auto_editor/subcommands/grep.py | 4,597 |
"""
Create a blueprint with endpoints for logins from configured identity providers.
The identity providers include, for example, Google, Shibboleth, or another
fence instance. See the other files in this directory for the definitions of
the endpoints for each provider.
"""
from authlib.common.urls import add_params_to_uri
import flask
import requests
from cdislogging import get_logger
from fence.blueprints.login.cilogon import CilogonLogin, CilogonCallback
from fence.blueprints.login.cognito import CognitoLogin, CognitoCallback
from fence.blueprints.login.fence_login import FenceLogin, FenceCallback
from fence.blueprints.login.google import GoogleLogin, GoogleCallback
from fence.blueprints.login.shib import ShibbolethLogin, ShibbolethCallback
from fence.blueprints.login.microsoft import MicrosoftLogin, MicrosoftCallback
from fence.blueprints.login.okta import OktaLogin, OktaCallback
from fence.blueprints.login.orcid import ORCIDLogin, ORCIDCallback
from fence.blueprints.login.ras import RASLogin, RASCallback
from fence.blueprints.login.synapse import SynapseLogin, SynapseCallback
from fence.errors import InternalError
from fence.resources.audit.utils import enable_audit_logging
from fence.restful import RestfulApi
from fence.config import config
logger = get_logger(__name__)
# Mapping from IDP ID to the name in the URL on the blueprint (see below).
IDP_URL_MAP = {
"fence": "fence",
"google": "google",
"shibboleth": "shib",
"orcid": "orcid",
"synapse": "synapse",
"microsoft": "microsoft",
"okta": "okta",
"cognito": "cognito",
"ras": "ras",
"cilogon": "cilogon",
}
def absolute_login_url(provider_id, fence_idp=None, shib_idp=None):
"""
Args:
provider_id (str): provider to log in with; an IDP_URL_MAP key.
fence_idp (str, optional): if provider_id is "fence"
(multi-tenant Fence setup), fence_idp can be any of the
providers supported by the other Fence. If not specified,
will default to NIH login.
shib_idp (str, optional): if provider_id is "fence" and
fence_idp is "shibboleth", shib_idp can be any Shibboleth/
InCommon provider. If not specified, will default to NIH
login.
Returns:
str: login URL for this provider, including extra query
parameters if fence_idp and/or shib_idp are specified.
"""
try:
base_url = config["BASE_URL"].rstrip("/")
login_url = base_url + "/login/{}".format(IDP_URL_MAP[provider_id])
except KeyError as e:
raise InternalError("identity provider misconfigured: {}".format(str(e)))
params = {}
if fence_idp:
params["idp"] = fence_idp
if shib_idp:
params["shib_idp"] = shib_idp
login_url = add_params_to_uri(login_url, params)
return login_url
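# Example (a sketch, assuming BASE_URL is "https://example.com"):
#   absolute_login_url("fence", fence_idp="shibboleth", shib_idp="urn:example:idp")
#   -> "https://example.com/login/fence?idp=shibboleth&shib_idp=urn%3Aexample%3Aidp"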
def provider_info(login_details):
"""
Args:
login_details (dict):
{ name, desc, idp, fence_idp, shib_idps, secondary }
- "idp": a configured provider.
Multiple options can be configured with the same idp.
- if provider_id is "fence", "fence_idp" can be any of the
providers supported by the other Fence. If not specified, will
default to NIH login.
- if provider_id is "fence" and fence_idp is "shibboleth", a
list of "shib_idps" can be configured for InCommon login. If
not specified, will default to NIH login.
- Optional parameters: "desc" (description) and "secondary"
(boolean - can be used by the frontend to display secondary
buttons differently).
Returns:
dict: { name, desc, idp, urls, secondary }
- urls: list of { name, url } dictionaries
"""
info = {
# "id" deprecated, replaced by "idp"
"id": login_details["idp"],
"idp": login_details["idp"],
"name": login_details["name"],
# "url" deprecated, replaced by "urls"
"url": absolute_login_url(login_details["idp"]),
"desc": login_details.get("desc", None),
"secondary": login_details.get("secondary", False),
}
# for Fence multi-tenant login
fence_idp = None
if login_details["idp"] == "fence":
fence_idp = login_details.get("fence_idp")
# handle Shibboleth IDPs: InCommon login can either be configured
# directly in this Fence, or through multi-tenant Fence
if (
login_details["idp"] == "shibboleth" or fence_idp == "shibboleth"
) and "shib_idps" in login_details:
# get list of all available shib IDPs
if not hasattr(flask.current_app, "all_shib_idps"):
flask.current_app.all_shib_idps = get_all_shib_idps()
requested_shib_idps = login_details["shib_idps"]
if requested_shib_idps == "*":
shib_idps = flask.current_app.all_shib_idps
elif isinstance(requested_shib_idps, list):
# get the display names for each requested shib IDP
shib_idps = []
for requested_shib_idp in requested_shib_idps:
shib_idp = next(
(
available_shib_idp
for available_shib_idp in flask.current_app.all_shib_idps
if available_shib_idp["idp"] == requested_shib_idp
),
None,
)
if not shib_idp:
raise InternalError(
'Requested shib_idp "{}" does not exist'.format(
requested_shib_idp
)
)
shib_idps.append(shib_idp)
else:
raise InternalError(
'fence provider misconfigured: "shib_idps" must be a list or "*", got {}'.format(
requested_shib_idps
)
)
info["urls"] = [
{
"name": shib_idp["name"],
"url": absolute_login_url(
login_details["idp"], fence_idp, shib_idp["idp"]
),
}
for shib_idp in shib_idps
]
# non-Shibboleth provider
else:
info["urls"] = [
{
"name": login_details["name"],
"url": absolute_login_url(login_details["idp"], fence_idp),
}
]
return info
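# Example (a sketch of a simple, non-Shibboleth entry; BASE_URL stands in for
# the configured base URL):
#   provider_info({"name": "Google", "idp": "google"})
#   -> {"id": "google", "idp": "google", "name": "Google",
#       "url": BASE_URL + "/login/google", "desc": None, "secondary": False,
#       "urls": [{"name": "Google", "url": BASE_URL + "/login/google"}]}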
def get_login_providers_info():
# default login option
if config.get("DEFAULT_LOGIN_IDP"):
default_idp = config["DEFAULT_LOGIN_IDP"]
elif "default" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on ENABLED_IDENTITY_PROVIDERS.default
default_idp = config["ENABLED_IDENTITY_PROVIDERS"]["default"]
else:
logger.warning("DEFAULT_LOGIN_IDP not configured")
default_idp = None
# other login options
if config["LOGIN_OPTIONS"]:
login_options = config["LOGIN_OPTIONS"]
elif "providers" in config.get("ENABLED_IDENTITY_PROVIDERS", {}):
# fall back on "providers" and convert to "login_options" format
enabled_providers = config["ENABLED_IDENTITY_PROVIDERS"]["providers"]
login_options = [
{
"name": details.get("name"),
"idp": idp,
"desc": details.get("desc"),
"secondary": details.get("secondary"),
}
for idp, details in enabled_providers.items()
]
else:
logger.warning("LOGIN_OPTIONS not configured or empty")
login_options = []
try:
all_provider_info = [
provider_info(login_details) for login_details in login_options
]
except KeyError as e:
raise InternalError("LOGIN_OPTIONS misconfigured: cannot find key {}".format(e))
# if several login_options are defined for this default IDP, will
# default to the first one:
default_provider_info = next(
(info for info in all_provider_info if info["idp"] == default_idp), None
)
if not default_provider_info:
raise InternalError(
"default provider misconfigured: DEFAULT_LOGIN_IDP is set to {}, which is not configured in LOGIN_OPTIONS".format(
default_idp
)
)
return default_provider_info, all_provider_info
def make_login_blueprint():
"""
Return:
flask.Blueprint: the blueprint used for ``/login`` endpoints
Raises:
ValueError: if app is not amenably configured
"""
blueprint = flask.Blueprint("login", __name__)
blueprint_api = RestfulApi(blueprint, decorators=[enable_audit_logging])
@blueprint.route("", methods=["GET"])
def default_login():
"""
The default root login route.
"""
default_provider_info, all_provider_info = get_login_providers_info()
return flask.jsonify(
{"default_provider": default_provider_info, "providers": all_provider_info}
)
# Add identity provider login routes for IDPs enabled in the config.
configured_idps = config["OPENID_CONNECT"].keys()
if "fence" in configured_idps:
blueprint_api.add_resource(FenceLogin, "/fence", strict_slashes=False)
blueprint_api.add_resource(FenceCallback, "/fence/login", strict_slashes=False)
if "google" in configured_idps:
blueprint_api.add_resource(GoogleLogin, "/google", strict_slashes=False)
blueprint_api.add_resource(
GoogleCallback, "/google/login", strict_slashes=False
)
if "orcid" in configured_idps:
blueprint_api.add_resource(ORCIDLogin, "/orcid", strict_slashes=False)
blueprint_api.add_resource(ORCIDCallback, "/orcid/login", strict_slashes=False)
if "ras" in configured_idps:
blueprint_api.add_resource(RASLogin, "/ras", strict_slashes=False)
# note that the callback endpoint is "/ras/callback", not "/ras/login" like other IDPs
blueprint_api.add_resource(RASCallback, "/ras/callback", strict_slashes=False)
if "synapse" in configured_idps:
blueprint_api.add_resource(SynapseLogin, "/synapse", strict_slashes=False)
blueprint_api.add_resource(
SynapseCallback, "/synapse/login", strict_slashes=False
)
if "microsoft" in configured_idps:
blueprint_api.add_resource(MicrosoftLogin, "/microsoft", strict_slashes=False)
blueprint_api.add_resource(
MicrosoftCallback, "/microsoft/login", strict_slashes=False
)
if "okta" in configured_idps:
blueprint_api.add_resource(OktaLogin, "/okta", strict_slashes=False)
blueprint_api.add_resource(OktaCallback, "/okta/login", strict_slashes=False)
if "cognito" in configured_idps:
blueprint_api.add_resource(CognitoLogin, "/cognito", strict_slashes=False)
blueprint_api.add_resource(
CognitoCallback, "/cognito/login", strict_slashes=False
)
if "shibboleth" in configured_idps:
blueprint_api.add_resource(ShibbolethLogin, "/shib", strict_slashes=False)
blueprint_api.add_resource(
ShibbolethCallback, "/shib/login", strict_slashes=False
)
if "cilogon" in configured_idps:
blueprint_api.add_resource(CilogonLogin, "/cilogon", strict_slashes=False)
blueprint_api.add_resource(
CilogonCallback, "/cilogon/login", strict_slashes=False
)
return blueprint
def get_all_shib_idps():
"""
Get the list of all existing Shibboleth IDPs.
This function only returns the information we need to generate login URLs.
Returns:
list: list of {"idp": "", "name": ""} dictionaries
"""
url = config["OPENID_CONNECT"].get("fence", {}).get("shibboleth_discovery_url")
if not url:
raise InternalError(
"Unable to get list of Shibboleth IDPs: OPENID_CONNECT.fence.shibboleth_discovery_url not configured"
)
res = requests.get(url)
    if res.status_code != 200:
        raise InternalError(
            "Unable to get list of Shibboleth IDPs from {}".format(url)
        )
all_shib_idps = []
for shib_idp in res.json():
if "entityID" not in shib_idp:
logger.warning(
f"get_all_shib_idps(): 'entityID' field not in IDP data: {shib_idp}. Skipping this IDP."
)
continue
idp = shib_idp["entityID"]
if len(shib_idp.get("DisplayNames", [])) > 0:
name = get_shib_idp_en_name(shib_idp["DisplayNames"])
else:
logger.warning(
f"get_all_shib_idps(): 'DisplayNames' field not in IDP data: {shib_idp}. Using IDP ID '{idp}' as IDP name."
)
name = idp
all_shib_idps.append(
{
"idp": idp,
"name": name,
}
)
return all_shib_idps
def get_shib_idp_en_name(names):
"""
Returns a name in English for a Shibboleth IDP, or the first available
name if no English name was provided.
Args:
names (list): list of {"lang": "", "value": ""} dictionaries
Example:
[
{
"value": "University of Chicago",
"lang": "en"
},
{
"value": "Universidad de Chicago",
"lang": "es"
}
]
Returns:
str: Display name to use for this Shibboleth IDP
"""
for name in names:
if name.get("lang") == "en":
return name["value"]
return names[0]["value"]
| fence/blueprints/login/__init__.py | 13,602 |
#! /usr/bin/python
"""
Monitoring functions for xrootd cache server, producing classads
that can be handed to condor
"""
import os
import math
import time
import errno
import struct
import collections
import six
from six.moves import urllib
import classad
import XRootD.client
__all__ = ['collect_cache_stats']
# these paths in the cache are to be treated as top level "VOs" for stats collection
vo_paths = [ '/user', '/pnfs/fnal.gov/usr' ]
def _split_path(path):
""" Split a path into a list of directory names """
if path[0] != '/':
raise Exception("Not absolute path")
result = []
while path != '/':
path, tail = os.path.split(path)
if tail: result.append(tail)
return list(reversed(result))
def _is_prefix(lhs, rhs):
""" return True if the first list is a prefix of the second """
rhs = list(rhs)
while rhs:
if lhs == rhs: return True
rhs.pop()
return False
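# For example (a sketch):
#   _split_path('/pnfs/fnal.gov/usr')                  -> ['pnfs', 'fnal.gov', 'usr']
#   _is_prefix(['pnfs'], ['pnfs', 'fnal.gov', 'usr'])  -> True
#   _is_prefix(['user'], ['pnfs', 'fnal.gov', 'usr'])  -> False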
def scan_cache_dirs(rootdir):
""" Scan the top level directory of the cache.
Walks the path looking for directories that are not in vo_paths.
For each of these generate a cache summary
"""
results = {}
try:
root_components = _split_path(rootdir)
for dirpath, dirnames, filenames in os.walk(rootdir, topdown=True):
# get the path components as a list, removing the rootdir part
dirpath_components = _split_path(dirpath)[len(root_components):]
for name in list(dirnames):
path_components = dirpath_components + [name]
for p in [ _split_path(p) for p in vo_paths]:
# if this directory is in vo_paths, keep recursing
if _is_prefix( path_components, p):
break
else:
# if nothing is in vo_paths, get the stats and remove from dirnames
# so this walk goes no further
vo_name = os.path.join('/', *path_components)
try:
results[vo_name] = scan_vo_dir(os.path.join(dirpath, name))
except (OSError, IOError) as ex:
results[vo_name] = {'scan_vo_dir_error': str(ex) }
dirnames.remove(name)
return results
except (OSError, IOError) as ex:
return { 'scan_cache_dirs_error' : { 'message' : str(ex) } } # error message?
def scan_vo_dir(vodir):
""" Scan a VO directory (assumed to be the whole directory tree after the top level """
now = time.time()
totalsize = 0
nfiles = 0
naccesses = 0
accesses = collections.defaultdict(int)
most_recent_access = 0
bad_cinfo_files = 0
for root, dirs, files in os.walk(vodir):
fnames = set(files)
# Somebody might add a file ending in .cinfo in the cache
# so look for the f, f.cinfo pair
for f, cinfo in ((f, f + '.cinfo') for f in fnames if f + '.cinfo' in fnames):
try:
st = os.stat(os.path.join(root, f))
except OSError as ex:
if ex.errno == errno.ENOENT:
# must have just been deleted
continue
else: raise
try:
access_info = read_cinfo(os.path.join(root, cinfo), now)
            except (IOError, OSError) as ex:
if ex.errno == errno.ENOENT:
continue
else:
bad_cinfo_files += 1
access_info = { "naccesses" : 0, "last_access": 0, "by_hour" : {} }
except ReadCInfoError as ex:
bad_cinfo_files += 1
access_info = ex.access_info
nfiles += 1
file_size = st.st_blocks*512 # allow for sparse files
totalsize += file_size
naccesses += access_info["naccesses"]
most_recent_access = max(most_recent_access, access_info["last_access"])
for h in access_info["by_hour"]:
accesses["naccesses_hr_" + h] += access_info["by_hour"][h]
accesses["bytes_hr_" + h] += access_info["bytes_hr"][h]
result = classad.ClassAd({
"used_bytes" : totalsize,
"nfiles" : nfiles,
"naccesses" : naccesses,
"bad_cinfo_files" : bad_cinfo_files
})
result.update(accesses)
if most_recent_access > 0:
result["most_recent_access_time"] = most_recent_access
return result
# Parsing the cinfo files
# The header (not a c struct; consecutive separate values with no padding)
# version + buffer size + file size (blocks)
# int + long long + long long
_header_fmt = '=iqq'
_header_fmt_size = struct.calcsize(_header_fmt)
# then the number of accesses
# long long (the count is stored as a 64-bit value)
_int_fmt = '@q'
_int_fmt_size = struct.calcsize(_int_fmt)
# each access contains a struct (native size + padding)
# AttachTime + DetachTime + BytesDisk + BytesRam + BytesMissed
# time_t + long long + long long + long long + long long
_status_fmt = '@qqqqq'
_status_fmt_size = struct.calcsize(_status_fmt)
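# A minimal sketch of the layout (not executed): for version 2, a 1 MiB
# buffer and a 10 MiB file the fixed header would be
#   struct.pack(_header_fmt, 2, 1048576, 10485760)
# occupying _header_fmt_size == 20 bytes, followed by the buff_synced bit
# vector, a 16-byte checksum, an 8-byte creation time, the access count,
# and one _status_fmt record per access.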
class ReadCInfoError(Exception):
def __init__(self, *args):
Exception.__init__(self, *args)
if len(args) > 1:
self.access_info = args[1]
else:
self.access_info = {}
def read_cinfo(cinfo_file, now):
""" Try to extract useful info from the cinfo file """
result = { "naccesses": 0,
"last_access": 0,
"by_hour" : { "01": 0, "12": 0, "24": 0 },
"bytes_hr" : { "01": 0, "12": 0, "24": 0 },
}
cf = open(cinfo_file, 'rb')
# read and unpack the header
buf = cf.read(_header_fmt_size)
if len(buf) < _header_fmt_size:
# a mangled file
raise ReadCInfoError("%s header too short" % cinfo_file, result)
version, buffer_size, file_size = struct.unpack(_header_fmt, buf)
# we only understand version 2
if version != 2:
raise ReadCInfoError("%s unknown version: %s" % (cinfo_file, version), result)
# Get the size of the state vector and skip over it
# buff_synced uses 1 bit per bufferSize block of bytes
# Length is rounded up to the nearest byte
buff_synced_len = int(math.ceil(float(file_size)/buffer_size/8))
# If the file_size is zero, state vector length is 1
# (Difference is due to Python's integer division returning the floor)
if file_size == 0:
buff_synced_len = 1
cf.read(buff_synced_len)
# Go past cksum (char[16]) and creationTime (time_t)
cf.read(16 + 8)
# now the access count (an int)
buf = cf.read(_int_fmt_size)
if len(buf) < _int_fmt_size:
raise ReadCInfoError("%s: invalid access field" % cinfo_file, result)
access_count, = struct.unpack(_int_fmt, buf)
result["naccesses"] = access_count
if access_count < 0:
raise ReadCInfoError("%s: invalid access count: %s" % (cinfo_file, access_count), result)
elif access_count == 0:
return result
# read the access times
hr_01 = now - 60*60
hr_12 = now - 12*60*60
hr_24 = now - 24*60*60
# Read AStat structs
try:
for buf in iter(lambda: cf.read(_status_fmt_size), b''):
access_time, _, bytes_disk, bytes_ram, _ = struct.unpack(_status_fmt, buf)
result["last_access"] = access_time
#print access_time, bytes_disk, bytes_ram
#print time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(access_time))
            intervals = list()
            if access_time >= hr_01: intervals.append('01')
            if access_time >= hr_12: intervals.append('12')
            if access_time >= hr_24:
                intervals.append('24')
            else:
                # accesses older than 24 hours are not counted in any bucket
                continue
for interval in intervals:
result["by_hour"][interval] += 1
result["bytes_hr"][interval] += bytes_disk + bytes_ram
except struct.error as ex:
# return what we've got
raise ReadCInfoError("%s unable to decode access time data: %s" % (cinfo_file, str(ex)), result)
return result
def test_xrootd_server(url):
""" Contact the xrootd server to check if it's alive
"""
try:
myclient = XRootD.client.FileSystem(url)
startt = time.time()
response, _ = myclient.ping(timeout=10)
elapsed = time.time() - startt
if response.fatal:
status = "fatal"
elif response.error:
status = "error"
elif response.ok:
status = "ok"
else:
status = "unknown"
result = {"ping_response_status" : status, "ping_response_code" : response.code,
"ping_response_message" : response.message, "ping_elapsed_time" : elapsed}
return result
except Exception as ex: # more specific exception would be better
return {"ping_response_status" : "failed", "ping_response_code" : -1,
"ping_response_message" : str(ex), "ping_elapsed_time" : 0.0}
def get_cache_info(rootdir, cache_max_fs_fraction):
"""Get information about the cache itself"""
result = {}
try:
stat = os.statvfs(rootdir)
total_size = int(stat.f_blocks*stat.f_bsize*cache_max_fs_fraction)
free_size = int(total_size - (stat.f_blocks-stat.f_bfree)*stat.f_bsize)
result['total_cache_bytes'] = total_size
result['free_cache_bytes'] = free_size
result['free_cache_fraction'] = 1 - float(stat.f_blocks-stat.f_bfree)/int(stat.f_blocks*cache_max_fs_fraction)
return result
except (OSError, IOError) as ex:
return {}
def collect_cache_stats(url, rootdir, cache_max_fs_fraction=1.0):
""" Collect stats on the cache server """
start_time = time.time()
parsed_url = urllib.parse.urlparse(url)
# Python 2.6's urlparse returns a ParseResult object whereas
# Python 2.4's urlparse returns a tuple that doesn't handle
# root:// properly
try:
if parsed_url.scheme not in ('root', 'xroot'):
raise Exception("URL '%s' is not an xrootd url" % url)
hostname = parsed_url.netloc
except AttributeError:
if parsed_url[0] not in ('root', 'xroot'):
raise Exception("URL '%s' is not an xrootd url" % url)
hostname = parsed_url[2][2:] # Avoid the '//' prefix
result = {'MyType' : 'Machine', 'Name': 'xrootd@%s' % hostname, 'stats_time' : int(start_time)}
result.update(test_xrootd_server(url))
result.update(get_cache_info(rootdir, cache_max_fs_fraction))
stats_per_vo = scan_cache_dirs(rootdir)
# add up the sizes
totals = dict()
most_recent_access = 0
result['VO'] = {}
for vo, vostats in stats_per_vo.items():
for k, v in vostats.items():
if k == "most_recent_access_time":
most_recent_access = max(most_recent_access, v)
else:
try:
totals[k] += v
except KeyError:
totals[k] = v
result['VO'][vo] = vostats
result['used_cache_bytes'] = totals.pop("used_bytes", 0)
for k, v in totals.items():
result["total_" + k] = v
if most_recent_access > 0:
result["most_recent_access_time"] = most_recent_access
result['time_to_collect_stats'] = time.time() - start_time
return classad.ClassAd(result)
if __name__ == '__main__':
import sys
args = sys.argv[1:]
if len(args) > 2:
args[2] = float(args[2])
elif len(args) == 2:
args.append(0.99) # max cache fraction
print(collect_cache_stats(*args))
| src/xrootd_cache_stats.py | 11,746 |
'''
dShell output classes
@author: tparker
'''
import os
import sys
import logging
import struct
import datetime
import dshell
import util
class Output(object):
'''
dShell output base class, extended by output types
'''
_DEFAULT_FORMAT = ''
_DEFAULT_TIMEFORMAT = '%Y-%m-%d %H:%M:%S'
_DEFAULT_DELIM = ' '
_NULL = None
# true if you want to remove extra fields from the parsed record
_FILTER_EXTRA = False
def __init__(self, *a, **kw):
'''
base output class constructor
configuration kwords:
logger=<existing logging object> to pass in a logger
format='format string' to override default formatstring for output class
pcap = filename to write pcap
'''
# setup the logger
self.logger = kw.get('logger', logging)
# parse the format string
self.setformat(kw.get('format', self._DEFAULT_FORMAT))
self.timeformat = (kw.get('timeformat', self._DEFAULT_TIMEFORMAT))
self.delim = (kw.get('delim', self._DEFAULT_DELIM))
if 'pcap' in kw:
self.pcapwriter = PCAPWriter(kw['pcap'])
else:
self.pcapwriter = None
# this is up to the output plugin to process
# by default stuffs extra fields and data into 'extra' field
# if _FILTER_EXTRA is true
self.extra = kw.get('extra', False)
# create the default session writer
if 'session' in kw:
self.sessionwriter = SessionWriter(**kw)
else:
self.sessionwriter = None
# write a message to the log
def log(self, msg, level=logging.INFO, *args, **kw):
'''write a message to the log
passes all args and kwargs thru to logging
except for level= is used to set logging level'''
self.logger.log(level, msg, *args, **kw)
def setformat(self, formatstr=None, typemap=None):
'''parse a format string and extract the field info
if no string given, reverts to default for class
will set self.fields to be a list of (name,type,spec) tuples
self.fieldnames to a list of fieldnames
and self.fieldmap to a list of key=in value=out mappings
format string can also map in field to out field with %(in:out)spectype
or specify an explicit out type with %(in:out)specintype:outtype
(note this breaks compatibility with text formatting,
but useful for db or other output modules)
a typemap of [intype]=outtype (or [in]=(newintype,outtype)
can be used to map and replace types
'''
if formatstr:
self.format = formatstr + "\n"
else:
self.format = self._DEFAULT_FORMAT + "\n"
self.fields = [] # will be a (name,type,length) tuple
self.fieldnames = []
self.fieldmap = {}
# get all the field names
e = 0
while True:
# find the next format spec of %(...)
s = self.format.find('%', e) + 1
if s < 1 or self.format[s] != '(':
break # not %(...
e = self.format.find(')', s)
if e < 0:
break # didn't find a closing paren
# get text between parens as field name
fname = self.format[s + 1:e]
# len/precision specs will be 0-9 between ) and type char
fspec = ''
for i in xrange(e + 1, len(self.format)):
if self.format[i] in '1234567890.+-# lLh':
fspec += self.format[i]
else:
break # this char is not a spec char, it is the type char
ftype = self.format[i]
i += 1
# is the field type a intype:outtype def?
if i < len(self.format) and self.format[i] == ':':
e = self.format.find(' ', i) # find the end whitespace
# split on: to get input:output mapping
ftype, outtype = self.format[i - 1:e].split(':')
else:
outtype = None # output will be same as input type
e = i # start at next char on loop
try: # field name to column mapping
fname, fmap = fname.split(':')
            except ValueError:
fmap = fname # no mapping
if typemap and ftype in typemap and not outtype:
try:
(ftype, outtype) = typemap[ftype]
                except (TypeError, ValueError):
outtype = typemap[ftype]
# append the field name,type,spec,mapping
self.fields.append((fname, ftype, fspec))
self.fieldnames.append(fname)
if outtype:
self.fieldmap[fname] = (fmap, outtype) # map of in to out,type
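    # Example (a sketch of the resulting bookkeeping): with DBOutput's typemap,
    # setformat('%(ts:timestamp)t %(sip)15s') leaves
    #   self.fields     == [('ts', 'f', ''), ('sip', 's', '15')]  ('t' remapped)
    #   self.fieldnames == ['ts', 'sip']
    #   self.fieldmap   == {'ts': ('timestamp', 'TIMESTAMP'),
    #                       'sip': ('sip', 'VARCHAR')}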
def parse(self, *args, **kw):
'''parse the input args/kwargs into a record dict according to format string
- timestamps are formatted to date/time strings
- fields not in the input will be defined but blank
- extra fields in the record will be formatted into a
"name=value name2=value2..." string and put in 'extra'
- args will go into 'data'
- format keyword can contain a new format string to use (this also sets format for future output)
'''
# convert timestamps to proper format
for ts in [k for k in kw if k == 'ts' or k.endswith('time')]:
dt = ts[:-4] + 'datetime' # ts->datetime , Xtime -> Xdatetime
kw[dt] = datetime.datetime.fromtimestamp(
float(kw[ts])).strftime(self.timeformat) # format properly
        if kw.get('direction') == 'cs':
            kw['dir_arrow'] = '->'
        elif kw.get('direction') == 'sc':
            kw['dir_arrow'] = '<-'
else:
kw['dir_arrow'] = '--'
if 'format' in kw:
self.setformat(kw['format']) # change the format string?
del kw['format']
# create the record initialized to the _NULL value
rec = dict((f, self._NULL) for f in self.fieldnames)
# populate record from datadict if datadict key is a field
if self._FILTER_EXTRA:
rec.update(
dict((f, kw[f]) for f in self.fieldnames if (f in kw and kw[f] != None)))
# place extra datadict keys into the extra field (and exclude the
# addr tuple)
if self.extra:
rec['extra'] = self.delim.join(['%s=%s' % (f, kw[f]) for f in sorted(
kw.keys()) if f not in self.fieldnames and f != 'addr'])
else: # not filtering extra, just lump them in as fields
rec.update(kw)
# populate the data field
if args:
rec['data'] = self.delim.join(map(str, args))
return rec
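    # Example (a sketch): with format "%(datetime)s %(dir_arrow)s %(data)s",
    # parse('hello', ts=0, direction='cs') returns a rec whose 'datetime' is
    # the epoch-0 timestamp rendered with self.timeformat, 'dir_arrow' is '->'
    # and 'data' is 'hello'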
def dump(self, pkt=None, **kw): # pass packets to pcap
'''dump raw packet data to an output
override this if you want a format other than pcap'''
pktdata = str(pkt) # might be string, might be a dpkt object
pktlen = kw.get('len', len(pktdata))
if self.pcapwriter:
self.pcapwriter.write(pktlen, pktdata, kw['ts'])
else:
self.log(util.hexPlusAscii(str(pkt)), level=logging.DEBUG)
# close the PCAP output
def close(self):
if self.pcapwriter:
self.pcapwriter.close()
def dispatch(self, m, *args, **kwargs):
'''dispatch from Q pop'''
if m == 'write':
self.write(*args, **kwargs)
if m == 'alert':
self.alert(*args, **kwargs)
if m == 'dump':
self.dump(*args, **kwargs)
class FileOutput(Output):
def __init__(self, *args, **kw):
'''configuration for fileoutput:
fh=<existing open file handle>
file=filename to write to
mode=mode to open file as, default 'w'
'''
# do base init first
Output.__init__(self, *args, **kw)
# get the output filehandle or file
f = None
if 'fh' in kw:
self.fh = kw['fh']
return
elif 'file' in kw:
f = kw['file']
elif args:
f = args[0]
if f:
if 'mode' in kw:
mode = kw['mode']
else:
mode = 'w'
if mode == 'noclobber':
mode = 'w'
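                # noclobber: while the target exists, bump a numeric suffix,
                # e.g. "out" -> "out-0001" -> "out-0002" -> ...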
try:
while os.stat(f):
p = f.split('-')
try:
p, n = p[:-1], int(p[-1])
except ValueError:
n = 0
f = '-'.join(p + ['%04d' % (int(n) + 1)])
except OSError:
pass # file not found
self.fh = open(f, mode)
else:
self.fh = sys.stdout
def write(self, obj, **kw):
'''write session data to the session output or stdout'''
if self.sessionwriter:
self.sessionwriter.write(obj, **kw)
elif self.fh:
self.fh.write(str(obj))
def close(self):
'''close output if not stdout'''
if self.fh != sys.stdout:
self.fh.close()
Output.close(self)
class TextOutput(FileOutput):
'''formatted text output to file or stdout'''
_DEFAULT_FORMAT = "%(decoder)s %(datetime)s %(sip)16s:%(sport)-5s %(dir_arrow)s %(dip)16s:%(dport)-5s ** %(data)s **"
_NULL = ''
_FILTER_EXTRA = True
def __init__(self, *args, **kw):
if 'extra' in kw:
self._DEFAULT_FORMAT += " [ %(extra)s ]"
FileOutput.__init__(self, *args, **kw)
def alert(self, *args, **kw):
'''write an alert record
we pass in the decoder object and args/dict'''
rec = self.parse(*args, **kw)
if rec:
self.fh.write(self.format % rec)
class DBOutput(Output):
'''format strings as used by the DBOutput module to create tables and map fields
these follow the usual %(name)type and in most cases a custom format string will work
    default type maps are:
s,r = VARCHAR (if field len given) /TEXT (if no len)
c = CHAR(1)
x,X,o = VARCHAR
d,i,u = INTEGER
e,E,f,F,g,G = DECIMAL
with the following extra: (using these breaks text format string compatibility)
b = boolean
t = timestamp
D = datetime
T = this field selects table
(following are postgres-only)
A = inet
H = host
N = cidr
M = macaddr
format string can also map field to column with %(field:column)type
or specify an explicit column type with %(field:column)pytype:DBTYPE
(note this also breaks compatibility with text format strings)
'''
_DEFAULT_FORMAT = "%(decoder)T %(ts:timestamp)t %(sip)s %(sport)s %(dip)s %(dport)s %(data:alert)s"
_NULL = None
# format type to (type,coltype) map
_TYPEMAP = {'s': 'VARCHAR', 'r': 'VARCHAR', 'c': 'CHAR(1)',
'x': 'VARCHAR', 'X': 'VARCHAR', 'o': 'VARCHAR',
'd': 'INTEGER', 'i': 'INTEGER', 'u': 'INTEGER',
'e': 'DECIMAL', 'E': 'DECIMAL',
'f': 'DECIMAL', 'F': 'DECIMAL',
'g': 'DECIMAL', 'G': 'DECIMAL',
# 'b' isn't a python type, so (ftype,DBTYPE) tuple for value formats input as ftype
'b': ('d', 'BOOLEAN'),
# not standard across database types!
't': ('f', 'TIMESTAMP'), 'D': ('s', 'DATETIME'),
'A': ('s', 'INET'), 'H': ('s', 'HOST'), 'N': ('s', 'CIDR'), 'M': ('s', 'MACADDR')} # these are postgres specific
# acceptable params to pass to db module connect method
_DBCONNPARAMS = ['host', 'user', 'passwd',
'password', 'db', 'database', 'port', 'charset']
# map of db type to insert placeholder. '%s' is the default, but sqlite3 doesn't like it
# you can override this with the 'placeholder' config keyword
_DBTYPE_PLACEHOLDER_MAP = {'sqlite3': '?'}
def __init__(self, *args, **kw):
'''configuration:
config=db config .ini file name to parse
config keywords:
dbtype=database type, selects DB API module to load
in conf file use [dbtype] section name instead
host,user,passwd,password,db,database,port will be passed to db module if present
table=db table to use if not specified by a field
insert_param=character to use as parameter placeholder for INSERT
(sqlite3=?, default=%%s)
format_types=types to format before insert (default=x)
('s' to pad strings, 'x' to convert to hex, 'f' to format floats, 'fx' for hex and floats...)
'''
self.dbconfig = kw.copy()
# if we were passed a config.ini file, parse it and add the k/v pairs
# to the config
if 'config' in self.dbconfig:
import ConfigParser
config = ConfigParser.ConfigParser()
config.read(self.dbconfig['config'])
sections = config.sections()
if len(sections) > 0:
self.dbconfig['dbtype'] = sections[0]
for k, v in config.items(sections[0], raw=True):
self.dbconfig[k] = v
# import the db module
self.db = __import__(self.dbconfig['dbtype'])
# create a connection, using a dict filtered to db conn params
self.dbconn = self.db.connect(
*args, **dict((k, self.dbconfig[k]) for k in self._DBCONNPARAMS if k in self.dbconfig))
# do the base init last to catch the format string, etc.. (as it may
# have come from the config file)
Output.__init__(self, *args, **self.dbconfig)
def createtable(self, table=None):
'''creates a table based on the format string'''
if not table and 'table' in self.dbconfig:
table = self.dbconfig['table']
try:
cursor = self.dbconn.cursor()
sqlfields = []
for fname, ftype, fspec in [f for f in self.fields if f[1] != 'T']:
ctype = self.fieldmap[fname][1]
# if no width spec, use TEXT instead of VARCHAR and hope the db
# likes it
if ctype == 'VARCHAR' and not fspec:
ctype = 'TEXT'
fdef = self.fieldmap[fname][0] + ' ' + ctype
if fspec:
                    # try to convert the python format spec to something
                    # SQL will take
fdef += '(' + \
fspec.strip('+-# lLh').replace('.', ',') + ')'
sqlfields.append(fdef)
sql = 'CREATE TABLE "' + table + '" (' + ','.join(sqlfields) + ')'
self.log(sql, logging.DEBUG)
return cursor.execute(sql)
except:
raise
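    # Example (traced from the default format string): createtable('dns')
    # emits roughly
    #   CREATE TABLE "dns" (timestamp TIMESTAMP,sip TEXT,sport TEXT,
    #                       dip TEXT,dport TEXT,alert TEXT)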
def close(self):
'''closes database connection'''
self.dbconn.close()
Output.close(self)
def alert(self, *args, **kw):
'''write an output record
we pass in the decoder object and args/dict'''
        rec = self.parse(*args, **kw)
if rec:
self.insert(rec)
def setformat(self, formatstr=None):
'''calls main setformat and then builds the insert SQL'''
# what is the insert param?? some databases use %s, some use ?
# try to map it or take the placeholder keyword from config
ph = self.dbconfig.get('insert_param',
self._DBTYPE_PLACEHOLDER_MAP.get(
self.dbconfig['dbtype'], '%%s')
)
# these are the types we need to format before passing to the db
self.format_types = self.dbconfig.get('format_types', 'x')
Output.setformat(self, formatstr, typemap=self._TYPEMAP)
# build all fields we map (except for [T]able select)
self.tablefield = 'decoder' # default to decodername
for fname, ftype, fspec in self.fields:
if ftype == 'T':
self.tablefield = fname
sqlfields = [self.fieldmap[fname][0]
for (fname, ftype, fspec) in self.fields if fname in self.fieldmap]
self.insertsql = 'INSERT INTO "%%s" (%s) VALUES (%s)' % (
','.join(sqlfields), ','.join([ph] * len(sqlfields)))
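    # nb: with the default '%%s' placeholder, the joined '%%s' tokens survive
    # the string-format above and only collapse to '%s' when insert() applies
    # `self.insertsql % table`, yielding DB API style placeholders; sqlite3's
    # '?' contains no '%' so it passes through both steps unchanged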
def insert(self, rec, table=None):
''' inserts rec dict using self.format into table (if given, else default or specified by field)
if insert fails, tries to create table and insert again before raising exception '''
if not table:
if 'table' in self.dbconfig:
table = self.dbconfig['table']
elif rec[self.tablefield]:
table = rec[self.tablefield]
try:
sqlvalues = []
cursor = self.dbconn.cursor()
for fname, ftype, fspec in self.fields:
if fname in self.fieldmap:
# do we preformat this data?
if ftype in self.format_types:
sqlvalues.append(('%' + fspec + ftype) % rec[fname])
else:
sqlvalues.append(rec[fname])
# create a INSERT INTO table (fields) VALUES (?,?,?) for execute
sql = self.insertsql % table
self.log(sql + ' %s' % sqlvalues, logging.DEBUG)
except:
raise
# try once, if it fails, try to create table and retry
# throws on second failure or create table failure
fail = False
while True:
try:
cursor.execute(sql, sqlvalues)
self.dbconn.commit()
break # success
except Exception, e:
self.log(e, level=logging.WARNING)
if fail:
raise
else:
fail = True
try:
self.createtable(table)
except:
raise
class PCAPWriter(FileOutput):
'''writes a pcap file'''
def __init__(self, *args, **kw):
FileOutput.__init__(self, *args, **kw)
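        # pcap global header: magic 0xa1b2c3d4, version 2.4, thiszone=0,
        # sigfigs=0, snaplen=65535, linktype=1 (Ethernet)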
if self.fh:
self.fh.write(
struct.pack('IHHIIII', 0xa1b2c3d4, 2, 4, 0, 0, 65535, 1))
# overrides Output.write to write session as PCAP
# data flow is Output.dump->pcapwriter.write
def write(self, pktlen, pktdata, ts):
if self.fh:
self.fh.write(
struct.pack('II', int(ts), int((ts - int(ts)) * 1000000)))
# captured length, original length
self.fh.write(struct.pack('II', len(pktdata), pktlen))
self.fh.write(pktdata)
class SessionWriter(Output):
'''writes the session to one or more files'''
def __init__(self, session=None, **kw):
self.file = kw.get('session', session)
self.dir = kw.get('direction', 'both')
self.mode = kw.get('mode', 'a')
self.timeformat = (kw.get('timeformat', self._DEFAULT_TIMEFORMAT))
self.fieldnames = []
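        # nb: Output.__init__ is not called here; parse() only needs the class
        # defaults plus timeformat/fieldnames on this path (_FILTER_EXTRA is
        # False, so the 'extra' handling is skipped)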
def write(self, obj, **kwargs):
out = None
kw = dict(**kwargs)
# if a session object with info() and data() methods (conn or blob, but
# not packet)
try:
kw.update(**obj.info()) # get object info
kw = self.parse(**kw)
if self.dir == 'both':
ds = [None]
elif self.dir == 'split':
ds = ['cs', 'sc']
else:
ds = [self.dir]
for d in ds:
kw.update(direction=d if d else 'both') # set direction
# format filename and open
out = FileOutput(self.file % kw, mode=self.mode)
# write obj data for direction
out.fh.write(obj.data(direction=d))
out.close()
        except AttributeError:  # if not a session object
# build filename from kw
out = FileOutput(self.file % kw, mode=self.mode)
out.fh.write(str(obj))
out.close()
class QueueOutput(Output):
'''pipes pickled packets to parent process'''
def __init__(self, q, **kwargs):
self.queue = q
Output.__init__(self, **kwargs)
def write(self, *args, **kw): self.dispatch('write', *args, **kw)
def alert(self, *args, **kw): self.dispatch('alert', *args, **kw)
def dump(self, *args, **kw): self.dispatch('dump', *args, **kw)
def dispatch(self, m, *args, **kw): # takes (method,...) to Q
self.queue.put((m, args, kw))
def close(self):
self.queue.close()
Output.close(self)
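# Parent-side sketch (an assumption about the consuming process): pop tuples
# off the queue and replay them into a real Output, e.g.
#   m, args, kw = q.get()
#   real_output.dispatch(m, *args, **kw)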
# default output module
obj = TextOutput
# --- end of lib/output/output.py ---
"""Auto-generated file, do not edit by hand. MQ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_MQ = PhoneMetadata(id='MQ', country_code=596, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[56]\\d{8}', possible_number_pattern='\\d{9}'),
fixed_line=PhoneNumberDesc(national_number_pattern='596(?:0[2-5]|[12]0|3[05-9]|4[024-8]|[5-7]\\d|89|9[4-8])\\d{4}', possible_number_pattern='\\d{9}', example_number='596301234'),
mobile=PhoneNumberDesc(national_number_pattern='696(?:[0-479]\\d|5[01]|8[0-689])\\d{4}', possible_number_pattern='\\d{9}', example_number='696201234'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='0',
national_prefix_for_parsing='0',
number_format=[NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})(\\d{2})', format=u'\\1 \\2 \\3 \\4', national_prefix_formatting_rule=u'0\\1')])
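# Usage sketch (assumes the top-level phonenumbers package; the digits are the
# fixed-line example_number above, dialled with national prefix '0'):
#   import phonenumbers
#   n = phonenumbers.parse("0596301234", "MQ")
#   phonenumbers.is_valid_number(n)  # True for this example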
# --- end of python/phonenumbers/data/region_MQ.py ---
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VpnSiteArgs', 'VpnSite']
@pulumi.input_type
class VpnSiteArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
address_space: Optional[pulumi.Input['AddressSpaceArgs']] = None,
bgp_properties: Optional[pulumi.Input['BgpSettingsArgs']] = None,
device_properties: Optional[pulumi.Input['DevicePropertiesArgs']] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input['SubResourceArgs']] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a VpnSite resource.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input['AddressSpaceArgs'] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input['BgpSettingsArgs'] bgp_properties: The set of bgp properties.
:param pulumi.Input['DevicePropertiesArgs'] device_properties: The device properties.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input['SubResourceArgs'] virtual_wan: The VirtualWAN to which the vpnSite belongs.
:param pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]] vpn_site_links: List of all vpn site links.
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
if address_space is not None:
pulumi.set(__self__, "address_space", address_space)
if bgp_properties is not None:
pulumi.set(__self__, "bgp_properties", bgp_properties)
if device_properties is not None:
pulumi.set(__self__, "device_properties", device_properties)
if id is not None:
pulumi.set(__self__, "id", id)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if is_security_site is not None:
pulumi.set(__self__, "is_security_site", is_security_site)
if location is not None:
pulumi.set(__self__, "location", location)
if site_key is not None:
pulumi.set(__self__, "site_key", site_key)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if virtual_wan is not None:
pulumi.set(__self__, "virtual_wan", virtual_wan)
if vpn_site_links is not None:
pulumi.set(__self__, "vpn_site_links", vpn_site_links)
if vpn_site_name is not None:
pulumi.set(__self__, "vpn_site_name", vpn_site_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name of the VpnSite.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> Optional[pulumi.Input['AddressSpaceArgs']]:
"""
The AddressSpace that contains an array of IP address ranges.
"""
return pulumi.get(self, "address_space")
@address_space.setter
def address_space(self, value: Optional[pulumi.Input['AddressSpaceArgs']]):
pulumi.set(self, "address_space", value)
@property
@pulumi.getter(name="bgpProperties")
def bgp_properties(self) -> Optional[pulumi.Input['BgpSettingsArgs']]:
"""
The set of bgp properties.
"""
return pulumi.get(self, "bgp_properties")
@bgp_properties.setter
def bgp_properties(self, value: Optional[pulumi.Input['BgpSettingsArgs']]):
pulumi.set(self, "bgp_properties", value)
@property
@pulumi.getter(name="deviceProperties")
def device_properties(self) -> Optional[pulumi.Input['DevicePropertiesArgs']]:
"""
The device properties.
"""
return pulumi.get(self, "device_properties")
@device_properties.setter
def device_properties(self, value: Optional[pulumi.Input['DevicePropertiesArgs']]):
pulumi.set(self, "device_properties", value)
@property
@pulumi.getter
def id(self) -> Optional[pulumi.Input[str]]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@id.setter
def id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "id", value)
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[pulumi.Input[str]]:
"""
The ip-address for the vpn-site.
"""
return pulumi.get(self, "ip_address")
@ip_address.setter
def ip_address(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_address", value)
@property
@pulumi.getter(name="isSecuritySite")
def is_security_site(self) -> Optional[pulumi.Input[bool]]:
"""
IsSecuritySite flag.
"""
return pulumi.get(self, "is_security_site")
@is_security_site.setter
def is_security_site(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "is_security_site", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="siteKey")
def site_key(self) -> Optional[pulumi.Input[str]]:
"""
The key for vpn-site that can be used for connections.
"""
return pulumi.get(self, "site_key")
@site_key.setter
def site_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "site_key", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="virtualWan")
def virtual_wan(self) -> Optional[pulumi.Input['SubResourceArgs']]:
"""
The VirtualWAN to which the vpnSite belongs.
"""
return pulumi.get(self, "virtual_wan")
@virtual_wan.setter
def virtual_wan(self, value: Optional[pulumi.Input['SubResourceArgs']]):
pulumi.set(self, "virtual_wan", value)
@property
@pulumi.getter(name="vpnSiteLinks")
def vpn_site_links(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]:
"""
List of all vpn site links.
"""
return pulumi.get(self, "vpn_site_links")
@vpn_site_links.setter
def vpn_site_links(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VpnSiteLinkArgs']]]]):
pulumi.set(self, "vpn_site_links", value)
@property
@pulumi.getter(name="vpnSiteName")
def vpn_site_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the VpnSite being created or updated.
"""
return pulumi.get(self, "vpn_site_name")
@vpn_site_name.setter
def vpn_site_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "vpn_site_name", value)
class VpnSite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['AddressSpaceArgs']] address_space: The AddressSpace that contains an array of IP address ranges.
:param pulumi.Input[pulumi.InputType['BgpSettingsArgs']] bgp_properties: The set of bgp properties.
:param pulumi.Input[pulumi.InputType['DevicePropertiesArgs']] device_properties: The device properties.
:param pulumi.Input[str] id: Resource ID.
:param pulumi.Input[str] ip_address: The ip-address for the vpn-site.
:param pulumi.Input[bool] is_security_site: IsSecuritySite flag.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The resource group name of the VpnSite.
:param pulumi.Input[str] site_key: The key for vpn-site that can be used for connections.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Resource tags.
:param pulumi.Input[pulumi.InputType['SubResourceArgs']] virtual_wan: The VirtualWAN to which the vpnSite belongs.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]] vpn_site_links: List of all vpn site links.
:param pulumi.Input[str] vpn_site_name: The name of the VpnSite being created or updated.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: VpnSiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
VpnSite Resource.
:param str resource_name: The name of the resource.
:param VpnSiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VpnSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
address_space: Optional[pulumi.Input[pulumi.InputType['AddressSpaceArgs']]] = None,
bgp_properties: Optional[pulumi.Input[pulumi.InputType['BgpSettingsArgs']]] = None,
device_properties: Optional[pulumi.Input[pulumi.InputType['DevicePropertiesArgs']]] = None,
id: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_security_site: Optional[pulumi.Input[bool]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
site_key: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
virtual_wan: Optional[pulumi.Input[pulumi.InputType['SubResourceArgs']]] = None,
vpn_site_links: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VpnSiteLinkArgs']]]]] = None,
vpn_site_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__["address_space"] = address_space
__props__.__dict__["bgp_properties"] = bgp_properties
__props__.__dict__["device_properties"] = device_properties
__props__.__dict__["id"] = id
__props__.__dict__["ip_address"] = ip_address
__props__.__dict__["is_security_site"] = is_security_site
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["site_key"] = site_key
__props__.__dict__["tags"] = tags
__props__.__dict__["virtual_wan"] = virtual_wan
__props__.__dict__["vpn_site_links"] = vpn_site_links
__props__.__dict__["vpn_site_name"] = vpn_site_name
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:network/v20200301:VpnSite"), pulumi.Alias(type_="azure-native:network:VpnSite"), pulumi.Alias(type_="azure-nextgen:network:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20180801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181001:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20181201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20190901:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191101:VpnSite"), pulumi.Alias(type_="azure-native:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20191201:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200401:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200501:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200601:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200701:VpnSite"), pulumi.Alias(type_="azure-native:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20200801:VpnSite"), pulumi.Alias(type_="azure-native:network/v20201101:VpnSite"), pulumi.Alias(type_="azure-nextgen:network/v20201101:VpnSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(VpnSite, __self__).__init__(
'azure-native:network/v20200301:VpnSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'VpnSite':
"""
Get an existing VpnSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = VpnSiteArgs.__new__(VpnSiteArgs)
__props__.__dict__["address_space"] = None
__props__.__dict__["bgp_properties"] = None
__props__.__dict__["device_properties"] = None
__props__.__dict__["etag"] = None
__props__.__dict__["ip_address"] = None
__props__.__dict__["is_security_site"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["site_key"] = None
__props__.__dict__["tags"] = None
__props__.__dict__["type"] = None
__props__.__dict__["virtual_wan"] = None
__props__.__dict__["vpn_site_links"] = None
return VpnSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="addressSpace")
def address_space(self) -> pulumi.Output[Optional['outputs.AddressSpaceResponse']]:
"""
The AddressSpace that contains an array of IP address ranges.
"""
return pulumi.get(self, "address_space")
@property
@pulumi.getter(name="bgpProperties")
def bgp_properties(self) -> pulumi.Output[Optional['outputs.BgpSettingsResponse']]:
"""
The set of bgp properties.
"""
return pulumi.get(self, "bgp_properties")
@property
@pulumi.getter(name="deviceProperties")
def device_properties(self) -> pulumi.Output[Optional['outputs.DevicePropertiesResponse']]:
"""
The device properties.
"""
return pulumi.get(self, "device_properties")
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[Optional[str]]:
"""
The ip-address for the vpn-site.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="isSecuritySite")
def is_security_site(self) -> pulumi.Output[Optional[bool]]:
"""
IsSecuritySite flag.
"""
return pulumi.get(self, "is_security_site")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioning state of the VPN site resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="siteKey")
def site_key(self) -> pulumi.Output[Optional[str]]:
"""
The key for vpn-site that can be used for connections.
"""
return pulumi.get(self, "site_key")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualWan")
def virtual_wan(self) -> pulumi.Output[Optional['outputs.SubResourceResponse']]:
"""
The VirtualWAN to which the vpnSite belongs.
"""
return pulumi.get(self, "virtual_wan")
@property
@pulumi.getter(name="vpnSiteLinks")
def vpn_site_links(self) -> pulumi.Output[Optional[Sequence['outputs.VpnSiteLinkResponse']]]:
"""
List of all vpn site links.
"""
return pulumi.get(self, "vpn_site_links")
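# Usage sketch (resource names and the resource group are hypothetical, and it
# assumes SubResourceArgs is exported from this module alongside VpnSite):
#   import pulumi_azure_native.network.v20200301 as network
#   site = network.VpnSite("branch-site",
#       resource_group_name="example-rg",
#       ip_address="203.0.113.10",
#       virtual_wan=network.SubResourceArgs(id=wan.id))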
# --- end of sdk/python/pulumi_azure_native/network/v20200301/vpn_site.py ---
# -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pathlib import Path
import click
from rnacentral_pipeline.rnacentral import attempted, r2dt
@click.group("r2dt")
def cli():
"""
A group of commands for parsing data from secondary structures into an
importable format.
"""
pass
@cli.command("process-svgs")
@click.option("--allow-missing", is_flag=True, default=False)
@click.argument("model_info", type=click.File("r"))
@click.argument("directory", type=click.Path())
@click.argument("output", type=click.File("w"))
def process_svgs(model_info, directory, output, allow_missing=False):
"""
Process all SVG secondary structures in the given directory and produce a
single data file that can be imported into the database.
"""
r2dt.write(model_info, directory, output, allow_missing=allow_missing)
@cli.group("should-show")
def should_show():
"""
Some commands relating to building a model for should show as well as
running it.
"""
@should_show.command("convert-sheet")
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def convert_sheet(filename, output):
"""
This command is to convert a downloaded google sheet csv into a csv that can
be used for training data. Often we will build a spreadsheet of example URS
and then use that to build a training set. It is nice since you can embedd
an SVG in google sheets so it is fast for us to compare several of them.
In order to move that back into the training data you can download that
sheet as a CSV and then run this command on it to build the CSV that is used
    in training. It requires there to be a 'urs' and a 'Labeled Should show' column
to build the CSV. The values in labeled should show must be true/false
(ignoring case).
"""
r2dt.write_converted_sheet(filename, output)
@should_show.command("fetch-data")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def fetch_training_data(filename, output, db_url=None):
"""
This builds a CSV file of training data to use for the model building. I
    keep it separate so I can build a training csv and play with it interactively
before committing the final modeling building logic to the pipeline.
"""
r2dt.write_training_data(filename, db_url, output)
@should_show.command("inspect-data")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def fetch_inspect_data(filename, output, db_url=None):
"""
This is the command to use when trying to fetch more examples to add to the
training set. This will fetch some information that is useful for a person
to evaluate a diagram and decide if it should be true/false in the training
set.
"""
r2dt.write_training_data(filename, db_url, output)
@should_show.command("build-model")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("training-info", type=click.File("r"))
@click.argument("model", type=click.Path())
def build_model(training_info, model, db_url=None):
"""
    This builds a model given the training information. The training
information should be a csv file of:
URS,flag
    The flag must be 1 or 0 to indicate if the URS should be shown or not. This
    will fetch the data like the fetch-data command but will then build a model
    and write it out to the output file directly.
"""
r2dt.build_model(training_info, db_url, Path(model))
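# A minimal training-info CSV for `build-model` (the URS ids are made up):
#
#   URS0000000001,1
#   URS0000000002,0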
@should_show.command("compute")
@click.option("--db-url", envvar="PGDATABASE")
@click.argument("model", type=click.Path())
@click.argument("filename", type=click.File("r"))
@click.argument("output", type=click.File("w"))
def write_should_show(model, filename, output, db_url=None):
"""
This computes the should show values for the data in the given file and a
file listing urs ids to use. The data needed for the URS will be fetched
from the database. This is meant to operate on large batches, like
relabeling the entire database.
"""
r2dt.write_should_show(model, filename, db_url, output)
@cli.group("model-info")
def model_info():
"""
Commands for parsing and generating data files we can import into the
database as model info files.
"""
pass
@model_info.command("crw")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def crw_model_info(filename, output):
"""
Parse the CRW metadata file and produce
"""
r2dt.write_crw(filename, output)
@model_info.command("ribovision")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def ribovision_model_info(filename, output):
"""
Parse the metadata.tsv file from R2DT for Ribovision models to
produce something we can put in our database.
"""
r2dt.write_ribovision(filename, output)
@model_info.command("gtrnadb")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def gtrnadb_model_info(filename, output):
"""
Parse the metadata.tsv file from R2DT for gtrnadb models to
produce something we can put in our database.
"""
r2dt.write_gtrnadb(filename, output)
@model_info.command("rnase-p")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def rnase_p_model_info(filename, output):
"""
    Parse the metadata.tsv file from R2DT for RNase P models to
produce something we can put in our database.
"""
r2dt.write_rnase_p(filename, output)
@cli.command("create-attempted")
@click.argument("filename", type=click.File("r"))
@click.argument("output", default="-", type=click.File("w"))
def r2dt_create_attempted(filename, output):
attempted.r2dt(filename, output)
@cli.command("publish")
@click.option("--suffix", default="")
@click.option("--allow-missing", is_flag=True, default=False)
@click.argument("model_info", type=click.File("r"))
@click.argument(
"directory",
type=click.Path(
writable=False,
dir_okay=True,
file_okay=False,
),
)
@click.argument(
"output",
type=click.Path(
writable=True,
dir_okay=True,
file_okay=False,
),
)
def r2dt_publish(model_info, directory, output, allow_missing, suffix=""):
r2dt.publish(
model_info, directory, output, allow_missing=allow_missing, suffix=suffix
)
@cli.command("prepare-s3")
@click.option("--allow-missing", is_flag=True, default=False)
@click.argument("model_info", type=click.File("r"))
@click.argument(
"directory",
type=click.Path(
writable=False,
dir_okay=True,
file_okay=False,
),
)
@click.argument(
"output",
type=click.Path(
writable=True,
dir_okay=True,
file_okay=False,
),
)
@click.argument("file_list", type=click.Path())
def r2dt_prepare_s3(model_info, directory, output, file_list, allow_missing):
file_list = Path(file_list)
output = Path(output)
r2dt.prepare_s3(
model_info, directory, output, file_list, allow_missing=allow_missing
)
# --- end of rnacentral_pipeline/cli/r2dt.py ---
import os
import pyudev
import psutil
import logging
import time
from arm.ripper import music_brainz
from arm.ui import db
from arm.config.config import cfg
from flask_login import LoginManager, current_user, login_user, UserMixin # noqa: F401
from prettytable import PrettyTable
hidden_attribs = ("OMDB_API_KEY", "EMBY_USERID", "EMBY_PASSWORD", "EMBY_API_KEY", "PB_KEY", "IFTTT_KEY", "PO_KEY",
"PO_USER_KEY", "PO_APP_KEY", "ARM_API_KEY", "TMDB_API_KEY")
HIDDEN_VALUE = "<hidden>"
class Job(db.Model):
job_id = db.Column(db.Integer, primary_key=True)
arm_version = db.Column(db.String(20))
crc_id = db.Column(db.String(63))
logfile = db.Column(db.String(256))
start_time = db.Column(db.DateTime)
stop_time = db.Column(db.DateTime)
job_length = db.Column(db.String(12))
status = db.Column(db.String(32))
stage = db.Column(db.String(63))
no_of_titles = db.Column(db.Integer)
title = db.Column(db.String(256))
title_auto = db.Column(db.String(256))
title_manual = db.Column(db.String(256))
year = db.Column(db.String(4))
year_auto = db.Column(db.String(4))
year_manual = db.Column(db.String(4))
video_type = db.Column(db.String(20))
video_type_auto = db.Column(db.String(20))
video_type_manual = db.Column(db.String(20))
imdb_id = db.Column(db.String(15))
imdb_id_auto = db.Column(db.String(15))
imdb_id_manual = db.Column(db.String(15))
poster_url = db.Column(db.String(256))
poster_url_auto = db.Column(db.String(256))
poster_url_manual = db.Column(db.String(256))
devpath = db.Column(db.String(15))
mountpoint = db.Column(db.String(20))
hasnicetitle = db.Column(db.Boolean)
errors = db.Column(db.Text)
disctype = db.Column(db.String(20)) # dvd/bluray/data/music/unknown
label = db.Column(db.String(256))
path = db.Column(db.String(256))
ejected = db.Column(db.Boolean)
updated = db.Column(db.Boolean)
pid = db.Column(db.Integer)
pid_hash = db.Column(db.Integer)
tracks = db.relationship('Track', backref='job', lazy='dynamic')
config = db.relationship('Config', uselist=False, backref="job")
def __init__(self, devpath):
"""Return a disc object"""
self.devpath = devpath
self.mountpoint = "/mnt" + devpath
self.hasnicetitle = False
self.video_type = "unknown"
self.ejected = False
self.updated = False
if cfg['VIDEOTYPE'] != "auto":
self.video_type = cfg['VIDEOTYPE']
self.parse_udev()
self.get_pid()
def parse_udev(self):
"""Parse udev for properties of current disc"""
context = pyudev.Context()
device = pyudev.Devices.from_device_file(context, self.devpath)
self.disctype = "unknown"
for key, value in device.items():
if key == "ID_FS_LABEL":
self.label = value
if value == "iso9660":
self.disctype = "data"
elif key == "ID_CDROM_MEDIA_BD":
self.disctype = "bluray"
elif key == "ID_CDROM_MEDIA_DVD":
self.disctype = "dvd"
elif key == "ID_CDROM_MEDIA_TRACK_COUNT_AUDIO":
self.disctype = "music"
else:
pass
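        # e.g. a video DVD typically exposes ID_CDROM_MEDIA_DVD=1 (plus its
        # volume label in ID_FS_LABEL), so disctype ends up "dvd"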
def get_pid(self):
pid = os.getpid()
p = psutil.Process(pid)
self.pid = pid
self.pid_hash = hash(p)
def get_disc_type(self, found_hvdvd_ts):
if self.disctype == "music":
logging.debug("Disc is music.")
self.label = music_brainz.main(self)
elif os.path.isdir(self.mountpoint + "/VIDEO_TS"):
logging.debug(f"Found: {self.mountpoint}/VIDEO_TS")
self.disctype = "dvd"
elif os.path.isdir(self.mountpoint + "/video_ts"):
logging.debug(f"Found: {self.mountpoint}/video_ts")
self.disctype = "dvd"
elif os.path.isdir(self.mountpoint + "/BDMV"):
logging.debug(f"Found: {self.mountpoint}/BDMV")
self.disctype = "bluray"
elif os.path.isdir(self.mountpoint + "/HVDVD_TS"):
logging.debug(f"Found: {self.mountpoint}/HVDVD_TS")
# do something here
elif found_hvdvd_ts:
logging.debug("Found file: HVDVD_TS")
# do something here too
else:
logging.debug("Did not find valid dvd/bd files. Changing disctype to 'data'")
self.disctype = "data"
def identify_audio_cd(self):
"""
Get the title for audio cds to use for the logfile name.
Needs the job class passed into it so it can be forwarded to mb
return - only the logfile - setup_logging() adds the full path
"""
# Use the music label if we can find it - defaults to music_cd.log
disc_id = music_brainz.get_disc_id(self)
mb_title = music_brainz.get_title(disc_id, self)
if mb_title == "not identified":
self.label = self.title = "not identified"
logfile = "music_cd.log"
new_log_file = f"music_cd_{round(time.time() * 100)}.log"
else:
logfile = f"{mb_title}.log"
new_log_file = f"{mb_title}_{round(time.time() * 100)}.log"
temp_log_full = os.path.join(cfg['LOGPATH'], logfile)
logfile = new_log_file if os.path.isfile(temp_log_full) else logfile
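        # e.g. "Abbey Road.log", or "Abbey Road_<timestamp>.log" if that file
        # already exists (the title here is illustrative)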
return logfile
def __str__(self):
"""Returns a string of the object"""
s = self.__class__.__name__ + ": "
for attr, value in self.__dict__.items():
s = s + "(" + str(attr) + "=" + str(value) + ") "
return s
def pretty_table(self):
"""Returns a string of the prettytable"""
x = PrettyTable()
x.field_names = ["Config", "Value"]
x._max_width = {"Config": 50, "Value": 60}
for attr, value in self.__dict__.items():
if attr == "config":
x.add_row([str(attr), str(value.pretty_table())])
else:
x.add_row([str(attr), str(value)])
return str(x.get_string())
def get_d(self):
r = {}
for key, value in self.__dict__.items():
if '_sa_instance_state' not in key:
r[str(key)] = str(value)
return r
def __repr__(self):
return '<Job {}>'.format(self.label)
def eject(self):
"""Eject disc if it hasn't previously been ejected"""
if not self.ejected:
self.ejected = True
try:
if os.system("umount " + self.devpath):
logging.debug("we unmounted disc" + self.devpath)
if os.system("eject " + self.devpath):
logging.debug("we ejected disc" + self.devpath)
self.ejected = True
else:
logging.debug("failed to eject" + self.devpath)
except Exception as e:
logging.debug(self.devpath + " couldn't be ejected " + str(e))
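# Rough usage sketch (illustrative only -- the device path, cfg contents and
# logging setup are assumptions, not part of this module):
#     job = Job("/dev/sr0")                  # parses udev and records the PID
#     job.get_disc_type(found_hvdvd_ts=False)
#     logging.info(job.pretty_table())       # config renders as a nested table
#     job.eject()                            # unmounts/ejects at most once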
class Track(db.Model):
track_id = db.Column(db.Integer, primary_key=True)
job_id = db.Column(db.Integer, db.ForeignKey('job.job_id'))
track_number = db.Column(db.String(4))
length = db.Column(db.Integer)
aspect_ratio = db.Column(db.String(20))
fps = db.Column(db.Float)
main_feature = db.Column(db.Boolean)
basename = db.Column(db.String(256))
filename = db.Column(db.String(256))
orig_filename = db.Column(db.String(256))
new_filename = db.Column(db.String(256))
ripped = db.Column(db.Boolean)
status = db.Column(db.String(32))
error = db.Column(db.Text)
source = db.Column(db.String(32))
def __init__(self, job_id, track_number, length, aspect_ratio, fps, main_feature, source, basename, filename):
"""Return a track object"""
self.job_id = job_id
self.track_number = track_number
self.length = length
self.aspect_ratio = aspect_ratio
self.fps = fps
self.main_feature = main_feature
self.source = source
self.basename = basename
self.filename = filename
self.ripped = False
    def __repr__(self):
        return '<Track {}>'.format(self.track_number)
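# Sketch of how a Track row might be created during a rip (all values here are
# illustrative assumptions, not taken from this codebase):
#     track = Track(job_id=job.job_id, track_number="1", length=7260,
#                   aspect_ratio="16:9", fps=23.976, main_feature=True,
#                   source="MakeMKV", basename="feature", filename="feature.mkv")
#     db.session.add(track)
#     db.session.commit()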
class Config(db.Model):
CONFIG_ID = db.Column(db.Integer, primary_key=True)
job_id = db.Column(db.Integer, db.ForeignKey('job.job_id'))
ARM_CHECK_UDF = db.Column(db.Boolean)
GET_VIDEO_TITLE = db.Column(db.Boolean)
SKIP_TRANSCODE = db.Column(db.Boolean)
VIDEOTYPE = db.Column(db.String(25))
MINLENGTH = db.Column(db.String(6))
MAXLENGTH = db.Column(db.String(6))
MANUAL_WAIT = db.Column(db.Boolean)
MANUAL_WAIT_TIME = db.Column(db.Integer)
RAW_PATH = db.Column(db.String(255))
TRANSCODE_PATH = db.Column(db.String(255))
COMPLETED_PATH = db.Column(db.String(255))
EXTRAS_SUB = db.Column(db.String(255))
INSTALLPATH = db.Column(db.String(255))
LOGPATH = db.Column(db.String(255))
LOGLEVEL = db.Column(db.String(255))
LOGLIFE = db.Column(db.Integer)
DBFILE = db.Column(db.String(255))
WEBSERVER_IP = db.Column(db.String(25))
WEBSERVER_PORT = db.Column(db.Integer)
SET_MEDIA_PERMISSIONS = db.Column(db.Boolean)
CHMOD_VALUE = db.Column(db.Integer)
SET_MEDIA_OWNER = db.Column(db.Boolean)
CHOWN_USER = db.Column(db.String(50))
CHOWN_GROUP = db.Column(db.String(50))
RIPMETHOD = db.Column(db.String(25))
MKV_ARGS = db.Column(db.String(25))
DELRAWFILES = db.Column(db.Boolean)
HASHEDKEYS = db.Column(db.Boolean)
HB_PRESET_DVD = db.Column(db.String(256))
HB_PRESET_BD = db.Column(db.String(256))
DEST_EXT = db.Column(db.String(10))
HANDBRAKE_CLI = db.Column(db.String(25))
MAINFEATURE = db.Column(db.Boolean)
HB_ARGS_DVD = db.Column(db.String(256))
HB_ARGS_BD = db.Column(db.String(256))
EMBY_REFRESH = db.Column(db.Boolean)
EMBY_SERVER = db.Column(db.String(25))
EMBY_PORT = db.Column(db.String(6))
EMBY_CLIENT = db.Column(db.String(25))
EMBY_DEVICE = db.Column(db.String(50))
EMBY_DEVICEID = db.Column(db.String(128))
EMBY_USERNAME = db.Column(db.String(50))
EMBY_USERID = db.Column(db.String(128))
EMBY_PASSWORD = db.Column(db.String(128))
EMBY_API_KEY = db.Column(db.String(64))
NOTIFY_RIP = db.Column(db.Boolean)
NOTIFY_TRANSCODE = db.Column(db.Boolean)
PB_KEY = db.Column(db.String(64))
IFTTT_KEY = db.Column(db.String(64))
IFTTT_EVENT = db.Column(db.String(25))
PO_USER_KEY = db.Column(db.String(64))
PO_APP_KEY = db.Column(db.String(64))
OMDB_API_KEY = db.Column(db.String(64))
def __init__(self, c, job_id):
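        # c is expected to be a plain dict of config values; __dict__.update(c)
        # copies each key straight onto the instance, so keys should match the
        # column names above (unknown keys simply become unmapped attributes).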
self.__dict__.update(c)
self.job_id = job_id
def list_params(self):
"""Returns a string of the object"""
s = self.__class__.__name__ + ": "
for attr, value in self.__dict__.items():
if s:
s = s + "\n"
if str(attr) in hidden_attribs and value:
value = HIDDEN_VALUE
s = s + str(attr) + ":" + str(value)
return s
def __str__(self):
"""Returns a string of the object"""
s = self.__class__.__name__ + ": "
for attr, value in self.__dict__.items():
if str(attr) in hidden_attribs and value:
value = HIDDEN_VALUE
s = s + "(" + str(attr) + "=" + str(value) + ") "
return s
def pretty_table(self):
"""Returns a string of the prettytable"""
x = PrettyTable()
x.field_names = ["Config", "Value"]
x._max_width = {"Config": 20, "Value": 30}
for attr, value in self.__dict__.items():
if str(attr) in hidden_attribs and value:
value = HIDDEN_VALUE
x.add_row([str(attr), str(value)])
return str(x.get_string())
def get_d(self):
r = {}
for key, value in self.__dict__.items():
if str(key) not in hidden_attribs:
r[str(key)] = str(value)
return r
class User(db.Model, UserMixin):
user_id = db.Column(db.Integer, index=True, primary_key=True)
email = db.Column(db.String(64))
password = db.Column(db.String(128))
hash = db.Column(db.String(256))
def __init__(self, email=None, password=None, hashed=None):
self.email = email
self.password = password
self.hash = hashed
def __repr__(self):
return '<User %r>' % (self.email)
    def get_id(self):
        # Flask-Login expects get_id() to return a string
        return str(self.user_id)
class AlembicVersion(db.Model):
version_num = db.Column(db.String(36), autoincrement=False, primary_key=True)
def __init__(self, version=None):
self.version_num = version
class UISettings(db.Model):
id = db.Column(db.Integer, autoincrement=True, primary_key=True)
use_icons = db.Column(db.Boolean)
save_remote_images = db.Column(db.Boolean)
bootstrap_skin = db.Column(db.String(64))
language = db.Column(db.String(4))
index_refresh = db.Column(db.Integer)
database_limit = db.Column(db.Integer)
def __init__(self, use_icons=None, save_remote_images=None, bootstrap_skin=None, language=None, index_refresh=None,
database_limit=None):
self.use_icons = use_icons
self.save_remote_images = save_remote_images
self.bootstrap_skin = bootstrap_skin
self.language = language
self.index_refresh = index_refresh
self.database_limit = database_limit
def __repr__(self):
return '<UISettings %r>' % self.id
def __str__(self):
"""Returns a string of the object"""
s = self.__class__.__name__ + ": "
for attr, value in self.__dict__.items():
s = s + "(" + str(attr) + "=" + str(value) + ") "
return s
def get_d(self):
r = {}
for key, value in self.__dict__.items():
if '_sa_instance_state' not in key:
r[str(key)] = str(value)
return r
| arm/models/models.py | 14,015 | Return a disc object Return a track object Returns a string of the object Returns a string of the object Returns a string of the object Eject disc if it hasn't previously been ejected Get the title for audio cds to use for the logfile name. Needs the job class passed into it so it can be forwarded to mb return - only the logfile - setup_logging() adds the full path Returns a string of the object Parse udev for properties of current disc Returns a string of the prettytable Returns a string of the prettytable noqa: F401 dvd/bluray/data/music/unknown do something here do something here too Use the music label if we can find it - defaults to music_cd.log | 662 | en | 0.628308 |
# Copyright 2020 XEBIALABS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# https://stackoverflow.com/questions/16910955/programmatically-configure-logback-appender?noredirect=1
#
import ch.qos.logback.core.Appender as LogAppender
import ch.qos.logback.core.util.COWArrayList as COWArrayList
import ch.qos.logback.classic.encoder.PatternLayoutEncoder as PatternLayoutEncoder
import ch.qos.logback.core.FileAppender as FileAppender
import org.slf4j.LoggerFactory as LoggerFactory
import ch.qos.logback.classic.Level as logLevels
import json
def getLogAppenders( loggerName="console" ):
loggerMap = []
myLogger = LoggerFactory.getLogger("logmanager")
loggerContext = LoggerFactory.getILoggerFactory()
myLogger.error("===================")
appenderMap = {}
    for logger in loggerContext.getLoggerList():
        appenderList = logger.iteratorForAppenders()
        while appenderList.hasNext():
            appender = appenderList.next()
            myLogger.error("Logger %s has appender %s" % (logger.getName(), appender.getName()))
            if appender.getName() not in appenderMap:
                # remember seen appenders so duplicates across loggers are listed once
                appenderMap[appender.getName()] = appender
                loggerMap.append({"name": appender.getName(), "appender": "NA"})
                myLogger.error("Appender %s: %s" % (appender.getName(), "NA"))
myLogger.error("===================")
return loggerMap
def createLogAppender( name, file ):
lc = LoggerFactory.getILoggerFactory()
ple = PatternLayoutEncoder()
ple.setPattern("%date %level [%thread] %logger{10} [%file:%line] %msg%n")
ple.setContext(lc)
ple.start()
fileAppender = FileAppender()
fileAppender.setFile(file)
fileAppender.setEncoder(ple)
fileAppender.setContext(lc)
fileAppender.start()
    logger = LoggerFactory.getLogger(name)
logger.addAppender(fileAppender)
#logger.setLevel(logLevels.DEBUG)
# set to true if root should log too
logger.setAdditive(True)
return logger
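# Example invocation (a sketch -- the endpoint prefix depends on how this
# script is registered; the parameter names match the request.query reads
# below, while the logger name and file path are made-up values):
#     GET .../logger/getLogAppenders?verb=create&string=com.example.app&file=/tmp/example.log
# creates a FileAppender writing /tmp/example.log for logger "com.example.app"
# and then returns the refreshed appender list as JSON.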
myLogger = LoggerFactory.getLogger("logmanager")
verb = "GET"
if request and request.query and request.query['verb']:
    verb = request.query['verb']
if verb == "create":
    string = request.query['string']
    file = request.query['file']
    myLogger.info("Setting %s to %s" % (string, file))
    createLogAppender(string, file)
loggerMap = getLogAppenders()
myLogger.error("%s" % json.dumps(loggerMap, indent=4, sort_keys=True))
response.entity = {"status": "OK", "data":loggerMap }
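# The response entity then looks roughly like (illustrative values):
#     {"status": "OK", "data": [{"name": "console", "appender": "NA"}]}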
| src/main/resources/restapi/logger/getLogAppenders.py | 3,408 | Copyright 2020 XEBIALABS Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. https://stackoverflow.com/questions/16910955/programmatically-configure-logback-appender?noredirect=1logger.setLevel(logLevels.DEBUG) set to true if root should log too | 1,214 | en | 0.837147 |
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from appcenter_sdk.models.clsDistributionGroupAppsDeleteRequest import DistributionGroupAppsDeleteRequest  # noqa: E501
from appcenter_sdk.rest import ApiException
class TestDistributionGroupAppsDeleteRequest(unittest.TestCase):
"""DistributionGroupAppsDeleteRequest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDistributionGroupAppsDeleteRequest(self):
"""Test DistributionGroupAppsDeleteRequest"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsDistributionGroupAppsDeleteRequest.DistributionGroupAppsDeleteRequest() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| sdks/python/test/test_DistributionGroupAppsDeleteRequest.py | 1,084 | DistributionGroupAppsDeleteRequest unit test stubs Test DistributionGroupAppsDeleteRequest App Center Client Microsoft Visual Studio App Center API # noqa: E501 OpenAPI spec version: preview Contact: benedetto.abbenanti@gmail.com Project Repository: https://github.com/b3nab/appcenter-sdks coding: utf-8 noqa: E501 FIXME: construct object with mandatory attributes with example values model = appcenter_sdk.models.clsDistributionGroupAppsDeleteRequest.DistributionGroupAppsDeleteRequest() noqa: E501 | 506 | en | 0.509512 |
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .tags import TagPatterns
class Criticality(object):
def __init__(self, critical_tags=None, non_critical_tags=None):
self.critical_tags = self._get_tag_patterns(critical_tags)
self.non_critical_tags = self._get_tag_patterns(non_critical_tags)
def _get_tag_patterns(self, tags):
return TagPatterns(tags) if not isinstance(tags, TagPatterns) else tags
def tag_is_critical(self, tag):
return self.critical_tags.match(tag)
def tag_is_non_critical(self, tag):
return self.non_critical_tags.match(tag)
def test_is_critical(self, test):
if self.critical_tags and not self.critical_tags.match(test.tags):
return False
return not self.non_critical_tags.match(test.tags)
def __bool__(self):
return bool(self.critical_tags or self.non_critical_tags)
    # Python 2 compatibility
def __nonzero__(self):
return self.__bool__()
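# Minimal usage sketch (tag values are illustrative, not from this module):
#     crit = Criticality(critical_tags=['smoke'], non_critical_tags=['wip'])
#     crit.tag_is_critical('smoke')    # True
#     # A test is critical only if it matches the critical patterns (when any
#     # are set) and does not match the non-critical patterns, so tags
#     # ['smoke'] -> critical, ['smoke', 'wip'] -> not critical.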
| src/robot/model/criticality.py | 1,527 | Copyright 2008-2015 Nokia Solutions and Networks Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.PY2 | 582 | en | 0.857814 |