hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa98c00f1e2784586e8ec1a8103f6480107e952e | 1,021 | py | Python | adaptive_filter/lms.py | runngezhang/pyaec | 4db53af0f418171a89a8e3a2dd26c28166b3aaae | [
"Apache-2.0"
] | 2 | 2021-07-21T11:57:15.000Z | 2021-07-23T08:33:19.000Z | adaptive_filter/lms.py | zqhy-bigtime/pyaec | ca2ae14eb1656bde7671cd6bc8b8865322a7ee5a | [
"Apache-2.0"
] | null | null | null | adaptive_filter/lms.py | zqhy-bigtime/pyaec | ca2ae14eb1656bde7671cd6bc8b8865322a7ee5a | [
"Apache-2.0"
] | 1 | 2021-06-18T09:06:39.000Z | 2021-06-18T09:06:39.000Z | # Copyright 2020 ewan xu<ewan_xu@outlook.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
""" least mean squares filter """
import numpy as np
def lms(x, d, N = 4, mu = 0.05):
    """Least mean squares (LMS) adaptive filter.

    Adapts an N-tap FIR filter so its output tracks the desired signal
    ``d`` from the input signal ``x``, returning the a-priori error at
    each adaptation step.

    Args:
        x: input (reference) signal, 1-D numpy array.
        d: desired signal, 1-D numpy array.
        N: number of filter taps (default 4).
        mu: adaptation step size (default 0.05).

    Returns:
        numpy.ndarray of length ``min(len(x), len(d)) - N`` holding the
        error ``d[n] - h . x_n`` at every step.
    """
    # FIX: the original block ended with dataset-extraction residue fused
    # onto the return line ("return e | 32.93... |"), a syntax error.
    L = min(len(x), len(d))
    h = np.zeros(N)      # filter taps, adapted each iteration
    e = np.zeros(L - N)  # per-step a-priori error
    for n in range(L - N):
        # Most recent N input samples, newest first.
        x_n = x[n:n + N][::-1]
        # Filter output and instantaneous error. (The original applied .T
        # to x_n, which is a no-op on a 1-D array; dropped.)
        e_n = d[n] - np.dot(h, x_n)
        # LMS weight update.
        h = h + mu * e_n * x_n
        e[n] = e_n
    return e
99c54803b9a8cf6d24cad012199abfced53a5f7a | 7,451 | py | Python | src/testproject/sdk/drivers/webdriver/remote.py | Fnckerpoi/python-opensdk | 37e528ea613686975d58b230c16647324271a0bd | [
"Apache-2.0"
] | 1 | 2021-07-15T06:57:21.000Z | 2021-07-15T06:57:21.000Z | src/testproject/sdk/drivers/webdriver/remote.py | xjc90s/python-opensdk | 5ae1032f16bbcda7405e8c2943ec0c1e0e941fbc | [
"Apache-2.0"
] | null | null | null | src/testproject/sdk/drivers/webdriver/remote.py | xjc90s/python-opensdk | 5ae1032f16bbcda7405e8c2943ec0c1e0e941fbc | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 TestProject (https://testproject.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from appium.webdriver.webdriver import WebDriver as AppiumWebDriver
from src.testproject.classes import StepSettings
from src.testproject.enums import EnvironmentVariable
from src.testproject.enums.report_type import ReportType
from src.testproject.helpers import (
ReportHelper,
LoggingHelper,
ConfigHelper,
AddonHelper,
)
from src.testproject.rest import ReportSettings
from src.testproject.sdk.exceptions import SdkException
from src.testproject.sdk.internal.agent import AgentClient
from src.testproject.sdk.internal.helpers import CustomAppiumCommandExecutor
from src.testproject.sdk.internal.reporter import Reporter
from src.testproject.sdk.internal.session import AgentSession
class Remote(AppiumWebDriver):
    """Used to create a new Android driver instance
    Args:
        desired_capabilities (dict): Automation session desired capabilities and options
        token (str): Developer token to be used to communicate with the Agent
        project_name (str): Project name to report
        job_name (str): Job name to report
        disable_reports (bool): set to True to disable all reporting (no report will be created on TestProject)
    Attributes:
        _desired_capabilities (dict): Automation session desired capabilities and options
        _agent_client (AgentClient): client responsible for communicating with the TestProject agent
        _agent_session (AgentSession): stores properties of the current agent session
        command_executor (CustomAppiumCommandExecutor): the HTTP command executor used to send commands
        w3c (bool): indicates whether or not the driver instance uses the W3C dialect
        session_id (str): contains the current session ID
    """
    # Singleton holder: only one active driver session is allowed at a time.
    __instance = None
    def __init__(
        self,
        desired_capabilities: dict = None,
        token: str = None,
        project_name: str = None,
        job_name: str = None,
        disable_reports: bool = False,
        report_type: ReportType = ReportType.CLOUD_AND_LOCAL,
        agent_url: str = None,
        report_name: str = None,
        report_path: str = None,
    ):
        # Enforce the singleton: a second construction while a session is
        # alive raises instead of silently replacing the first driver.
        if Remote.__instance is not None:
            raise SdkException("A driver session already exists")
        LoggingHelper.configure_logging()
        self._desired_capabilities = desired_capabilities
        # A token found in the environment takes precedence over the token
        # passed as an argument (the log line fires only when both exist).
        env_token = ConfigHelper.get_developer_token()
        if env_token is not None and token is not None:
            logging.info("Using token from environment variable...")
        self._token = env_token if env_token is not None else token
        if disable_reports:
            # Setting the project and job name to empty strings will cause the Agent to not initialize a report
            self._project_name = ""
            self._job_name = ""
        else:
            self._project_name = project_name if project_name is not None else ReportHelper.infer_project_name()
            if job_name:
                self._job_name = job_name
            else:
                self._job_name = ReportHelper.infer_job_name()
                # Can update job name at runtime if not specified.
                os.environ[EnvironmentVariable.TP_UPDATE_JOB_NAME.value] = "True"
        report_settings = ReportSettings(self._project_name, self._job_name, report_type, report_name, report_path)
        # Creating the AgentClient also establishes the agent session.
        self._agent_client: AgentClient = AgentClient(
            token=self._token,
            capabilities=self._desired_capabilities,
            agent_url=agent_url,
            report_settings=report_settings,
        )
        self._agent_session: AgentSession = self._agent_client.agent_session
        self.w3c = True if self._agent_session.dialect == "W3C" else False
        AppiumWebDriver.__init__(
            self,
            command_executor=self._agent_session.remote_address,
            desired_capabilities=self._desired_capabilities,
        )
        # Replace the stock executor with one that reports through the Agent.
        self.command_executor = CustomAppiumCommandExecutor(
            agent_client=self._agent_client,
            remote_server_addr=self._agent_session.remote_address,
        )
        self.command_executor.disable_reports = disable_reports
        # this ensures that mobile-specific commands are also available for our command executor
        self._addCommands()
        # Disable automatic command and test reports if Behave reporting is enabled.
        if os.getenv("TP_DISABLE_AUTO_REPORTING") == "True":
            self.command_executor.disable_command_reports = True
            self.command_executor.disable_auto_test_reports = True
        Remote.__instance = self
    @classmethod
    def instance(cls):
        """Returns the singleton instance of the driver object"""
        return Remote.__instance
    @property
    def step_settings(self):
        """Step settings currently applied by the command executor."""
        return self.command_executor.settings
    @step_settings.setter
    def step_settings(self, step_settings: StepSettings):
        self.command_executor.settings = step_settings
    def start_session(self, capabilities, browser_profile=None):
        """Sets capabilities and sessionId obtained from the Agent when creating the original session."""
        # Reuses the session created by the Agent instead of opening a new one.
        self.session_id = self._agent_session.session_id
        logging.info(f"Session ID is {self.session_id}")
    def report(self) -> Reporter:
        """Enables access to the TestProject reporting actions from the driver object"""
        return Reporter(self.command_executor)
    def addons(self) -> AddonHelper:
        """Enables access to the TestProject addon execution actions from the driver object
        Returns:
            AddonHelper: object giving access to addon proxy methods
        """
        return AddonHelper(self._agent_client, self.command_executor)
    def pause(self, milliseconds: int):
        """Delegates a pause of the given length to the command executor."""
        self.command_executor.pause(milliseconds)
    def update_job_name(self, job_name):
        """Updates the job name of the execution during runtime
        Args:
            job_name (str): updated job name to set for the execution.
        """
        self._agent_client.update_job_name(job_name=job_name)
    def quit(self):
        """Quits the driver and stops the session with the Agent, cleaning up after itself."""
        # Report any left over driver command reports
        self.command_executor.clear_stash()
        # Make instance available again
        Remote.__instance = None
        try:
            AppiumWebDriver.quit(self)
        except Exception:
            # Best-effort: the remote end may already be gone.
            pass
        # Stop the Agent client
        self.command_executor.agent_client.stop()
        # Clean up any environment variables set in the decorator
        for env_var in [
            EnvironmentVariable.TP_TEST_NAME,
            EnvironmentVariable.TP_PROJECT_NAME,
            EnvironmentVariable.TP_JOB_NAME,
        ]:
            EnvironmentVariable.remove(env_var)
| 38.407216 | 115 | 0.696417 |
f0742d8228aaf19136958a7bebc567fbdbbba56a | 80 | py | Python | deepmath/deephol/jup_predict/default/st_enc.py | aahadley/deepmath | ad4426323be64bacd4a6727f1e48f4e54ee1e259 | [
"Apache-2.0"
] | 8 | 2019-12-12T19:13:28.000Z | 2021-08-08T09:29:35.000Z | deepmath/deephol/jup_predict/default/st_enc.py | aahadley/deepmath | ad4426323be64bacd4a6727f1e48f4e54ee1e259 | [
"Apache-2.0"
] | null | null | null | deepmath/deephol/jup_predict/default/st_enc.py | aahadley/deepmath | ad4426323be64bacd4a6727f1e48f4e54ee1e259 | [
"Apache-2.0"
] | 2 | 2020-10-02T11:55:03.000Z | 2021-09-26T23:53:25.000Z |
def _proof_state_encoding(predictor, state_emb):
return state_emb.goal_emb
| 20 | 48 | 0.8125 |
10f7137253d0a5b5ead6ce3b3e9e32fc53d3d10b | 390 | py | Python | boared/boared/wsgi.py | joeltio/boared-django | 51d5a969af0130753dfe61bdfe2191ee1685f4b5 | [
"MIT"
] | null | null | null | boared/boared/wsgi.py | joeltio/boared-django | 51d5a969af0130753dfe61bdfe2191ee1685f4b5 | [
"MIT"
] | null | null | null | boared/boared/wsgi.py | joeltio/boared-django | 51d5a969af0130753dfe61bdfe2191ee1685f4b5 | [
"MIT"
] | null | null | null | """
WSGI config for boared project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "boared.settings")
application = get_wsgi_application()
| 22.941176 | 78 | 0.784615 |
bf772276760dd81a9673e1c6795e3f467200e219 | 629 | py | Python | user/migrations/0023_auto_20201217_1038.py | thapaliya19/covaplad | f3ff6db582165611d03dab6d227b4e5d81214d43 | [
"MIT"
] | 1 | 2021-02-03T15:09:45.000Z | 2021-02-03T15:09:45.000Z | user/migrations/0023_auto_20201217_1038.py | thapaliya19/covaplad | f3ff6db582165611d03dab6d227b4e5d81214d43 | [
"MIT"
] | 25 | 2021-03-08T07:01:32.000Z | 2022-03-31T09:05:30.000Z | user/migrations/0023_auto_20201217_1038.py | thapaliya19/covaplad | f3ff6db582165611d03dab6d227b4e5d81214d43 | [
"MIT"
] | 3 | 2021-02-22T15:06:56.000Z | 2021-08-03T13:00:00.000Z | # Generated by Django 3.1.4 on 2020-12-17 04:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: replaces the user gender check
    # constraint with one accepting the expanded set of gender codes.
    dependencies = [
        ("user", "0022_auto_20201216_2333"),
    ]
    operations = [
        # The constraint must be removed and re-added (same name) because
        # Django cannot alter a CheckConstraint in place.
        migrations.RemoveConstraint(
            model_name="user",
            name="user_gender_in_valid_choices",
        ),
        migrations.AddConstraint(
            model_name="user",
            constraint=models.CheckConstraint(
                check=models.Q(gender__in=("M", "F", "L", "G", "B", "T", "N")),
                name="user_gender_in_valid_choices",
            ),
        ),
    ]
| 25.16 | 79 | 0.558029 |
7f6230301002c17c7ca5cc4a62a187cd8ee639e0 | 2,418 | py | Python | ssz/sedes/container.py | pipermerriam/py-ssz | 2a4f436778a50ec2c66fb792676d34dae4e6849a | [
"MIT"
] | null | null | null | ssz/sedes/container.py | pipermerriam/py-ssz | 2a4f436778a50ec2c66fb792676d34dae4e6849a | [
"MIT"
] | null | null | null | ssz/sedes/container.py | pipermerriam/py-ssz | 2a4f436778a50ec2c66fb792676d34dae4e6849a | [
"MIT"
] | null | null | null | from typing import (
Any,
Dict,
Generator,
Sequence,
Tuple,
TypeVar,
)
from eth_utils import (
to_dict,
)
from mypy_extensions import (
TypedDict,
)
from ssz.exceptions import (
DeserializationError,
)
from ssz.hash import (
hash_eth2,
)
from ssz.sedes.base import (
BaseSedes,
LengthPrefixedSedes,
)
from ssz.utils import (
get_duplicates,
)
# Empty TypedDict used purely as an upper bound for the container value TypeVar.
AnyTypedDict = TypedDict("AnyTypedDict", {})
TAnyTypedDict = TypeVar("TAnyTypedDict", bound=AnyTypedDict)
class Container(LengthPrefixedSedes[TAnyTypedDict, Dict[str, Any]]):
    """SSZ sedes for an ordered collection of named, heterogeneous fields."""
    # Serialized containers carry a 4-byte length prefix (see LengthPrefixedSedes).
    length_bytes = 4
    def __init__(self, fields: Sequence[Tuple[str, BaseSedes[Any, Any]]]) -> None:
        """Create a container from (field_name, field_sedes) pairs.

        Raises:
            ValueError: if any field name appears more than once.
        """
        field_names = tuple(field_name for field_name, field_sedes in fields)
        duplicate_field_names = get_duplicates(field_names)
        if duplicate_field_names:
            raise ValueError(
                f"The following fields are duplicated {','.join(sorted(duplicate_field_names))}"
            )
        self.fields = fields
    def serialize_content(self, value: TAnyTypedDict) -> bytes:
        """Serialize by concatenating each field's serialization in declaration order."""
        return b"".join(
            field_sedes.serialize(value[field_name])
            for field_name, field_sedes in self.fields
        )
    @to_dict
    def deserialize_content(self, content: bytes) -> Generator[Tuple[str, Any], None, None]:
        """Yield (field_name, value) pairs decoded from ``content``.

        The ``@to_dict`` decorator collects the yielded pairs into a dict,
        so the trailing-bytes checks below always run.
        """
        field_start_index = 0
        for field_name, field_sedes in self.fields:
            field_value, next_field_start_index = field_sedes.deserialize_segment(
                content,
                field_start_index,
            )
            yield field_name, field_value
            # Guard against a sedes that consumes zero bytes (would loop forever).
            if next_field_start_index <= field_start_index:
                raise Exception("Invariant: must always make progress")
            field_start_index = next_field_start_index
        # All fields decoded: the content must be consumed exactly.
        if field_start_index < len(content):
            extra_bytes = len(content) - field_start_index
            raise DeserializationError(f"Serialized container ends with {extra_bytes} extra bytes")
        if field_start_index > len(content):
            raise Exception("Invariant: must not consume more data than available")
    def intermediate_tree_hash(self, value: TAnyTypedDict) -> bytes:
        """Hash the concatenation of each field's intermediate tree hash."""
        field_hashes = [
            field_sedes.intermediate_tree_hash(value[field_name])
            for field_name, field_sedes in self.fields
        ]
        return hash_eth2(b"".join(field_hashes))
| 29.487805 | 99 | 0.659222 |
83d4cdae201e46f223649e7a5f4ab5860c632d58 | 683 | py | Python | app/core/migrations/0002_tag.py | aldhiramdans/recipe-app-api | 2c9f0902372a5eb23c4a19c06611379e7540ed73 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | aldhiramdans/recipe-app-api | 2c9f0902372a5eb23c4a19c06611379e7540ed73 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | aldhiramdans/recipe-app-api | 2c9f0902372a5eb23c4a19c06611379e7540ed73 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-03-26 09:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration (2.1.15): introduces the Tag model
    # with an owning user foreign key (cascade delete).
    dependencies = [
        ('core', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.458333 | 118 | 0.616398 |
299d8940edb9809817034c2c5edadb8cf78a5767 | 5,795 | py | Python | ImageTeam/resources/archiver/archiver_url.py | ralph-gee/training_clone | 76c3a8457dcd01fe78fe467a1f70998723c67400 | [
"Apache-2.0"
] | null | null | null | ImageTeam/resources/archiver/archiver_url.py | ralph-gee/training_clone | 76c3a8457dcd01fe78fe467a1f70998723c67400 | [
"Apache-2.0"
] | null | null | null | ImageTeam/resources/archiver/archiver_url.py | ralph-gee/training_clone | 76c3a8457dcd01fe78fe467a1f70998723c67400 | [
"Apache-2.0"
] | null | null | null | """This program archives images from a set of cameras.
To use this program:
python archiver.py <input_file> <duration> <interval>
where:
<input_file> is the path to the two-column space-separated input file. The
first column is the integer camera ID, and the second column is the camera URL.
<duration> is the archiving duration in seconds.
<interval> is the interval between two frames in seconds (or 0 for the maximum
frame rate possible).
For example, this command downloads a snapshot from every camera every one
second for 60 seconds.
python archiver.py cams.txt 60 1
Sample input file:
31837 http://207.251.86.238/cctv290.jpg
31838 http://207.251.86.238/cctv294.jpg
31839 http://207.251.86.238/cctv296.jpg
31840 http://207.251.86.238/cctv297.jpg
31843 http://207.251.86.238/cctv302.jpg
31844 http://207.251.86.238/cctv303.jpg
31918 http://207.251.86.238/cctv428.jpg
31919 http://207.251.86.238/cctv429.jpg
31921 http://207.251.86.238/cctv431.jpg
31950 http://207.251.86.238/cctv467.jpg
31954 http://207.251.86.238/cctv470.jpg
31963 http://207.251.86.238/cctv482.jpg
Notes
-----
This program has a single third-party dependency: the PIL library. It can be
installed using the following command:
sudo apt-get install python-imaging
"""
import os
import sys
import time
import datetime
import threading
from urllib2 import urlopen
from StringIO import StringIO
from PIL import Image
# The path of the results directory; snapshots are saved under
# RESULTS_PATH/<camera_id>/ (see CameraHandler.run).
RESULTS_PATH = 'results'
def read_file(path):
    """Parse the two-column camera list into an ID -> URL mapping.

    Parameters
    ----------
    path : str
        The path of the input file (whitespace-separated columns:
        camera ID, then camera URL).

    Returns
    -------
    dict
        Mapping of camera ID string to camera URL string.
    """
    with open(path) as handle:
        raw = handle.read()
    return {
        fields[0]: fields[1]
        for fields in (row.split() for row in raw.strip().splitlines())
    }
def download_image(image_url):
    """Download an online image given its URL.
    Parameters
    ----------
    image_url : str
        The full URL of the image to be downloaded.
    Returns
    -------
    image : PIL.Image.Image
        The downloaded image in RGB format.
    Raises
    ------
    Exception
        If there is any error downloading the image.
    """
    # NOTE(review): Python 2 syntax ('except Exception, e'); the caught
    # exception 'e' is discarded and replaced by a generic message.
    try:
        # Download the image. A 5-second network timeout caps each request.
        image = Image.open(StringIO(urlopen(image_url, timeout=5).read()))
        # Convert the image format to RGB if it is not.
        if image.mode != "RGB":
            image = image.convert("RGB")
    except Exception, e:
        raise Exception('Error downloading the image.')
    else:
        return image
class CameraHandler(threading.Thread):
    """The thread to download snapshots from a single camera.
    Parameters
    ----------
    id : int
        The ID of the camera.
    url : str
        The URL of the camera image stream.
    duration : int
        The duration of downloading the images in seconds.
    interval : int
        The interval between each two successive snapshots.
    Attributes
    ----------
    id : int
        The ID of the camera.
    url : str
        The URL of the camera image stream.
    duration : int
        The duration of downloading the images in seconds.
    interval : int
        The interval between each two successive snapshots.
    """
    def __init__(self, id, url, duration, interval):
        threading.Thread.__init__(self)
        self.id = id
        self.url = url
        self.duration = duration
        self.interval = interval
    def run(self):
        """Download snapshots from the camera, and save locally."""
        # Create the camera results directory.
        cam_directory = os.path.join(RESULTS_PATH, str(self.id))
        try:
            os.makedirs(cam_directory)
        except OSError:
            # Directory already exists (or cannot be created) — proceed anyway.
            pass
        # Set the starting timestamp, and process until the end of the duration.
        start_timestamp = time.time()
        while (time.time() - start_timestamp) < self.duration:
            # Set the timestamp of the snapshot that will be downloaded.
            frame_timestamp = time.time()
            try:
                # Download the image.
                image = download_image(self.url)
            except Exception:
                # Best-effort: skip this frame on any download error.
                pass
            else:
                # Save the image, named <id>_<timestamp>.png with microseconds.
                file_name = '{}/{}_{}.png'.format(
                    cam_directory, self.id,
                    datetime.datetime.fromtimestamp(
                        frame_timestamp).strftime('%Y-%m-%d_%H-%M-%S-%f'))
                image.save(file_name)
            # Sleep until the interval between frames ends.
            time_to_sleep = self.interval - (time.time() - frame_timestamp)
            if time_to_sleep > 0:
                time.sleep(time_to_sleep)
def main(args):
    """Parse CLI arguments, start one CameraHandler per camera, and wait."""
    # Read the input arguments.
    try:
        assert len(args) == 4
        input_file_path = args[1]
        duration = int(args[2])
        interval = int(args[3])
    except:
        # NOTE(review): bare except — any parsing failure (wrong arg count,
        # non-integer values) falls through to printing the usage docstring.
        import archiver
        print archiver.__doc__
        return
    # Read the input file.
    cams = read_file(input_file_path)
    camera_handlers = []
    for id, url in cams.iteritems():
        # Create a new thread to handle the camera.
        camera_handler = CameraHandler(id, url, duration, interval)
        # Run the thread.
        camera_handler.start()
        # Add the thread to the array of threads.
        camera_handlers.append(camera_handler)
        # Sleep to shift the starting time of all the threads.
        # NOTE(review): Python 2 integer division — with many cameras and a
        # small interval this floors to 0 and all threads start together.
        time.sleep(interval / len(cams))
    # Wait for all the threads to finish execution.
    for camera_handler in camera_handlers:
        camera_handler.join()
if __name__ == '__main__':
    main(sys.argv)
| 27.079439 | 80 | 0.627437 |
34b6dd79db32047201f4ce6e344f48cc547abf6b | 1,857 | py | Python | src/parse_MEDIC_dictionary.py | fshdnc/disease_normalization | 68b8fc118fe0f971fbd056ad2bffb44caa0e7abf | [
"Apache-2.0"
] | 1 | 2021-01-28T09:24:27.000Z | 2021-01-28T09:24:27.000Z | src/parse_MEDIC_dictionary.py | fshdnc/disease_normalization | 68b8fc118fe0f971fbd056ad2bffb44caa0e7abf | [
"Apache-2.0"
] | 1 | 2019-07-08T03:25:30.000Z | 2019-12-13T08:33:55.000Z | src/parse_MEDIC_dictionary.py | fshdnc/disease_normalization | 68b8fc118fe0f971fbd056ad2bffb44caa0e7abf | [
"Apache-2.0"
] | null | null | null | #!/usr/bin7env python3
# coding:utf8
from collections import namedtuple
'''read in MEDIC terminology, terminology file from DNorm system
a lot of files in DNorm-0.0.7/data not looked into
read in MEDIC terminology into namedtuples
read-in format:
DiseaseName
DiseaseID
AltDiseaseIDs
Definition (usually none)
ParentIDs
TreeNumbers
ParentTreeNumbers
Synonyms
returned format:
namedtuple(
DiseaseID
DiseaseName
AllDiseaseIDs: DiseaseID + AltDiseaseIDs
AllNames: DiseaseName + Synonyms
'''
# Record type for one MEDIC terminology entry: the canonical ID/name plus
# tuples aggregating all IDs (primary + alternates), all names
# (primary + synonyms), and the (usually empty) definition text.
MEDIC_ENTRY = namedtuple('MEDIC_ENTRY','DiseaseID DiseaseName AllDiseaseIDs AllNames Def')
#namedtuple: https://stackoverflow.com/questions/2970608/what-are-named-tuples-in-pythons
def parse_MEDIC_dictionary(filename):
    """Yield (DiseaseID, MEDIC_ENTRY) pairs from a MEDIC TSV terminology file.

    Lines starting with '#' are skipped. Each entry aggregates the primary
    ID/name with any alternate IDs and synonyms (pipe-separated columns).
    """
    with open(filename, 'r') as source:
        for raw in source:
            if raw.startswith("#"):
                continue
            name, disease_id, alt_ids, definition, _, _, _, synonyms = raw.strip('\n').split('\t')
            # Primary ID/name first, then any pipe-separated extras.
            id_group = (disease_id,) + (tuple(alt_ids.split('|')) if alt_ids else ())
            name_group = (name,) + (tuple(synonyms.split('|')) if synonyms else ())
            yield disease_id, MEDIC_ENTRY(disease_id, name, id_group, name_group, definition)
#dunno what will happend if no altID or syn
#some AllNames tuples have comma at the end but does not seem to affect the functionality
def parse_MEDIC_dictionary_newer(filename):
    """Yield (DiseaseID, MEDIC_ENTRY) pairs from the newer 9-column MEDIC TSV.

    Same as parse_MEDIC_dictionary, but the newer file format carries one
    extra trailing column, which is ignored. Lines starting with '#' are
    skipped.
    """
    with open(filename,'r') as f:
        for line in f:
            if not line.startswith("#"):
                DiseaseName, DiseaseID, AltDiseaseIDs, Def, _, _, _, Synonyms, _ = line.strip('\n').split('\t')
                AllDiseaseIDs = tuple([DiseaseID]+AltDiseaseIDs.split('|')) if AltDiseaseIDs else tuple([DiseaseID])
                AllNames = tuple([DiseaseName]+Synonyms.split('|')) if Synonyms else tuple([DiseaseName])
                # BUG FIX: MEDIC_ENTRY is declared with 5 fields, but this
                # constructor call previously passed only 4 (Def omitted),
                # raising TypeError for every entry. Def is now included,
                # matching parse_MEDIC_dictionary above.
                entry = MEDIC_ENTRY(DiseaseID,DiseaseName,AllDiseaseIDs,AllNames,Def)
                yield DiseaseID, entry
9911e19980b44372dfffadd53967ba04092b9d98 | 3,688 | py | Python | main.py | Dranixia/dranixia | 250354a47db107f5e55e9f873b227976b33041ee | [
"MIT"
] | 1 | 2020-05-21T14:35:02.000Z | 2020-05-21T14:35:02.000Z | main.py | Dranixia/dranixia | 250354a47db107f5e55e9f873b227976b33041ee | [
"MIT"
] | 3 | 2020-04-02T21:07:22.000Z | 2021-02-10T18:26:06.000Z | main.py | Dranixia/UCU-Semester-Work | 250354a47db107f5e55e9f873b227976b33041ee | [
"MIT"
] | null | null | null | """
Butynets' Danylo
Python 3.8
"""
import sys
from time import sleep
from modules.music_adt import MusicADT
class Menu:
    """
    Interactive console menu for the Genre Research program.

    Wraps a MusicADT built from the user-supplied year range and
    dispatches numbered menu options to the matching research action.
    """

    def __init__(self):
        """
        Initialize menu and program.

        Asks whether full mode (track/genre search) should be enabled,
        reads the year range, then builds and fills the ADT.
        """
        print("Welcome to the Genre Research (Spotify)!")
        answer = ["Y", "N"]
        self.fullmode = input("Do you want to have track and genre "
                              "search on? It will set year "
                              "range to maximum. [Y] or [N]: ")
        # Re-prompt until the answer is Y or N (case-insensitive).
        while self.fullmode.upper() not in answer:
            self.fullmode = input("Do you want to have track and genre "
                                  "search on? It will set year "
                                  "range to maximum. [Y] or [N]: ")
        start = int(input("Enter the first year of the period you want to research: "))
        last = int(input("Enter the last year of the period you want to research: "))
        self.adt = MusicADT(start, last, fullmode=self.fullmode)
        self.adt.fill_adt("docs/results.json")
        # Dispatch table mapping the menu option string to its handler.
        self.choices = {
            "1": self.year_top_func,
            "2": self.mult_year_top_func,
            "3": self.whole_year_func,
            "4": self.track_search_func,
            "5": self.genre_search_func,
            "6": self.adt.graph,
            "7": self.quit
        }

    def _full_mode_on(self):
        """
        Return True when the user enabled full mode.

        BUG FIX: the previous gates used ``if self.fullmode:``, but
        ``fullmode`` holds the raw answer string ("Y"/"N"/"y"/"n"),
        which is always truthy — so full-mode-only features were never
        actually blocked.
        """
        return self.fullmode.upper() == "Y"

    @staticmethod
    def display_menu():
        """
        Print menu options.
        """
        print("""
            Genre Research Menu
            1. Receive Top genre of the year
            2. Receive Top genre of the Time Period
            3. Receive Full info about the year
            4. Search Track (fullmode only)
            5. Search Genre (fullmode only)
            6. Open adt as the graph
            7. Quit """)

    def year_top_func(self):
        """
        Prompt for a year and launch the ADT top-genre lookup.
        :return: None
        """
        y = int(input("Enter the year from the period you are discovering: "))
        self.adt.years_top_genre(y)

    def mult_year_top_func(self):
        """
        Prompt for a year range and launch the ADT multi-year top lookup.
        :return: None
        """
        first_y = int(input("Enter the year from the period you are discovering: "))
        last_y = int(input("Enter the year from the period you "
                           "are discovering.(larger than previous): "))
        self.adt.multiple_year_top(first_y, last_y)

    def whole_year_func(self):
        """
        Prompt for a year and print its full info via the ADT.
        :return: None
        """
        y = int(input("Enter the year from the period you are discovering."))
        self.adt.get_whole_year(y)

    def track_search_func(self):
        """
        Prompt for a track name and launch the ADT track search
        (full mode only).
        :return: None
        """
        if self._full_mode_on():
            track = input("Enter track name.")
            self.adt.track_search(track)
        else:
            print("Sorry, no full mode on.")

    def genre_search_func(self):
        """
        Prompt for a genre and launch the ADT genre search
        (full mode only).
        :return: None
        """
        if self._full_mode_on():
            genre = input("Enter genre you want.")
            self.adt.genre_search(genre)
        else:
            print("Sorry, no full mode on.")

    def run(self):
        """
        Run the program: show the menu and dispatch choices until quit.
        """
        while True:
            self.display_menu()
            choice = input("Enter an option: ")
            action = self.choices.get(choice)
            if action:
                action()
            else:
                print("{0} is not a valid choice.".format(choice))

    @staticmethod
    def quit():
        """
        Terminate the program (after a short pause so the message is seen).
        """
        print("Thank you for using this program.")
        sleep(3)
        sys.exit(0)
if __name__ == "__main__":
Menu().run()
| 27.939394 | 87 | 0.529284 |
94c20d988be3eb4c909eb2237f05229d34c1d8fe | 2,092 | py | Python | enaml/validation/regex_validator.py | mmckerns/enaml | ebf417b4dce9132bffa038a588ad90436a59d37e | [
"BSD-3-Clause"
] | 11 | 2015-01-04T14:29:23.000Z | 2019-12-25T05:38:37.000Z | enaml/validation/regex_validator.py | mmckerns/enaml | ebf417b4dce9132bffa038a588ad90436a59d37e | [
"BSD-3-Clause"
] | 36 | 2015-02-20T00:56:53.000Z | 2020-12-04T10:02:14.000Z | enaml/validation/regex_validator.py | mmckerns/enaml | ebf417b4dce9132bffa038a588ad90436a59d37e | [
"BSD-3-Clause"
] | 3 | 2015-11-19T15:11:37.000Z | 2019-03-11T23:45:02.000Z | #------------------------------------------------------------------------------
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
import re
from traits.api import Str, Property, cached_property
from .validator import Validator
class RegexValidator(Validator):
    """ A concrete Validator which handles text input.
    This validator ensures that the text matches a provided regular
    expression string.
    """
    #: The regular expression string to use for validation. The default
    #: regex matches everything.
    regex = Str(r'.*')
    #: A read only cached property which holds the compiled regular
    #: expression object.
    _regex = Property(depends_on='regex')
    @cached_property
    def _get__regex(self):
        """ The getter for the '_regex' property.
        Returns
        -------
        result : sre object
            A compiled regular expression object for the current regex
            string.
        """
        # Traits naming convention: '_get__regex' is the getter for the
        # '_regex' Property; the cache is invalidated when 'regex' changes.
        return re.compile(self.regex, re.UNICODE)
    def validate(self, text, component):
        """ Validates the given text matches the regular expression.
        Parameters
        ----------
        text : unicode
            The unicode text edited by the client widget.
        component : Declarative
            The declarative component currently making use of the
            validator.
        Returns
        -------
        result : (unicode, bool)
            A 2-tuple of (optionally modified) unicode text, and whether
            or not that text should be considered valid.
        """
        # match() anchors at the start of the string only; the text itself
        # is passed back unmodified.
        return (text, bool(self._regex.match(text)))
    def client_validator(self):
        """ The client side regex validator.
        Returns
        -------
        result : dict
            The dict representation of the client side regex validator.
        """
        res = {}
        res['type'] = 'regex'
        res['message'] = self.message
        res['arguments'] = {'regex': self.regex}
        return res
| 27.526316 | 79 | 0.551147 |
2090a216c1dcb25099f7de4c9bd5fbf9d8451f92 | 234 | py | Python | stobu/elms/states.py | NovelLab/storybuilder | bc52fc999718db29fb9aa83c787950a842e15f3f | [
"MIT"
] | null | null | null | stobu/elms/states.py | NovelLab/storybuilder | bc52fc999718db29fb9aa83c787950a842e15f3f | [
"MIT"
] | 1 | 2021-03-11T02:28:02.000Z | 2021-03-11T02:28:02.000Z | stobu/elms/states.py | NovelLab/storybuilder | bc52fc999718db29fb9aa83c787950a842e15f3f | [
"MIT"
] | null | null | null | """Define data name of state file."""
# Official Libraries
from enum import Enum
__all__ = (
'StateItem',
)
# Main
class StateItem(Enum):
    """Data item names used in a state file."""

    NAME = 'name'

    def __str__(self) -> str:
        # Render as the underlying string value rather than "StateItem.NAME".
        return "{}".format(self.value)
| 13 | 37 | 0.589744 |
407af9da49c00d5c0f3f3f8373418852a8ca3392 | 1,137 | py | Python | setup.py | chpmrc/django-easygeoip | ce16bb52e935fa8c7c7468ff0187c0acefdc1524 | [
"MIT"
] | 2 | 2019-03-26T03:52:15.000Z | 2020-02-09T13:20:39.000Z | setup.py | chpmrc/django-easygeoip | ce16bb52e935fa8c7c7468ff0187c0acefdc1524 | [
"MIT"
] | 1 | 2016-08-25T13:47:17.000Z | 2016-08-25T14:28:43.000Z | setup.py | chpmrc/django-easygeoip | ce16bb52e935fa8c7c7468ff0187c0acefdc1524 | [
"MIT"
] | 2 | 2015-12-18T10:02:48.000Z | 2016-06-28T01:13:27.000Z | import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-easygeoip',
version='0.0.3',
packages=['easygeoip'],
include_package_data=True,
license='MIT License',
description='A simple Django app to include IP address-based geo-location capabilities to your project.',
long_description=README,
url='http://lambdacomplete.me',
author='Marco Chiappetta',
author_email='lambdacomplete@gmail.com',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
)
| 33.441176 | 109 | 0.649077 |
d171cdb625a594cf26c22c6b61b42746f4d4cdba | 3,557 | py | Python | core/broker.py | xlcbingo1999/CloudSimPy | de515eceb5d177142bb154deff0d44c58f6aca88 | [
"MIT"
] | null | null | null | core/broker.py | xlcbingo1999/CloudSimPy | de515eceb5d177142bb154deff0d44c58f6aca88 | [
"MIT"
] | null | null | null | core/broker.py | xlcbingo1999/CloudSimPy | de515eceb5d177142bb154deff0d44c58f6aca88 | [
"MIT"
] | null | null | null | from core.job import Job
from core.machine import MachineConfig
import sys
from playground.Non_DAG.utils.tools import debugPrinter, infoPrinter
import time
from tqdm import tqdm, trange
class JobBroker(object):
    """Simulation process that submits each job to the cluster at its submit time."""
    # Class used to wrap each job config; subclasses may override to customise jobs.
    job_cls = Job

    def __init__(self, env, job_configs):
        # env: simpy-style environment exposing `now` and `timeout()`.
        self.env = env
        self.simulation = None
        self.cluster = None
        self.destroyed = False  # flipped to True once every job has been submitted
        self.job_configs = job_configs

    def attach(self, simulation):
        # Bind this broker to a simulation (and its cluster) before run() is started.
        self.simulation = simulation
        self.cluster = simulation.cluster

    def run(self):
        """Generator process: sleep until each job's submit_time, then add it to the cluster."""
        # NOTE(review): the assert below implies job_configs is sorted by
        # submit_time — confirm with the caller.
        for i in tqdm(range(len(self.job_configs))):
            job_config = self.job_configs[i]
            assert job_config.submit_time >= self.env.now
            # Advance simulated time to the job's submission instant.
            yield self.env.timeout(job_config.submit_time - self.env.now)
            infoPrinter(__file__, sys._getframe(),"xiaolinchang用户提交任务: 当前时间: " + str(self.env.now) + str(job_config.printState()) + "\n\n")
            job = JobBroker.job_cls(self.env, job_config)
            # print('a task arrived at time %f' % self.env.now)
            self.cluster.add_job(job)
        self.destroyed = True
class MachineBroker(object):
    """Simulation process that applies machine add/remove/resize actions at their scheduled times."""

    def __init__(self, env, machine_action_configs):
        self.env = env
        self.destroyed = False  # flipped to True once all actions have been applied
        self.simulation = None
        self.cluster = None
        self.machine_action_configs = machine_action_configs

    def attach(self, simulation):
        # Bind this broker to a simulation (and its cluster) before run() is started.
        self.simulation = simulation
        self.cluster = simulation.cluster

    def initMachineActionRun(self):
        """Eagerly apply all 'init_m' actions (negative submit time) to seed the cluster."""
        assert self.cluster is not None
        for machine_action_config in self.machine_action_configs:
            # Note: the config attribute is spelled `submite_time` throughout the project.
            if machine_action_config.submite_time < 0 and machine_action_config.operation == 'init_m':
                infoPrinter(__file__, sys._getframe(), "当前时间: {0} 初始化集群 action: {1}".format(self.env.now, machine_action_config.state))
                machine_config = MachineConfig(machine_action_config)
                self.cluster.add_machines([machine_config])

    def run(self):
        """Generator process: wait for each action's submit time, then mutate the cluster accordingly."""
        for i in tqdm(range(len(self.machine_action_configs))):
            machine_action_config = self.machine_action_configs[i]
            if machine_action_config.operation == 'init_m':
                # 'init_m' actions are handled up-front by initMachineActionRun().
                continue
            # NOTE(review): the assert implies actions are sorted by submite_time.
            assert machine_action_config.submite_time >= self.env.now
            yield self.env.timeout(machine_action_config.submite_time - self.env.now)
            assert self.cluster is not None
            infoPrinter(__file__, sys._getframe(), "当前时间: {0} 检查修改machine状态 action: {1}".format(self.env.now, machine_action_config.state))
            if machine_action_config.operation == 'add_m':
                machine_config = MachineConfig(machine_action_config)
                self.cluster.add_machines([machine_config])
            elif machine_action_config.operation == 'remove_m':
                self.cluster.remove_machine(machine_action_config.machine_id)
            elif machine_action_config.operation == 'add_resource_m':
                self.cluster.add_machine_resource(machine_action_config)
            elif machine_action_config.operation == 'remove_resource_m':
                self.cluster.remove_machine_resource(machine_action_config)
            elif machine_action_config.operation == 'init_m':
                # Unreachable: 'init_m' is already filtered by the `continue` above.
                continue
            else:
                raise RuntimeError("当前动作错误: {0}".format(machine_action_config.operation))
            debugPrinter(__file__, sys._getframe(), "当前时间: {0} 检查集群状态: {1}".format(self.env.now, self.cluster.state))
        self.destroyed = True
1b95aa947d8e6f4e85ec1dcab8b39f3ace48f148 | 2,260 | py | Python | tests/operators/test_gcs_to_sftp_system.py | suensummit/airflow | 37a342d0e96a91ce2d34085e225a4e86f54c4e21 | [
"Apache-2.0"
] | 1 | 2017-06-25T14:18:15.000Z | 2017-06-25T14:18:15.000Z | tests/operators/test_gcs_to_sftp_system.py | suensummit/airflow | 37a342d0e96a91ce2d34085e225a4e86f54c4e21 | [
"Apache-2.0"
] | 3 | 2020-07-07T20:39:24.000Z | 2021-09-29T17:34:46.000Z | tests/operators/test_gcs_to_sftp_system.py | suensummit/airflow | 37a342d0e96a91ce2d34085e225a4e86f54c4e21 | [
"Apache-2.0"
] | 1 | 2020-11-04T03:17:51.000Z | 2020-11-04T03:17:51.000Z | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""System tests for Google Cloud Build operators"""
import unittest
from tests.gcp.utils.base_gcp_system_test_case import (
OPERATORS_EXAMPLES_DAG_FOLDER, SKIP_TEST_WARNING, TestDagGcpSystem,
)
from tests.gcp.utils.gcp_authenticator import GCP_GCS_KEY
from tests.operators.test_gcs_to_sftp_system_helper import GcsToSFTPTestHelper
@unittest.skipIf(TestDagGcpSystem.skip_check(GCP_GCS_KEY), SKIP_TEST_WARNING)
class GcsToSftpExampleDagsSystemTest(TestDagGcpSystem):
    """
    System tests for the Google Cloud Storage to SFTP transfer operator.

    These tests run the example DAG against the real GCP service.
    """
    def __init__(self, method_name="runTest"):
        super().__init__(
            method_name,
            dag_id="example_gcs_to_sftp",
            dag_name="example_gcs_to_sftp.py",
            example_dags_folder=OPERATORS_EXAMPLES_DAG_FOLDER,
            gcp_key=GCP_GCS_KEY,
        )
        self.helper = GcsToSFTPTestHelper()

    def _run_authenticated(self, action):
        # Execute *action* with GCP credentials active, always revoking them afterwards.
        self.gcp_authenticator.gcp_authenticate()
        try:
            action()
        finally:
            self.gcp_authenticator.gcp_revoke_authentication()

    def setUp(self):
        super().setUp()
        self._run_authenticated(self.helper.create_buckets)

    def test_run_example_dag(self):
        self._run_dag()

    def tearDown(self):
        self._run_authenticated(self.helper.delete_buckets)
        super().tearDown()
b7e98ab09bd9f9693a632d708e605bf216543ad3 | 5,506 | py | Python | assignment1/cs231n/classifiers/linear_classifier.py | Lion-HuangGz/CS231nAssignment | 9adfdeda1c892c468c7bab9661fe8eb4d1063510 | [
"MIT"
] | 419 | 2019-09-02T08:25:31.000Z | 2022-03-28T06:09:47.000Z | assignment1/cs231n/classifiers/linear_classifier.py | Pichenze/CS231nAssignment | 9adfdeda1c892c468c7bab9661fe8eb4d1063510 | [
"MIT"
] | 9 | 2019-09-28T09:03:40.000Z | 2020-11-11T08:21:13.000Z | assignment1/cs231n/classifiers/linear_classifier.py | Pichenze/CS231nAssignment | 9adfdeda1c892c468c7bab9661fe8eb4d1063510 | [
"MIT"
] | 164 | 2019-09-20T07:58:04.000Z | 2022-03-15T11:31:25.000Z | from cs231n.classifiers.linear_svm import *
from cs231n.classifiers.softmax import *
class LinearClassifier(object):
    """Linear classifier trained with minibatch stochastic gradient descent.

    Subclasses provide the concrete loss function (and its gradient) by
    overriding ``loss``.
    """

    def __init__(self):
        self.W = None  # weight matrix of shape (D, C); created lazily in train()

    def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
              batch_size=200, verbose=False):
        """Run SGD on (X, y) and return the per-iteration loss history.

        Inputs:
        - X: array of shape (N, D); N training samples of dimension D.
        - y: array of shape (N,); integer labels in [0, C).
        - learning_rate: step size for each gradient update.
        - reg: regularization strength.
        - num_iters: number of SGD steps to take.
        - batch_size: number of samples drawn per step.
        - verbose: if True, print the loss every 100 iterations.
        """
        num_train, dim = X.shape
        # Labels are assumed to be 0..C-1, so C is one past the largest label.
        num_classes = np.max(y) + 1
        if self.W is None:
            # Lazily initialize the weights with small random values.
            self.W = 0.001 * np.random.randn(dim, num_classes)

        loss_history = []
        for it in range(num_iters):
            # Draw a random minibatch (without replacement) of rows and labels.
            batch_idx = np.random.choice(range(num_train), batch_size, replace=False)
            X_batch = X[batch_idx]
            y_batch = y[batch_idx]

            # Loss and gradient on the minibatch, then a plain gradient step.
            loss, grad = self.loss(X_batch, y_batch, reg)
            loss_history.append(loss)
            self.W = self.W - learning_rate * grad

            if verbose and it % 100 == 0:
                print('iteration %d / %d: loss %f' % (it, num_iters, loss))

        return loss_history

    def predict(self, X):
        """Return the predicted class index for each row of X (1-D array of length N)."""
        # The highest-scoring column of the score matrix X @ W wins.
        return np.argmax(X.dot(self.W), axis=1)

    def loss(self, X_batch, y_batch, reg):
        """Compute (loss, gradient w.r.t. self.W) for a minibatch.

        Inputs:
        - X_batch: array of shape (N, D) with the minibatch samples.
        - y_batch: array of shape (N,) with the minibatch labels.
        - reg: regularization strength.

        Subclasses override this with a concrete implementation.
        """
        pass
class LinearSVM(LinearClassifier):
    """ A subclass that uses the Multiclass SVM loss function """
    def loss(self, X_batch, y_batch, reg):
        # Delegates to the vectorized multiclass hinge-loss; returns (loss, dW).
        return svm_loss_vectorized(self.W, X_batch, y_batch, reg)
class Softmax(LinearClassifier):
    """ A subclass that uses the Softmax + Cross-entropy loss function """
    def loss(self, X_batch, y_batch, reg):
        # Delegates to the vectorized softmax cross-entropy loss; returns (loss, dW).
        return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
| 41.398496 | 97 | 0.533418 |
b729c1567e0282b65348bf31e419dbdfc9dbc2be | 1,800 | py | Python | questions/53032423/main.py | sesu089/stackoverflow | 6fae69be6fa74fba9d554e6b5f387e5d3c1aad73 | [
"MIT"
] | 302 | 2017-03-04T00:05:23.000Z | 2022-03-28T22:51:29.000Z | questions/53032423/main.py | sesu089/stackoverflow | 6fae69be6fa74fba9d554e6b5f387e5d3c1aad73 | [
"MIT"
] | 30 | 2017-12-02T19:26:43.000Z | 2022-03-28T07:40:36.000Z | questions/53032423/main.py | sesu089/stackoverflow | 6fae69be6fa74fba9d554e6b5f387e5d3c1aad73 | [
"MIT"
] | 388 | 2017-07-04T16:53:12.000Z | 2022-03-18T22:20:19.000Z | import os
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
dir_path = os.path.dirname(os.path.realpath(__file__))
# Geometry (x, y, width, height) for each of the 30 "KA" indicator-lamp buttons.
positions_button_KA = [(330, 70, 10, 20),(270, 110, 10, 20),(300, 110, 10, 20),(360, 110, 10, 20),(330, 150, 10, 20),
                       (180, 190, 10, 20),(240, 190, 10, 20),(300, 190, 10, 20),(360, 190, 10, 20),(210, 230, 10, 20),
                       (270, 230, 10, 20),(330, 230, 10, 20),(180, 270, 10, 20),(240, 270, 10, 20),(270, 270, 10, 20),
                       (300, 270, 10, 20),(360, 270, 10, 20),(210, 310, 10, 20),(330, 310, 10, 20),(180, 350, 10, 20),
                       (240, 350, 10, 20),(300, 350, 10, 20),(360, 350, 10, 20),(210, 390, 10, 20),(270, 390, 10, 20),
                       (330, 390, 10, 20),(180, 430, 10, 20),(240, 430, 10, 20),(300, 430, 10, 20),(360, 430, 10, 20)]
class Ui_MainWindows(QtWidgets.QMainWindow):
    """Main window ("Vision Room") showing a panel of indicator-lamp buttons."""
    def __init__(self):
        super(Ui_MainWindows,self).__init__()
        # A QMainWindow needs an explicit central widget to host child widgets.
        self.central_widget = QtWidgets.QWidget()
        self.setCentralWidget(self.central_widget)
        self.setWindowTitle("Vision Room")
        self.buttons_KA = {}  # index -> QPushButton, one per lamp position
        self.add_buttons_KA_IU()
        self.resize(1280, 960)
    def add_buttons_KA_IU(self):
        """Create one image-styled button per entry in positions_button_KA."""
        name_group = "button_KA"  # NOTE(review): currently unused
        for i, geom in enumerate(positions_button_KA):
            b = QtWidgets.QPushButton(self.central_widget)
            b.setGeometry(*geom)
            # Show the "light off" image as the button face via a stylesheet.
            path_image = os.path.join(dir_path, "Ressource/LightOff.png")
            qss = 'border-image: url({})'.format(path_image)
            b.setStyleSheet(qss)
            self.buttons_KA[i] = b
def main():
    """Create the Qt application, show the main window and run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    window = Ui_MainWindows()
    window.show()
    # exec_() blocks until the window closes; its status code becomes ours.
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()
a8761efbcbee9496abae4ea6c61f55a6696260e4 | 27,361 | py | Python | test/qa/rpc-tests/bip68-112-113-p2p.py | MagnaChain/MagnaChain-dev-master | c83e7a8b9e8f9ae8684a0e3a11f1eeb42dfa1272 | [
"MIT"
] | 26 | 2018-11-30T09:01:34.000Z | 2020-03-11T00:41:52.000Z | test/qa/rpc-tests/bip68-112-113-p2p.py | MagnaChain/MagnaChain-dev-master | c83e7a8b9e8f9ae8684a0e3a11f1eeb42dfa1272 | [
"MIT"
] | 3 | 2019-01-01T09:06:22.000Z | 2019-04-01T10:06:01.000Z | test/qa/rpc-tests/bip68-112-113-p2p.py | MagnaChain/MagnaChain-dev-master | c83e7a8b9e8f9ae8684a0e3a11f1eeb42dfa1272 | [
"MIT"
] | 24 | 2018-11-30T03:32:53.000Z | 2020-03-20T04:30:34.000Z | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The MagnaChain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import ComparisonTestFramework
from test_framework.util import *
from test_framework.mininode import ToHex, CTransaction, NetworkThread
from test_framework.blocktools import create_coinbase, create_block
from test_framework.comptool import TestInstance, TestManager
from test_framework.script import *
from io import BytesIO
import time
'''
This test is meant to exercise activation of the first version bits soft fork
This soft fork will activate the following BIPS:
BIP 68 - nSequence relative lock times
BIP 112 - CHECKSEQUENCEVERIFY
BIP 113 - MedianTimePast semantics for nLockTime
regtest lock-in with 108/144 block signalling
activation after a further 144 blocks
mine 82 blocks whose coinbases will be used to generate inputs for our tests
mine 61 blocks to transition from DEFINED to STARTED
mine 144 blocks only 100 of which are signaling readiness in order to fail to change state this period
mine 144 blocks with 108 signaling and verify STARTED->LOCKED_IN
mine 140 blocks and seed the block chain with the 82 inputs we will use for our tests at height 572
mine 3 blocks and verify still at LOCKED_IN and test that enforcement has not triggered
mine 1 block and test that enforcement has triggered (which triggers ACTIVE)
Test BIP 113 is enforced
Mine 4 blocks so next height is 580 and test BIP 68 is enforced for time and height
Mine 1 block so next height is 581 and test BIP 68 now passes time but not height
Mine 1 block so next height is 582 and test BIP 68 now passes time and height
Test that BIP 112 is enforced
Various transactions will be used to test that the BIPs rules are not enforced before the soft fork activates
And that after the soft fork activates transactions pass and fail as they should according to the rules.
For each BIP, transactions of versions 1 and 2 will be tested.
----------------
BIP 113:
bip113tx - modify the nLocktime variable
BIP 68:
bip68txs - 16 txs with nSequence relative locktime of 10 with various bits set as per the relative_locktimes below
BIP 112:
bip112txs_vary_nSequence - 16 txs with nSequence relative_locktimes of 10 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_nSequence_9 - 16 txs with nSequence relative_locktimes of 9 evaluated against 10 OP_CSV OP_DROP
bip112txs_vary_OP_CSV - 16 txs with nSequence = 10 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112txs_vary_OP_CSV_9 - 16 txs with nSequence = 9 evaluated against varying {relative_locktimes of 10} OP_CSV OP_DROP
bip112tx_special - test negative argument to OP_CSV
'''
# Base relative locktime used by every test transaction (10 blocks / 10*512s).
base_relative_locktime = 10
seq_disable_flag = 1<<31      # BIP68 SEQUENCE_LOCKTIME_DISABLE_FLAG
seq_random_high_bit = 1<<25   # bit with no consensus meaning; must be ignored
seq_type_flag = 1<<22         # BIP68 type flag: time-based instead of height-based
seq_random_low_bit = 1<<18    # another meaningless bit; must be ignored
# b31,b25,b22,b18 represent the 31st, 25th, 22nd and 18th bits respectively in the nSequence field
# relative_locktimes[b31][b25][b22][b18] is a base_relative_locktime with the indicated bits set if their indices are 1
relative_locktimes = []
for b31 in range(2):
    b25times = []
    for b25 in range(2):
        b22times = []
        for b22 in range(2):
            b18times = []
            for b18 in range(2):
                # Start from the base locktime and OR in each selected bit.
                rlt = base_relative_locktime
                if (b31):
                    rlt = rlt | seq_disable_flag
                if (b25):
                    rlt = rlt | seq_random_high_bit
                if (b22):
                    rlt = rlt | seq_type_flag
                if (b18):
                    rlt = rlt | seq_random_low_bit
                b18times.append(rlt)
            b22times.append(b18times)
        b25times.append(b22times)
    relative_locktimes.append(b25times)
def all_rlt_txs(txarray):
    """Flatten a [b31][b25][b22][b18] nested tx array into a flat 16-element list.

    Order is b31-major, matching nested for-loops over each bit.
    """
    return [txarray[b31][b25][b22][b18]
            for b31 in range(2)
            for b25 in range(2)
            for b22 in range(2)
            for b18 in range(2)]
class BIP68_112_113Test(ComparisonTestFramework):
    def __init__(self):
        # Single-node comparison test; the framework drives block/tx acceptance checks.
        super().__init__()
        self.num_nodes = 1
    def setup_network(self):
        # Must set the blockversion for this test
        # -whitelist avoids rate limits on localhost; blockversion=4 keeps the
        # node mining pre-softfork version numbers by default.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                                 extra_args=[['-debug', '-whitelist=127.0.0.1', '-blockversion=4']],
                                 binary=[self.options.testbinary])
    def run_test(self):
        """Wire up the comparison TestManager and run the scenarios from get_tests()."""
        test = TestManager(self, self.options.tmpdir)
        test.add_all_connections(self.nodes)
        NetworkThread().start() # Start up network handling in another thread
        test.run()
    def send_generic_input_tx(self, node, coinbases):
        """Spend the next coinbase from *coinbases* (pops it) back to self.nodeaddress; returns the txid."""
        amount = Decimal("49.99")  # coinbase value minus a small fee
        return node.sendrawtransaction(ToHex(self.sign_transaction(node, self.create_transaction(node, node.getblock(coinbases.pop())['tx'][0], self.nodeaddress, amount))))
    def create_transaction(self, node, txid, to_address, amount):
        """Build (but do not sign) a transaction spending output 0 of *txid* to *to_address*."""
        inputs = [{ "txid" : txid, "vout" : 0}]
        outputs = { to_address : amount }
        rawtx = node.createrawtransaction(inputs, outputs)
        tx = CTransaction()
        # Parse the node's hex encoding back into a mininode CTransaction object.
        f = BytesIO(hex_str_to_bytes(rawtx))
        tx.deserialize(f)
        return tx
    def sign_transaction(self, node, unsignedtx):
        """Sign *unsignedtx* with the node's wallet and return it as a CTransaction."""
        rawtx = ToHex(unsignedtx)
        signresult = node.signrawtransaction(rawtx)
        tx = CTransaction()
        f = BytesIO(hex_str_to_bytes(signresult['hex']))
        tx.deserialize(f)
        return tx
def generate_blocks(self, number, version, test_blocks = []):
for i in range(number):
block = self.create_test_block([], version)
test_blocks.append([block, True])
self.last_block_time += 600
self.tip = block.sha256
self.tipheight += 1
return test_blocks
    def create_test_block(self, txs, version = 536870912):
        """Build and solve a block on self.tip (timestamped 600s after the last block) containing *txs*."""
        block = create_block(self.tip, create_coinbase(self.tipheight + 1), self.last_block_time + 600)
        block.nVersion = version  # default 0x20000000: top-bits signalling form
        block.vtx.extend(txs)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.rehash()
        block.solve()  # grind the nonce so the block meets regtest PoW
        return block
    def create_bip68txs(self, bip68inputs, txversion, locktime_delta = 0):
        """Create 16 signed txs spending *bip68inputs*, one per relative-locktime
        bit pattern, returned as a [b31][b25][b22][b18] nested list.

        *locktime_delta* shifts every nSequence (e.g. -1 for the "9" variants).
        """
        txs = []
        assert(len(bip68inputs) >= 16)
        i = 0  # consume one prepared input per bit pattern
        for b31 in range(2):
            b25txs = []
            for b25 in range(2):
                b22txs = []
                for b22 in range(2):
                    b18txs = []
                    for b18 in range(2):
                        tx = self.create_transaction(self.nodes[0], bip68inputs[i], self.nodeaddress, Decimal("49.98"))
                        i += 1
                        tx.nVersion = txversion
                        # nSequence carries the BIP68 relative locktime for this pattern.
                        tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
                        b18txs.append(self.sign_transaction(self.nodes[0], tx))
                    b22txs.append(b18txs)
                b25txs.append(b22txs)
            txs.append(b25txs)
        return txs
    def create_bip112special(self, input, txversion):
        """Create a signed tx whose scriptSig is prefixed with `-1 OP_CSV OP_DROP`
        (a negative OP_CSV argument, which must always fail once BIP112 is active)."""
        tx = self.create_transaction(self.nodes[0], input, self.nodeaddress, Decimal("49.98"))
        tx.nVersion = txversion
        signtx = self.sign_transaction(self.nodes[0], tx)
        # Prepend the CSV check in front of the already-valid signature script.
        signtx.vin[0].scriptSig = CScript([-1, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
        return signtx
    def create_bip112txs(self, bip112inputs, varyOP_CSV, txversion, locktime_delta = 0):
        """Create 16 signed txs prefixed with `<rlt> OP_CSV OP_DROP`, returned as a
        [b31][b25][b22][b18] nested list.

        If *varyOP_CSV* is true the OP_CSV argument varies per bit pattern and
        nSequence is fixed at the base locktime; otherwise nSequence varies and
        the OP_CSV argument is fixed. *locktime_delta* shifts the fixed/varying
        nSequence (e.g. -1 for the "sequence of 9" cases).
        """
        txs = []
        assert(len(bip112inputs) >= 16)
        i = 0  # consume one prepared input per bit pattern
        for b31 in range(2):
            b25txs = []
            for b25 in range(2):
                b22txs = []
                for b22 in range(2):
                    b18txs = []
                    for b18 in range(2):
                        tx = self.create_transaction(self.nodes[0], bip112inputs[i], self.nodeaddress, Decimal("49.98"))
                        i += 1
                        if (varyOP_CSV): # if varying OP_CSV, nSequence is fixed
                            tx.vin[0].nSequence = base_relative_locktime + locktime_delta
                        else: # vary nSequence instead, OP_CSV is fixed
                            tx.vin[0].nSequence = relative_locktimes[b31][b25][b22][b18] + locktime_delta
                        tx.nVersion = txversion
                        signtx = self.sign_transaction(self.nodes[0], tx)
                        # Prepend the CSV check in front of the valid signature script.
                        if (varyOP_CSV):
                            signtx.vin[0].scriptSig = CScript([relative_locktimes[b31][b25][b22][b18], OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
                        else:
                            signtx.vin[0].scriptSig = CScript([base_relative_locktime, OP_CHECKSEQUENCEVERIFY, OP_DROP] + list(CScript(signtx.vin[0].scriptSig)))
                        b18txs.append(signtx)
                    b22txs.append(b18txs)
                b25txs.append(b22txs)
            txs.append(b25txs)
        return txs
def get_tests(self):
long_past_time = int(time.time()) - 600 * 1000 # enough to build up to 1000 blocks 10 minutes apart without worrying about getting into the future
self.nodes[0].setmocktime(long_past_time - 100) # enough so that the generated blocks will still all be before long_past_time
self.coinbase_blocks = self.nodes[0].generate(1 + 16 + 2*32 + 1) # 82 blocks generated for inputs
self.nodes[0].setmocktime(0) # set time back to present so yielded blocks aren't in the future as we advance last_block_time
self.tipheight = 82 # height of the next block to build
self.last_block_time = long_past_time
self.tip = int("0x" + self.nodes[0].getbestblockhash(), 0)
self.nodeaddress = self.nodes[0].getnewaddress()
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'defined')
test_blocks = self.generate_blocks(61, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 1
# Advanced from DEFINED to STARTED, height = 143
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# Fail to achieve LOCKED_IN 100 out of 144 signal bit 0
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(50, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(20, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(24, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 2
# Failed to advance past STARTED, height = 287
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'started')
# 108 out of 144 signal bit 0 to achieve lock-in
# using a variety of bits to simulate multiple parallel softforks
test_blocks = self.generate_blocks(58, 536870913) # 0x20000001 (signalling ready)
test_blocks = self.generate_blocks(26, 4, test_blocks) # 0x00000004 (signalling not)
test_blocks = self.generate_blocks(50, 536871169, test_blocks) # 0x20000101 (signalling ready)
test_blocks = self.generate_blocks(10, 536936448, test_blocks) # 0x20010000 (signalling not)
yield TestInstance(test_blocks, sync_every_block=False) # 3
# Advanced from STARTED to LOCKED_IN, height = 431
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# 140 more version 4 blocks
test_blocks = self.generate_blocks(140, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 4
### Inputs at height = 572
# Put inputs for all tests in the chain at height 572 (tip now = 571) (time increases by 600s per block)
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
for i in range(16):
bip68inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
for j in range(2):
inputs = []
for i in range(16):
inputs.append(self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks))
bip112diverseinputs.append(inputs)
# 1 special input with -1 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112specialinput = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
# 1 normal input
bip113input = self.send_generic_input_tx(self.nodes[0], self.coinbase_blocks)
self.nodes[0].setmocktime(self.last_block_time + 600)
inputblockhash = self.nodes[0].generate(1)[0] # 1 block generated for inputs to be in chain at height 572
self.nodes[0].setmocktime(0)
self.tip = int("0x" + inputblockhash, 0)
self.tipheight += 1
self.last_block_time += 600
assert_equal(len(self.nodes[0].getblock(inputblockhash,True)["tx"]), 82+1)
# 2 more version 4 blocks
test_blocks = self.generate_blocks(2, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 5
# Not yet advanced to ACTIVE, height = 574 (will activate for block 576, not 575)
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'locked_in')
# Test both version 1 and version 2 transactions for all tests
# BIP113 test transaction will be modified before each use to put in appropriate block time
bip113tx_v1 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v1.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v1.nVersion = 1
bip113tx_v2 = self.create_transaction(self.nodes[0], bip113input, self.nodeaddress, Decimal("49.98"))
bip113tx_v2.vin[0].nSequence = 0xFFFFFFFE
bip113tx_v2.nVersion = 2
# For BIP68 test all 16 relative sequence locktimes
bip68txs_v1 = self.create_bip68txs(bip68inputs, 1)
bip68txs_v2 = self.create_bip68txs(bip68inputs, 2)
# For BIP112 test:
# 16 relative sequence locktimes of 10 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_v1 = self.create_bip112txs(bip112basicinputs[0], False, 1)
bip112txs_vary_nSequence_v2 = self.create_bip112txs(bip112basicinputs[0], False, 2)
# 16 relative sequence locktimes of 9 against 10 OP_CSV OP_DROP inputs
bip112txs_vary_nSequence_9_v1 = self.create_bip112txs(bip112basicinputs[1], False, 1, -1)
bip112txs_vary_nSequence_9_v2 = self.create_bip112txs(bip112basicinputs[1], False, 2, -1)
# sequence lock time of 10 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_v1 = self.create_bip112txs(bip112diverseinputs[0], True, 1)
bip112txs_vary_OP_CSV_v2 = self.create_bip112txs(bip112diverseinputs[0], True, 2)
# sequence lock time of 9 against 16 (relative_lock_time) OP_CSV OP_DROP inputs
bip112txs_vary_OP_CSV_9_v1 = self.create_bip112txs(bip112diverseinputs[1], True, 1, -1)
bip112txs_vary_OP_CSV_9_v2 = self.create_bip112txs(bip112diverseinputs[1], True, 2, -1)
# -1 OP_CSV OP_DROP input
bip112tx_special_v1 = self.create_bip112special(bip112specialinput, 1)
bip112tx_special_v2 = self.create_bip112special(bip112specialinput, 2)
### TESTING ###
##################################
### Before Soft Forks Activate ###
##################################
# All txs should pass
### Version 1 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
success_txs.append(bip113signed1)
success_txs.append(bip112tx_special_v1)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v1))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v1))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 6
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
success_txs = []
# add BIP113 tx and -1 CSV tx
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
success_txs.append(bip113signed2)
success_txs.append(bip112tx_special_v2)
# add BIP 68 txs
success_txs.extend(all_rlt_txs(bip68txs_v2))
# add BIP 112 with seq=10 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_v2))
# try BIP 112 with seq=9 txs
success_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2))
success_txs.extend(all_rlt_txs(bip112txs_vary_OP_CSV_9_v2))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 7
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# 1 more version 4 block to get us to height 575 so the fork should now be active for the next block
test_blocks = self.generate_blocks(1, 4)
yield TestInstance(test_blocks, sync_every_block=False) # 8
assert_equal(get_bip9_status(self.nodes[0], 'csv')['status'], 'active')
#################################
### After Soft Forks Activate ###
#################################
### BIP 113 ###
# BIP 113 tests should now fail regardless of version number if nLockTime isn't satisfied by new rules
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 # = MTP of prior block (not <) but < time put on current block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), False]]) # 9,10
# BIP 113 tests should now pass if the locktime is < MTP
bip113tx_v1.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed1 = self.sign_transaction(self.nodes[0], bip113tx_v1)
bip113tx_v2.nLockTime = self.last_block_time - 600 * 5 - 1 # < MTP of prior block
bip113signed2 = self.sign_transaction(self.nodes[0], bip113tx_v2)
for bip113tx in [bip113signed1, bip113signed2]:
yield TestInstance([[self.create_test_block([bip113tx]), True]]) # 11,12
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Next block height = 580 after 4 blocks of random version
test_blocks = self.generate_blocks(4, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 13
### BIP 68 ###
### Version 1 txs ###
# All still pass
success_txs = []
success_txs.extend(all_rlt_txs(bip68txs_v1))
yield TestInstance([[self.create_test_block(success_txs), True]]) # 14
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Version 2 txs ###
bip68success_txs = []
# All txs with SEQUENCE_LOCKTIME_DISABLE_FLAG set pass
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
bip68success_txs.append(bip68txs_v2[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 15
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# All txs without flag fail as we are at delta height = 8 < 10 and delta time = 8 * 600 < 10 * 512
bip68timetxs = []
for b25 in range(2):
for b18 in range(2):
bip68timetxs.append(bip68txs_v2[0][b25][1][b18])
for tx in bip68timetxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 16 - 19
bip68heighttxs = []
for b25 in range(2):
for b18 in range(2):
bip68heighttxs.append(bip68txs_v2[0][b25][0][b18])
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 20 - 23
# Advance one block to 581
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 24
# Height txs should fail and time txs should now pass 9 * 600 > 10 * 512
bip68success_txs.extend(bip68timetxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 25
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
for tx in bip68heighttxs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 26 - 29
# Advance one block to 582
test_blocks = self.generate_blocks(1, 1234)
yield TestInstance(test_blocks, sync_every_block=False) # 30
# All BIP 68 txs should pass
bip68success_txs.extend(bip68heighttxs)
yield TestInstance([[self.create_test_block(bip68success_txs), True]]) # 31
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### BIP 112 ###
### Version 1 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v1]), False]]) #32
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 1 txs should still pass
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v1[1][b25][b22][b18])
success_txs.append(bip112txs_vary_OP_CSV_9_v1[1][b25][b22][b18])
yield TestInstance([[self.create_test_block(success_txs), True]]) # 33
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV, version 1 txs should now fail
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_v1))
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v1))
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_v1[0][b25][b22][b18])
fail_txs.append(bip112txs_vary_OP_CSV_9_v1[0][b25][b22][b18])
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 34 - 81
### Version 2 txs ###
# -1 OP_CSV tx should fail
yield TestInstance([[self.create_test_block([bip112tx_special_v2]), False]]) #82
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in argument to OP_CSV, version 2 txs should pass (all sequence locks are met)
success_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_OP_CSV_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV
success_txs.append(bip112txs_vary_OP_CSV_9_v2[1][b25][b22][b18]) # 8/16 of vary_OP_CSV_9
yield TestInstance([[self.create_test_block(success_txs), True]]) # 83
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
## SEQUENCE_LOCKTIME_DISABLE_FLAG is unset in argument to OP_CSV for all remaining txs ##
# All txs with nSequence 9 should fail either due to earlier mismatch or failing the CSV check
fail_txs = []
fail_txs.extend(all_rlt_txs(bip112txs_vary_nSequence_9_v2)) # 16/16 of vary_nSequence_9
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_OP_CSV_9_v2[0][b25][b22][b18]) # 16/16 of vary_OP_CSV_9
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 84 - 107
# If SEQUENCE_LOCKTIME_DISABLE_FLAG is set in nSequence, tx should fail
fail_txs = []
for b25 in range(2):
for b22 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[1][b25][b22][b18]) # 8/16 of vary_nSequence
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 108-115
# If sequencelock types mismatch, tx should fail
fail_txs = []
for b25 in range(2):
for b18 in range(2):
fail_txs.append(bip112txs_vary_nSequence_v2[0][b25][1][b18]) # 12/16 of vary_nSequence
fail_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][1][b18]) # 12/16 of vary_OP_CSV
for tx in fail_txs:
yield TestInstance([[self.create_test_block([tx]), False]]) # 116-123
# Remaining txs should pass, just test masking works properly
success_txs = []
for b25 in range(2):
for b18 in range(2):
success_txs.append(bip112txs_vary_nSequence_v2[0][b25][0][b18]) # 16/16 of vary_nSequence
success_txs.append(bip112txs_vary_OP_CSV_v2[0][b25][0][b18]) # 16/16 of vary_OP_CSV
yield TestInstance([[self.create_test_block(success_txs), True]]) # 124
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Additional test, of checking that comparison of two time types works properly
time_txs = []
for b25 in range(2):
for b18 in range(2):
tx = bip112txs_vary_OP_CSV_v2[0][b25][1][b18]
tx.vin[0].nSequence = base_relative_locktime | seq_type_flag
signtx = self.sign_transaction(self.nodes[0], tx)
time_txs.append(signtx)
yield TestInstance([[self.create_test_block(time_txs), True]]) # 125
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
### Missing aspects of test
## Testing empty stack fails
if __name__ == '__main__':
BIP68_112_113Test().main()
| 50.574861 | 177 | 0.649574 |
5c41f219d03341c71aea49382e8d5a6ed7e50e2c | 265 | py | Python | setup.py | Fatalerr/pcaptools | b08c64b731e32d25912eadfb261ca6a445bb0d56 | [
"Apache-2.0"
] | null | null | null | setup.py | Fatalerr/pcaptools | b08c64b731e32d25912eadfb261ca6a445bb0d56 | [
"Apache-2.0"
] | null | null | null | setup.py | Fatalerr/pcaptools | b08c64b731e32d25912eadfb261ca6a445bb0d56 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
from setuptools import setup
# NOTE: the original passed ``package_dir="pcaptools"``; ``package_dir`` must
# be a {package: directory} mapping, so a bare string is invalid and the
# ``pcaptools`` package was never actually declared.  ``packages`` with the
# default source layout is the intended declaration.
setup(
    name='pcaptools',
    version='0.1',
    description="This is a convinence tool for pcap analysis",
    author='Jun Liu',
    author_email="liujun.gz@live.com",
    packages=['pcaptools'],
)
56ee1f8eb0fee2fef112c7fd7f471c3bde3ac267 | 9,640 | py | Python | boto3/dynamodb/types.py | cno-io/pacu | a63f1a7fe37bda09a9f5f01f7523815df31487c8 | [
"BSD-3-Clause"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | boto3/dynamodb/types.py | cno-io/pacu | a63f1a7fe37bda09a9f5f01f7523815df31487c8 | [
"BSD-3-Clause"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | boto3/dynamodb/types.py | cno-io/pacu | a63f1a7fe37bda09a9f5f01f7523815df31487c8 | [
"BSD-3-Clause"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from collections import Mapping, Set
from decimal import Decimal, Context, Clamped
from decimal import Overflow, Inexact, Underflow, Rounded
from botocore.compat import six
STRING = 'S'
NUMBER = 'N'
BINARY = 'B'
STRING_SET = 'SS'
NUMBER_SET = 'NS'
BINARY_SET = 'BS'
NULL = 'NULL'
BOOLEAN = 'BOOL'
MAP = 'M'
LIST = 'L'
DYNAMODB_CONTEXT = Context(
Emin=-128, Emax=126, prec=38,
traps=[Clamped, Overflow, Inexact, Rounded, Underflow])
BINARY_TYPES = (bytearray, six.binary_type)
class Binary(object):
    """Wrapper marking a value as DynamoDB Binary data.

    Use this class (especially on Python 2) to state explicitly that an
    attribute holds binary data.  Unicode and Python 3 string types are
    rejected at construction time.
    """

    def __init__(self, value):
        if not isinstance(value, BINARY_TYPES):
            accepted = ', '.join([str(t) for t in BINARY_TYPES])
            raise TypeError('Value must be of the following types: %s.' %
                            accepted)
        self.value = value

    def __eq__(self, other):
        # Compare against the wrapped payload of another Binary, or against
        # a raw bytes-like value directly.
        other_value = other.value if isinstance(other, Binary) else other
        return self.value == other_value

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return 'Binary(%r)' % self.value

    def __str__(self):
        return self.value

    def __hash__(self):
        return hash(self.value)
class TypeSerializer(object):
    """Serialize native Python values into DynamoDB wire-format types."""

    def serialize(self, value):
        """Serialize a Python value to its DynamoDB representation.

        :param value: A python value to be serialized to DynamoDB.
            Conversions performed:

            Python                                  DynamoDB
            ------                                  --------
            None                                    {'NULL': True}
            True/False                              {'BOOL': True/False}
            int/Decimal                             {'N': str(value)}
            string                                  {'S': string}
            Binary/bytearray/bytes (py3 only)       {'B': bytes}
            set([int/Decimal])                      {'NS': [str(value)]}
            set([string])                           {'SS': [string])
            set([Binary/bytearray/bytes])           {'BS': [bytes]}
            list                                    {'L': list}
            dict                                    {'M': dict}

            Use ``Decimal`` for numbers and ``Binary`` for binary data so
            values round-trip cleanly through DynamoDB.

        :rtype: dict
        :returns: A dictionary representing a DynamoDB data type, suitable
            for passing directly to botocore methods.
        """
        tag = self._get_dynamodb_type(value)
        handler = getattr(self, '_serialize_%s' % tag.lower())
        return {tag: handler(value)}

    def _get_dynamodb_type(self, value):
        # Ordered predicate table; the order mirrors DynamoDB's type
        # precedence (e.g. bool must be tested before number, since bool is
        # an int subclass).
        checks = (
            (self._is_null, NULL),
            (self._is_boolean, BOOLEAN),
            (self._is_number, NUMBER),
            (self._is_string, STRING),
            (self._is_binary, BINARY),
            (lambda v: self._is_type_set(v, self._is_number), NUMBER_SET),
            (lambda v: self._is_type_set(v, self._is_string), STRING_SET),
            (lambda v: self._is_type_set(v, self._is_binary), BINARY_SET),
            (self._is_map, MAP),
            (self._is_list, LIST),
        )
        for predicate, tag in checks:
            if predicate(value):
                return tag
        msg = 'Unsupported type "%s" for value "%s"' % (type(value), value)
        raise TypeError(msg)

    def _is_null(self, value):
        return value is None

    def _is_boolean(self, value):
        return isinstance(value, bool)

    def _is_number(self, value):
        if isinstance(value, (six.integer_types, Decimal)):
            return True
        if isinstance(value, float):
            # Floats are deliberately rejected: they cannot round-trip
            # exactly through DynamoDB's decimal representation.
            raise TypeError(
                'Float types are not supported. Use Decimal types instead.')
        return False

    def _is_string(self, value):
        return isinstance(value, six.string_types)

    def _is_binary(self, value):
        if isinstance(value, (Binary, bytearray)):
            return True
        return six.PY3 and isinstance(value, six.binary_type)

    def _is_set(self, value):
        return isinstance(value, Set)

    def _is_type_set(self, value, type_validator):
        # Every element must satisfy the validator for a homogeneous set.
        return self._is_set(value) and all(
            type_validator(element) for element in value)

    def _is_map(self, value):
        return isinstance(value, Mapping)

    def _is_list(self, value):
        return isinstance(value, list)

    def _serialize_null(self, value):
        return True

    def _serialize_bool(self, value):
        return value

    def _serialize_n(self, value):
        number = str(DYNAMODB_CONTEXT.create_decimal(value))
        if number in ('Infinity', 'NaN'):
            raise TypeError('Infinity and NaN not supported')
        return number

    def _serialize_s(self, value):
        return value

    def _serialize_b(self, value):
        # Unwrap a Binary wrapper down to its raw payload.
        return value.value if isinstance(value, Binary) else value

    def _serialize_ss(self, value):
        return list(map(self._serialize_s, value))

    def _serialize_ns(self, value):
        return list(map(self._serialize_n, value))

    def _serialize_bs(self, value):
        return list(map(self._serialize_b, value))

    def _serialize_l(self, value):
        return [self.serialize(element) for element in value]

    def _serialize_m(self, value):
        return {k: self.serialize(v) for k, v in value.items()}
class TypeDeserializer(object):
    """Deserialize DynamoDB wire-format values back into Python types."""

    def deserialize(self, value):
        """Deserialize a DynamoDB value to its Python representation.

        :param value: A DynamoDB value to be deserialized to a pythonic
            value.  Conversions performed:

            DynamoDB                                Python
            --------                                ------
            {'NULL': True}                          None
            {'BOOL': True/False}                    True/False
            {'N': str(value)}                       Decimal(str(value))
            {'S': string}                           string
            {'B': bytes}                            Binary(bytes)
            {'NS': [str(value)]}                    set([Decimal(str(value))])
            {'SS': [string]}                        set([string])
            {'BS': [bytes]}                         set([bytes])
            {'L': list}                             list
            {'M': dict}                             dict

        :returns: The pythonic value of the DynamoDB type.
        """
        if not value:
            raise TypeError('Value must be a nonempty dictionary whose key '
                            'is a valid dynamodb type.')
        dynamodb_type = next(iter(value.keys()))
        handler = getattr(
            self, '_deserialize_%s' % dynamodb_type.lower(), None)
        if handler is None:
            raise TypeError(
                'Dynamodb type %s is not supported' % dynamodb_type)
        return handler(value[dynamodb_type])

    def _deserialize_null(self, value):
        return None

    def _deserialize_bool(self, value):
        return value

    def _deserialize_n(self, value):
        return DYNAMODB_CONTEXT.create_decimal(value)

    def _deserialize_s(self, value):
        return value

    def _deserialize_b(self, value):
        return Binary(value)

    def _deserialize_ns(self, value):
        return {self._deserialize_n(item) for item in value}

    def _deserialize_ss(self, value):
        return {self._deserialize_s(item) for item in value}

    def _deserialize_bs(self, value):
        return {self._deserialize_b(item) for item in value}

    def _deserialize_l(self, value):
        return [self.deserialize(item) for item in value]

    def _deserialize_m(self, value):
        return {k: self.deserialize(v) for k, v in value.items()}
| 32.348993 | 79 | 0.568361 |
5b722c03f81a5e4ac537b9ccd2edc85c661f3b81 | 7,442 | py | Python | meshsegnet.py | ZhangPingping/MeshSegNet | f7f4e41919bd270f6baf0d776b466a1e394e3d54 | [
"MIT"
] | 100 | 2020-06-24T11:24:06.000Z | 2022-03-31T07:15:09.000Z | meshsegnet.py | ZhangPingping/MeshSegNet | f7f4e41919bd270f6baf0d776b466a1e394e3d54 | [
"MIT"
] | 37 | 2020-06-24T05:27:17.000Z | 2022-03-31T16:35:53.000Z | meshsegnet.py | ZhangPingping/MeshSegNet | f7f4e41919bd270f6baf0d776b466a1e394e3d54 | [
"MIT"
] | 54 | 2020-06-15T13:16:46.000Z | 2022-03-31T08:42:10.000Z | import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import numpy as np
class STN3d(nn.Module):
    """Spatial transformer predicting a 3x3 input-alignment matrix.

    Input ``x``: ``(batch, channel, n_points)``.
    Output: ``(batch, 3, 3)`` transform.  The identity matrix is added to
    the regression head's output, so a zeroed head yields the identity.
    """

    def __init__(self, channel):
        super(STN3d, self).__init__()
        self.conv1 = torch.nn.Conv1d(channel, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 1024, 1)
        self.fc1 = nn.Linear(1024, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 9)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(1024)
        self.bn4 = nn.BatchNorm1d(512)
        self.bn5 = nn.BatchNorm1d(256)

    def forward(self, x):
        batchsize = x.size()[0]
        # Per-point shared MLP, then global max-pool over the point axis.
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 1024)
        # Regress the 9 entries of the transform.
        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x)
        # Bias towards the identity transform.  This replaces the deprecated
        # Variable/numpy round-trip and the brittle x.get_device() call
        # (which fails on CPU tensors); building on x.device works anywhere.
        iden = torch.eye(3, dtype=x.dtype, device=x.device).view(
            1, 9).repeat(batchsize, 1)
        x = x + iden
        x = x.view(-1, 3, 3)
        return x
class STNkd(nn.Module):
    """Spatial transformer predicting a k x k feature-space transform.

    Input ``x``: ``(batch, k, n_points)``; output: ``(batch, k, k)``.
    The identity matrix is added to the regression output, so a zeroed
    head yields the identity transform.
    """

    def __init__(self, k=64):
        super(STNkd, self).__init__()
        self.conv1 = torch.nn.Conv1d(k, 64, 1)
        self.conv2 = torch.nn.Conv1d(64, 128, 1)
        self.conv3 = torch.nn.Conv1d(128, 512, 1)
        self.fc1 = nn.Linear(512, 256)
        self.fc2 = nn.Linear(256, 128)
        self.fc3 = nn.Linear(128, k * k)
        self.relu = nn.ReLU()
        self.bn1 = nn.BatchNorm1d(64)
        self.bn2 = nn.BatchNorm1d(128)
        self.bn3 = nn.BatchNorm1d(512)
        self.bn4 = nn.BatchNorm1d(256)
        self.bn5 = nn.BatchNorm1d(128)
        self.k = k

    def forward(self, x):
        batchsize = x.size()[0]
        # Per-point shared MLP followed by a global max-pool.
        x = F.relu(self.bn1(self.conv1(x)))
        x = F.relu(self.bn2(self.conv2(x)))
        x = F.relu(self.bn3(self.conv3(x)))
        x = torch.max(x, 2, keepdim=True)[0]
        x = x.view(-1, 512)
        x = F.relu(self.bn4(self.fc1(x)))
        x = F.relu(self.bn5(self.fc2(x)))
        x = self.fc3(x)
        # Bias towards identity; replaces the deprecated Variable/numpy
        # round-trip and the CPU-incompatible x.get_device() call.
        iden = torch.eye(self.k, dtype=x.dtype, device=x.device).view(
            1, self.k * self.k).repeat(batchsize, 1)
        x = x + iden
        x = x.view(-1, self.k, self.k)
        return x


class MeshSegNet(nn.Module):
    """Per-cell semantic segmentation network for 3D meshes (MeshSegNet).

    forward(x, a_s, a_l):
        x   -- (batch, num_channels, n_cells) per-cell input features
        a_s -- (batch, n_cells, n_cells) short-range adjacency matrix
        a_l -- (batch, n_cells, n_cells) long-range adjacency matrix
    Returns (batch, n_cells, num_classes) softmax class probabilities.
    """

    def __init__(self, num_classes=15, num_channels=15, with_dropout=True, dropout_p=0.5):
        super(MeshSegNet, self).__init__()
        self.num_classes = num_classes
        self.num_channels = num_channels
        self.with_dropout = with_dropout
        self.dropout_p = dropout_p
        # MLP-1 [64, 64]
        self.mlp1_conv1 = torch.nn.Conv1d(self.num_channels, 64, 1)
        self.mlp1_conv2 = torch.nn.Conv1d(64, 64, 1)
        self.mlp1_bn1 = nn.BatchNorm1d(64)
        self.mlp1_bn2 = nn.BatchNorm1d(64)
        # FTM (feature-transformer module)
        self.fstn = STNkd(k=64)
        # GLM-1 (graph-constrained learning module)
        self.glm1_conv1_1 = torch.nn.Conv1d(64, 32, 1)
        self.glm1_conv1_2 = torch.nn.Conv1d(64, 32, 1)
        self.glm1_bn1_1 = nn.BatchNorm1d(32)
        self.glm1_bn1_2 = nn.BatchNorm1d(32)
        self.glm1_conv2 = torch.nn.Conv1d(32 + 32, 64, 1)
        self.glm1_bn2 = nn.BatchNorm1d(64)
        # MLP-2
        self.mlp2_conv1 = torch.nn.Conv1d(64, 64, 1)
        self.mlp2_bn1 = nn.BatchNorm1d(64)
        self.mlp2_conv2 = torch.nn.Conv1d(64, 128, 1)
        self.mlp2_bn2 = nn.BatchNorm1d(128)
        self.mlp2_conv3 = torch.nn.Conv1d(128, 512, 1)
        self.mlp2_bn3 = nn.BatchNorm1d(512)
        # GLM-2 (graph-constrained learning module, short + long range)
        self.glm2_conv1_1 = torch.nn.Conv1d(512, 128, 1)
        self.glm2_conv1_2 = torch.nn.Conv1d(512, 128, 1)
        self.glm2_conv1_3 = torch.nn.Conv1d(512, 128, 1)
        self.glm2_bn1_1 = nn.BatchNorm1d(128)
        self.glm2_bn1_2 = nn.BatchNorm1d(128)
        self.glm2_bn1_3 = nn.BatchNorm1d(128)
        self.glm2_conv2 = torch.nn.Conv1d(128 * 3, 512, 1)
        self.glm2_bn2 = nn.BatchNorm1d(512)
        # MLP-3
        self.mlp3_conv1 = torch.nn.Conv1d(64 + 512 + 512 + 512, 256, 1)
        self.mlp3_conv2 = torch.nn.Conv1d(256, 256, 1)
        self.mlp3_bn1_1 = nn.BatchNorm1d(256)
        self.mlp3_bn1_2 = nn.BatchNorm1d(256)
        self.mlp3_conv3 = torch.nn.Conv1d(256, 128, 1)
        self.mlp3_conv4 = torch.nn.Conv1d(128, 128, 1)
        self.mlp3_bn2_1 = nn.BatchNorm1d(128)
        self.mlp3_bn2_2 = nn.BatchNorm1d(128)
        # Output head
        self.output_conv = torch.nn.Conv1d(128, self.num_classes, 1)
        if self.with_dropout:
            self.dropout = nn.Dropout(p=self.dropout_p)

    def forward(self, x, a_s, a_l):
        batchsize = x.size()[0]
        n_pts = x.size()[2]
        # MLP-1: per-cell feature extraction.
        x = F.relu(self.mlp1_bn1(self.mlp1_conv1(x)))
        x = F.relu(self.mlp1_bn2(self.mlp1_conv2(x)))
        # FTM: learn a 64x64 feature transform and apply it.
        trans_feat = self.fstn(x)
        x = x.transpose(2, 1)
        x_ftm = torch.bmm(x, trans_feat)
        # GLM-1: fuse features with short-range graph-smoothed features.
        sap = torch.bmm(a_s, x_ftm)
        sap = sap.transpose(2, 1)
        x_ftm = x_ftm.transpose(2, 1)
        x = F.relu(self.glm1_bn1_1(self.glm1_conv1_1(x_ftm)))
        glm_1_sap = F.relu(self.glm1_bn1_2(self.glm1_conv1_2(sap)))
        x = torch.cat([x, glm_1_sap], dim=1)
        x = F.relu(self.glm1_bn2(self.glm1_conv2(x)))
        # MLP-2
        x = F.relu(self.mlp2_bn1(self.mlp2_conv1(x)))
        x = F.relu(self.mlp2_bn2(self.mlp2_conv2(x)))
        x_mlp2 = F.relu(self.mlp2_bn3(self.mlp2_conv3(x)))
        if self.with_dropout:
            x_mlp2 = self.dropout(x_mlp2)
        # GLM-2: fuse with both short- and long-range smoothed features.
        x_mlp2 = x_mlp2.transpose(2, 1)
        sap_1 = torch.bmm(a_s, x_mlp2)
        sap_2 = torch.bmm(a_l, x_mlp2)
        x_mlp2 = x_mlp2.transpose(2, 1)
        sap_1 = sap_1.transpose(2, 1)
        sap_2 = sap_2.transpose(2, 1)
        x = F.relu(self.glm2_bn1_1(self.glm2_conv1_1(x_mlp2)))
        glm_2_sap_1 = F.relu(self.glm2_bn1_2(self.glm2_conv1_2(sap_1)))
        glm_2_sap_2 = F.relu(self.glm2_bn1_3(self.glm2_conv1_3(sap_2)))
        x = torch.cat([x, glm_2_sap_1, glm_2_sap_2], dim=1)
        x_glm2 = F.relu(self.glm2_bn2(self.glm2_conv2(x)))
        # GMP: global max-pool over cells, then broadcast back to each cell.
        x = torch.max(x_glm2, 2, keepdim=True)[0]
        # F.interpolate avoids allocating a fresh nn.Upsample module per
        # forward pass; nearest-neighbour resize of a length-1 signal is
        # equivalent to repeating the global feature at every cell.
        x = F.interpolate(x, size=n_pts, mode='nearest')
        # Dense fusion of all intermediate representations.
        x = torch.cat([x, x_ftm, x_mlp2, x_glm2], dim=1)
        # MLP-3
        x = F.relu(self.mlp3_bn1_1(self.mlp3_conv1(x)))
        x = F.relu(self.mlp3_bn1_2(self.mlp3_conv2(x)))
        x = F.relu(self.mlp3_bn2_1(self.mlp3_conv3(x)))
        if self.with_dropout:
            x = self.dropout(x)
        x = F.relu(self.mlp3_bn2_2(self.mlp3_conv4(x)))
        # Output: per-cell class probabilities (softmax over classes).
        x = self.output_conv(x)
        x = x.transpose(2, 1).contiguous()
        x = F.softmax(x.view(-1, self.num_classes), dim=-1)
        x = x.view(batchsize, n_pts, self.num_classes)
        return x
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = MeshSegNet().to(device)
    # ``summary`` comes from the third-party ``torchsummary`` package but was
    # never imported, so this block always raised NameError.  Treat it as an
    # optional dependency and fall back to the built-in module repr.
    try:
        from torchsummary import summary
        summary(model, [(15, 6000), (6000, 6000), (6000, 6000)])
    except ImportError:
        print(model)
2f855485e8713b57d9f1534e10ca14e66a40c28d | 3,487 | py | Python | venv/lib/python3.6/site-packages/botocore/__init__.py | owenabrams/greyapp | 05e1a7a7719f9974cb4424154c11338567383f96 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/botocore/__init__.py | owenabrams/greyapp | 05e1a7a7719f9974cb4424154c11338567383f96 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/botocore/__init__.py | owenabrams/greyapp | 05e1a7a7719f9974cb4424154c11338567383f96 | [
"MIT"
] | null | null | null | # Copyright (c) 2012-2013 Mitch Garnaat http://garnaat.org/
# Copyright 2012-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import re
import logging
__version__ = '1.10.65'
class NullHandler(logging.Handler):
    """Logging handler that silently discards every record.

    Attached to the library logger below so importing the package emits
    no output unless the application configures logging itself.
    """

    def emit(self, record):
        # Intentionally do nothing with the record.
        pass
# Configure default logger to do nothing
log = logging.getLogger('botocore')
log.addHandler(NullHandler())
# Regexes used by ``xform_name`` to insert separators at camelCase
# boundaries, e.g. "CreateThing" -> "create_thing".
_first_cap_regex = re.compile('(.)([A-Z][a-z]+)')
_end_cap_regex = re.compile('([a-z0-9])([A-Z])')
# Handles pluralized acronyms such as GatewayARNs, ListWebACLs, SomeCNAMEs.
_special_case_transform = re.compile('[A-Z]{3,}s$')
# Cache pre-seeded with names whose correct transformation does not match
# the regular regex-based rules.
_xform_cache = {
    ('CreateCachediSCSIVolume', '_'): 'create_cached_iscsi_volume',
    ('CreateCachediSCSIVolume', '-'): 'create-cached-iscsi-volume',
    ('DescribeCachediSCSIVolumes', '_'): 'describe_cached_iscsi_volumes',
    ('DescribeCachediSCSIVolumes', '-'): 'describe-cached-iscsi-volumes',
    ('DescribeStorediSCSIVolumes', '_'): 'describe_stored_iscsi_volumes',
    ('DescribeStorediSCSIVolumes', '-'): 'describe-stored-iscsi-volumes',
    ('CreateStorediSCSIVolume', '_'): 'create_stored_iscsi_volume',
    ('CreateStorediSCSIVolume', '-'): 'create-stored-iscsi-volume',
    ('ListHITsForQualificationType', '_'): 'list_hits_for_qualification_type',
    ('ListHITsForQualificationType', '-'): 'list-hits-for-qualification-type',
}
ScalarTypes = ('string', 'integer', 'boolean', 'timestamp', 'float', 'double')
BOTOCORE_ROOT = os.path.dirname(os.path.abspath(__file__))


class UNSIGNED(object):
    """Sentinel used to request an anonymous (unsigned) request signature.

    Copy operations return the same object, keeping the sentinel a
    singleton so identity checks remain valid.
    """

    def __copy__(self):
        return self

    def __deepcopy__(self, memodict):
        return self


# Replace the class with its single instance; code uses UNSIGNED as a value.
UNSIGNED = UNSIGNED()


def xform_name(name, sep='_', _xform_cache=_xform_cache):
    """Convert camel case to a "pythonic" name.

    If the name already contains the ``sep`` character it is assumed to be
    transformed and is returned unchanged.  Results are memoized in
    ``_xform_cache`` keyed by ``(name, sep)``.
    """
    if sep in name:
        return name
    key = (name, sep)
    if key in _xform_cache:
        return _xform_cache[key]
    transformed = name
    special = _special_case_transform.search(transformed)
    if special is not None:
        # Turn something like ARNs / ACLs into _arns / _acls.
        matched = special.group()
        transformed = transformed[:-len(matched)] + sep + matched.lower()
    partial = _first_cap_regex.sub(r'\1' + sep + r'\2', transformed)
    result = _end_cap_regex.sub(r'\1' + sep + r'\2', partial).lower()
    _xform_cache[key] = result
    return result
| 37.494624 | 78 | 0.704044 |
90b82e59552d096aca9fbece500b6f96b5426d29 | 108 | py | Python | kibitzr/__init__.py | cescobarresi/kibitzr | 629f894d8d102657bc2c27edda2eb47ff51f1fa8 | [
"MIT"
] | null | null | null | kibitzr/__init__.py | cescobarresi/kibitzr | 629f894d8d102657bc2c27edda2eb47ff51f1fa8 | [
"MIT"
] | null | null | null | kibitzr/__init__.py | cescobarresi/kibitzr | 629f894d8d102657bc2c27edda2eb47ff51f1fa8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'Peter Demin'
__email__ = 'kibitzrrr@gmail.com'
__version__ = '5.3.4'
| 18 | 33 | 0.648148 |
16b339b80fdf529c13c710561ed38636a71de451 | 17,337 | py | Python | src/allennlp/fortex/allennlp/allennlp_processors.py | Piyush13y/forte-wrappers | 250df428a8705f769d53eb070f89c3f66e713015 | [
"Apache-2.0"
] | 3 | 2021-06-17T18:52:00.000Z | 2022-01-11T19:15:21.000Z | src/allennlp/fortex/allennlp/allennlp_processors.py | Piyush13y/forte-wrappers | 250df428a8705f769d53eb070f89c3f66e713015 | [
"Apache-2.0"
] | 66 | 2021-03-30T15:04:11.000Z | 2022-03-24T04:35:11.000Z | src/allennlp/fortex/allennlp/allennlp_processors.py | Piyush13y/forte-wrappers | 250df428a8705f769d53eb070f89c3f66e713015 | [
"Apache-2.0"
] | 10 | 2021-03-16T19:48:31.000Z | 2022-03-01T05:48:17.000Z | # Copyright 2021 The Forte Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import logging
from typing import Any, Dict, Iterator, List, Set
import more_itertools
from transformers import BertTokenizer
from allennlp.predictors import Predictor
from ft.onto.base_ontology import (
Token,
Dependency,
PredicateLink,
PredicateArgument,
PredicateMention,
)
from forte.common import ProcessorConfigError
from forte.common.configuration import Config
from forte.common.resources import Resources
from forte.data.data_pack import DataPack, as_entry_type
from forte.processors.base import PackProcessor
from fortex.allennlp.utils_processor import (
parse_allennlp_srl_tags,
parse_allennlp_srl_results,
)
logger = logging.getLogger(__name__)
__all__ = [
"AllenNLPProcessor",
]
# pylint: disable=line-too-long
MODEL2URL = {
"stanford": "https://storage.googleapis.com/allennlp-public-models"
"/biaffine-dependency-parser-ptb-2020.04.06.tar.gz",
"srl": "https://storage.googleapis.com/allennlp-public-models/bert-base"
"-srl-2020.11.19.tar.gz",
# TODO: The UD model seems to be broken at this moment.
"universal": "https://storage.googleapis.com/allennlp-public-models"
"/biaffine-dependency-parser-ud-2020.02.10.tar.gz",
}
class AllenNLPProcessor(PackProcessor):
"""
Processor for AllenNLP predictors. It currently supports POS tagging
(`pos`), Dependency Parsing (`depparse`), Semantic Role Labeling (`srl`).
Check the `default_configs` method on how to specify the behavior of this
processor.
There are a few assumptions for this processor:
- Sentences should be pre-annotated. The sentences can be stored in the
provided type via the "sentence_type" configuration.
- Long sentences will be skipped if they exceed the Bert model limit.
- If unknown run time exception happens during predicting a batch, the
results of the whole batch may be discarded.
"""
# pylint: disable=attribute-defined-outside-init,unused-argument
def initialize(self, resources: Resources, configs: Config):
    """Validate the configuration and load the AllenNLP predictors.

    Args:
        resources: Shared pipeline resources (passed through to the base
            class).
        configs: Processor configuration; see ``default_configs`` for the
            supported keys.

    Raises:
        ProcessorConfigError: if pos/depparse/srl is requested without
            ``tokenize``, or ``tag_formalism`` is not a known model key.
    """
    super().initialize(resources, configs)
    # pos/depparse/srl all operate on tokens, so tokenization is mandatory.
    # (The original condition tested "depparse" twice; once suffices.)
    if (
        "pos" in configs.processors
        or "depparse" in configs.processors
        or "srl" in configs.processors
    ):
        if "tokenize" not in self.configs.processors:
            raise ProcessorConfigError(
                "tokenize is necessary in "
                "configs.processors for "
                "pos, depparse or srl"
            )
    # Assign cuda devices to the loaded models in round-robin order.
    cuda_devices = itertools.cycle(configs["cuda_devices"])
    if configs.tag_formalism not in MODEL2URL:
        raise ProcessorConfigError("Incorrect value for tag_formalism")
    self.predictor = {}
    if configs.tag_formalism == "stanford":
        self.predictor = {
            "stanford": Predictor.from_path(
                configs["stanford_url"], cuda_device=next(cuda_devices)
            )
        }
    if "srl" in configs.processors:
        # A parser seems to be needed for SRL.
        if "stanford" not in self.predictor:
            self.predictor["stanford"] = Predictor.from_path(
                configs["stanford_url"], cuda_device=next(cuda_devices)
            )
        self.predictor["srl"] = Predictor.from_path(
            configs["srl_url"], cuda_device=next(cuda_devices)
        )
    if configs.overwrite_entries:
        logger.warning(
            "`overwrite_entries` is set to True, this means "
            "that the entries of the same type as produced by "
            "this processor will be overwritten if found."
        )
        if configs.allow_parallel_entries:
            logger.warning(
                "Both `overwrite_entries` (whether to overwrite"
                " the entries of the same type as produced by "
                "this processor) and "
                "`allow_parallel_entries` (whether to allow "
                "similar new entries when they already exist) "
                "are True, all existing conflicting entries "
                "will be deleted."
            )
    else:
        if not configs.allow_parallel_entries:
            logger.warning(
                "Both `overwrite_entries` (whether to overwrite"
                " the entries of the same type as produced by "
                "this processor) and "
                "`allow_parallel_entries` (whether to allow "
                "similar new entries when they already exist) "
                "are False, processor will only run if there "
                "are no existing conflicting entries."
            )
    self._sentence_type = as_entry_type(self.configs.sentence_type)
    # Used to estimate input sentence length so sub-words do not exceed the
    # Bert model limit; lazily initialized on first use.
    self._subword_tokenizer = None
@classmethod
def default_configs(cls):
    """Return the default configuration dictionary for this processor.

    Keys:
    - "processors": comma separated operations run on each sentence;
      supported predictors are `tokenize`, `pos`, `depparse` and `srl`.
      Default is `tokenize,pos,depparse`.
    - "tag_formalism": format of POS tags and dependency parses, either
      `universal` or `stanford` (default `stanford`).
    - "overwrite_entries": whether entries of the same type as produced
      by this processor are overwritten (default False).
    - "allow_parallel_entries": whether similar new entries are allowed
      when they already exist (e.g. new tokens with identical spans);
      only consulted when `overwrite_entries` is False.
    - "stanford_url" / "srl_url" / "universal_url": model archive
      locations; defaults come from `MODEL2URL`.
    - "cuda_devices": list of cuda/gpu device ids available to this
      processor, assigned to models round-robin (-1 means cpu).
    - "infer_batch_size": maximum sentences per predict batch; a value
      <= 0 means no limit.
    - "sentence_type": entry type representing sentences; default is
      `ft.onto.base_ontology.Sentence`.

    Returns: A dictionary with the default config for this processor.
    """
    return dict(
        processors="tokenize,pos,depparse",
        tag_formalism="stanford",
        overwrite_entries=False,
        allow_parallel_entries=True,
        stanford_url=MODEL2URL["stanford"],
        srl_url=MODEL2URL["srl"],
        universal_url=MODEL2URL["universal"],
        cuda_devices=[-1],
        infer_batch_size=0,
        sentence_type="ft.onto.base_ontology.Sentence",
    )
    def _process(self, input_pack: DataPack):
        """Run the configured AllenNLP predictors over every sentence of
        ``input_pack`` and write the resulting tokens, dependencies and SRL
        structures back into the pack.

        Sentences are grouped into batches of ``infer_batch_size`` (one
        single batch when the limit is <= 0). A batch whose prediction
        raises ``RuntimeError`` is logged and skipped entirely.
        """
        # handle existing entries
        self._process_existing_entries(input_pack)
        batch_size: int = self.configs["infer_batch_size"]
        batches: Iterator[Iterator]
        # Need a copy of the one-pass iterators to support a second loop on
        # them. All other ways around it like using `itertools.tee` and `list`
        # would require extra storage conflicting with the idea of using
        # iterators in the first place. `more_itertools.ichunked` uses
        # `itertools.tee` under the hood but our usage (reading iterators
        # in order) does not cause memory issues.
        batches_copy: Iterator[Iterator]
        if batch_size <= 0:
            # No batching: the whole pack is one batch.
            batches = iter([input_pack.get(self._sentence_type)])
            batches_copy = iter([input_pack.get(self._sentence_type)])
        else:
            batches = more_itertools.ichunked(
                input_pack.get(self._sentence_type), batch_size
            )
            batches_copy = more_itertools.ichunked(
                input_pack.get(self._sentence_type), batch_size
            )
        for sentences, sentences_copy in zip(batches, batches_copy):
            inputs: List[Dict[str, str]] = []
            # skips[i] is True when sentence i is blank and its (placeholder)
            # prediction must be ignored.
            skips = []
            for s in sentences:
                t = s.text.strip()
                if not t == "":
                    inputs.append({"sentence": t})
                    skips.append(False)
                else:
                    # Use placeholder, replace white space with dot.
                    inputs.append({"sentence": "." * len(s.text)})
                    # This result is useless, skip it.
                    skips.append(True)
            # Handle when the data is empty.
            if len(inputs) == 0:
                continue
            try:
                # One prediction list per model name (e.g. "stanford", "srl").
                raw_results: Dict[str, List[Dict[str, Any]]] = {
                    k: self._predict_with_exception_retry(k, p, inputs)
                    for k, p in self.predictor.items()
                }
            except RuntimeError:
                for i, s in enumerate(skips):
                    skips[i] = True
                logging.exception(
                    "Encounter RuntimeError while performing prediction. "
                    "This batch will be skipped."
                )
                continue
            # Second pass over the copied iterator: merge the per-model
            # results of each sentence and create the output entries.
            for i, sent in enumerate(sentences_copy):
                if skips[i]:
                    continue
                result: Dict[str, List[str]] = {}
                for key in self.predictor:
                    raw_result = raw_results[key]
                    if raw_result is not None:
                        if key == "srl":
                            result.update(
                                parse_allennlp_srl_results(
                                    raw_result[i]["verbs"]
                                )
                            )
                        else:
                            result.update(raw_result[i])
                if "tokenize" in self.configs.processors:
                    # creating new tokens and dependencies
                    tokens = self._create_tokens(input_pack, sent, result)
                    if "depparse" in self.configs.processors:
                        self._create_dependencies(input_pack, tokens, result)
                    if "srl" in self.configs.processors:
                        self._create_srl(input_pack, tokens, result)
    def _predict_with_exception_retry(
        self, model_name: str, predictor: Predictor, inputs
    ):
        """
        Run the predictor on the inputs. If a RuntimeError is thrown, try to
        check if it is caused by long sentences in SRL models. Try to remove
        those sentence and run again. But this step may still fail if there
        are other causes.

        NOTE(review): when ``model_name`` is not ``"srl"`` the RuntimeError
        is swallowed and the method implicitly returns None; the caller in
        ``_process`` guards against a None result -- confirm this silent
        fallback is intended.
        """
        try:
            return predictor.predict_batch_json(inputs)
        except RuntimeError:
            if model_name == "srl":
                if self._subword_tokenizer is None:
                    # Lazily build a tokenizer used only to measure sub-word
                    # length; presumably approximates the SRL model's own
                    # sub-word vocabulary -- TODO confirm.
                    self._subword_tokenizer = BertTokenizer.from_pretrained(
                        "bert-base-uncased"
                    )
                # One possible cause of RuntimeError is the input is too long,
                # See if we can fix this by retrying.
                for item in inputs:
                    assert self._subword_tokenizer is not None
                    subword_length = len(
                        self._subword_tokenizer(item["sentence"]).data[
                            "input_ids"
                        ]
                    )
                    if subword_length > 450:
                        # Remove long sentences that could create problems.
                        # Since the AllenNLP SRL uses Bert with a limit of 512,
                        # but we choose a smaller number to ensure we can get
                        # through it.
                        # We make this sentence to empty and skip it.
                        item["sentence"] = "."
                # Try again with the new input, hopefully it can pass.
                return predictor.predict_batch_json(inputs)
def _process_existing_entries(self, input_pack):
tokens_exist = any(True for _ in input_pack.get(Token))
dependencies_exist = any(True for _ in input_pack.get(Dependency))
if tokens_exist or dependencies_exist:
if not self.configs.overwrite_entries:
if not self.configs.allow_parallel_entries:
raise ProcessorConfigError(
"Found existing entries, either `overwrite_entries` or"
" `allow_parallel_entries` should be True"
)
else:
# delete existing tokens and dependencies
for entry_type in (Token, Dependency):
for entry in list(input_pack.get(entry_type)):
input_pack.delete_entry(entry)
def _create_tokens(self, input_pack, sentence, result):
words, pos = result["words"], result["pos"]
tokens = []
offset = sentence.span.begin
word_end = 0
for i, word in enumerate(words):
word_begin = sentence.text.find(word, word_end)
word_end = word_begin + len(word)
token = Token(input_pack, offset + word_begin, offset + word_end)
if "pos" in self.configs.processors:
token.pos = pos[i]
tokens.append(token)
return tokens
@staticmethod
def _create_dependencies(input_pack, tokens, result):
deps = result["predicted_dependencies"]
heads = result["predicted_heads"]
for i, token in enumerate(tokens):
relation = Dependency(
input_pack, parent=tokens[heads[i] - 1], child=token
)
relation.rel_type = deps[i]
@staticmethod
def _create_srl(
input_pack: DataPack, tokens: List[Token], result: Dict[str, List[str]]
) -> None:
for _, tag in enumerate(result["srl_tags"]):
pred_span, arguments = parse_allennlp_srl_tags(tag)
if not pred_span:
continue
pred = PredicateMention(
input_pack,
tokens[pred_span.begin].begin,
tokens[pred_span.end].end,
)
for arg_span, label in arguments:
arg = PredicateArgument(
input_pack,
tokens[arg_span.begin].begin,
tokens[arg_span.end].end,
)
link = PredicateLink(input_pack, pred, arg)
link.arg_type = label
def expected_types_and_attributes(self) -> Dict[str, Set[str]]:
r"""Method to add expected type for current processor input which
would be checked before running the processor if
the pipeline is initialized with
`enforce_consistency=True` or
:meth:`~forte.pipeline.Pipeline.enforce_consistency` was enabled for
the pipeline.
"""
expectation_dict: Dict[str, Set[str]] = {
self.configs.sentence_type: set()
}
return expectation_dict
    def record(self, record_meta: Dict[str, Set[str]]):
        r"""Method to add output type record of current processor
        to :attr:`forte.data.data_pack.Meta.record`.

        Args:
            record_meta: the field in the datapack for type record that need to
                fill in for consistency checking.
        """
        if "tokenize" in self.configs.processors:
            record_meta["ft.onto.base_ontology.Token"] = set()
        # NOTE(review): this branch assumes the Token record was created by
        # the `tokenize` branch above; a config requesting `pos` without
        # `tokenize` raises KeyError here -- confirm whether that pairing is
        # an intended precondition.
        if "pos" in self.configs.processors:
            record_meta["ft.onto.base_ontology.Token"].add("pos")
        if "depparse" in self.configs.processors:
            record_meta["ft.onto.base_ontology.Dependency"] = {"rel_type"}
        if "srl" in self.configs.processors:
            record_meta["ft.onto.base_ontology.PredicateArgument"] = set()
            record_meta["ft.onto.base_ontology.PredicateMention"] = set()
            record_meta["ft.onto.base_ontology.PredicateLink"] = {
                "arg_type"
            }
| 41.775904 | 80 | 0.580839 |
6f5e9e48847a97eeda7383fb09160ce23265342b | 2,292 | py | Python | NetLights.py | 5N6R/NetLights | 516cee397d36308bfe2d28af8caf5a9fe0bfda15 | [
"MIT"
] | 2 | 2017-02-11T08:54:50.000Z | 2022-01-25T12:13:26.000Z | NetLights.py | 5N6R/NetLights | 516cee397d36308bfe2d28af8caf5a9fe0bfda15 | [
"MIT"
] | null | null | null | NetLights.py | 5N6R/NetLights | 516cee397d36308bfe2d28af8caf5a9fe0bfda15 | [
"MIT"
] | null | null | null | #(c) 2017, coded 5n6r
#!/usr/bin/env python3
import dns.resolver
import dns.rdtypes
import os, sys, re
from tkinter import *
# Resolver addresses used in primary/secondary pairs by checker();
# presumably family-safe public DNS services (Yandex.DNS, OpenDNS
# FamilyShield, Neustar) -- TODO confirm the provider mapping.
ip=["81.218.119.11","209.88.198.133","77.88.8.7", "77.88.8.3","208.67.222.123",
"208.67.220.123","156.154.70.4","156.154.71.4"]
# Verdict counters; `rez` holds the live counts (red, yellow, green, black
# in that order) and is mutated in place -- the four scalars only seed it.
red=yellow=green=black=0
rez=[red,yellow,green,black]
def mous(event):
    """Right-click handler: paste the clipboard contents into the input.

    When the clipboard is empty or unavailable tkinter raises TclError and
    the UI is reset via cl(). The previous bare ``except:`` also swallowed
    SystemExit/KeyboardInterrupt; it is narrowed to the actual tkinter
    error type here.
    """
    try:
        ii.set(app.clipboard_get())
    except TclError:
        cl()
def cl():
    """Reset the UI to its idle state: clear clipboard, input and colors."""
    app.clipboard_clear()
    ii.set("")
    fr0.configure(bg="silver")
    # Reset the verdict counters in place so the module-level list object
    # stays shared with checker(). (The removed `url = ""` and
    # `red=yellow=green=black=0` lines only created dead local variables.)
    rez[0] = rez[1] = rez[2] = rez[3] = 0
def checker(event):
    # Resolve the entered host against each resolver pair in `ip` and turn
    # the frame red/yellow when any answer matches a known block/redirect
    # address, green when at least one resolves normally, black when every
    # lookup failed.
    rez[0]=rez[1]=rez[2]=rez[3]=0
    url=ii.get()
    if url!= "":
        # Strip a scheme prefix such as "http://" when present.
        xx=url.split("//")
        if len(xx)==1:
            url=xx[0]
        else:
            url=xx[1]
        # Walk the `ip` list two addresses at a time (primary + secondary).
        for x in range(0,8,2):
            resolver = dns.resolver.Resolver(configure=False)
            resolver.nameservers = [ip[x],ip[x+1]]
            try:
                dr=resolver.query(url)[0].to_text()
                # The literal addresses below are presumably the filter /
                # block pages returned by the respective DNS services --
                # TODO confirm these are still current.
                if (dr=="93.158.134.250" or dr=="81.218.119.11" or dr=="67.215.65.130" or dr=="146.112.61.106"
                or dr=="156.154.112.18" or dr=="156.154.113.18"):
                    rez[1]=rez[1]+1
                elif (dr=="213.180.193.250" or dr=="209.88.198.133" or dr=="146.112.61.104" or dr== "146.112.61.105"
                or dr=="146.112.61.107" or dr=="146.112.61.108" or dr=="146.112.61.109" or dr=="146.112.61.110"
                or dr=="156.154.112.18" or dr=="156.154.113.18"):
                    rez[0]=rez[0]+1
                else:
                    rez[2]=rez[2]+1
            except:
                rez[3]=rez[3]+1
        # Priority of verdict colours: red > yellow > green > black.
        if rez[0]>0:
            rezz="red"
        elif rez[1] >0:
            rezz="yellow"
        elif rez[2] >0:
            rezz="green"
        else:
            rezz="black"
        fr0.configure(bg=rezz)
# --- UI construction (runs at import time) ---
app=Tk()
# Window title: knight chess glyphs (chr(9816)) around the Russian text
# "NetLights version 0.5 beta (c) 2017, programming 5n6r".
app.title(chr(9816)*7+" NetLights версия 0.5 бета "+chr(169)+" 2017, программирование 5n6r "+chr(9816)*7)
app.geometry("700x60")
app.resizable(0,0)
# Holds the host under test; shared with mous(), cl() and checker().
ii=StringVar()
ii.set("")
# Frame whose background colour reports the verdict (silver = idle).
fr0=Frame(app,bd=2,height=12,relief="groove",bg="silver")
fr0.pack(padx=10,pady=10)
e=Entry(fr0,textvariable=ii,bd=1,cursor="spider",width=30)
e.focus()
e.grid(row=0,column=0,pady=5,padx=5)
# Buttons: "Check!", "New check", "Exit the program" (labels in Russian).
b1=Button(fr0,text="Проверить!",cursor="hand2")
b1.grid(row=0,column=1,padx=3,pady=3)
b2=Button(fr0,text="Новая проверка",command=cl,cursor="hand2")
b2.grid(row=0,column=2,padx=3,pady=3)
# NOTE(review): `b2` is rebound here, so the name ends up referring to the
# exit button; harmless because only `b1` is referenced again below.
b2=Button(fr0,text="Выход из программы",command=app.destroy,cursor="hand2")
b2.grid(row=0,column=3,padx=3,pady=3)
# Right-click pastes the clipboard; Enter or the Check button runs the test.
e.bind("<Button-3>",mous)
e.bind("<Return>",checker)
b1.bind("<Button-1>",checker)
app.mainloop()
| 28.65 | 105 | 0.635689 |
c19123ae3f60bc720c1ce9dfaf0dc2036541df95 | 413 | py | Python | ticket/migrations/0007_auto_20210113_1743.py | hossamhsn74/django-dmtech-task | 5cea908b9a4b9e1f7052e3aa46385e1a599be09f | [
"MIT"
] | null | null | null | ticket/migrations/0007_auto_20210113_1743.py | hossamhsn74/django-dmtech-task | 5cea908b9a4b9e1f7052e3aa46385e1a599be09f | [
"MIT"
] | null | null | null | ticket/migrations/0007_auto_20210113_1743.py | hossamhsn74/django-dmtech-task | 5cea908b9a4b9e1f7052e3aa46385e1a599be09f | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2021-01-13 15:43
from django.db import migrations, models
# NOTE: auto-generated by `manage.py makemigrations`; edit with care.
class Migration(migrations.Migration):
    dependencies = [
        ('ticket', '0006_ticket_code'),
    ]
    operations = [
        migrations.AlterField(
            model_name='ticketqrcode',
            name='code',
            # Optional QR-code image (null/blank allowed), stored under
            # MEDIA_ROOT/qrcodes/.
            field=models.ImageField(blank=True, null=True, upload_to='qrcodes/'),
        ),
    ]
| 21.736842 | 81 | 0.602906 |
162c20e7287fe04b9239b303b98ac61360e228a1 | 2,442 | py | Python | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/CountRentedJoinPermissionsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/CountRentedJoinPermissionsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-linkwan/aliyunsdklinkwan/request/v20190301/CountRentedJoinPermissionsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdklinkwan.endpoint import endpoint_data
class CountRentedJoinPermissionsRequest(RpcRequest):
    """RPC request for the LinkWAN ``CountRentedJoinPermissions`` API
    (version 2019-03-01).

    Each getter/setter pair below maps a single query parameter of the
    call; values are stored via the base class's query-parameter table.
    """

    def __init__(self):
        RpcRequest.__init__(self, 'LinkWAN', '2019-03-01', 'CountRentedJoinPermissions','linkwan')
        self.set_method('POST')
        # Attach endpoint routing data when the installed core SDK supports it.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_Type(self):
        params = self.get_query_params()
        return params.get('Type')

    def set_Type(self, Type):
        self.add_query_param('Type', Type)

    def get_Enabled(self):
        params = self.get_query_params()
        return params.get('Enabled')

    def set_Enabled(self, Enabled):
        self.add_query_param('Enabled', Enabled)

    def get_FuzzyJoinEui(self):
        params = self.get_query_params()
        return params.get('FuzzyJoinEui')

    def set_FuzzyJoinEui(self, FuzzyJoinEui):
        self.add_query_param('FuzzyJoinEui', FuzzyJoinEui)

    def get_FuzzyJoinPermissionName(self):
        params = self.get_query_params()
        return params.get('FuzzyJoinPermissionName')

    def set_FuzzyJoinPermissionName(self, FuzzyJoinPermissionName):
        self.add_query_param('FuzzyJoinPermissionName', FuzzyJoinPermissionName)

    def get_BoundNodeGroup(self):
        params = self.get_query_params()
        return params.get('BoundNodeGroup')

    def set_BoundNodeGroup(self, BoundNodeGroup):
        self.add_query_param('BoundNodeGroup', BoundNodeGroup)

    def get_FuzzyOwnerAliyunId(self):
        params = self.get_query_params()
        return params.get('FuzzyOwnerAliyunId')

    def set_FuzzyOwnerAliyunId(self, FuzzyOwnerAliyunId):
        self.add_query_param('FuzzyOwnerAliyunId', FuzzyOwnerAliyunId)
ded1c3b369c844e37ba6f31ee037e0dc4bda3660 | 123 | py | Python | Thefirstandlastoccurence.py | Arnabsaha6/Snakify | df0c1112ae8a56a275044f786bfd89f746e3ca85 | [
"MIT"
] | null | null | null | Thefirstandlastoccurence.py | Arnabsaha6/Snakify | df0c1112ae8a56a275044f786bfd89f746e3ca85 | [
"MIT"
] | null | null | null | Thefirstandlastoccurence.py | Arnabsaha6/Snakify | df0c1112ae8a56a275044f786bfd89f746e3ca85 | [
"MIT"
] | null | null | null | Code:
# Snakify "first and last occurrence" task: for the letter 'f' in the input
# line, print its index when it occurs exactly once, the first and last
# indices when it occurs two or more times, and nothing otherwise.
s = input()
if s.count('f') == 1:
    print(s.find('f'))
elif s.count('f') >= 2:
    print(s.find('f'), s.rfind('f')) | 20.5 | 36 | 0.495935 |
6f2142fbe976ca5b5ece92226b1b0d8fb8097ff8 | 4,743 | bzl | Python | dev-infra/bazel/api-golden/index.bzl | yuchenghu/angular-cn | e926e3dab8cc304b537aecd703e756a2de90448f | [
"MIT"
] | 17 | 2021-04-29T11:00:59.000Z | 2021-05-25T10:11:24.000Z | dev-infra/bazel/api-golden/index.bzl | yuchenghu/angular-cn | e926e3dab8cc304b537aecd703e756a2de90448f | [
"MIT"
] | 1 | 2021-04-05T16:16:34.000Z | 2021-04-05T16:16:34.000Z | dev-infra/bazel/api-golden/index.bzl | yuchenghu/angular-cn | e926e3dab8cc304b537aecd703e756a2de90448f | [
"MIT"
] | 2 | 2021-01-13T02:18:19.000Z | 2021-01-13T02:22:38.000Z | load("//dev-infra/bazel:extract_js_module_output.bzl", "extract_js_module_output")
load("@build_bazel_rules_nodejs//:index.bzl", "nodejs_binary", "nodejs_test")
nodejs_test_args = [
# Needed so that node doesn't walk back to the source directory.
# From there, the relative imports would point to .ts files.
"--node_options=--preserve-symlinks",
# TODO(josephperrott): update dependency usages to no longer need bazel patch module resolver
# See: https://github.com/bazelbuild/rules_nodejs/wiki#--bazel_patch_module_resolver-now-defaults-to-false-2324
"--bazel_patch_module_resolver",
]
# Strips exports whose name starts with `ɵ`, except the three names listed
# in the negative lookahead, which stay in the API report.
default_strip_export_pattern = "^ɵ(?!ɵdefineInjectable|ɵinject|ɵInjectableDef)"
"""Escapes a Regular expression so that it can be passed as process argument."""
def _escape_regex_for_arg(value):
    # Surround the pattern with literal double quotes so it survives being
    # passed through as a single process argument.
    return '"{}"'.format(value)
"""
Extracts type names from a list of NPM type targets.
For example: Consider the `@npm//@types/node` target. This function extracts `node`
from the label. This is needed so that the Node types can be wired up within a
TypeScript program using the `types` tsconfig option.
"""
def extract_type_names_from_labels(type_targets):
    """Extract `<name>` from each `@<npm_workspace>//@types/<name>` target.

    Fails the build for any target that is not an `@types/...` package.
    """
    prefix = "@types/"
    names = []
    for target in type_targets:
        package = Label(target).package
        if not package.startswith(prefix):
            fail("Expected type target to match the following format: " +
                 "`@<npm_workspace>//@types/<name>`, but got: %s" % target)
        names.append(package[len(prefix):])
    return names
"""
Builds an API report for the specified entry-point and compares it against the
specified golden
"""
def api_golden_test(
        name,
        golden,
        entry_point,
        data = [],
        strip_export_pattern = default_strip_export_pattern,
        types = [],
        **kwargs):
    # Macro producing two targets: `<name>` (verifies the API report against
    # `golden`) and `<name>.accept` (regenerates the golden file).
    quoted_export_pattern = _escape_regex_for_arg(strip_export_pattern)
    # Tag all generated targets so API-guard checks can be run as a group.
    kwargs["tags"] = kwargs.get("tags", []) + ["api_guard"]
    # For API golden tests not running against a NPM package, we extract all transitive
    # declarations of the specified `data` targets. This is necessary because API extractor
    # needs to resolve other targets that have been linked by the Bazel NodeJS rules. The
    # linker by default only provides access to JavaScript sources, but the API extractor is
    # specifically concerned with type definitions that we can extract manually here.
    extract_js_module_output(
        name = "%s_data_typings" % name,
        deps = data,
        provider = "JSModuleInfo",
        include_declarations = True,
        include_default_files = False,
    )
    test_data = ["//dev-infra/bazel/api-golden", "//:package.json", ":%s_data_typings" % name] + \
                data + types
    # The third templated argument toggles accept mode: "false" = verify.
    nodejs_test(
        name = name,
        data = test_data,
        entry_point = "//dev-infra/bazel/api-golden:index.ts",
        templated_args = nodejs_test_args + [golden, entry_point, "false", quoted_export_pattern] +
                         extract_type_names_from_labels(types),
        **kwargs
    )
    # "true" = accept/update the golden file when executed with `bazel run`.
    nodejs_binary(
        name = name + ".accept",
        testonly = True,
        data = test_data,
        entry_point = "//dev-infra/bazel/api-golden:index.ts",
        templated_args = nodejs_test_args + [golden, entry_point, "true", quoted_export_pattern] +
                         extract_type_names_from_labels(types),
        **kwargs
    )
"""
Builds an API report for all entrypoints within the given NPM package and compares it
against goldens within the specified directory.
"""
def api_golden_test_npm_package(
        name,
        golden_dir,
        npm_package,
        data = [],
        strip_export_pattern = default_strip_export_pattern,
        types = [],
        **kwargs):
    # NPM-package variant of `api_golden_test`: checks every entry point of
    # `npm_package` against the goldens stored in `golden_dir`.
    quoted_export_pattern = _escape_regex_for_arg(strip_export_pattern)
    kwargs["tags"] = kwargs.get("tags", []) + ["api_guard"]
    # Verification target ("false" disables accept mode).
    nodejs_test(
        name = name,
        data = ["//dev-infra/bazel/api-golden"] + data + types,
        entry_point = "//dev-infra/bazel/api-golden:index_npm_packages.ts",
        templated_args = nodejs_test_args + [golden_dir, npm_package, "false", quoted_export_pattern] +
                         extract_type_names_from_labels(types),
        **kwargs
    )
    # Companion binary that regenerates the goldens ("true" = accept).
    nodejs_binary(
        name = name + ".accept",
        testonly = True,
        data = ["//dev-infra/bazel/api-golden"] + data + types,
        entry_point = "//dev-infra/bazel/api-golden:index_npm_packages.ts",
        templated_args = nodejs_test_args + [golden_dir, npm_package, "true", quoted_export_pattern] +
                         extract_type_names_from_labels(types),
        **kwargs
    )
| 37.054688 | 115 | 0.660342 |
f7d7be3b8ae1f41fd7761fc57ac06133e0ef5914 | 4,193 | py | Python | wiggle/wiggle.py | gatechzhu/wiggle | 664c1909ae768456e662e249fb51c5899334fc3b | [
"MIT"
] | 15 | 2017-07-20T12:55:44.000Z | 2021-12-17T04:10:30.000Z | wiggle/wiggle.py | gatechzhu/wiggle | 664c1909ae768456e662e249fb51c5899334fc3b | [
"MIT"
] | null | null | null | wiggle/wiggle.py | gatechzhu/wiggle | 664c1909ae768456e662e249fb51c5899334fc3b | [
"MIT"
] | 4 | 2018-02-02T03:44:35.000Z | 2021-02-26T11:37:58.000Z | import numpy as np
import matplotlib.pyplot as plt
def insert_zeros(trace, tt=None):
    """Return copies of ``trace``/``tt`` with interpolated zero crossings
    inserted between samples of opposite sign.

    Each crossing time is found by linearly interpolating between the two
    bracketing samples; a zero amplitude sample is inserted at that time.
    When ``tt`` is None an integer sample index axis is generated.
    """
    if tt is None:
        tt = np.arange(len(trace))
    # Indices i where the sign flips between sample i and sample i + 1.
    crossings = np.where(np.diff(np.signbit(trace)))[0]
    # Solve y1 + slope * (t - x1) = 0 for the crossing time t.
    left_t, right_t = tt[crossings], tt[crossings + 1]
    left_y, right_y = trace[crossings], trace[crossings + 1]
    slope = (right_y - left_y) / (right_t - left_t)
    zero_times = left_t - left_y / slope
    # Rebuild the arrays segment by segment, dropping a zero sample (and
    # its interpolated time) between consecutive segments.
    tt_segments = np.split(tt, crossings + 1)
    trace_segments = np.split(trace, crossings + 1)
    new_tt = tt_segments[0]
    new_trace = trace_segments[0]
    for k, t0 in enumerate(zero_times):
        new_tt = np.hstack((new_tt, np.array([t0]), tt_segments[k + 1]))
        new_trace = np.hstack((new_trace, np.zeros(1), trace_segments[k + 1]))
    return new_trace, new_tt
def wiggle_input_check(data, tt, xx, sf, verbose):
    '''Validate and normalize the inputs of wiggle() and traces().

    Parameters:
        data: 2-D numpy array, one seismic trace per column.
        tt: 1-D numpy array (one entry per row of ``data``) or None, in
            which case ``arange(n_rows)`` is generated.
        xx: 1-D numpy array of trace offsets (one entry per column of
            ``data``) or None, in which case ``arange(n_cols)`` is
            generated.
        sf: numeric stretch factor applied to the trace amplitudes.
        verbose: when True, print the auto-generated axes.

    Returns:
        (data, tt, xx, ts) where ``ts`` is the minimum offset increment
        and ``data`` has been rescaled by ``ts * sf / max column std``.

    Raises:
        TypeError / ValueError on malformed input.
    '''
    # Input check for verbose
    if not isinstance(verbose, bool):
        raise TypeError("verbose must be a bool")

    # Input check for data
    if type(data).__module__ != np.__name__:
        raise TypeError("data must be a numpy array")
    if len(data.shape) != 2:
        raise ValueError("data must be a 2D array")

    # Input check for tt
    if tt is None:
        tt = np.arange(data.shape[0])
        if verbose:
            print("tt is automatically generated.")
            print(tt)
    else:
        if type(tt).__module__ != np.__name__:
            raise TypeError("tt must be a numpy array")
        if len(tt.shape) != 1:
            raise ValueError("tt must be a 1D array")
        if tt.shape[0] != data.shape[0]:
            raise ValueError("tt must have same as data's rows")

    # Input check for xx
    if xx is None:
        xx = np.arange(data.shape[1])
        if verbose:
            print("xx is automatically generated.")
            print(xx)
    else:
        # Bug fix: this branch previously re-validated `tt` (with error
        # messages mentioning "tt") instead of checking `xx` against the
        # number of data columns.
        if type(xx).__module__ != np.__name__:
            raise TypeError("xx must be a numpy array")
        if len(xx.shape) != 1:
            raise ValueError("xx must be a 1D array")
        if xx.shape[0] != data.shape[1]:
            raise ValueError("xx must have same length as data's columns")
        if verbose:
            print(xx)

    # Input check for stretch factor (sf)
    if not isinstance(sf, (int, float)):
        raise TypeError("Strech factor(sf) must be a number")

    # Compute trace horizontal spacing
    ts = np.min(np.diff(xx))

    # Rescale data by trace_spacing and strech_factor
    # NOTE(review): a section whose columns are all constant gives
    # data_max_std == 0 and a divide-by-zero below; left unchanged to
    # preserve existing behavior.
    data_max_std = np.max(np.std(data, axis=0))
    data = data / data_max_std * ts * sf

    return data, tt, xx, ts
def wiggle(data, tt=None, xx=None, color='k', sf=0.15, verbose=False):
    '''Wiggle plot of a seismic data section

    Draws one filled wiggle trace per column of ``data`` onto the current
    matplotlib axes, with the time axis increasing downwards.

    Syntax examples:
        wiggle(data)
        wiggle(data, tt)
        wiggle(data, tt, xx)
        wiggle(data, tt, xx, color)
        fi = wiggle(data, tt, xx, color, sf, verbose)
    Use the column major order for array as in Fortran for optimal
    performance.
    The following color abbreviations are supported:
    ==========  ========
    character   color
    ==========  ========
    'b'         blue
    'g'         green
    'r'         red
    'c'         cyan
    'm'         magenta
    'y'         yellow
    'k'         black
    'w'         white
    ==========  ========
    '''
    # Input check
    data, tt, xx, ts = wiggle_input_check(data, tt, xx, sf, verbose)
    # Plot data using matplotlib.pyplot
    Ntr = data.shape[1]
    ax = plt.gca()
    for ntr in range(Ntr):
        trace = data[:, ntr]
        offset = xx[ntr]
        if verbose:
            print(offset)
        # Insert exact zero crossings so the positive lobes fill cleanly.
        trace_zi, tt_zi = insert_zeros(trace, tt)
        ax.fill_betweenx(tt_zi, offset, trace_zi + offset,
                         where=trace_zi >= 0,
                         facecolor=color)
        ax.plot(trace_zi + offset, tt_zi, color)
    ax.set_xlim(xx[0] - ts, xx[-1] + ts)
    ax.set_ylim(tt[0], tt[-1])
    # Seismic convention: time/depth increases downwards.
    ax.invert_yaxis()
# Demo: plot 100 random traces of 1000 samples when run as a script.
if __name__ == '__main__':
    data = np.random.randn(1000, 100)
    wiggle(data)
    plt.show() | 27.051613 | 79 | 0.55855 |
4301d7478c8d2ea7b163e4391cdac8d81b1ad7ef | 711 | py | Python | examples/ManagedApp.py | tescalada/npyscreen-restructure | 0833bbbdec18439182f102d2147f3756fa98aadd | [
"BSD-2-Clause"
] | 2 | 2015-01-12T14:47:19.000Z | 2018-10-03T09:27:22.000Z | examples/ManagedApp.py | tescalada/npyscreen-restructure | 0833bbbdec18439182f102d2147f3756fa98aadd | [
"BSD-2-Clause"
] | null | null | null | examples/ManagedApp.py | tescalada/npyscreen-restructure | 0833bbbdec18439182f102d2147f3756fa98aadd | [
"BSD-2-Clause"
] | 1 | 2020-03-20T20:19:33.000Z | 2020-03-20T20:19:33.000Z | #!/usr/bin/env python
# encoding: utf-8
"""
ExampleManaged.py
Created by Nicholas Cole on 2007-02-22.
"""
import npyscreen, curses
class MyTestApp(npyscreen.NPSAppManaged):
    """Managed npyscreen application registering a single main form."""

    def onStart(self):
        # "MAIN" is the form a managed app displays first.
        main_form = MainForm()
        self.registerForm("MAIN", main_form)
class MainForm(npyscreen.Form):
    """Single demo form whose only job is to exit cleanly on Escape."""

    def create(self):
        self.add(
            npyscreen.TitleText,
            name="Text:",
            value="Press Escape to quit application",
        )
        # Route the Escape "how exited" event to our shutdown handler.
        self.how_exited_handers[npyscreen.wgwidget.EXITED_ESCAPE] = self.exit_application

    def exit_application(self):
        curses.beep()
        # A None next-form tells the managed application loop to stop.
        self.parent_app.setNextForm(None)
        self.editing = False
def main():
    """Create and run the managed application."""
    application = MyTestApp()
    application.run()
# Standard script entry point.
if __name__ == '__main__':
    main()
| 21.545455 | 97 | 0.66526 |
d224ce42847ee92d46b1f471ee1226b4c7f69232 | 19,801 | py | Python | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 64 | 2016-10-20T15:47:18.000Z | 2021-11-11T11:57:32.000Z | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 126 | 2016-10-05T10:36:14.000Z | 2019-05-15T08:43:23.000Z | napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/__init__.py | ckishimo/napalm-yang | 8f2bd907bd3afcde3c2f8e985192de74748baf6c | [
"Apache-2.0"
] | 63 | 2016-11-07T15:23:08.000Z | 2021-09-22T14:41:16.000Z | # -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improved)
# Alias the builtins module under the PY2 name `__builtin__` so the generated
# class bodies can reference `__builtin__.property` on either interpreter;
# `long` only exists on PY2, so it is mapped to `int` on PY3.
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
class config(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/ebgp-multihop/config. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Configuration parameters relating to eBGP multihop for the
BGP group
"""
__slots__ = ("_path_helper", "_extmethods", "__enabled", "__multihop_ttl")
_yang_name = "config"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
self.__multihop_ttl = YANGDynClass(
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="multihop-ttl",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"ebgp-multihop",
"config",
]
def _get_enabled(self):
"""
Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/enabled (boolean)
YANG Description: When enabled the referenced group or neighbors are permitted
to be indirectly connected - including cases where the TTL
can be decremented between the BGP peers
"""
return self.__enabled
def _set_enabled(self, v, load=False):
"""
Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/enabled (boolean)
If this variable is read-only (config: false) in the
source YANG file, then _set_enabled is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enabled() directly.
YANG Description: When enabled the referenced group or neighbors are permitted
to be indirectly connected - including cases where the TTL
can be decremented between the BGP peers
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """enabled must be of a type compatible with boolean""",
"defined-type": "boolean",
"generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
}
)
self.__enabled = t
if hasattr(self, "_set"):
self._set()
def _unset_enabled(self):
self.__enabled = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="enabled",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=True,
)
def _get_multihop_ttl(self):
"""
Getter method for multihop_ttl, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/multihop_ttl (uint8)
YANG Description: Time-to-live value to use when packets are sent to the
referenced group or neighbors and ebgp-multihop is enabled
"""
return self.__multihop_ttl
def _set_multihop_ttl(self, v, load=False):
"""
Setter method for multihop_ttl, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/multihop_ttl (uint8)
If this variable is read-only (config: false) in the
source YANG file, then _set_multihop_ttl is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_multihop_ttl() directly.
YANG Description: Time-to-live value to use when packets are sent to the
referenced group or neighbors and ebgp-multihop is enabled
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
is_leaf=True,
yang_name="multihop-ttl",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """multihop_ttl must be of a type compatible with uint8""",
"defined-type": "uint8",
"generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="multihop-ttl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
}
)
self.__multihop_ttl = t
if hasattr(self, "_set"):
self._set()
    def _unset_multihop_ttl(self):
        # Reset the leaf to a fresh, unchanged instance of its declared
        # uint8 (0..255) type — i.e. restore the pristine default state.
        self.__multihop_ttl = YANGDynClass(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            is_leaf=True,
            yang_name="multihop-ttl",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )
enabled = __builtin__.property(_get_enabled, _set_enabled)
multihop_ttl = __builtin__.property(_get_multihop_ttl, _set_multihop_ttl)
_pyangbind_elements = OrderedDict(
[("enabled", enabled), ("multihop_ttl", multihop_ttl)]
)
class config(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/ebgp-multihop/config. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.

    YANG Description: Configuration parameters relating to eBGP multihop for the
    BGP group
    """

    # NOTE: auto-generated pyangbind container — regenerate rather than edit.
    __slots__ = ("_path_helper", "_extmethods", "__enabled", "__multihop_ttl")

    _yang_name = "config"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # Default-initialised leaves of this container.
        self.__enabled = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )
        self.__multihop_ttl = YANGDynClass(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            is_leaf=True,
            yang_name="multihop-ttl",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )

        # Copy-constructor support: a single positional argument must expose
        # all elements of this container; changed values are copied over.
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path of this container within the data tree.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "bgp",
                "peer-groups",
                "peer-group",
                "ebgp-multihop",
                "config",
            ]

    def _get_enabled(self):
        """
        Getter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/enabled (boolean)

        YANG Description: When enabled the referenced group or neighbors are permitted
        to be indirectly connected - including cases where the TTL
        can be decremented between the BGP peers
        """
        return self.__enabled

    def _set_enabled(self, v, load=False):
        """
        Setter method for enabled, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/enabled (boolean)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_enabled is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_enabled() directly.

        YANG Description: When enabled the referenced group or neighbors are permitted
        to be indirectly connected - including cases where the TTL
        can be decremented between the BGP peers
        """
        # Unwrap YANG-typed values before re-validating against this leaf.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGBool,
                default=YANGBool("false"),
                is_leaf=True,
                yang_name="enabled",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """enabled must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="enabled", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=True)""",
                }
            )
        self.__enabled = t
        # Propagate change notification to the parent object, if supported.
        if hasattr(self, "_set"):
            self._set()

    def _unset_enabled(self):
        # Restore the leaf to its default state (false).
        self.__enabled = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="enabled",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=True,
        )

    def _get_multihop_ttl(self):
        """
        Getter method for multihop_ttl, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/multihop_ttl (uint8)

        YANG Description: Time-to-live value to use when packets are sent to the
        referenced group or neighbors and ebgp-multihop is enabled
        """
        return self.__multihop_ttl

    def _set_multihop_ttl(self, v, load=False):
        """
        Setter method for multihop_ttl, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/ebgp_multihop/config/multihop_ttl (uint8)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_multihop_ttl is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_multihop_ttl() directly.

        YANG Description: Time-to-live value to use when packets are sent to the
        referenced group or neighbors and ebgp-multihop is enabled
        """
        # Unwrap YANG-typed values before re-validating against this leaf.
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Validate/coerce the value into the uint8 (0..255) leaf type.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                is_leaf=True,
                yang_name="multihop-ttl",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint8",
                is_config=True,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """multihop_ttl must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), is_leaf=True, yang_name="multihop-ttl", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=True)""",
                }
            )
        self.__multihop_ttl = t
        # Propagate change notification to the parent object, if supported.
        if hasattr(self, "_set"):
            self._set()

    def _unset_multihop_ttl(self):
        # Restore the leaf to a pristine instance of its uint8 type.
        self.__multihop_ttl = YANGDynClass(
            base=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            is_leaf=True,
            yang_name="multihop-ttl",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=True,
        )

    # Public properties and pyangbind's ordered element registry.
    enabled = __builtin__.property(_get_enabled, _set_enabled)
    multihop_ttl = __builtin__.property(_get_multihop_ttl, _set_multihop_ttl)

    _pyangbind_elements = OrderedDict(
        [("enabled", enabled), ("multihop_ttl", multihop_ttl)]
    )
| 40.327902 | 423 | 0.608858 |
4661e025308b6a6034d3d7a0dcfcdf513554b5b1 | 1,930 | py | Python | sdk/python/pulumi_azure_native/resources/v20201001/_enums.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 31 | 2020-09-21T09:41:01.000Z | 2021-02-26T13:21:59.000Z | sdk/python/pulumi_azure_native/resources/v20201001/_enums.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 231 | 2020-09-21T09:38:45.000Z | 2021-03-01T11:16:03.000Z | sdk/python/pulumi_azure_native/resources/v20201001/_enums.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | 4 | 2020-09-29T14:14:59.000Z | 2021-02-10T20:38:16.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'DeploymentMode',
'ExpressionEvaluationOptionsScopeType',
'ManagedServiceIdentityType',
'OnErrorDeploymentType',
'ResourceIdentityType',
'ScriptType',
]
class DeploymentMode(str, Enum):
    """
    The mode that is used to deploy resources. This value can be either Incremental or Complete. In Incremental mode, resources are deployed without deleting existing resources that are not included in the template. In Complete mode, resources are deployed and existing resources in the resource group that are not included in the template are deleted. Be careful when using Complete mode as you may unintentionally delete resources.
    """

    INCREMENTAL = "Incremental"  # leave resources not present in the template untouched
    COMPLETE = "Complete"  # delete resource-group resources absent from the template
class ExpressionEvaluationOptionsScopeType(str, Enum):
    """
    The scope to be used for evaluation of parameters, variables and functions in a nested template.
    """

    NOT_SPECIFIED = "NotSpecified"
    OUTER = "Outer"  # evaluate expressions in the parent template's scope
    INNER = "Inner"  # evaluate expressions in the nested template's own scope
class ManagedServiceIdentityType(str, Enum):
    """
    Type of the managed identity.
    """

    # Only user-assigned identities are supported by this enum.
    USER_ASSIGNED = "UserAssigned"
class OnErrorDeploymentType(str, Enum):
    """
    The deployment on error behavior type. Possible values are LastSuccessful and SpecificDeployment.
    """

    LAST_SUCCESSFUL = "LastSuccessful"  # redeploy the last successful deployment on error
    SPECIFIC_DEPLOYMENT = "SpecificDeployment"  # redeploy a named deployment on error
class ResourceIdentityType(str, Enum):
    """
    The identity type.
    """

    SYSTEM_ASSIGNED = "SystemAssigned"
    USER_ASSIGNED = "UserAssigned"
    # Combined value uses the exact comma-separated string the API expects.
    SYSTEM_ASSIGNED_USER_ASSIGNED = "SystemAssigned, UserAssigned"
    NONE = "None"
class ScriptType(str, Enum):
    """
    Type of the script.
    """

    AZURE_POWER_SHELL = "AzurePowerShell"
    AZURE_CLI = "AzureCLI"
| 29.692308 | 433 | 0.714508 |
5378dce43b6ac085a35ed1a3b8f5b8a948caa0f2 | 496 | py | Python | cloudrunner_server/version.py | ttrifonov/cloudrunner-server | 3b2426c8d9987e78425899010b534afc7734d8d4 | [
"Apache-2.0"
] | 2 | 2016-03-31T08:45:29.000Z | 2021-04-28T15:18:45.000Z | cloudrunner_server/version.py | ttrifonov/cloudrunner-server | 3b2426c8d9987e78425899010b534afc7734d8d4 | [
"Apache-2.0"
] | null | null | null | cloudrunner_server/version.py | ttrifonov/cloudrunner-server | 3b2426c8d9987e78425899010b534afc7734d8d4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# /*******************************************************
# * Copyright (C) 2013-2014 CloudRunner.io <info@cloudrunner.io>
# *
# * Proprietary and confidential
# * This file is part of CloudRunner Server.
# *
# * CloudRunner Server can not be copied and/or distributed
# * without the express permission of CloudRunner.io
# *******************************************************/
VERSION = '1.6.2'
| 31 | 65 | 0.528226 |
283da40436d0049da146ec666cd79375edc6e44b | 2,644 | py | Python | pal/runner/intel_64bit_runner.py | mars-research/pal | 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | [
"MIT"
] | 26 | 2020-01-06T23:53:17.000Z | 2022-02-01T08:58:21.000Z | pal/runner/intel_64bit_runner.py | mars-research/pal | 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | [
"MIT"
] | 30 | 2019-11-13T00:55:22.000Z | 2022-01-06T08:09:35.000Z | pal/runner/intel_64bit_runner.py | mars-research/pal | 5977394cda8750ff5dcb89c2bf193ec1ef4cd137 | [
"MIT"
] | 14 | 2019-11-15T16:56:22.000Z | 2021-12-22T10:14:17.000Z | import os
import copy
from pal.runner.abstract_runner import AbstractRunner
from pal.parser import parse_registers
from pal.parser import parse_instructions
class Intel64bitRunner(AbstractRunner):
    """Runner for 64-bit Intel targets.

    Parses the Intel register and instruction definitions shipped under the
    PAL data directory and feeds them to every configured output generator.
    """

    # (input subdirectory under <pal_data_dir>/intel, output subdirectory)
    _REGISTER_SETS = (
        ("register/control_register", "control_register"),
        ("register/cpuid", "cpuid"),
        ("register/msr", "msr"),
        ("register/vmcs", "vmcs"),
    )
    _INSTRUCTION_SETS = (
        ("instruction/architectural", "instruction"),
        ("instruction/logical", "instruction"),
    )

    def run(self, generators):
        """Generate outputs for every definition set with each generator.

        :param generators: iterable of generator objects exposing
            ``generate_registers(regs, outdir)`` and
            ``generate_instructions(instructions, outdir)``.
        """
        input_root = self.config.pal_data_dir
        output_root = self.config.pal_output_dir

        # Language-specific output layout adjustments.
        if self.config.language in ("c", "c++11"):
            output_root = os.path.join(output_root, "include/pal")
        if self.config.language == "rust":
            output_root = os.path.join(output_root, "src")
            # A Rust crate needs an (initially empty) lib.rs to build.
            libfile = os.path.join(output_root, "lib.rs")
            if not os.path.exists(output_root):
                os.makedirs(output_root)
            if not os.path.exists(libfile):
                with open(libfile, 'w'):
                    pass

        # Parse each definition set fresh for every generator (generators may
        # mutate what they receive; a deepcopy guards against cross-talk).
        for generator in generators:
            for subdir, outname in self._REGISTER_SETS:
                self._generate(generator.generate_registers, parse_registers,
                               input_root, output_root, subdir, outname)
            for subdir, outname in self._INSTRUCTION_SETS:
                self._generate(generator.generate_instructions, parse_instructions,
                               input_root, output_root, subdir, outname)

    @staticmethod
    def _generate(generate, parse, input_root, output_root, subdir, outname):
        """Parse one intel/<subdir> definition directory and emit its output."""
        indir = os.path.join(input_root, "intel", subdir)
        outdir = os.path.join(output_root, outname)
        os.makedirs(outdir, exist_ok=True)
        items = parse(indir)
        generate(copy.deepcopy(items), outdir)
008504d7a8f0caeb399718724b18a4fd1e4f0720 | 6,525 | py | Python | app/api/v1/statistics/image.py | kalraid/HoloLibraryAPI | 5470b279ae6922f85353c8ecf73a9fc13d83552a | [
"Apache-2.0"
] | 1 | 2022-01-27T02:30:39.000Z | 2022-01-27T02:30:39.000Z | app/api/v1/statistics/image.py | kalraid/HoloLibraryAPI | 5470b279ae6922f85353c8ecf73a9fc13d83552a | [
"Apache-2.0"
] | 31 | 2021-09-20T01:21:24.000Z | 2022-02-08T01:29:18.000Z | app/api/v1/statistics/image.py | kalraid/HoloLibraryAPI | 5470b279ae6922f85353c8ecf73a9fc13d83552a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import json
from falcon.asgi import Request, WebSocket
from falcon.errors import WebSocketDisconnected
import log
from app.api.common import BaseResource
from app.database import get_session
from app.model import DrawStatistics, HoloTwitterDraw, HoloTwitterCustomDraw, DrawStatisticsMenual, \
DrawStatisticsMenualHistory
LOG = log.get_logger()
db_session = get_session()
class Count(BaseResource):
    """
    Handle for endpoint: /v1/statistics/count/image

    Accepts a websocket (subprotocol "statistics") and records drawing
    statistics events pushed by the client into the database.
    """

    async def on_websocket(self, req: Request, socket: WebSocket):
        """Accept the socket and process statistics messages until disconnect.

        Each received message is expected to be a JSON object with keys
        'message' (event name), 'data' (containing 'index'), 'user', and
        'type'/'drawType' — assumed from usage below; confirm with the client.
        """
        # Sockets accepted by this handler (local to this connection only).
        statistics_connection_clients = []
        LOG.info("----- /v1/statistics/count/image statistic websocket init start ---------")
        LOG.info(req)
        # Only speak the "statistics" subprotocol.
        if 'statistics' not in socket.subprotocols:
            return
        try:
            await socket.accept(subprotocol='statistics')
            statistics_connection_clients.append(socket)
            LOG.info(statistics_connection_clients)
        except WebSocketDisconnected:
            LOG.error("statistic websocket WebSocketDisconnected - not accepted")
            if socket in statistics_connection_clients:
                statistics_connection_clients.remove(socket)
                LOG.info(statistics_connection_clients)
            return
        while True:
            try:
                statistics_object = await socket.receive_media()
                LOG.info(statistics_object)
                event = statistics_object['message']
                # NOTE(review): a payload without 'data'/'index' terminates the
                # whole connection loop (return, not continue) — confirm intent.
                if not statistics_object['data'] or 'index' not in statistics_object['data']:
                    return
                if event in DrawStatistics().get_auto_event_names():  # click, download, disable
                    # Automatic events: append a raw DrawStatistics row.
                    drawStatistics = DrawStatistics()
                    drawStatistics.event = event
                    if event == 'disable':
                        # Disabling also flags the drawing itself as unused.
                        if 'base' in statistics_object['type']:
                            holoTwitterDraw = HoloTwitterDraw() \
                                .get_by_id(db_session, statistics_object['data']['index'])
                            holoTwitterDraw.isUse = 'N'
                        elif 'custom' in statistics_object['type']:
                            holoTwitterCustomDraw = HoloTwitterCustomDraw() \
                                .get_by_id(db_session, statistics_object['data']['index'])
                            holoTwitterCustomDraw.isUse = 'N'
                    drawStatistics.user_uuid = statistics_object['user']
                    if statistics_object['type'] == 'base':
                        drawStatistics.holo_twitter_draw_id = statistics_object['data']['index']
                    elif statistics_object['type'] == 'custom':
                        drawStatistics.holo_twitter_custom_draw_id = statistics_object['data']['index']
                    db_session.add(drawStatistics)
                elif event in DrawStatisticsMenualHistory().get_manual_event_names():  # like, dislike, adult, ban
                    # Manual events: append a history row and update the
                    # aggregated counters via save_count().
                    drawStatisticsMenualHistory = DrawStatisticsMenualHistory()
                    drawStatisticsMenualHistory.event = event
                    draw_type = statistics_object['drawType']
                    if event == 'ban':  # isUse - N
                        if 'first' in draw_type:
                            holoTwitterDraw = HoloTwitterDraw() \
                                .get_by_id(db_session, statistics_object['data']['index'])
                            holoTwitterDraw.isUse = 'N'
                        elif 'second' in draw_type:
                            holoTwitterCustomDraw = HoloTwitterCustomDraw() \
                                .get_by_id(db_session, statistics_object['data']['index'])
                            holoTwitterCustomDraw.isUse = 'N'
                    drawStatisticsMenualHistory.user_uuid = statistics_object['user']
                    draw_id = statistics_object['data']['index']
                    if 'first' in draw_type:
                        drawStatisticsMenualHistory.holo_twitter_draw_id = draw_id
                    elif 'second' in draw_type:
                        drawStatisticsMenualHistory.holo_twitter_custom_draw_id = draw_id
                    save_count(db_session, draw_id, draw_type, event)
                    db_session.add(drawStatisticsMenualHistory)
                # Persist everything recorded for this message.
                db_session.commit()
            except WebSocketDisconnected:
                LOG.error("statistic websocket WebSocketDisconnected")
                if socket in statistics_connection_clients:
                    statistics_connection_clients.remove(socket)
                    LOG.info(statistics_connection_clients)
                return
            except TypeError:
                # Malformed payload — skip this message and keep listening.
                LOG.error("statistic websocket TypeError")
                pass
            except json.JSONDecodeError:
                LOG.error("statistic websocket JSONDecodeError")
                pass
def save_count(session, draw_id, draw_type, event):
    """Create or increment the aggregated counter row for a manual event.

    :param session: SQLAlchemy session used to query and persist the row.
    :param draw_id: primary key of the drawing the event applies to.
    :param draw_type: string containing 'first' (base drawing) or 'second'
        (custom drawing); selects which foreign-key column is used.
    :param event: one of 'like', 'dislike', 'adult', 'ban'.

    The caller is responsible for committing the session.

    Bug fixes vs. the original implementation:
    - the 'adult' event was written into the ``ban`` column and vice versa;
    - on an existing row, 'adult'/'ban' were reset to 1 instead of being
      incremented (inconsistent with 'like'/'dislike');
    - an unrecognized ``draw_type`` raised an unbound-local NameError; it is
      now a no-op.
    """
    if event not in ('like', 'dislike', 'adult', 'ban'):
        return

    # Look up the existing aggregate row for this drawing/event pair.
    if 'first' in draw_type:
        record = session.query(DrawStatisticsMenual) \
            .filter(DrawStatisticsMenual.holo_twitter_draw_id == draw_id) \
            .filter(DrawStatisticsMenual.event == event).first()
    elif 'second' in draw_type:
        record = session.query(DrawStatisticsMenual) \
            .filter(DrawStatisticsMenual.holo_twitter_custom_draw_id == draw_id) \
            .filter(DrawStatisticsMenual.event == event).first()
    else:
        # Unknown draw type: nothing sensible to record.
        return

    if record is None:
        record = DrawStatisticsMenual()
        record.event = event
        if 'first' in draw_type:
            record.holo_twitter_draw_id = draw_id
        else:
            record.holo_twitter_custom_draw_id = draw_id
        session.add(record)

    # Increment the counter column matching the event name (columns on
    # DrawStatisticsMenual are named like the events: like/dislike/adult/ban).
    current = getattr(record, event, None) or 0
    setattr(record, event, current + 1)
f624a7acd674d5bfcb80b6847c5eef0d2305f4f2 | 21,942 | py | Python | finch/processes/utils.py | cjauvin/finch | 02647cac669ac01572203ded7a8384f3324f4e0e | [
"Apache-2.0"
] | null | null | null | finch/processes/utils.py | cjauvin/finch | 02647cac669ac01572203ded7a8384f3324f4e0e | [
"Apache-2.0"
] | null | null | null | finch/processes/utils.py | cjauvin/finch | 02647cac669ac01572203ded7a8384f3324f4e0e | [
"Apache-2.0"
] | null | null | null | from datetime import timedelta
import logging
from multiprocessing.pool import ThreadPool
from pathlib import Path
import re
import json
from typing import (
Callable,
Deque,
Dict,
Generator,
Iterable,
List,
Optional,
Tuple,
Union,
)
import zipfile
import cftime
import netCDF4
import numpy as np
import pandas as pd
from pywps import (
BoundingBoxInput,
BoundingBoxOutput,
ComplexInput,
ComplexOutput,
FORMATS,
LiteralInput,
LiteralOutput,
Process,
configuration
)
from pywps.inout.outputs import MetaFile, MetaLink4
import requests
from requests.exceptions import ConnectionError, InvalidSchema, MissingSchema
import sentry_sdk
import xarray as xr
from netCDF4 import num2date
import xclim
LOGGER = logging.getLogger("PYWPS")
PywpsInput = Union[LiteralInput, ComplexInput, BoundingBoxInput]
PywpsOutput = Union[LiteralOutput, ComplexOutput, BoundingBoxOutput]
RequestInputs = Dict[str, Deque[PywpsInput]]
# These are parameters that set options. They are not `compute` arguments.
INDICATOR_OPTIONS = ['check_missing', 'missing_options', "cf_compliance", "data_validation"]
def log_file_path(process: Process) -> Path:
    """Location of the log file inside ``process``'s working directory."""
    workdir = Path(process.workdir)
    return workdir / "log.txt"
def write_log(
    process: Process,
    message: str,
    level=logging.INFO,
    *,
    process_step: str = None,
    subtask_percentage: int = None,
):
    """Log the process status.

    - With the logging module
    - To a log file stored in the process working directory
    - Update the response document with the message and the status percentage

    subtask_percentage: not the percentage of the whole process, but the percent done
    in the current processing step. (see `process.status_percentage_steps`)
    """
    LOGGER.log(level, message)

    status_percentage = process.response.status_percentage

    # if a process_step is given, set this as the status percentage
    if process_step:
        status_percentage = process.status_percentage_steps.get(
            process_step, status_percentage
        )

    # if a subtask percentage is given, add this value to the status_percentage
    if subtask_percentage is not None:
        # Find the step percentages [current, next) that bracket the current
        # status, then interpolate the subtask progress inside that interval.
        steps_percentages = list(process.status_percentage_steps.values())
        for n, percent in enumerate(steps_percentages):
            if status_percentage < percent:
                next_step_percentage = percent
                current_step_percentage = steps_percentages[n - 1]
                break
        else:
            # for-else: no declared step lies ahead of the current status;
            # fall back to the last step (or the 1-100 range if none exist).
            current_step_percentage, next_step_percentage = 1, 100
            if steps_percentages:
                current_step_percentage = steps_percentages[-1]

        step_delta = next_step_percentage - current_step_percentage
        sub_percentage = subtask_percentage / 100 * step_delta
        status_percentage = current_step_percentage + int(sub_percentage)

    # Only persist INFO-and-above messages to the on-disk log file.
    if level >= logging.INFO:
        log_file_path(process).open("a", encoding="utf8").write(message + "\n")
    try:
        process.response.update_status(message, status_percentage=status_percentage)
    except AttributeError:
        # Best-effort: the response may not support update_status here.
        pass
def get_attributes_from_config():
    """Return the metadata attributes explicitly set in section finch:metadata.

    Only explicitly-passed options are kept: the section declares no defaults
    of its own, so any key also present in the global defaults (which include
    all environment variables) was not passed by the user and is dropped.
    This misbehaves if a passed field shares its name with a default.
    """
    section_keys = set(configuration.CONFIG['finch:metadata'].keys())
    default_keys = set(configuration.CONFIG._defaults.keys())
    return {
        key: configuration.get_config_value("finch:metadata", key)
        for key in section_keys - default_keys
    }
def compute_indices(
    process: Process, func: Callable, inputs: RequestInputs
) -> xr.Dataset:
    """Compute a climate indicator from the WPS request inputs.

    Literal inputs become keyword arguments of ``func`` (the indicator
    callable); complex inputs are decoded as JSON or opened as (possibly
    remote) netCDF datasets, from which the variable to operate on is
    selected. Returns a new dataset holding the indicator output, with
    global attributes taken from the first input dataset plus the user
    attributes configured in the ``finch:metadata`` section.

    :raises KeyError: when the requested ``variable`` is absent from an
        input dataset.
    """
    kwds = {}
    global_attributes = {}

    # Literal inputs: single occurrences are unwrapped, repeated ones kept
    # as lists.
    for name, input_queue in inputs.items():
        if isinstance(input_queue[0], LiteralInput):
            value = [inp.data for inp in input_queue]
            if len(input_queue) == 1:
                value = value[0]
            kwds[name] = value

    # "variable" selects which data variable to read from input datasets;
    # it is not forwarded to the indicator itself.
    variable = kwds.pop("variable", None)

    for name, input_queue in inputs.items():
        wps_input = input_queue[0]  # renamed from `input` (shadowed a builtin)
        if isinstance(wps_input, ComplexInput):
            if wps_input.supported_formats[0] == FORMATS.JSON:
                kwds[name] = json.loads(wps_input.data)

            elif wps_input.supported_formats[0] in [FORMATS.NETCDF, FORMATS.DODS]:
                ds = try_opendap(wps_input, logging_function=lambda msg: write_log(process, msg))
                # Keep the global attributes of the first dataset seen.
                global_attributes = global_attributes or ds.attrs
                data_vars = list(ds.data_vars.values())
                if variable:
                    if variable in ds.data_vars:
                        kwds[name] = ds.data_vars[variable]
                    else:
                        # Bug fix: the message used to report the input name
                        # instead of the missing variable name.
                        raise KeyError(
                            f"Variable name '{variable}' not in data_vars {list(ds.data_vars)}"
                        )
                else:
                    # Get variable matching input parameter name.
                    if name in ds.data_vars:
                        kwds[name] = ds.data_vars[name]
                    # If only one variable in dataset, use it.
                    elif len(data_vars) == 1:
                        kwds[name] = data_vars[0]

    user_attrs = get_attributes_from_config()
    global_attributes.update(
        {
            "climateindex_package_id": "https://github.com/Ouranosinc/xclim",
            "product": "derived climate index",
        },
        **user_attrs
    )

    # Missing-value/validation settings are xclim options, not arguments of
    # the indicator's compute function.
    options = {name: kwds.pop(name) for name in INDICATOR_OPTIONS if name in kwds}
    with xclim.core.options.set_options(**options):
        out = func(**kwds)

    output_dataset = xr.Dataset(
        data_vars=None, coords=out.coords, attrs=global_attributes
    )

    # fix frequency of computed output (xclim should handle this)
    if output_dataset.attrs.get("frequency") == "day" and "freq" in kwds:
        conversions = {
            "YS": "yr",
            "MS": "mon",
            "QS-DEC": "seasonal",
            "AS-JUL": "seasonal",
        }
        output_dataset.attrs["frequency"] = conversions.get(kwds["freq"], "day")

    output_dataset[out.name] = out
    return output_dataset
def drs_filename(ds: xr.Dataset, variable: str = None):
    """Copied and modified from https://github.com/bird-house/eggshell
    which doesn't have a release usable by finch.

    generates filename according to the data reference syntax (DRS)
    based on the metadata in the resource.
    http://cmip-pcmdi.llnl.gov/cmip5/docs/cmip5_data_reference_syntax.pdf
    https://pypi.python.org/pypi/drslib

    :param variable: appropriate variable for filename, if not set (default), variable will
        be determined from the dataset variables.

    :return str: DRS filename
    :raises KeyError: When the dataset doesn't have the required attributes.
    """
    # A single data variable always wins, even over an explicit `variable`
    # argument; otherwise fall back to the first variable with >= 3 dims.
    if len(ds.data_vars) == 1:
        variable = list(ds.data_vars)[0]
    if variable is None:
        variable = [k for k, v in ds.variables.items() if len(v.dims) >= 3][0]
    # Underscores are the DRS field separator, so they are replaced in names.
    variable = variable.replace("_", "-")

    # CORDEX example: tas_EUR-11_ICHEC-EC-EARTH_historical_r3i1p1_DMI-HIRHAM5_v1_day
    cordex_pattern = "{variable}_{domain}_{driving_model}_{experiment}_{ensemble}_{model}_{version}_{frequency}"
    # CMIP5 example: tas_MPI-ESM-LR_historical_r1i1p1
    cmip5_pattern = "{variable}_{model}_{experiment}_{ensemble}"

    if ds.attrs["project_id"] in ("CORDEX", "EOBS"):
        filename = cordex_pattern.format(
            variable=variable,
            domain=ds.attrs["CORDEX_domain"],
            driving_model=ds.attrs["driving_model_id"],
            experiment=ds.attrs["experiment_id"],
            ensemble=ds.attrs["driving_model_ensemble_member"],
            model=ds.attrs["model_id"],
            version=ds.attrs["rcm_version_id"],
            frequency=ds.attrs["frequency"],
        )
    elif ds.attrs["project_id"] == "CMIP5":
        # Ensemble member is assembled from realization/init/physics numbers.
        ensemble = "r{}i{}p{}".format(
            ds.attrs["driving_realization"],
            ds.attrs["driving_initialization_method"],
            ds.attrs["driving_physics_version"],
        )
        filename = cmip5_pattern.format(
            variable=variable,
            model=ds.attrs["driving_model_id"],
            experiment=ds.attrs["driving_experiment_id"].replace(",", "+"),
            ensemble=ensemble,
        )
    else:
        # Unknown project: join whatever attributes are available.
        params = [
            variable,
            ds.attrs.get("frequency"),
            ds.attrs.get("model_id"),
            ds.attrs.get("driving_model_id"),
            ds.attrs.get("experiment_id", "").replace(",", "+"),
            ds.attrs.get("driving_experiment_id", "").replace(",", "+"),
        ]
        params = [k for k in params if k]
        filename = "_".join(params)

    # Append the YYYYMMDD-YYYYMMDD temporal coverage when a time axis exists.
    if "time" in ds:
        date_from = ds.time[0].values
        date_to = ds.time[-1].values
        if "units" in ds.time.attrs:
            # times are encoded
            units = ds.time.units
            calendar = ds.time.attrs.get("calendar", "standard")
            date_from = num2date(date_from, units, calendar)
            date_to = num2date(date_to, units, calendar)
        date_from = pd.to_datetime(str(date_from))
        date_to = pd.to_datetime(str(date_to))
        filename += f"_{date_from:%Y%m%d}-{date_to:%Y%m%d}"

    # sanitize any spaces that came from the source input's metadata
    filename = filename.replace(" ", "-")

    filename += ".nc"
    return filename
def try_opendap(
    input: ComplexInput,
    *,
    chunks=None,
    decode_times=True,
    chunk_dims=None,
    logging_function=lambda message: None,
) -> xr.Dataset:
    """Try to open the file as an OPeNDAP url and chunk it.

    If OPeNDAP fails, access the file directly.

    :param input: WPS complex input pointing at a (possibly remote) netCDF
        resource.
    :param chunks: explicit chunking to use; when falsy, one is computed
        with ``chunk_dataset``.
    :param decode_times: forwarded to ``xr.open_dataset``.
    :param chunk_dims: dimension names eligible for automatic chunking.
    :param logging_function: callable receiving progress messages.
    """
    url = input.url
    logging_function(f"Try opening DAP link {url}")

    if is_opendap_url(url):
        ds = xr.open_dataset(url, chunks=chunks, decode_times=decode_times)
        logging_function(f"Opened dataset as an OPeNDAP url: {url}")
    else:
        if url.startswith("http"):
            # Accessing the file property writes it to disk if it's a url
            logging_function(f"Downloading dataset for url: {url}")
        else:
            logging_function(f"Opening as local file: {input.file}")
        ds = xr.open_dataset(input.file, chunks=chunks, decode_times=decode_times)

    # To handle large number of grid cells (50+) in subsetted data
    if "region" in ds.dims and "time" in ds.dims:
        chunks = dict(time=-1, region=5)
        ds = ds.chunk(chunks)
    # No explicit chunking requested: compute one bounded at 1M elements.
    if not chunks:
        ds = ds.chunk(chunk_dataset(ds, max_size=1000000, chunk_dims=chunk_dims))
    return ds
def process_threaded(function: Callable, inputs: Iterable):
    """Apply ``function`` to every item of ``inputs``.

    Uses a thread pool when the ``subset_threads`` configuration value is
    greater than one, and runs serially otherwise. In the threaded case the
    order of the results is not guaranteed.
    """
    n_threads = int(configuration.get_config_value("finch", "subset_threads"))
    if n_threads <= 1:
        return [function(item) for item in inputs]

    pool = ThreadPool(processes=n_threads)
    results = list(pool.imap_unordered(function, inputs))
    pool.close()
    pool.join()
    return results
def chunk_dataset(ds, max_size=1000000, chunk_dims=None):
    """Return a chunking dict keeping a dataset's chunk size below ``max_size``.

    Cycles through the chunkable dimensions, halving one chunk length at a
    time, until the product of all chunk lengths is under ``max_size``.

    :param ds: dataset-like object exposing ``sizes`` (dim -> length mapping)
        and ``dims`` (iterable of dimension names).
    :param max_size: maximum number of elements allowed in a single chunk.
    :param chunk_dims: optional iterable restricting which dimensions may be
        chunked; names not found in the dataset are ignored (with a warning
        when none remain).
    """
    chunks = dict(ds.sizes)
    dims = set(ds.dims).intersection(chunk_dims or ds.dims)
    if not dims:
        LOGGER.warning(
            (f"Provided dimension names for chunking ({chunk_dims}) were "
             f"not found in dataset dims ({ds.dims}). No chunking was done.")
        )
        return chunks

    def chunk_size():
        # Number of elements in a single chunk with the current lengths.
        total = 1
        for length in chunks.values():
            total *= length
        return total

    # Halve one dimension per iteration, round-robin over the chunkable dims.
    dim_order = list(dims)
    i = 0
    while chunk_size() >= max_size:
        # Bug fix: when every chunkable dimension is already at 1, the target
        # size is unreachable and the original implementation looped forever.
        if all(chunks[dim] == 1 for dim in dims):
            break
        dim = dim_order[i % len(dim_order)]
        chunks[dim] = max(chunks[dim] // 2, 1)
        i += 1
    return chunks
def make_metalink_output(
    process: Process, files: List[Path], description: str = None
) -> MetaLink4:
    """Bundle a list of netCDF files into a MetaLink4 document for ``process``."""
    link = MetaLink4(
        identity=process.identifier,
        description=description,
        publisher="Finch",
        workdir=process.workdir,
    )
    for path in files:
        meta_file = MetaFile(identity=path.stem, fmt=FORMATS.NETCDF)
        meta_file.file = str(path)
        link.append(meta_file)
    return link
def is_opendap_url(url):
    """
    Check if a provided url is an OpenDAP url.

    The DAP Standard specifies that a specific tag must be included in the
    Content-Description header of every request. This tag is one of:
        "dods-dds" | "dods-das" | "dods-data" | "dods-error"
    So we can check if the header starts with `dods`.

    Even then, some OpenDAP servers seem to not include the specified header...
    So we need to let the netCDF4 library actually open the file.
    """
    try:
        content_description = requests.head(url, timeout=5).headers.get(
            "Content-Description"
        )
    except (ConnectionError, MissingSchema, InvalidSchema):
        # Unreachable host, or not an http(s) URL at all.
        return False

    if content_description:
        return content_description.lower().startswith("dods")

    # No Content-Description header: fall back to opening the link with
    # netCDF4 and inspecting the reported on-disk format.
    try:
        # For a non-DAP URL, this just hangs python.
        dataset = netCDF4.Dataset(url)
    except OSError:
        return False
    try:
        return dataset.disk_format in ("DAP2", "DAP4")
    finally:
        # Bug fix: the original leaked the netCDF4 dataset handle.
        dataset.close()
def single_input_or_none(inputs, identifier) -> Optional[str]:
    """Return the data of the first input queued under ``identifier``.

    Returns None when no input with that identifier was provided.
    """
    try:
        first = inputs[identifier][0]
    except KeyError:
        return None
    return first.data
def netcdf_file_list_to_csv(
    netcdf_files: Union[List[Path], List[str]], output_folder, filename_prefix
) -> Tuple[List[str], str]:
    """Write csv files for a list of netcdf files.

    Produces one csv file per calendar type, along with a metadata folder in
    the output_folder.

    Returns the list of csv file paths and the path of the metadata folder.
    """
    output_folder = Path(output_folder)
    output_folder.mkdir(parents=True, exist_ok=True)

    def get_attrs_fallback(ds, *args):
        # Return the first of the given attribute names present on `ds`.
        for key in args:
            try:
                return ds.attrs[key]
            except KeyError:
                continue
        raise KeyError(f"Couldn't find any attribute in [{', '.join(args)}]")

    metadata = {}
    # Dataframes are grouped by time calendar so the time axes are comparable.
    concat_by_calendar = {}
    for file in netcdf_files:
        ds = xr.open_dataset(str(file), decode_times=False)
        calendar = ds.time.calendar
        ds["time"] = xr.decode_cf(ds).time

        for variable in ds.data_vars:
            # for a specific dataset the keys are different:
            # BCCAQv2+ANUSPLIN300_BNU-ESM_historical+rcp85_r1i1p1_19500101-21001231
            model = get_attrs_fallback(ds, "driving_model_id", "GCM__model_id")
            experiment = get_attrs_fallback(
                ds, "driving_experiment_id", "GCM__experiment"
            )
            experiment = experiment.replace(",", "_")

            # Build a unique column name: <variable>_<model>_<experiment>[_(units)]
            output_variable = f"{variable}_{model}_{experiment}"

            units = ds[variable].units
            if units:
                output_variable += f"_({units})"

            ds = ds.rename({variable: output_variable})

            df = dataset_to_dataframe(ds)

            if calendar not in concat_by_calendar:
                # First frame of a calendar keeps lat/lon as columns so the
                # output csv carries the coordinates.
                if "lat" in df.index.names and "lon" in df.index.names:
                    df = df.reset_index(["lat", "lon"])
                concat_by_calendar[calendar] = [df]
            else:
                # Subsequent frames only contribute their data column.
                concat_by_calendar[calendar].append(df[output_variable])

            metadata[output_variable] = format_metadata(ds)

    output_csv_list = []
    for calendar_type, data in concat_by_calendar.items():
        output_csv = output_folder / f"{filename_prefix}_{calendar_type}.csv"
        concat = pd.concat(data, axis=1)

        # Drop the helper "region" level when present (subsetted outputs).
        try:
            concat = concat.reset_index().set_index("time").drop(columns="region")
        except KeyError:
            pass
        dropna_threshold = 3  # lat + lon + at least one value
        concat.dropna(thresh=dropna_threshold, inplace=True)

        concat.to_csv(output_csv)
        output_csv_list.append(output_csv)

    # One metadata csv per output column, stored next to the data files.
    metadata_folder = output_folder / "metadata"
    metadata_folder.mkdir(parents=True, exist_ok=True)
    for output_variable, info in metadata.items():
        metadata_file = metadata_folder / f"{output_variable}.csv"
        metadata_file.write_text(info)

    return output_csv_list, str(metadata_folder)
def dataset_to_dataframe(ds: xr.Dataset) -> pd.DataFrame:
    """Convert a Dataset, while keeping the hour of the day uniform at hour=12.

    Normalizing all timestamps to noon presumably lets rows coming from
    different source files align on identical time indexes — confirm with
    the caller (netcdf_file_list_to_csv).
    """
    if not np.all(ds.time.dt.hour == 12):
        # Reassigning ds["time"] would drop the attrs; save and restore them.
        attrs = ds.time.attrs
        # np.datetime64 doesn't have the 'replace' method
        time_values = ds.time.values
        if not hasattr(time_values[0], "replace"):
            time_values = pd.to_datetime(time_values)
        ds["time"] = [y.replace(hour=12) for y in time_values]
        ds.time.attrs = attrs
    return ds.to_dataframe()
def format_metadata(ds) -> str:
    """For an xarray dataset, return its formatted metadata.

    Renders the global attributes, coordinates and data variables as a
    '#'-commented text block, one section per group.
    """

    def _render_attrs(obj, name="", comment="# ", tab=" "):
        """Return string of an object's attributes."""
        rendered = ["", name]
        rendered.extend(
            tab + key + ":: " + str(val).replace("\n", "\n" + comment + tab + " ")
            for key, val in obj.attrs.items()
        )
        return ("\n" + comment + tab).join(rendered)

    sections = (
        ({"": ds}, "Global attributes"),
        (ds.coords, "Coordinates"),
        (ds.data_vars, "Data variables"),
    )
    chunks = []
    for container, section_name in sections:
        chunks.append("# " + section_name)
        # Global attributes are not indented; the other sections are.
        indent = "" if section_name == "Global attributes" else " "
        for key, val in container.items():
            chunks.append(_render_attrs(val, key, tab=indent))
        chunks.append("\n#\n")
    return "".join(chunks)
def zip_files(
    output_filename, files: Iterable, log_function: Callable[[str, int], None] = None
):
    """Create a zipfile from a list of files or folders.

    log_function is a function that receives a message and a percentage."""
    notify = log_function or (lambda *a: None)

    with zipfile.ZipFile(
        output_filename, mode="w", compression=zipfile.ZIP_DEFLATED
    ) as archive:
        # Expand every directory into the files it contains (recursively).
        members = []
        for entry in files:
            entry = Path(entry)
            members += list(entry.rglob("*.*")) if entry.is_dir() else [entry]

        # Deepest ancestor directory shared by every member, if any; it is
        # stripped from the archive names so the zip layout stays shallow.
        shared_root = None
        for level in zip(*(list(reversed(m.parents)) for m in members)):
            if len(set(level)) != 1:
                break
            shared_root = level[0]

        total = len(members)
        for index, member in enumerate(members):
            notify(f"Zipping file {index + 1} of {total}", int(index / total * 100))
            archive.write(
                member,
                arcname=member.relative_to(shared_root) if shared_root else None,
            )
def make_tasmin_tasmax_pairs(
    filenames: List[Path],
) -> Generator[Tuple[Path, Path], None, None]:
    """Returns pairs of corresponding tasmin-tasmax files based on their filename.

    Two files correspond when the tasmax filename with 'tasmax' replaced by
    'tasmin' equals the tasmin filename (case-insensitive). Every file left
    unmatched is reported to Sentry as an error.
    """
    tasmin_files = [f for f in filenames if "tasmin" in f.name.lower()]
    tasmax_files = [f for f in filenames if "tasmax" in f.name.lower()]
    # Iterate over copies: matched entries are removed from the live lists
    # so that only unmatched files remain afterwards.
    for tasmin in tasmin_files[:]:
        for tasmax in tasmax_files[:]:
            if tasmin.name.lower() == tasmax.name.lower().replace("tasmax", "tasmin"):
                yield tasmin, tasmax
                tasmax_files.remove(tasmax)
                tasmin_files.remove(tasmin)
                break
    # BUG FIX: this previously iterated `tasmax_files + tasmax_files`, which
    # reported unmatched tasmax files twice and unmatched tasmin files never.
    for f in tasmin_files + tasmax_files:
        sentry_sdk.capture_message(
            f"Couldn't find matching tasmin or tasmax for: {f}", level="error"
        )
def fix_broken_time_index(ds: xr.Dataset):
    """Fix for a single broken index in a specific file.

    Looks for one corrupt entry in the time coordinate (0 when times are
    still numerically encoded, or 1850-01-01 when decoded) and, if the
    series around it is daily, replaces it by the interpolated value.
    Modifies ``ds`` in place.
    """
    if "time" not in ds.dims:
        return

    time_dim = ds.time.values
    # A 'units' attribute means times are still raw numeric offsets.
    times_are_encoded = "units" in ds.time.attrs

    if times_are_encoded:
        wrong_id = np.argwhere(np.isclose(time_dim, 0))
    else:
        wrong_id = np.argwhere(
            time_dim == cftime.DatetimeNoLeap(year=1850, month=1, day=1, hour=0)
        )

    # NOTE(review): `not wrong_id` is False-y for an empty argwhere result
    # and works for a single match, but raises ValueError (ambiguous truth
    # value) if more than one entry matches — presumably at most one broken
    # index is ever expected; confirm.
    if not wrong_id:
        return
    wrong_id = wrong_id[0, 0]

    if wrong_id == 0 or wrong_id == len(ds.time) - 1:
        # No neighbours on both sides: cannot interpolate a replacement.
        return

    daily_gap = 1.0 if times_are_encoded else timedelta(days=1)
    # Repair only when the entries around the broken one are exactly two
    # days apart, i.e. the series is daily and only this entry is wrong.
    is_daily = time_dim[wrong_id + 1] - time_dim[wrong_id - 1] == daily_gap * 2
    if is_daily:
        fixed_time = time_dim
        fixed_time[wrong_id] = time_dim[wrong_id - 1] + daily_gap
        # Reassigning ds["time"] drops the attrs; save and restore them.
        attrs = ds.time.attrs
        ds["time"] = fixed_time
        ds.time.attrs = attrs
def dataset_to_netcdf(
    ds: xr.Dataset, output_path: Union[Path, str], compression_level=0
) -> None:
    """Write an :class:`xarray.Dataset` dataset to disk, optionally using compression."""
    var_encoding = {}
    if "time" in ds.dims:
        # Encode time as float32 — better compatibility with OpenDAP in thredds.
        var_encoding["time"] = {"dtype": "single"}
    # Self-guarded: a no-op when the dataset has no time dimension.
    fix_broken_time_index(ds)

    if compression_level:
        var_encoding.update(
            (name, {"zlib": True, "complevel": compression_level})
            for name in ds.data_vars
        )

    ds.to_netcdf(str(output_path), format="NETCDF4", encoding=var_encoding)
| 33.245455 | 112 | 0.624328 |
a157f4a329b35ee3b506298ef625b31cc824a351 | 4,886 | py | Python | ExaTrkXPlots/performance.py | rlf23240/ExaTrkXPlotting | 777ca91e6b0d4389a643bca166c5413f8fc98ca8 | [
"MIT"
] | null | null | null | ExaTrkXPlots/performance.py | rlf23240/ExaTrkXPlotting | 777ca91e6b0d4389a643bca166c5413f8fc98ca8 | [
"MIT"
] | null | null | null | ExaTrkXPlots/performance.py | rlf23240/ExaTrkXPlotting | 777ca91e6b0d4389a643bca166c5413f8fc98ca8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ExaTrkXPlotting import plot
import sklearn.metrics
@plot('exatrkx.performance.score_distribution', ['truth', 'score'])
def score_distribution(
    ax,
    data,
    log_scale=True,
    bins=50,
    title=None
):
    """
    Plot score distribution for true and fake data.

    :param ax: matplotlib axis object.
    :param data: Data.
    :param log_scale: Enable log scale for y-axis.
    :param bins: Bins for histogram
    :param title: Plot title.
    :return:
    """
    scores = data['score']
    # Coerce truth to a boolean mask; > 0.5 handles numerical arrays too.
    is_true = (data['truth'] > 0.5)

    # One step-histogram per class, true first so the legend order matches.
    for mask, label in ((is_true, 'true'), (~is_true, 'fake')):
        ax.hist(
            scores[mask],
            bins=bins,
            log=log_scale,
            histtype='step',
            lw=2,
            label=label
        )

    ax.set_xlabel('Model Output')
    ax.legend()

    if title is not None:
        ax.set_title(title)
@plot('exatrkx.performance.roc_curve', ['truth', 'score'])
def score_roc_curve(
    ax,
    data,
    title=None
):
    """
    Plot ROC curve.

    :param ax: matplotlib axis object.
    :param data: Data.
    :param title: Plot title. If None, "ROC curve, AUC = {auc:.4f}" will be used.
    :return:
    """
    scores = data['score']
    # Coerce truth to a boolean mask; > 0.5 handles numerical arrays too.
    is_true = (data['truth'] > 0.5)

    if all(tag in data for tag in ['false_positive_rate', 'true_positive_rate']):
        # Caller supplied precomputed rates: reuse them instead of recomputing.
        fpr = data['false_positive_rate']
        tpr = data['true_positive_rate']
    else:
        fpr, tpr, _ = sklearn.metrics.roc_curve(is_true, scores)

    auc = sklearn.metrics.auc(fpr, tpr)

    # ROC curve plus the AUC = 0.5 diagonal for reference.
    ax.plot(fpr, tpr, lw=2)
    ax.plot([0, 1], [0, 1], '--', lw=2)

    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.tick_params(width=2, grid_alpha=0.5)

    if title is None:
        # If no title, use AUC as title.
        ax.set_title(f'ROC curve, AUC = {auc:.4f}')
    else:
        ax.set_title(title)
@plot('exatrkx.performance.precision_recall_with_threshold', ['truth', 'score'])
def precision_recall_with_threshold(
    ax,
    data,
    title=None
):
    """
    Plot precision and recall change with different threshold.

    :param ax: matplotlib axis object.
    :param data: Data.
    :param title: Plot title.
    :return:
    """
    scores = data['score']
    # Coerce truth to a boolean mask; > 0.5 handles numerical arrays too.
    is_true = (data['truth'] > 0.5)

    if all(tag in data for tag in ['precision', 'recall', 'thresholds']):
        # Caller supplied precomputed values: reuse them instead of recomputing.
        precision = data['precision']
        recall = data['recall']
        thresholds = data['thresholds']
    else:
        precision, recall, thresholds = sklearn.metrics.precision_recall_curve(
            is_true,
            scores
        )

    # precision/recall have one more entry than thresholds; drop the last.
    ax.plot(thresholds, precision[:-1], label='purity', lw=2)
    ax.plot(thresholds, recall[:-1], label='efficiency', lw=2)

    ax.set_xlabel('Cut on model score')
    ax.tick_params(width=2, grid_alpha=0.5)
    ax.legend(loc='upper right')

    if title is not None:
        ax.set_title(title)
@plot('exatrkx.performance.precision_recall', ['truth', 'score'])
def precision_recall_curve(
    ax,
    data,
    title=None
):
    """
    Plot precision and recall dependency.

    :param ax: matplotlib axis object.
    :param data: Data.
    :param title: Plot title.
    :return:
    """
    scores = data['score']
    # Coerce truth to a boolean mask; > 0.5 handles numerical arrays too.
    is_true = (data['truth'] > 0.5)

    if all(tag in data for tag in ['precision', 'recall']):
        # Caller supplied precomputed values: reuse them instead of recomputing.
        precision = data['precision']
        recall = data['recall']
    else:
        precision, recall, _ = sklearn.metrics.precision_recall_curve(
            is_true,
            scores
        )

    ax.plot(precision, recall, lw=2)
    ax.set_xlabel('Purity')
    ax.set_ylabel('Efficiency')
    ax.tick_params(width=2, grid_alpha=0.5)

    if title is not None:
        ax.set_title(title)
| 24.676768 | 92 | 0.602743 |
c4ac886c6c0018757f6c0a51e04b9801244a5ce6 | 623 | py | Python | contrib/qt_translations.py | Nearkitai/Nearkitai | 305125d13a97b9816909d8c0bf14dad337e93bb7 | [
"MIT"
] | null | null | null | contrib/qt_translations.py | Nearkitai/Nearkitai | 305125d13a97b9816909d8c0bf14dad337e93bb7 | [
"MIT"
] | null | null | null | contrib/qt_translations.py | Nearkitai/Nearkitai | 305125d13a97b9816909d8c0bf14dad337e93bb7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Helpful little script that spits out a comma-separated list of
# language codes for Qt icons that should be included
# in binary transfer distributions
import glob
import os
import re
import sys
# Expect exactly two arguments: the Qt translations dir and the wallet's
# locale dir.
if len(sys.argv) != 3:
    sys.exit("Usage: %s $QTDIR/translations $NEARKITAIDIR/src/qt/locale"%sys.argv[0])

d1 = sys.argv[1]  # Qt translations directory ($QTDIR/translations)
d2 = sys.argv[2]  # wallet locale directory (src/qt/locale)

# Language codes for which Qt ships a qt_<code>.qm translation file.
l1 = set([ re.search(r'qt_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d1, 'qt_*.qm')) ])
# Language codes for which the wallet ships a nearkitai_<code>.qm file.
l2 = set([ re.search(r'nearkitai_(.*).qm', f).group(1) for f in glob.glob(os.path.join(d2, 'nearkitai_*.qm')) ])

# Python 2 print statement: emit the sorted, comma-separated intersection —
# the codes supported by both Qt and the wallet.
print ",".join(sorted(l1.intersection(l2)))
| 27.086957 | 112 | 0.685393 |
eedc2ae73017a47f985b8ec32aa1644ef2dbd4a0 | 1,850 | py | Python | mindhome_alpha/erpnext/education/doctype/fees/test_fees.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/education/doctype/fees/test_fees.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/education/doctype/fees/test_fees.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
from frappe.utils import nowdate
from frappe.utils.make_random import get_random
from erpnext.education.doctype.program.test_program import make_program_and_linked_courses
# test_records = frappe.get_test_records('Fees')
class TestFees(unittest.TestCase):
    """Integration test for the Fees doctype (requires a Frappe test site)."""

    def test_fees(self):
        # Arrange: a random existing Student fixture and a freshly created
        # program linked to two courses (education-module test helpers).
        student = get_random("Student")
        program = make_program_and_linked_courses("_Test Program 1", ["_Test Course 1", "_Test Course 2"])

        fee = frappe.new_doc("Fees")
        fee.posting_date = nowdate()
        fee.due_date = nowdate()
        fee.student = student
        fee.receivable_account = "_Test Receivable - _TC"
        fee.income_account = "Sales - _TC"
        fee.cost_center = "_Test Cost Center - _TC"
        fee.company = "_Test Company"
        fee.program = program.name

        # Two fee components totalling 50000.
        fee.extend("components", [
            {
                "fees_category": "Tuition Fee",
                "amount": 40000
            },
            {
                "fees_category": "Transportation Fee",
                "amount": 10000
            }])
        fee.save()
        fee.submit()

        # Act/Assert: submitting the Fees doc must post a balanced GL entry
        # pair — 50000 debited to the receivable account and 50000 credited
        # to income.
        gl_entries = frappe.db.sql("""
            select account, posting_date, party_type, party, cost_center, fiscal_year, voucher_type,
            voucher_no, against_voucher_type, against_voucher, cost_center, company, credit, debit
            from `tabGL Entry` where voucher_type=%s and voucher_no=%s""", ("Fees", fee.name), as_dict=True)

        # Row order from the query is not guaranteed, hence the branch on
        # which account came first.
        if gl_entries[0].account == "_Test Receivable - _TC":
            self.assertEqual(gl_entries[0].debit, 50000)
            self.assertEqual(gl_entries[0].credit, 0)
            self.assertEqual(gl_entries[1].debit, 0)
            self.assertEqual(gl_entries[1].credit, 50000)
        else:
            self.assertEqual(gl_entries[0].credit, 50000)
            self.assertEqual(gl_entries[0].debit, 0)
            self.assertEqual(gl_entries[1].credit, 0)
            self.assertEqual(gl_entries[1].debit, 50000)
| 33.035714 | 100 | 0.728649 |
e3fd0c4837adad01c0b54da00f0f9083520abc70 | 13,140 | py | Python | EffectPFC/Network_BG.py | Olimaol/Cognitive-Learning-Agent | c60a6021432eb3d500cb269d6c49c69c3cf5abe7 | [
"MIT"
] | null | null | null | EffectPFC/Network_BG.py | Olimaol/Cognitive-Learning-Agent | c60a6021432eb3d500cb269d6c49c69c3cf5abe7 | [
"MIT"
] | null | null | null | EffectPFC/Network_BG.py | Olimaol/Cognitive-Learning-Agent | c60a6021432eb3d500cb269d6c49c69c3cf5abe7 | [
"MIT"
] | null | null | null | """
Basal Ganglia Model based on the version of F. Escudero
Modified by A. Schwarz
Version 1.0 - 29.05.2018
"""
from ANNarchy import Constant, Neuron, Synapse, Population, Projection, Uniform, setup
from parameters import params
from changed_val import changed
import numpy as np
np.random.seed()
setup(num_threads=params['num_threads'] )
#Model parameters
baseline_dopa = Constant('baseline_dopa', params['baseline_dopa'])
reversal = Constant('reversal', changed['reversal_SNr'])
#####################################################
########## Neuron models #########################
#####################################################
LinearNeuron = Neuron(
parameters="""
tau = 10.0 : population
noise = 0.0 : population
baseline = 0.0
""",
equations="""
tau * dmp/dt + mp = sum(exc) - sum(inh) + baseline + noise * Uniform(-1.0,1.0)
r = pos(mp) : max=0.15
"""
)
DopamineNeuron = Neuron(
parameters="""
tau = 10.0 : population
firing = 0 : population
baseline = 0.0
""",
equations="""
test=sum(inh)
aux = if (sum(exc)>0): pos(1.0-baseline-sum(inh)) else: -10 * sum(inh)
tau * dmp/dt + mp = firing * aux + baseline
r = pos(mp)
"""
)
InputNeuron = Neuron(
parameters="""
tau = 10.0 : population
baseline = 0.0
""",
equations="""
tau * dmp/dt + mp = baseline * (1 + sum(att))
r = pos(mp)
"""
)
ScaledNeuron = Neuron(
parameters="""
tau = 10.0 : population
noise = 0.0 : population
baseline = 0.0
""",
equations="""
tau * dmp/dt + mp = (sum(exc) -sum(inh) + baseline + noise * Uniform(-1.0,1.0)) * (1 + sum(att))
r = pos(mp)
"""
)
#####################################################
########## Synapse models ########################
#####################################################
PostCovariance = Synapse(
parameters="""
tau = 15000.0 : projection
tau_alpha = 1.0 : projection
regularization_threshold = 3.5 : projection
threshold_post = 0.0 : projection
threshold_pre = 0.15 : projection
alpha_factor = 15.0 : projection
""",
equations="""
tau_alpha * dalpha/dt + alpha = pos(post.mp - regularization_threshold) * alpha_factor
trace = (pre.r - mean(pre.r) - threshold_pre) * pos(post.r - mean(post.r) - threshold_post)
delta = (trace - alpha*pos(post.r - mean(post.r) - threshold_post) * pos(post.r - mean(post.r) - threshold_post)*w)
tau * dw/dt = delta : min=0
"""
)
ReversedSynapse = Synapse(
parameters="""
""",
psp="""
w * pos(reversal - pre.r)
"""
)
#DA_typ = 1 ==> D1 type DA_typ = -1 ==> D2 type
DAPostCovarianceNoThreshold = Synapse(
parameters="""
tau=75.0 : projection
tau_alpha=1.0 : projection
tau_trace=60.0 : projection
regularization_threshold=1.0 : projection
K_burst = 1.0 : projection
K_dip = 0.4 : projection
DA_type = 1 : projection
threshold_pre=0.15 : projection
threshold_post=0.0 : projection
""",
equations="""
tau_alpha * dalpha/dt + alpha = pos(post.mp - regularization_threshold)
dopa_sum = 2.0 * (post.sum(dopa) - baseline_dopa)
trace = pos(post.r - mean(post.r) - threshold_post) * (pre.r - mean(pre.r) - threshold_pre)
condition_0 = if (trace>0.0) and (w >0.0): 1 else: 0
dopa_mod = if (DA_type*dopa_sum>0): DA_type*K_burst*dopa_sum else: condition_0*DA_type*K_dip*dopa_sum
delta = (dopa_mod* trace - alpha*pos(post.r - mean(post.r) - threshold_post)*pos(post.r - mean(post.r) - threshold_post))
tau * dw/dt = delta : min=0
"""
)
#Excitatory synapses STN -> SNr
DA_excitatory = Synapse(
parameters="""
tau=50.0 : projection
tau_alpha=1.0 : projection
tau_trace=60.0 : projection
regularization_threshold=2.6 : projection
K_burst = 1.0 : projection
K_dip = 0.4 : projection
DA_type= 1 : projection
threshold_pre=0.0 : projection
threshold_post= -0.15 : projection
trace_pos_factor = 1.0 : projection
""",
equations="""
tau_alpha * dalpha/dt + alpha = pos(post.mp - regularization_threshold)
dopa_sum = 2.0 * (post.sum(dopa) - baseline_dopa)
a = mean(post.r) - min(post.r) - 0.45 : postsynaptic
post_thresh = if (-a<threshold_post): -a else: threshold_post : postsynaptic
trace = pos(pre.r - mean(pre.r) - threshold_pre) * (post.r - mean(post.r) - post_thresh)
aux = if (trace<0.0): 1 else: 0
dopa_mod = if (dopa_sum>0): K_burst * dopa_sum * ((1-trace_pos_factor)*aux+trace_pos_factor) else: K_dip * dopa_sum * aux
delta = dopa_mod * trace - alpha * pos(trace)
tau * dw/dt = delta : min=0
"""
)
#Inhibitory synapses STRD1 -> SNr and STRD2 -> GPe
DA_inhibitory = Synapse(
parameters="""
tau=50.0 : projection
tau_alpha=1.0 : projection
tau_trace=60.0 : projection
regularization_threshold=1.0 : projection
K_burst = 1.0 : projection
K_dip = 0.4 : projection
DA_type= 1 : projection
threshold_pre=0.0 : projection
threshold_post=0.15 : projection
trace_neg_factor = 1.0 : projection
""",
equations="""
tau_alpha * dalpha/dt + alpha = pos(-post.mp - regularization_threshold)
dopa_sum = 2.0 * (post.sum(dopa) - baseline_dopa)
a = mean(post.r) - min(post.r) - 0.45 : postsynaptic
post_thresh = if (a>threshold_post) and (DA_type>0): a else: threshold_post : postsynaptic
trace = if (DA_type>0): pos(pre.r - mean(pre.r) - threshold_pre) * (mean(post.r) - post.r - post_thresh) else: pos(pre.r - mean(pre.r) - threshold_pre) * (max(post.r) - post.r - post_thresh)
aux = if (trace>0): 1 else: 0
dopa_mod = if (DA_type*dopa_sum>0): DA_type*K_burst*dopa_sum * ((1-trace_neg_factor)*aux+trace_neg_factor) else: aux*DA_type*K_dip*dopa_sum
tau * dw/dt = dopa_mod * trace - alpha * pos(trace) : min=0
"""
)
DAPrediction = Synapse(
parameters="""
tau = 100000.0 : projection
""",
equations="""
aux = if (post.sum(exc)>0): 1.0 else: 3.0 : postsynaptic
tau*dw/dt = aux * (post.r - baseline_dopa) * pos(pre.r - mean(pre.r)) : min=0
"""
)
#TraceSynapse = default annarchy synapse
##################################################
##############   CREATION OF THE NEURONS   ########
##################################################

# Common geometry for the basal ganglia output-path populations.
nBG = params['dim_BG']

# IT Input (visual input, attention-gated)
IT = Population(name='IT', geometry=params['dim_IT'], neuron=ScaledNeuron)
IT.baseline = changed['baseline_IT']
IT.noise = params['noise_IT']

# Reward Input (fast relay, tau = 1)
PPTN = Population(name='PPTN', geometry=params['dim_SN'], neuron=InputNeuron)
PPTN.tau = 1.0

# PFC
PFC = Population(name='PFC', geometry=params['dim_PFC'], neuron=LinearNeuron)
PFC.noise = params['noise_PFC']
# Alternative hand-set baseline kept for reference (disabled):
#pfc_base=np.zeros(16)
#pfc_base[9]=1.
PFC.baseline=params['baseline_PFC']#pfc_base

# SNc (dopamine source)
SNc = Population(name='SNc', geometry=params['dim_SN'], neuron=DopamineNeuron)
SNc.baseline = params['baseline_SNc']

# Striatum direct pathway
StrD1 = Population(name='StrD1', geometry=params['dim_STR'], neuron=LinearNeuron)
StrD1.noise = params['noise_Str']
StrD1.baseline = changed['baseline_STR']

# Striatum indirect pathway
StrD2 = Population(name='StrD2', geometry=params['dim_STR'], neuron=LinearNeuron)
StrD2.noise = params['noise_Str']
StrD2.baseline = changed['baseline_STR']

# Striatum feedback pathway
StrThal = Population(name='StrThal', geometry=nBG, neuron=LinearNeuron)
StrThal.noise = params['noise_StrThal']
StrThal.baseline = params['baseline_StrThal']

# SNr
SNr = Population(name='SNr', geometry=nBG, neuron=LinearNeuron)
SNr.noise = changed['noise_SNr']
SNr.baseline = changed['baseline_SNr']

# STN
STN = Population(name='STN', geometry=params['dim_STN'], neuron=LinearNeuron)
STN.noise = params['noise_STN']
STN.baseline = params['baseline_STN']

# GPe
GPe = Population(name='GPe', geometry=nBG, neuron=LinearNeuron)
GPe.noise = changed['noise_GPe']
GPe.baseline = params['baseline_GPe']

# MD (thalamus)
MD = Population(name='MD', geometry=nBG, neuron=LinearNeuron)
MD.noise = changed['noise_MD']
MD.baseline = changed['baseline_MD']
#####################################################
########   PROJECTIONS   ##############################
#####################################################

#############  FROM INPUT  #############
# Plastic cortico-cortical and cortico-striatal inputs from IT.
ITPFC = Projection(pre=IT, post=PFC, target='exc', synapse=PostCovariance)
ITPFC.connect_all_to_all(weights=changed['ITPFC.connect_all_to_all']) #Normal(0.3,0.1) )

ITStrD1 = Projection(pre=IT, post=StrD1, target='exc', synapse=DAPostCovarianceNoThreshold)
ITStrD1.connect_all_to_all(weights=Uniform(0, 0.3)) #Normal(0.15,0.15))

ITStrD2 = Projection(pre=IT, post=StrD2, target='exc', synapse=DAPostCovarianceNoThreshold)
ITStrD2.connect_all_to_all(weights=Uniform(0, 0.3)) #Normal(0.15,0.15))
ITStrD2.DA_type = -1  # D2-like dopamine modulation for the indirect pathway

ITSTN = Projection(pre=IT, post=STN, target='exc', synapse=DAPostCovarianceNoThreshold)
ITSTN.connect_all_to_all(weights=Uniform(0, 0.3)) #Normal(0.15,0.15))
ITSTN.DA_type = 1

###############  OUTPUT  ########################
SNrMD = Projection(pre=SNr, post=MD, target='inh')
SNrMD.connect_one_to_one(weights=changed['SNrMD.connect_one_to_one'])

################  REWARD  #######################
# Reward input excites SNc; StrD1 learns (DAPrediction) to cancel it.
PPTNSNc = Projection(pre=PPTN, post=SNc, target='exc')
PPTNSNc.connect_all_to_all(weights=1.0)

StrD1SNc = Projection(pre=StrD1, post=SNc, target='inh', synapse=DAPrediction)
StrD1SNc.connect_all_to_all(weights=0.5)#statt 1.0

# Broadcast dopamine ('dopa' target) to all plastic populations.
SNcStrD1 = Projection(pre=SNc, post=StrD1, target='dopa')
SNcStrD1.connect_all_to_all(weights=1.0)
SNcStrD2 = Projection(pre=SNc, post=StrD2, target='dopa')
SNcStrD2.connect_all_to_all(weights=1.0)
SNcSNr = Projection(pre=SNc, post=SNr, target='dopa')
SNcSNr.connect_all_to_all(weights=1.0)
SNcSTN = Projection(pre=SNc, post=STN, target='dopa')
SNcSTN.connect_all_to_all(weights=1.0)
SNcGPe = Projection(pre=SNc, post=GPe, target='dopa')
SNcGPe.connect_all_to_all(weights=1.0)
# Disabled dopamine targets, kept for reference:
#SNcPFC = Projection(pre=SNc, post=PFC, target='dopa')
#SNcPFC.connect_all_to_all(weights=1.0)
#SNcVA = Projection(pre=SNc, post=VA, target='dopa')
#SNcVA.connect_all_to_all(weights=1.0)

#################  TEACHING CATEGORIES  ####################
# NOTE(review): named VAPFC but its source population is MD — presumably a
# leftover from an earlier version where the thalamic relay was called VA.
VAPFC = Projection(pre=MD, post=PFC, target='exc', synapse=PostCovariance)
VAPFC.connect_one_to_one(weights=changed['VAPFC.connect_one_to_one'])

PFCMD = Projection(pre=PFC, post=MD, target='exc')
PFCMD.connect_one_to_one(weights=changed['PFCMD.connect_one_to_one'])

################  INNER BG  ###################
StrD1SNr = Projection(pre=StrD1, post=SNr, target='inh', synapse=DA_inhibitory)
StrD1SNr.connect_all_to_all(weights=changed['StrD1SNr.connect_all_to_all']) #Normal(0.025,0.025))
StrD1SNr.regularization_threshold = 1.0
StrD1SNr.DA_type = 1

STNSNr = Projection(pre=STN, post=SNr, target='exc', synapse=DA_excitatory)
STNSNr.connect_all_to_all(weights=Uniform(0, 0.05)) #Normal(0.025,0.025))

StrD2GPe = Projection(pre=StrD2, post=GPe, target='inh', synapse=DA_inhibitory)
StrD2GPe.connect_all_to_all(weights=changed['StrD2GPe.connect_all_to_all']) #Normal(0.025,0.025))
StrD2GPe.regularization_threshold = 2.0
StrD2GPe.DA_type = -1

GPeSNr = Projection(pre=GPe, post=SNr, target='inh')
GPeSNr.connect_one_to_one(weights=changed['GPeSNr.connect_one_to_one'])

###############  LATERALS  ######################
# Fixed-weight lateral inhibition within each population.
StrD1StrD1 = Projection(pre=StrD1, post=StrD1, target='inh')
StrD1StrD1.connect_all_to_all(weights=changed['StrD1StrD1.connect_all_to_all'])
STNSTN = Projection(pre=STN, post=STN, target='inh')
STNSTN.connect_all_to_all(weights=changed['STNSTN.connect_all_to_all'])
PFCPFC = Projection(pre=PFC, post=PFC, target='inh')
PFCPFC.connect_all_to_all(weights=changed['PFCPFC.connect_all_to_all'])
StrD2StrD2 = Projection(pre=StrD2, post=StrD2, target='inh')
StrD2StrD2.connect_all_to_all(weights=changed['StrD2StrD2.connect_all_to_all'])
StrThalStrThal = Projection(pre=StrThal, post=StrThal, target='inh')
StrThalStrThal.connect_all_to_all(weights=changed['StrThalStrThal.connect_all_to_all'])
# SNr laterals are excitatory through the ReversedSynapse PSP.
SNrSNr = Projection(pre=SNr, post=SNr, target='exc', synapse=ReversedSynapse)
SNrSNr.connect_all_to_all(weights=changed['SNrSNr.connect_all_to_all'])
ITIT = Projection(pre=IT, post=IT, target='inh')
ITIT.connect_all_to_all(weights=changed['ITIT.connect_all_to_all'])

#################  FEEDBACK  ####################
MDStrThal = Projection(pre=MD, post=StrThal, target='exc')
MDStrThal.connect_one_to_one(weights=changed['MDStrThal.connect_one_to_one'])
StrThalGPe = Projection(pre=StrThal, post=GPe, target='inh')
StrThalGPe.connect_one_to_one(weights=changed['StrThalGPe.connect_one_to_one'])
StrThalSNr = Projection(pre=StrThal, post=SNr, target='inh')
StrThalSNr.connect_one_to_one(weights=changed['StrThalSNr.connect_one_to_one'])

#################  ATTENTION  ####################
PFCIT = Projection(pre=PFC, post=IT, target='att')
PFCIT.connect_one_to_one(weights=Uniform(0.0, 0.03))
| 35.13369 | 200 | 0.641781 |
e1994b337edaa962da0285d26e4d796f4e50217d | 2,903 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2019_11_01/aio/_configuration.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 8 | 2021-01-13T23:44:08.000Z | 2021-03-17T10:13:36.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2019_11_01/aio/_configuration.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 2 | 2021-11-03T06:10:36.000Z | 2021-12-01T06:29:39.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/subscriptions/v2019_11_01/aio/_configuration.py | vbarbaresi/azure-sdk-for-python | 397ba46c51d001ff89c66b170f5576cf8f49c05f | [
"MIT"
] | 1 | 2021-05-19T02:55:10.000Z | 2021-05-19T02:55:10.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
VERSION = "unknown"
class SubscriptionClientConfiguration(Configuration):
    """Configuration for SubscriptionClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        **kwargs: Any
    ) -> None:
        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        super(SubscriptionClientConfiguration, self).__init__(**kwargs)

        self.credential = credential
        # API version is fixed for this generated client package.
        self.api_version = "2019-11-01"
        # Default ARM scope unless the caller overrides credential_scopes.
        self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
        kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
        self._configure(**kwargs)

    def _configure(
        self,
        **kwargs: Any
    ) -> None:
        # Build the default pipeline policies; any caller-supplied policy of
        # the same name in kwargs takes precedence over the default.
        self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get('authentication_policy')
        # Only create the bearer-token policy when a credential exists and
        # the caller did not inject an authentication policy of their own.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| 47.590164 | 134 | 0.696521 |
0c8d334a8bb7b6db17b9554061854afb250f7a74 | 2,335 | py | Python | ASIS_Exps/network/ASIS_utils.py | Fawkes7/mmdetection3d | 7d5a339c699a73b3c8d65de5e48c7407f8640b23 | [
"Apache-2.0"
] | 1 | 2020-11-20T23:10:34.000Z | 2020-11-20T23:10:34.000Z | ASIS_Exps/network/ASIS_utils.py | Fawkes7/mmdetection3d | 7d5a339c699a73b3c8d65de5e48c7407f8640b23 | [
"Apache-2.0"
] | null | null | null | ASIS_Exps/network/ASIS_utils.py | Fawkes7/mmdetection3d | 7d5a339c699a73b3c8d65de5e48c7407f8640b23 | [
"Apache-2.0"
] | null | null | null | import torch
def pairwise_distance(fm):
    """Batched pairwise L1 (Manhattan) distance.

    :param fm: feature map tensor of shape [B, C, N].
    :return: tensor of shape [B, N, N] where entry (b, i, j) is the L1
        distance between points i and j of batch b.
    """
    # [B, C, N] -> [B, N, C].  (The original comment said [B,N,5], which was
    # stale: the channel count is C, not 5.)
    fm2 = fm.permute(0, 2, 1)
    # torch.cdist with p=1 computes the same sum of absolute differences as
    # the original per-batch Python loop, but in one batched native kernel.
    return torch.cdist(fm2, fm2, p=1)  # [B,N,N]
def knn_thre(adj_matrix, k=20, thre=0.5):
    """k-nearest-neighbour indices with a distance threshold.

    Neighbours whose distance exceeds `thre` are replaced by the point's own
    index, i.e. out-of-range points fall back to themselves.

    :param adj_matrix: [B, N, N] pairwise distances.
    :param k: number of neighbours to select.
    :param thre: maximum allowed neighbour distance.
    :return: [B, N, k] tensor of neighbour indices.
    """
    B, N = adj_matrix.size(0), adj_matrix.size(1)
    # topk of the negated distances == k smallest distances.
    neg_adj = - 1.0 * adj_matrix
    vals, nn_idx = torch.topk(neg_adj, k=k, dim=-1)
    # Per-row "self" indices used as the fallback neighbour, tiled to [N, k].
    to_add = torch.arange(start=0, end=N).view(-1, 1)
    to_add = to_add.repeat(1, k)
    final_nn_idx = []
    for i in range(B):
        idx_vals = vals[i]
        idx_nn_idx = nn_idx[i]
        # vals are negated distances, so vals < -thre  <=>  distance > thre.
        # BUG FIX: was `torch.tensor(idx_vals < -1.0 * thre, dtype=...)`,
        # which copies an existing tensor through the constructor and emits a
        # UserWarning; `.to()` performs the same int cast without the copy.
        mask = (idx_vals < -1.0 * thre).to(torch.int32)
        idx_to_add = to_add * mask
        # Keep the neighbour where mask == 0, substitute self where mask == 1.
        idx_nn_idx = idx_nn_idx * (1 - mask) + idx_to_add
        final_nn_idx.append(idx_nn_idx)
    return torch.stack(final_nn_idx)  # [B,N,K]
def get_local_feature(fms_sem, nn_idx, k=20):
    """Gather the features of each point's neighbours.

    :param fms_sem: [B, C, N] feature map.
    :param nn_idx: [B, N, K] neighbour indices (per batch, in 0..N-1).
    :param k: unused; kept for interface compatibility.
    :return: [B, N, K, C] tensor of the neighbours' features.
    """
    num_batches, num_channels, num_points = fms_sem.size()
    # Flatten to [B*N, C] and shift each batch's indices so they address
    # that batch's own slice of the flattened tensor.
    per_point = fms_sem.permute(0, 2, 1).contiguous().view(-1, num_channels)
    batch_offset = (torch.arange(start=0, end=num_batches) * num_points).view(-1, 1, 1)
    return per_point[(nn_idx + batch_offset).long()]
def unsorted_segment_sum(data, segment_ids, num_segments):
    """
    Computes the sum along segments of a tensor. Analogous to tf.unsorted_segment_sum.

    :param data: A tensor whose segments are to be summed.
    :param segment_ids: The segment indices tensor.
    :param num_segments: The number of segments.
    :return: A tensor of same data type as the data argument.
    """
    assert all(dim in data.shape for dim in segment_ids.shape), "segment_ids.shape should be a prefix of data.shape"

    if len(segment_ids.shape) == 1:
        # Broadcast the 1-D ids across the trailing dimensions of `data`
        # so every element knows its target segment.
        trailing = torch.prod(torch.tensor(data.shape[1:])).long()
        segment_ids = segment_ids.repeat_interleave(trailing).view(
            segment_ids.shape[0], *data.shape[1:])

    assert data.shape == segment_ids.shape, "data.shape and segment_ids.shape should be equal"

    # Accumulate in float (scatter_add target is a float zeros tensor),
    # then cast the result back to the input dtype.
    out_shape = [num_segments] + list(data.shape[1:])
    summed = torch.zeros(*out_shape).scatter_add(0, segment_ids, data.float())
    return summed.type(data.dtype)
afd2505cf2c45d3c5536ea3d2bf73fa8566c9b79 | 6,767 | py | Python | app/pylibs/win32/Cryptodome/Hash/MD4.py | skylex77/PokeMapGT | dbef69236fd8ec492d00bd067b430686451cc102 | [
"MIT"
] | 2,557 | 2016-07-19T22:20:45.000Z | 2022-01-25T10:53:35.000Z | app/pylibs/win32/Cryptodome/Hash/MD4.py | skylex77/PokeMapGT | dbef69236fd8ec492d00bd067b430686451cc102 | [
"MIT"
] | 1,360 | 2016-07-20T02:06:42.000Z | 2021-07-27T12:46:40.000Z | app/pylibs/win32/Cryptodome/Hash/MD4.py | skylex77/PokeMapGT | dbef69236fd8ec492d00bd067b430686451cc102 | [
"MIT"
] | 607 | 2016-07-20T03:34:04.000Z | 2022-01-05T14:57:09.000Z | # ===================================================================
#
# Copyright (c) 2014, Legrandin <helderijs@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
"""
MD4 is specified in RFC1320_ and produces the 128 bit digest of a message.
>>> from Cryptodome.Hash import MD4
>>>
>>> h = MD4.new()
>>> h.update(b'Hello')
>>> print h.hexdigest()
MD4 stand for Message Digest version 4, and it was invented by Rivest in 1990.
This algorithm is insecure. Do not use it for new designs.
.. _RFC1320: http://tools.ietf.org/html/rfc1320
"""
from Cryptodome.Util.py3compat import bord
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
expect_byte_string)
_raw_md4_lib = load_pycryptodome_raw_lib(
"Cryptodome.Hash._MD4",
"""
int md4_init(void **shaState);
int md4_destroy(void *shaState);
int md4_update(void *hs,
const uint8_t *buf,
size_t len);
int md4_digest(const void *shaState,
uint8_t digest[20]);
int md4_copy(const void *src, void *dst);
""")
class MD4Hash(object):
    """Class that implements an MD4 hash.

    MD4 is cryptographically broken; this class exists only for legacy
    protocols. Do not use it for new designs.
    """

    #: The size of the resulting hash in bytes.
    digest_size = 16
    #: The internal block size of the hash algorithm in bytes.
    block_size = 64
    #: ASN.1 Object ID
    oid = "1.2.840.113549.2.4"

    def __init__(self, data=None):
        """Create a fresh MD4 state, optionally consuming `data` right away."""
        state = VoidPointer()
        result = _raw_md4_lib.md4_init(state.address_of())
        if result:
            raise ValueError("Error %d while instantiating MD4"
                             % result)
        # SmartPointer ties the native state's lifetime to this object so
        # md4_destroy runs on garbage collection (no native-memory leak).
        self._state = SmartPointer(state.get(),
                                   _raw_md4_lib.md4_destroy)
        if data:
            self.update(data)

    def update(self, data):
        """Continue hashing of a message by consuming the next chunk of data.

        Repeated calls are equivalent to a single call with the concatenation
        of all the arguments. In other words:

           >>> m.update(a); m.update(b)

        is equivalent to:

           >>> m.update(a+b)

        :Parameters:
          data : byte string
            The next chunk of the message being hashed.
        """
        expect_byte_string(data)
        result = _raw_md4_lib.md4_update(self._state.get(),
                                         data,
                                         c_size_t(len(data)))
        if result:
            # Bug fix: the original message said "instantiating" (copy-paste
            # from __init__), misreporting which operation failed.
            raise ValueError("Error %d while hashing data with MD4"
                             % result)

    def digest(self):
        """Return the **binary** (non-printable) digest of the message that
        has been hashed so far.

        This method does not change the state of the hash object.
        You can continue updating the object after calling this function.

        :Return: A byte string of `digest_size` bytes. It may contain non-ASCII
         characters, including null bytes.
        """
        bfr = create_string_buffer(self.digest_size)
        result = _raw_md4_lib.md4_digest(self._state.get(),
                                         bfr)
        if result:
            # Bug fix: the original message said "instantiating" (copy-paste
            # from __init__), misreporting which operation failed.
            raise ValueError("Error %d while computing MD4 digest"
                             % result)
        return get_raw_buffer(bfr)

    def hexdigest(self):
        """Return the **printable** digest of the message that has been
        hashed so far.

        This method does not change the state of the hash object.

        :Return: A string of 2* `digest_size` characters. It contains only
         hexadecimal ASCII digits.
        """
        return "".join(["%02x" % bord(x) for x in self.digest()])

    def copy(self):
        """Return a copy ("clone") of the hash object.

        The copy will have the same internal state as the original hash
        object.
        This can be used to efficiently compute the digests of strings that
        share a common initial substring.

        :Return: A hash object of the same type
        """
        clone = MD4Hash()
        result = _raw_md4_lib.md4_copy(self._state.get(),
                                       clone._state.get())
        if result:
            raise ValueError("Error %d while copying MD4" % result)
        return clone

    def new(self, data=None):
        """Return a fresh `MD4Hash`, optionally seeded with `data`."""
        return MD4Hash(data)
def new(data=None):
    """Return a fresh instance of the hash object.

    :Parameters:
        data : byte string
            The very first chunk of the message to hash.
            It is equivalent to an early call to `MD4Hash.update()`.
            Optional.

    :Return: A `MD4Hash` object
    """
    # Construct the hash directly.  The original `MD4Hash().new(data)`
    # allocated (and immediately discarded) an extra native hash state.
    return MD4Hash(data)

#: The size of the resulting hash in bytes.
digest_size = MD4Hash.digest_size

#: The internal block size of the hash algorithm in bytes.
block_size = MD4Hash.block_size
| 36.187166 | 80 | 0.572041 |
d02dffdec38794244126b03e1d4880023e911cb2 | 1,094 | py | Python | src/project_2/src/inverse.py | anurag-b/project_2_cmsc498F | 736626e0144cf45ebeddd341cfcb4fcdac679c3f | [
"MIT"
] | null | null | null | src/project_2/src/inverse.py | anurag-b/project_2_cmsc498F | 736626e0144cf45ebeddd341cfcb4fcdac679c3f | [
"MIT"
] | null | null | null | src/project_2/src/inverse.py | anurag-b/project_2_cmsc498F | 736626e0144cf45ebeddd341cfcb4fcdac679c3f | [
"MIT"
] | null | null | null | #!/usr/bin/python
from project_2.kinematics import *
import argparse
# Build the CLI: start pose (x0, y0, theta0), goal pose (x1, y1, theta1)
# and robot parameters (axle length, wheel radius, speed).
parser = argparse.ArgumentParser(description='Inverse Kinematics Tester')
parser.add_argument('-x0', default=0.0, type=float)
parser.add_argument('-y0', default=0.0, type=float)
parser.add_argument('-theta0', default=0.0, type=float)
parser.add_argument('-x1', default=0.0, type=float)
parser.add_argument('-y1', default=0.0, type=float)
parser.add_argument('-theta1', default=0.0, type=float)
parser.add_argument('-axle', default=1.0, type=float)
parser.add_argument('-wheelr', default=1.0, type=float)
parser.add_argument('-speed', default=1.0, type=float)
args = parser.parse_args()

# Poses are (x, y, heading) tuples; the robot description is
# (axle length, wheel radius, speed) — units per project_2.kinematics.
pose0 = args.x0, args.y0, args.theta0
pose1 = args.x1, args.y1, args.theta1
robot = args.axle, args.wheelr, args.speed

# Compute the action plan that should drive the robot from pose0 to pose1.
plan = inverse(pose0, pose1, robot)
print "Plan: "
print plan
print

# Replay the plan through the forward model, printing each intermediate
# pose; the final pose should coincide with the requested goal pose.
p = pose0
for a in plan:
    print "Pose: (%3.2f %3.2f %3.2f)"%p, "Action: (%3.2f %3.2f %3.2f)"%a
    p = forward(p, a, robot)
print "Final pose: (%3.2f %3.2f %3.2f)"%p
print "Goal pose : (%3.2f %3.2f %3.2f)"%pose1
765a77b374386b0921a62bd2e539b867d7d4babc | 123 | py | Python | locations/admin.py | Cortador-de-Grama-Autonomo/Software-Api-Gateway | 0e8273c874a3f3821479e54e602d6e74cfe3d8fe | [
"MIT"
] | null | null | null | locations/admin.py | Cortador-de-Grama-Autonomo/Software-Api-Gateway | 0e8273c874a3f3821479e54e602d6e74cfe3d8fe | [
"MIT"
] | 1 | 2021-09-08T02:13:05.000Z | 2021-09-08T02:13:05.000Z | locations/admin.py | Cortador-de-Grama-Autonomo/Software-Api-Gateway | 0e8273c874a3f3821479e54e602d6e74cfe3d8fe | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Locations
# Expose the Locations model in the Django admin site with the default
# ModelAdmin options.
admin.site.register(Locations)
ddc14c6287991c811190f149687eeb6cb68ca241 | 1,399 | py | Python | MVPA/task002/create_evds.py | psychoinformatics-de/studyforrest-paper-auditorydecoding | afdbf544b462b45e8981e085177f23404946c5a1 | [
"MIT"
] | 1 | 2018-02-15T19:34:17.000Z | 2018-02-15T19:34:17.000Z | MVPA/task002/create_evds.py | psychoinformatics-de/studyforrest-paper-auditorydecoding | afdbf544b462b45e8981e085177f23404946c5a1 | [
"MIT"
] | null | null | null | MVPA/task002/create_evds.py | psychoinformatics-de/studyforrest-paper-auditorydecoding | afdbf544b462b45e8981e085177f23404946c5a1 | [
"MIT"
] | null | null | null | from mvpa2.suite import eventrelated_dataset, zscore
import numpy as np
def fx(dataset, behav_file, motion_file, polynomial_order,
       run_number):
    """Fit a run-wise HRF GLM and return z-scored beta estimates.

    Args:
        dataset: PyMVPA time-series dataset for a single run.
        behav_file: CSV of stimulation events (columns include run_volume,
            genre, run, stim).
        motion_file: text file of 6 motion parameters per volume, used as
            nuisance regressors.
        polynomial_order: order of the polynomial drift model.
        run_number: run index stored into the result's sample attributes.

    Returns:
        PyMVPA dataset of per-condition HRF parameter estimates, z-scored
        voxel-wise across all samples.
    """
    print("events -> %s" % behav_file)
    print("nuisance -> %s" % motion_file)
    tsds = dataset
    behav_txt = np.recfromcsv(behav_file, delimiter=',')
    # Onsets are volume indices times the TR — assumes TR = 2.0 s and a
    # fixed 6 s stimulus duration (TODO confirm against the acquisition).
    events = [dict(
        onset=float(event['run_volume']) * 2.0,
        duration=6.0,
        targets=event['genre'],
        chunks=int(event['run']),
        stim=event['stim'])
        for event in behav_txt]
    motion = np.loadtxt(motion_file)
    add_reg_names = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz']
    # One beta per (genre, run) condition; motion parameters and polynomial
    # drift enter as nuisance regressors; AR(1) noise model for the fit.
    hrf_estimates = eventrelated_dataset(
        tsds,
        events,
        model='hrf',
        time_attr='time_coords',
        condition_attr=(('targets', 'chunks')),
        design_kwargs=dict(
            drift_model='polynomial',
            drift_order=polynomial_order,
            hrf_model='canonical with derivative',
            add_regs=motion,
            add_reg_names=add_reg_names),
        glmfit_kwargs=dict(model='ar1'))
    #hrf_estimates.sa['subj'] = [subject] * len(hrf_estimates)
    hrf_estimates.sa['run'] = [run_number] * len(hrf_estimates)
    # zscore voxelwise
    # XXX `hrf_estimates` has no chunks! hence zscoring is not performed run-wise!
    zscore(hrf_estimates)
    return hrf_estimates
| 32.534884 | 82 | 0.601858 |
f9236ffcff942246710809a87e8df118cbdedd2b | 104,577 | py | Python | tensor2tensor/models/transformer.py | sciforce/tensor2tensor | 47e6209685727c4121b6b66e8662dffc7e0afb99 | [
"Apache-2.0"
] | 1 | 2019-07-26T01:57:23.000Z | 2019-07-26T01:57:23.000Z | tensor2tensor/models/transformer.py | sciforce/tensor2tensor | 47e6209685727c4121b6b66e8662dffc7e0afb99 | [
"Apache-2.0"
] | null | null | null | tensor2tensor/models/transformer.py | sciforce/tensor2tensor | 47e6209685727c4121b6b66e8662dffc7e0afb99 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Transformer model from "Attention Is All You Need".
The Transformer model consists of an encoder and a decoder. Both are stacks
of self-attention layers followed by feed-forward layers. This model yields
good results on a number of problems, especially in NLP and machine translation.
See "Attention Is All You Need" (https://arxiv.org/abs/1706.03762) for the full
description of the model and the results obtained with its early version.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range # pylint: disable=redefined-builtin
import re
from tensor2tensor.data_generators import librispeech
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.layers import modalities
from tensor2tensor.layers import transformer_layers
from tensor2tensor.layers import transformer_memory
from tensor2tensor.utils import beam_search
from tensor2tensor.utils import expert_utils
from tensor2tensor.utils import mlperf_log
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import inplace_ops
from tensorflow.python.util import nest
# pylint: enable=g-direct-tensorflow-import
# Alias some commonly reused layers, here and elsewhere.
# Kept as module-level names so subclasses and external modules can refer to
# e.g. `transformer.transformer_encoder` without importing transformer_layers.
transformer_prepare_encoder = transformer_layers.transformer_prepare_encoder
transformer_encoder = transformer_layers.transformer_encoder
transformer_ffn_layer = transformer_layers.transformer_ffn_layer
def transformer_encode(encoder_function, inputs, target_space, hparams,
                       attention_weights=None, features=None, losses=None,
                       prepare_encoder_fn=None, **kwargs):
  """Encode transformer inputs.

  Args:
    encoder_function: the encoder function
    inputs: Transformer inputs [batch_size, input_length, 1, hidden_dim] which
      will be flattened along the two spatial dimensions.
    target_space: scalar, target space ID.
    hparams: hyperparameters for model.
    attention_weights: weight to store attention to.
    features: optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.
    losses: optional list onto which to append extra training losses
    prepare_encoder_fn: optional, alternative to transformer_prepare_encoder.
    **kwargs: additional arguments to pass to encoder_function

  Returns:
    Tuple of:
        encoder_output: Encoder representation.
            [batch_size, input_length, hidden_dim]
        encoder_decoder_attention_bias: Bias and mask weights for
            encoder-decoder attention. [batch_size, input_length]
  """
  # Collapse [batch, length, 1, hidden] to [batch, length, hidden].
  inputs = common_layers.flatten4d3d(inputs)

  if not prepare_encoder_fn:
    prepare_encoder_fn = transformer_prepare_encoder
  encoder_input, self_attention_bias, encoder_decoder_attention_bias = (
      prepare_encoder_fn(
          inputs, target_space, hparams, features=features))

  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
      value=hparams.layer_prepostprocess_dropout,
      hparams=hparams)

  # Input dropout: keep_prob = 1 - layer_prepostprocess_dropout.
  encoder_input = tf.nn.dropout(encoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)

  attn_bias_for_padding = None
  # Otherwise the encoder will just use encoder_self_attention_bias.
  if hparams.unidirectional_encoder:
    attn_bias_for_padding = encoder_decoder_attention_bias

  encoder_output = encoder_function(
      encoder_input,
      self_attention_bias,
      hparams,
      nonpadding=features_to_nonpadding(features, "inputs"),
      save_weights_to=attention_weights,
      make_image_summary=not common_layers.is_xla_compiled(),
      losses=losses,
      attn_bias_for_padding=attn_bias_for_padding,
      **kwargs)

  return encoder_output, encoder_decoder_attention_bias
def transformer_decode(decoder_function,
                       decoder_input,
                       encoder_output,
                       encoder_decoder_attention_bias,
                       decoder_self_attention_bias,
                       hparams,
                       attention_weights=None,
                       cache=None,
                       decode_loop_step=None,
                       nonpadding=None,
                       losses=None,
                       **kwargs):
  """Decode Transformer outputs from encoder representation.

  Args:
    decoder_function: the decoder function
    decoder_input: inputs to bottom of the model. [batch_size, decoder_length,
      hidden_dim]
    encoder_output: Encoder representation. [batch_size, input_length,
      hidden_dim]
    encoder_decoder_attention_bias: Bias and mask weights for encoder-decoder
      attention. [batch_size, input_length]
    decoder_self_attention_bias: Bias and mask weights for decoder
      self-attention. [batch_size, decoder_length]
    hparams: hyperparameters for model.
    attention_weights: weight to store attention to.
    cache: dict, containing tensors which are the results of previous
      attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop. Only used
      for inference on TPU.
    nonpadding: optional Tensor with shape [batch_size, decoder_length]
    losses: optional list onto which to append extra training losses
    **kwargs: additional arguments to pass to decoder_function

  Returns:
    Final decoder representation. [batch_size, decoder_length, hidden_dim]
  """
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_LAYER_POSTPROCESS_DROPOUT,
      value=hparams.layer_prepostprocess_dropout,
      hparams=hparams)
  # Input dropout: keep_prob = 1 - layer_prepostprocess_dropout.
  decoder_input = tf.nn.dropout(decoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)

  decoder_output = decoder_function(
      decoder_input,
      encoder_output,
      decoder_self_attention_bias,
      encoder_decoder_attention_bias,
      hparams,
      cache=cache,
      decode_loop_step=decode_loop_step,
      nonpadding=nonpadding,
      save_weights_to=attention_weights,
      losses=losses,
      **kwargs)

  if (common_layers.is_xla_compiled() and
      hparams.mode == tf.estimator.ModeKeys.TRAIN):
    # TPU does not react kindly to extra dimensions.
    # TODO(noam): remove this once TPU is more forgiving of extra dims.
    return decoder_output
  else:
    # Expand since t2t expects 4d tensors.
    return tf.expand_dims(decoder_output, axis=2)
@registry.register_model
class Transformer(t2t_model.T2TModel):
"""Attention net. See file docstring."""
  def __init__(self, *args, **kwargs):
    """Create a Transformer; args/kwargs are forwarded to T2TModel."""
    super(Transformer, self).__init__(*args, **kwargs)
    self.attention_weights = {}  # For visualizing attention heads.
    self.recurrent_memory_by_layer = None  # Override to enable recurrent memory
    # Hook points that subclasses may override to swap out the encoder,
    # decoder, cache initialization, or input/target preparation without
    # re-implementing body() or the fast-decoding paths.
    self._encoder_function = transformer_encoder
    self._decoder_function = transformer_decoder
    self._init_cache_fn = _init_transformer_cache
    self._prepare_encoder_fn = transformer_prepare_encoder
    self._prepare_decoder_fn = transformer_prepare_decoder
  def encode(self, inputs, target_space, hparams, features=None, losses=None):
    """Encode transformer inputs, see transformer_encode.

    Thin wrapper that supplies this model's encoder function, attention
    store, and encoder-preparation hook to the module-level helper.
    """
    return transformer_encode(
        self._encoder_function, inputs, target_space, hparams,
        attention_weights=self.attention_weights,
        features=features, losses=losses,
        prepare_encoder_fn=self._prepare_encoder_fn)
  def decode(self,
             decoder_input,
             encoder_output,
             encoder_decoder_attention_bias,
             decoder_self_attention_bias,
             hparams,
             cache=None,
             decode_loop_step=None,
             nonpadding=None,
             losses=None,
             **kwargs):
    """Decode Transformer outputs, see transformer_decode.

    Thin wrapper that supplies this model's decoder function and attention
    store to the module-level helper; all other arguments pass through.
    """
    return transformer_decode(
        self._decoder_function, decoder_input, encoder_output,
        encoder_decoder_attention_bias, decoder_self_attention_bias,
        hparams, attention_weights=self.attention_weights, cache=cache,
        decode_loop_step=decode_loop_step, nonpadding=nonpadding, losses=losses,
        **kwargs)
  def body(self, features):
    """Transformer main model_fn.

    Args:
      features: Map of features to the model. Should contain the following:
          "inputs": Transformer inputs. [batch_size, input_length, 1,
            hidden_dim].
          "targets": Target decoder outputs. [batch_size, decoder_length, 1,
            hidden_dim]
          "target_space_id": A scalar int from data_generators.problem.SpaceID.

    Returns:
      Final decoder representation. [batch_size, decoder_length, hidden_dim]
    """
    hparams = self._hparams

    # Extra training losses appended by encode()/decode() (summed below).
    losses = []

    if self.has_input:
      inputs = features["inputs"]
      target_space = features["target_space_id"]
      encoder_output, encoder_decoder_attention_bias = self.encode(
          inputs, target_space, hparams, features=features, losses=losses)
    else:
      # Decoder-only (language-model style) configuration.
      encoder_output, encoder_decoder_attention_bias = (None, None)

    targets = features["targets"]
    targets_shape = common_layers.shape_list(targets)
    targets = common_layers.flatten4d3d(targets)
    decoder_input, decoder_self_attention_bias = self._prepare_decoder_fn(
        targets, hparams, features=features)

    # Not all subclasses of Transformer support keyword arguments related to
    # recurrent memory, so only pass these arguments if memory is enabled.
    decode_kwargs = {}
    if self.recurrent_memory_by_layer is not None:
      # TODO(kitaev): The chunk_number feature currently has the same shape as
      # "targets", but this is only for the purposes of sharing sharding code.
      # In fact every token within an example must have the same chunk number.
      chunk_number_each_token = tf.squeeze(features["chunk_number"], (-1, -2))
      chunk_number_each_example = chunk_number_each_token[:, 0]
      # Uncomment the code below to verify that tokens within a batch share the
      # same chunk number:
      # with tf.control_dependencies([
      #     tf.assert_equal(chunk_number_each_token,
      #                     chunk_number_each_example[:, None])
      # ]):
      #   chunk_number_each_example = tf.identity(chunk_number_each_example)
      decode_kwargs = dict(
          recurrent_memory_by_layer=self.recurrent_memory_by_layer,
          chunk_number=chunk_number_each_example,
      )
    decoder_output = self.decode(
        decoder_input,
        encoder_output,
        encoder_decoder_attention_bias,
        decoder_self_attention_bias,
        hparams,
        nonpadding=features_to_nonpadding(features, "targets"),
        losses=losses,
        **decode_kwargs
    )
    expected_attentions = features.get("expected_attentions")
    if expected_attentions is not None:
      attention_loss = common_attention.encoder_decoder_attention_loss(
          expected_attentions, self.attention_weights,
          hparams.expected_attention_loss_type,
          hparams.expected_attention_loss_multiplier)
      return decoder_output, {"attention_loss": attention_loss}

    # Restore the 4-D [batch, length, 1, hidden] shape T2T tops expect.
    ret = tf.reshape(decoder_output, targets_shape)
    if losses:
      return ret, {"extra_loss": tf.add_n(losses)}
    else:
      return ret
  def _greedy_infer(self, features, decode_length, use_tpu=False):
    """Fast version of greedy decoding.

    Args:
      features: an map of string to `Tensor`
      decode_length: an integer. How many additional timesteps to decode.
      use_tpu: A bool. Whether to build the inference graph for TPU.

    Returns:
      A dict of decoding results {
          "outputs": integer `Tensor` of decoded ids of shape
              [batch_size, <= decode_length] if beam_size == 1 or
              [batch_size, top_beams, <= decode_length]
          "scores": decoding log probs from the beam search,
              None if using greedy decoding (beam_size=1)
      }

    Raises:
      NotImplementedError: If there are multiple data shards.
    """
    # For real-valued modalities use the slow decode path for now.
    # The fast path caches key/value tensors, which is only implemented for
    # plain dot-product attention.
    if (self._target_modality_is_real or
        self._hparams.self_attention_type != "dot_product"):
      return super(Transformer, self)._greedy_infer(features, decode_length)
    with tf.variable_scope(self.name):
      if use_tpu:
        return self._fast_decode_tpu(features, decode_length)
      return self._fast_decode(features, decode_length)
  def _beam_decode(self,
                   features,
                   decode_length,
                   beam_size,
                   top_beams,
                   alpha,
                   use_tpu=False):
    """Beam search decoding.

    Args:
      features: an map of string to `Tensor`
      decode_length: an integer. How many additional timesteps to decode.
      beam_size: number of beams.
      top_beams: an integer. How many of the beams to return.
      alpha: Float that controls the length penalty. larger the alpha, stronger
        the preference for longer translations.
      use_tpu: A bool, whether to do beam decode on TPU.

    Returns:
      A dict of decoding results {
          "outputs": integer `Tensor` of decoded ids of shape
              [batch_size, <= decode_length] if beam_size == 1 or
              [batch_size, top_beams, <= decode_length]
          "scores": decoding log probs from the beam search,
              None if using greedy decoding (beam_size=1)
      }
    """
    if (self._hparams.self_attention_type not in [
        "dot_product", "dot_product_relative"
    ]):
      # Caching is not guaranteed to work with attention types other than
      # dot_product.
      # TODO(petershaw): Support fast decoding when using relative
      # position representations, i.e. "dot_product_relative" attention.
      return self._beam_decode_slow(features, decode_length, beam_size,
                                    top_beams, alpha, use_tpu)
    with tf.variable_scope(self.name):
      if use_tpu:
        return self._fast_decode_tpu(features, decode_length, beam_size,
                                     top_beams, alpha)
      return self._fast_decode(features, decode_length, beam_size, top_beams,
                               alpha)
  def _fast_decode_tpu(self,
                       features,
                       decode_length,
                       beam_size=1,
                       top_beams=1,
                       alpha=1.0):
    """Fast decoding.

    Implements both greedy and beam search decoding on TPU, uses beam search
    iff beam_size > 1, otherwise beam search related arguments are ignored.

    Args:
      features: A map of string to model features.
      decode_length: An integer, how many additional timesteps to decode.
      beam_size: An integer, number of beams.
      top_beams: An integer, how many of the beams to return.
      alpha: A float that controls the length penalty. Larger the alpha,
        stronger the preference for longer translations.

    Returns:
      A dict of decoding results {
          "outputs": integer `Tensor` of decoded ids of shape
              [batch_size, <= decode_length] if beam_size == 1 or
              [batch_size, top_beams, <= decode_length]
          "scores": decoding log probs from the beam search,
              None if using greedy decoding (beam_size=1)
      }.

    Raises:
      NotImplementedError: If there are multiple data shards.
    """
    if self._num_datashards != 1:
      raise NotImplementedError("Fast decoding only supports a single shard.")
    if "targets_segmentation" in features:
      raise NotImplementedError(
          "Decoding not supported on packed datasets "
          " If you want to decode from a dataset, use the non-packed version"
          " of the dataset when decoding.")
    dp = self._data_parallelism
    hparams = self._hparams
    target_modality = self._problem_hparams.modality["targets"]
    target_vocab_size = self._problem_hparams.vocab_size["targets"]
    if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"):
      target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor

    # With inputs: run the encoder once up front.  Without inputs: treat the
    # provided "inputs"/"targets" ids as a forced decoding prefix
    # (partial_targets) for a decoder-only model.
    if self.has_input:
      inputs = features["inputs"]
      if target_modality == modalities.ModalityType.CLASS_LABEL:
        decode_length = 1
      else:
        decode_length = (
            common_layers.shape_list(inputs)[1] + features.get(
                "decode_length", decode_length))

      # TODO(llion): Clean up this reshaping logic.
      inputs = tf.expand_dims(inputs, axis=1)
      if len(inputs.shape) < 5:
        inputs = tf.expand_dims(inputs, axis=4)
      s = common_layers.shape_list(inputs)
      batch_size = s[0]
      inputs = tf.reshape(inputs, [s[0] * s[1], s[2], s[3], s[4]])
      # _shard_features called to ensure that the variable names match
      inputs = self._shard_features({"inputs": inputs})["inputs"]
      input_modality = self._problem_hparams.modality["inputs"]
      input_vocab_size = self._problem_hparams.vocab_size["inputs"]
      if input_vocab_size is not None and hasattr(hparams, "vocab_divisor"):
        input_vocab_size += (-input_vocab_size) % hparams.vocab_divisor
      modality_name = hparams.name.get(
          "inputs",
          modalities.get_name(input_modality))(hparams, input_vocab_size)
      with tf.variable_scope(modality_name):
        bottom = hparams.bottom.get("inputs",
                                    modalities.get_bottom(input_modality))
        inputs = dp(bottom, inputs, hparams, input_vocab_size)
      with tf.variable_scope("body"):
        encoder_output, encoder_decoder_attention_bias = dp(
            self.encode,
            inputs,
            features["target_space_id"],
            hparams,
            features=features)
      encoder_output = encoder_output[0]
      encoder_decoder_attention_bias = encoder_decoder_attention_bias[0]
      partial_targets = None
    else:
      # The problem has no inputs.
      encoder_output = None
      encoder_decoder_attention_bias = None

      # Prepare partial targets.
      # In either features["inputs"] or features["targets"].
      # We force the outputs to begin with these sequences.
      partial_targets = features.get("inputs")
      if partial_targets is None:
        partial_targets = features["targets"]
      assert partial_targets is not None
      partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2)
      partial_targets = tf.to_int64(partial_targets)
      partial_targets_shape = common_layers.shape_list(partial_targets)
      partial_targets_length = partial_targets_shape[1]
      decode_length = (
          partial_targets_length + features.get("decode_length", decode_length))
      batch_size = partial_targets_shape[0]

    if hparams.pos == "timing":
      positional_encoding = common_attention.get_timing_signal_1d(
          decode_length + 1, hparams.hidden_size)
    elif hparams.pos == "emb":
      positional_encoding = common_attention.add_positional_embedding(
          tf.zeros([1, decode_length + 1, hparams.hidden_size]),
          hparams.max_length, "body/targets_positional_embedding", None)
    else:
      positional_encoding = None

    def preprocess_targets(targets, i):
      """Performs preprocessing steps on the targets to prepare for the decoder.

      This includes:
        - Embedding the ids.
        - Flattening to 3D tensor.
        - Optionally adding timing signals.

      Args:
        targets: A tensor, inputs ids to the decoder. [batch_size, 1].
        i: An integer, Step number of the decoding loop.

      Returns:
        A tensor, processed targets [batch_size, 1, hidden_dim].
      """
      # _shard_features called to ensure that the variable names match
      targets = self._shard_features({"targets": targets})["targets"]
      modality_name = hparams.name.get(
          "targets",
          modalities.get_name(target_modality))(hparams, target_vocab_size)
      with tf.variable_scope(modality_name):
        bottom = hparams.bottom.get(
            "targets", modalities.get_targets_bottom(target_modality))
        targets = dp(bottom, targets, hparams, target_vocab_size)[0]
      targets = common_layers.flatten4d3d(targets)

      # GO embeddings are all zero, this is because transformer_prepare_decoder
      # Shifts the targets along by one for the input which pads with zeros.
      # If the modality already maps GO to the zero embeddings this is not
      # needed.
      targets = tf.cond(
          tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets)

      if positional_encoding is not None:
        positional_encoding_shape = positional_encoding.shape.as_list()
        targets += tf.slice(
            positional_encoding, [0, i, 0],
            [positional_encoding_shape[0], 1, positional_encoding_shape[2]])
      return targets

    decoder_self_attention_bias = (
        common_attention.attention_bias_lower_triangle(decode_length))
    if hparams.proximity_bias:
      decoder_self_attention_bias += common_attention.attention_bias_proximal(
          decode_length)

    def symbols_to_logits_tpu_fn(ids, i, cache):
      """Go from ids to logits for next symbol on TPU.

      Args:
        ids: A tensor, symbol IDs.
        i: An integer, step number of the decoding loop. Only used for inference
          on TPU.
        cache: A dict, containing tensors which are the results of previous
          attentions, used for fast decoding.

      Returns:
        ret: A tensor, computed logits.
        cache: A dict, containing tensors which are the results of previous
            attentions, used for fast decoding.
      """
      ids = ids[:, -1:]
      targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
      targets = preprocess_targets(targets, i)

      bias_shape = decoder_self_attention_bias.shape.as_list()
      bias = tf.slice(decoder_self_attention_bias, [0, 0, i, 0],
                      [bias_shape[0], bias_shape[1], 1, bias_shape[3]])

      with tf.variable_scope("body"):
        body_outputs = dp(
            self.decode,
            targets,
            cache.get("encoder_output"),
            cache.get("encoder_decoder_attention_bias"),
            bias,
            hparams,
            cache,
            i,
            nonpadding=features_to_nonpadding(features, "targets"))

      modality_name = hparams.name.get(
          "targets",
          modalities.get_name(target_modality))(hparams, target_vocab_size)
      with tf.variable_scope(modality_name):
        top = hparams.top.get("targets",
                              modalities.get_top(target_modality))
        logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0]

      ret = tf.squeeze(logits, axis=[1, 2, 3])
      if partial_targets is not None:
        # If the position is within the given partial targets, we alter the
        # logits to always return those values.
        # A faster approach would be to process the partial targets in one
        # iteration in order to fill the corresponding parts of the cache.
        # This would require broader changes, though.
        vocab_size = tf.shape(ret)[1]

        def forced_logits():
          return tf.one_hot(
              tf.tile(
                  tf.slice(partial_targets, [0, i],
                           [partial_targets.shape.as_list()[0], 1]),
                  [beam_size]), vocab_size, 0.0, -1e9)

        ret = tf.cond(
            tf.less(i, partial_targets_length), forced_logits, lambda: ret)
      return ret, cache

    eos_id = self.get_decode_end_id() or beam_search.EOS_ID

    ret = fast_decode_tpu(
        encoder_output=encoder_output,
        encoder_decoder_attention_bias=encoder_decoder_attention_bias,
        symbols_to_logits_fn=symbols_to_logits_tpu_fn,
        hparams=hparams,
        decode_length=decode_length,
        vocab_size=target_vocab_size,
        init_cache_fn=self._init_cache_fn,
        beam_size=beam_size,
        top_beams=top_beams,
        alpha=alpha,
        batch_size=batch_size,
        force_decode_length=self._decode_hparams.force_decode_length,
        eos_id=eos_id)
    # Strip the forced prefix back off the returned ids.
    if partial_targets is not None:
      if beam_size <= 1 or top_beams <= 1:
        ret["outputs"] = ret["outputs"][:, partial_targets_length:]
      else:
        ret["outputs"] = ret["outputs"][:, :, partial_targets_length:]
    return ret
def get_decode_start_id(self):
  """Id of the first decoder input symbol, or None for the default.

  The default (None) makes the transformer feed a vector of zeros as the
  first decoder input. Subclasses that want a different start symbol
  override this; the returned id is used to look up the embedding that
  becomes the decoder's first input.
  """
  return None
def get_decode_end_id(self):
  """Id of the output symbol that terminates decoding, or None for default.

  Subclasses may override this; the returned id is compared against
  generated symbols to decide when decoding is complete.
  """
  return None
def _fast_decode(self,
                 features,
                 decode_length,
                 beam_size=1,
                 top_beams=1,
                 alpha=1.0):
  """Fast decoding.

  Implements both greedy and beam search decoding, uses beam search iff
  beam_size > 1, otherwise beam search related arguments are ignored.

  Args:
    features: a map of string to model features.
    decode_length: an integer. How many additional timesteps to decode.
    beam_size: number of beams.
    top_beams: an integer. How many of the beams to return.
    alpha: Float that controls the length penalty. larger the alpha, stronger
      the preference for longer translations.

  Returns:
    A dict of decoding results {
        "outputs": integer `Tensor` of decoded ids of shape
            [batch_size, <= decode_length] if beam_size == 1 or
            [batch_size, top_beams, <= decode_length]
        "scores": decoding log probs from the beam search,
            None if using greedy decoding (beam_size=1)
    }

  Raises:
    NotImplementedError: If there are multiple data shards.
  """
  if self._num_datashards != 1:
    raise NotImplementedError("Fast decoding only supports a single shard.")
  dp = self._data_parallelism
  hparams = self._hparams
  target_modality = self._problem_hparams.modality["targets"]
  target_vocab_size = self._problem_hparams.vocab_size["targets"]
  if target_vocab_size is not None and hasattr(hparams, "vocab_divisor"):
    # Round the vocab size up to a multiple of vocab_divisor.
    target_vocab_size += (-target_vocab_size) % hparams.vocab_divisor
  if "targets_segmentation" in features:
    raise NotImplementedError(
        "Decoding not supported on packed datasets "
        " If you want to decode from a dataset, use the non-packed version"
        " of the dataset when decoding.")
  if self.has_input:
    inputs = features["inputs"]
    if target_modality == modalities.ModalityType.CLASS_LABEL:
      # Classification only needs a single output step.
      decode_length = 1
    else:
      decode_length = (
          common_layers.shape_list(inputs)[1] + features.get(
              "decode_length", decode_length))

    # TODO(llion): Clean up this reshaping logic.
    inputs = tf.expand_dims(inputs, axis=1)
    if len(inputs.shape) < 5:
      inputs = tf.expand_dims(inputs, axis=4)
    s = common_layers.shape_list(inputs)
    batch_size = s[0]
    inputs = tf.reshape(inputs, [s[0] * s[1], s[2], s[3], s[4]])
    # _shard_features called to ensure that the variable names match
    inputs = self._shard_features({"inputs": inputs})["inputs"]
    input_modality = self._problem_hparams.modality["inputs"]
    input_vocab_size = self._problem_hparams.vocab_size["inputs"]
    if input_vocab_size is not None and hasattr(hparams, "vocab_divisor"):
      input_vocab_size += (-input_vocab_size) % hparams.vocab_divisor
    modality_name = hparams.name.get(
        "inputs",
        modalities.get_name(input_modality))(hparams, input_vocab_size)
    # Embed the inputs under the modality's variable scope so variable
    # names match those created during training.
    with tf.variable_scope(modality_name):
      bottom = hparams.bottom.get("inputs",
                                  modalities.get_bottom(input_modality))
      inputs = dp(bottom, inputs, hparams, input_vocab_size)
    with tf.variable_scope("body"):
      encoder_output, encoder_decoder_attention_bias = dp(
          self.encode,
          inputs,
          features["target_space_id"],
          hparams,
          features=features)
    # dp returns per-shard lists; with a single shard take element 0.
    encoder_output = encoder_output[0]
    encoder_decoder_attention_bias = encoder_decoder_attention_bias[0]
    partial_targets = features.get("partial_targets")
  else:
    # The problem has no inputs.
    encoder_output = None
    encoder_decoder_attention_bias = None

    # Prepare partial targets.
    # In either features["inputs"] or features["targets"].
    # We force the outputs to begin with these sequences.
    partial_targets = features.get("inputs")
    if partial_targets is None:
      partial_targets = features["targets"]
    assert partial_targets is not None

  if partial_targets is not None:
    partial_targets = common_layers.expand_squeeze_to_nd(partial_targets, 2)
    partial_targets = tf.to_int64(partial_targets)
    partial_targets_shape = common_layers.shape_list(partial_targets)
    partial_targets_length = partial_targets_shape[1]
    # Decode the forced prefix plus the requested additional steps.
    decode_length = (
        partial_targets_length + features.get("decode_length", decode_length))
    batch_size = partial_targets_shape[0]

  # Precompute positional signals for all decode steps; the step loop
  # slices out one position at a time.
  if hparams.pos == "timing":
    positional_encoding = common_attention.get_timing_signal_1d(
        decode_length + 1, hparams.hidden_size)
  elif hparams.pos == "emb":
    positional_encoding = common_attention.add_positional_embedding(
        tf.zeros([1, decode_length, hparams.hidden_size]), hparams.max_length,
        "body/targets_positional_embedding", None)
  else:
    positional_encoding = None

  def preprocess_targets(targets, i):
    """Performs preprocessing steps on the targets to prepare for the decoder.

    This includes:
      - Embedding the ids.
      - Flattening to 3D tensor.
      - Optionally adding timing signals.

    Args:
      targets: inputs ids to the decoder. [batch_size, 1]
      i: scalar, Step number of the decoding loop.

    Returns:
      Processed targets [batch_size, 1, hidden_dim]
    """
    # _shard_features called to ensure that the variable names match
    targets = self._shard_features({"targets": targets})["targets"]
    modality_name = hparams.name.get(
        "targets",
        modalities.get_name(target_modality))(hparams, target_vocab_size)
    with tf.variable_scope(modality_name):
      bottom = hparams.bottom.get(
          "targets", modalities.get_targets_bottom(target_modality))
      targets = dp(bottom, targets, hparams, target_vocab_size)[0]
    targets = common_layers.flatten4d3d(targets)

    # GO embeddings are all zero, this is because transformer_prepare_decoder
    # Shifts the targets along by one for the input which pads with zeros.
    # If the modality already maps GO to the zero embeddings this is not
    # needed.
    if not self.get_decode_start_id():
      targets = tf.cond(
          tf.equal(i, 0), lambda: tf.zeros_like(targets), lambda: targets)

    if positional_encoding is not None:
      targets += positional_encoding[:, i:i + 1]
    return targets

  decoder_self_attention_bias = (
      common_attention.attention_bias_lower_triangle(decode_length))
  if hparams.proximity_bias:
    decoder_self_attention_bias += common_attention.attention_bias_proximal(
        decode_length)

  # Create tensors for encoder-decoder attention history
  att_cache = {"attention_history": {}}
  num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
  if encoder_output is not None:
    att_batch_size, enc_seq_length = common_layers.shape_list(
        encoder_output)[0:2]
    for layer in range(num_layers):
      # Start with zero-length history; one step is concatenated per
      # decode iteration in update_decoder_attention_history.
      att_cache["attention_history"]["layer_%d" % layer] = tf.zeros(
          [att_batch_size, hparams.num_heads, 0, enc_seq_length])

  def update_decoder_attention_history(cache):
    """Save attention weights in cache, e.g., for vizualization."""
    # Only keep decoder enc-dec attention weights (skip self-attention
    # and logits entries).
    for k in [x for x in self.attention_weights
              if "decoder" in x and "self" not in x and "logits" not in x]:
      idx = k.find("layer_")
      if idx < 0:
        continue
      # Get layer number from the string name.
      layer_nbr = k[idx + 6:]
      idx = 0
      while idx + 1 < len(layer_nbr) and layer_nbr[:idx + 1].isdigit():
        idx += 1
      layer_nbr = "layer_%d" % int(layer_nbr[:idx])
      if layer_nbr in cache["attention_history"]:
        cache["attention_history"][layer_nbr] = tf.concat(
            [cache["attention_history"][layer_nbr],
             self.attention_weights[k]],
            axis=2)

  def symbols_to_logits_fn(ids, i, cache):
    """Go from ids to logits for next symbol."""
    # Only the most recent symbol is embedded; earlier context lives in
    # the attention cache.
    ids = ids[:, -1:]
    targets = tf.expand_dims(tf.expand_dims(ids, axis=2), axis=3)
    targets = preprocess_targets(targets, i)

    # Slice the causal bias row for step i over positions [0, i].
    bias = decoder_self_attention_bias[:, :, i:i + 1, :i + 1]

    with tf.variable_scope("body"):
      body_outputs = dp(
          self.decode,
          targets,
          cache.get("encoder_output"),
          cache.get("encoder_decoder_attention_bias"),
          bias,
          hparams,
          cache,
          nonpadding=features_to_nonpadding(features, "targets"))

    update_decoder_attention_history(cache)

    modality_name = hparams.name.get(
        "targets",
        modalities.get_name(target_modality))(hparams, target_vocab_size)
    with tf.variable_scope(modality_name):
      top = hparams.top.get("targets", modalities.get_top(target_modality))
      logits = dp(top, body_outputs, None, hparams, target_vocab_size)[0]

    ret = tf.squeeze(logits, axis=[1, 2, 3])
    if partial_targets is not None:
      # If the position is within the given partial targets, we alter the
      # logits to always return those values.
      # A faster approach would be to process the partial targets in one
      # iteration in order to fill the corresponding parts of the cache.
      # This would require broader changes, though.
      vocab_size = tf.shape(ret)[1]

      def forced_logits():
        # One-hot with off_value -1e9 effectively forces the forced
        # symbol to be chosen.
        return tf.one_hot(
            tf.tile(partial_targets[:, i], [beam_size]), vocab_size, 0.0,
            -1e9)

      ret = tf.cond(
          tf.less(i, partial_targets_length), forced_logits, lambda: ret)
    return ret, cache

  sos_id = self.get_decode_start_id() or 0
  eos_id = self.get_decode_end_id() or beam_search.EOS_ID

  ret = fast_decode(
      encoder_output=encoder_output,
      encoder_decoder_attention_bias=encoder_decoder_attention_bias,
      symbols_to_logits_fn=symbols_to_logits_fn,
      hparams=hparams,
      decode_length=decode_length,
      vocab_size=target_vocab_size,
      init_cache_fn=self._init_cache_fn,
      beam_size=beam_size,
      top_beams=top_beams,
      alpha=alpha,
      batch_size=batch_size,
      force_decode_length=self._decode_hparams.force_decode_length,
      sos_id=sos_id,
      eos_id=eos_id,
      cache=att_cache)
  if partial_targets is not None:
    # Strip the forced prefix from the returned outputs.
    if beam_size <= 1 or top_beams <= 1:
      ret["outputs"] = ret["outputs"][:, partial_targets_length:]
    else:
      ret["outputs"] = ret["outputs"][:, :, partial_targets_length:]
  return ret
def _init_transformer_cache(cache, hparams, batch_size, attention_init_length,
                            encoder_output, encoder_decoder_attention_bias,
                            scope_prefix):
  """Create the initial cache for Transformer fast decoding.

  Args:
    cache: dict to extend, or None to create a fresh one.
    hparams: run hyperparameters.
    batch_size: int or scalar Tensor, number of sequences being decoded.
    attention_init_length: initial time dimension of the per-layer k/v
      buffers (0 for incremental decoding, full length for TPU decoding).
    encoder_output: encoder output Tensor, or None for decoder-only models.
    encoder_decoder_attention_bias: bias Tensor for enc-dec attention.
    scope_prefix: str, prefix for decoder layer variable scopes.

  Returns:
    The (possibly newly created) cache dict with per-layer "k"/"v" (and
    optionally "f", "k_encdec", "v_encdec") entries.
  """
  key_channels = hparams.attention_key_channels or hparams.hidden_size
  value_channels = hparams.attention_value_channels or hparams.hidden_size
  num_layers = hparams.num_decoder_layers or hparams.num_hidden_layers
  vars_3d_num_heads = (
      hparams.num_heads if hparams.get("attention_variables_3d") else 0)
  if cache is None:
    cache = {}
  # Per-layer key/value buffers, pre-split into heads.
  cache.update({
      "layer_%d" % layer: {  # pylint: disable=g-complex-comprehension
          "k":
              common_attention.split_heads(
                  tf.zeros([batch_size,
                            attention_init_length,
                            key_channels]), hparams.num_heads),
          "v":
              common_attention.split_heads(
                  tf.zeros([batch_size,
                            attention_init_length,
                            value_channels]), hparams.num_heads),
      } for layer in range(num_layers)
  })

  # If `ffn_layer` is in `["dense_relu_dense" or "conv_hidden_relu"]`, then the
  # cache key "f" won't be used, which means that the` shape of cache["f"]`
  # won't be changed to
  # `[beamsize*batch_size, decode_length, hparams.hidden_size]` and may cause
  # error when applying `nest.map reshape function` on it.
  if hparams.ffn_layer not in ["dense_relu_dense", "conv_hidden_relu"]:
    for layer in range(num_layers):
      cache["layer_%d" % layer]["f"] = tf.zeros(
          [batch_size, 0, hparams.hidden_size])

  if encoder_output is not None:
    # Encoder keys/values do not change across decode steps, so compute
    # them once here inside the exact variable scopes used by the decoder
    # layers (names must match the trained checkpoint).
    for layer in range(num_layers):
      layer_name = "layer_%d" % layer
      with tf.variable_scope(
          "%sdecoder/%s/encdec_attention/multihead_attention" %
          (scope_prefix, layer_name)):
        k_encdec = common_attention.compute_attention_component(
            encoder_output,
            key_channels,
            name="k",
            vars_3d_num_heads=vars_3d_num_heads)
        k_encdec = common_attention.split_heads(k_encdec, hparams.num_heads)
        v_encdec = common_attention.compute_attention_component(
            encoder_output,
            value_channels,
            name="v",
            vars_3d_num_heads=vars_3d_num_heads)
        v_encdec = common_attention.split_heads(v_encdec, hparams.num_heads)
      cache[layer_name]["k_encdec"] = k_encdec
      cache[layer_name]["v_encdec"] = v_encdec

    cache["encoder_output"] = encoder_output
    cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias
  return cache
def fast_decode_tpu(encoder_output,
                    encoder_decoder_attention_bias,
                    symbols_to_logits_fn,
                    hparams,
                    decode_length,
                    vocab_size,
                    init_cache_fn=_init_transformer_cache,
                    beam_size=1,
                    top_beams=1,
                    alpha=1.0,
                    sos_id=0,
                    eos_id=beam_search.EOS_ID,
                    batch_size=None,
                    force_decode_length=False,
                    scope_prefix="body/",
                    use_top_k_with_unique=True):
  """Given encoder output and a symbols to logits function, does fast decoding.

  Implements both greedy and beam search decoding for TPU, uses beam search iff
  beam_size > 1, otherwise beam search related arguments are ignored.

  Args:
    encoder_output: A tensor, output from encoder.
    encoder_decoder_attention_bias: A tensor, bias for use in encoder-decoder
      attention.
    symbols_to_logits_fn: Incremental decoding, function mapping triple `(ids,
      step, cache)` to symbol logits.
    hparams: Run hyperparameters.
    decode_length: An integer, how many additional timesteps to decode.
    vocab_size: Output vocabulary size.
    init_cache_fn: Function that returns the initial cache dict.
    beam_size: An integer, number of beams.
    top_beams: An integer, how many of the beams to return.
    alpha: A float that controls the length penalty. Larger the alpha, stronger
      the preference for longer translations.
    sos_id: Start-of-sequence symbol.
    eos_id: End-of-sequence symbol.
    batch_size: An integer, must be passed if there is no input.
    force_decode_length: A bool, whether to force the full decode length, or if
      False, stop when all beams hit eos_id.
    scope_prefix: str, prefix for decoder layer variable scopes.
    use_top_k_with_unique: bool, whether to use a fast (but decreased precision)
      top_k during beam search.

  Returns:
    A dict of decoding results {
        "outputs": integer `Tensor` of decoded ids of shape
            [batch_size, <= decode_length] if top_beams == 1 or
            [batch_size, top_beams, <= decode_length] otherwise
        "scores": decoding log probs from the beam search,
            None if using greedy decoding (beam_size=1)
    }.

  Raises:
    NotImplementedError: If beam size > 1 with partial targets.
  """
  if encoder_output is not None:
    batch_size = common_layers.shape_list(encoder_output)[0]

  # TPU path pre-allocates full-length (decode_length) cache buffers.
  cache = init_cache_fn(None, hparams, batch_size, decode_length,
                        encoder_output, encoder_decoder_attention_bias,
                        scope_prefix)

  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH,
      value={
          "vocab_size": vocab_size,
          "batch_size": batch_size,
          "beam_size": beam_size,
          "alpha": alpha,
          "max_decode_length": decode_length
      },
      hparams=hparams)
  if beam_size > 1:  # Beam Search
    initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)
    decoded_ids, scores, _ = beam_search.beam_search(
        symbols_to_logits_fn,
        initial_ids,
        beam_size,
        decode_length,
        vocab_size,
        alpha,
        states=cache,
        eos_id=eos_id,
        stop_early=(top_beams == 1),
        use_tpu=True,
        use_top_k_with_unique=use_top_k_with_unique)

    if top_beams == 1:
      # Drop the initial SOS symbol at position 0.
      decoded_ids = decoded_ids[:, 0, 1:]
      scores = scores[:, 0]
    else:
      decoded_ids = decoded_ids[:, :top_beams, 1:]
      scores = scores[:, :top_beams]
  else:  # Greedy

    def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):
      """One step of greedy decoding."""
      logits, cache = symbols_to_logits_fn(next_id, i, cache)
      log_probs = common_layers.log_prob_from_logits(logits)
      temperature = getattr(hparams, "sampling_temp", 0.0)
      keep_top = getattr(hparams, "sampling_keep_top_k", -1)
      if hparams.sampling_method == "argmax":
        temperature = 0.0
      next_id = common_layers.sample_with_temperature(
          logits, temperature, keep_top)

      log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id],
                                  axis=1)
      # Zero out contributions of sequences that already emitted EOS.
      log_prob += tf.gather_nd(
          log_probs, log_prob_indices) * (1 - tf.to_float(hit_eos))
      # Note(thangluong): we purposely update hit_eos after aggregating log_prob
      # There is a subtle detail here that we want to include log_probs up to
      # (and inclusive of) the first eos generated, but not subsequent tokens.
      hit_eos |= tf.equal(next_id, eos_id)

      next_id = tf.expand_dims(next_id, axis=1)
      # TPU-friendly in-place write into a fixed-length buffer: transpose
      # so the time dimension is first, update row i, transpose back.
      decoded_ids = tf.transpose(decoded_ids)
      decoded_ids = inplace_ops.alias_inplace_update(
          decoded_ids, i, tf.squeeze(next_id, axis=1))
      decoded_ids = tf.transpose(decoded_ids)
      return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob

    def is_not_finished(i, hit_eos, *_):
      finished = i >= decode_length
      if not force_decode_length:
        finished |= tf.reduce_all(hit_eos)
      return tf.logical_not(finished)

    decoded_ids = tf.zeros([batch_size, decode_length], dtype=tf.int64)
    hit_eos = tf.fill([batch_size], False)
    next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64)
    initial_log_prob = tf.zeros([batch_size], dtype=tf.float32)

    def compute_cache_shape_invariants(tensor):
      # Shapes are fully static on TPU, so invariants are the exact shapes.
      return tf.TensorShape(tensor.shape.as_list())

    _, _, _, decoded_ids, _, log_prob = tf.while_loop(
        is_not_finished,
        inner_loop, [
            tf.constant(0), hit_eos, next_id, decoded_ids, cache,
            initial_log_prob
        ],
        shape_invariants=[
            tf.TensorShape([]),
            tf.TensorShape([batch_size]),
            tf.TensorShape([batch_size, 1]),
            tf.TensorShape([batch_size, decode_length]),
            nest.map_structure(compute_cache_shape_invariants, cache),
            tf.TensorShape([batch_size]),
        ])
    scores = log_prob

  return {"outputs": decoded_ids, "scores": scores}
def fast_decode(encoder_output,
                encoder_decoder_attention_bias,
                symbols_to_logits_fn,
                hparams,
                decode_length,
                vocab_size,
                init_cache_fn=_init_transformer_cache,
                beam_size=1,
                top_beams=1,
                alpha=1.0,
                sos_id=0,
                eos_id=beam_search.EOS_ID,
                batch_size=None,
                force_decode_length=False,
                scope_prefix="body/",
                cache=None):
  """Given encoder output and a symbols to logits function, does fast decoding.

  Implements both greedy and beam search decoding, uses beam search iff
  beam_size > 1, otherwise beam search related arguments are ignored.

  Args:
    encoder_output: Output from encoder.
    encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
      attention
    symbols_to_logits_fn: Incremental decoding; function mapping triple `(ids,
      step, cache)` to symbol logits.
    hparams: run hyperparameters
    decode_length: an integer. How many additional timesteps to decode.
    vocab_size: Output vocabulary size.
    init_cache_fn: Function that returns the initial cache dict.
    beam_size: number of beams.
    top_beams: an integer. How many of the beams to return.
    alpha: Float that controls the length penalty. larger the alpha, stronger
      the preference for longer translations.
    sos_id: Start-of-sequence symbol in beam search.
    eos_id: End-of-sequence symbol in beam search.
    batch_size: an integer scalar - must be passed if there is no input
    force_decode_length: bool, whether to force the full decode length, or if
      False, stop when all beams hit eos_id.
    scope_prefix: str, prefix for decoder layer variable scopes.
    cache: cache dictionary for additional predictions.

  Returns:
    A dict of decoding results {
        "outputs": integer `Tensor` of decoded ids of shape
            [batch_size, <= decode_length] if top_beams == 1 or
            [batch_size, top_beams, <= decode_length] otherwise
        "scores": decoding log probs from the beam search,
            None if using greedy decoding (beam_size=1)
    }

  Raises:
    NotImplementedError: If beam size > 1 with partial targets.
  """
  if encoder_output is not None:
    batch_size = common_layers.shape_list(encoder_output)[0]

  # Non-TPU path starts with zero-length cache buffers that grow each step.
  cache = init_cache_fn(
      cache=cache,
      hparams=hparams,
      batch_size=batch_size,
      attention_init_length=0,
      encoder_output=encoder_output,
      encoder_decoder_attention_bias=encoder_decoder_attention_bias,
      scope_prefix=scope_prefix)

  if beam_size > 1:  # Beam Search
    initial_ids = sos_id * tf.ones([batch_size], dtype=tf.int32)
    decoded_ids, scores, cache = beam_search.beam_search(
        symbols_to_logits_fn,
        initial_ids,
        beam_size,
        decode_length,
        vocab_size,
        alpha,
        states=cache,
        eos_id=eos_id,
        stop_early=(top_beams == 1))

    if top_beams == 1:
      # Drop the initial SOS symbol at position 0.
      decoded_ids = decoded_ids[:, 0, 1:]
      scores = scores[:, 0]
    else:
      decoded_ids = decoded_ids[:, :top_beams, 1:]
      scores = scores[:, :top_beams]
  else:  # Greedy

    def inner_loop(i, hit_eos, next_id, decoded_ids, cache, log_prob):
      """One step of greedy decoding."""
      logits, cache = symbols_to_logits_fn(next_id, i, cache)
      log_probs = common_layers.log_prob_from_logits(logits)
      temperature = getattr(hparams, "sampling_temp", 0.0)
      keep_top = getattr(hparams, "sampling_keep_top_k", -1)
      if hparams.sampling_method == "argmax":
        temperature = 0.0
      next_id = common_layers.sample_with_temperature(
          logits, temperature, keep_top)
      log_prob_indices = tf.stack([tf.range(tf.to_int64(batch_size)), next_id],
                                  axis=1)
      # Zero out contributions of sequences that already emitted EOS.
      log_prob += tf.gather_nd(
          log_probs, log_prob_indices) * (1 - tf.to_float(hit_eos))
      # Note(thangluong): we purposely update hit_eos after aggregating log_prob
      # There is a subtle detail here that we want to include log_probs up to
      # (and inclusive of) the first eos generated, but not subsequent tokens.
      hit_eos |= tf.equal(next_id, eos_id)

      next_id = tf.expand_dims(next_id, axis=1)
      # Grow the output by one symbol (dynamic length; CPU/GPU path).
      decoded_ids = tf.concat([decoded_ids, next_id], axis=1)
      return i + 1, hit_eos, next_id, decoded_ids, cache, log_prob

    def is_not_finished(i, hit_eos, *_):
      finished = i >= decode_length
      if not force_decode_length:
        finished |= tf.reduce_all(hit_eos)
      return tf.logical_not(finished)

    decoded_ids = tf.zeros([batch_size, 0], dtype=tf.int64)
    hit_eos = tf.fill([batch_size], False)
    next_id = sos_id * tf.ones([batch_size, 1], dtype=tf.int64)
    initial_log_prob = tf.zeros([batch_size], dtype=tf.float32)
    _, _, _, decoded_ids, cache, log_prob = tf.while_loop(
        is_not_finished,
        inner_loop, [
            tf.constant(0), hit_eos, next_id, decoded_ids, cache,
            initial_log_prob
        ],
        shape_invariants=[
            tf.TensorShape([]),
            tf.TensorShape([None]),
            tf.TensorShape([None, None]),
            tf.TensorShape([None, None]),
            nest.map_structure(beam_search.get_state_shape_invariants, cache),
            tf.TensorShape([None]),
        ])
    scores = log_prob

  return {"outputs": decoded_ids, "scores": scores, "cache": cache}
@registry.register_model
class TransformerScorer(Transformer):
  """Transformer model, but only scores in PREDICT mode.

  Checkpoints between Transformer and TransformerScorer are interchangeable.
  """

  def __init__(self, *args, **kwargs):
    super(TransformerScorer, self).__init__(*args, **kwargs)
    # Use the same scope names as Transformer so checkpoints stay
    # interchangeable (see class docstring).
    self._name = "transformer"
    self._base_name = "transformer"

  def infer(self,
            features=None,
            decode_length=50,
            beam_size=1,
            top_beams=1,
            alpha=0.0,
            use_tpu=False):
    """Returns the targets and their log probabilities."""
    # Scoring never generates new symbols, so all decoding arguments are
    # unused.
    del decode_length, beam_size, top_beams, alpha, use_tpu
    assert features is not None

    # Run the model
    self.hparams.force_full_predict = True
    with tf.variable_scope(self.name):
      logits, _ = self.model_fn(features)
    assert len(logits.shape) == 5  # [batch, time, 1, 1, vocab]
    logits = tf.squeeze(logits, [2, 3])

    # Compute the log probabilities
    log_probs = common_layers.log_prob_from_logits(logits)

    targets = features["targets"]
    assert len(targets.shape) == 4  # [batch, time, 1, 1]
    targets = tf.squeeze(targets, [2, 3])

    # Slice out the log_probs of the targets
    log_probs = common_layers.index_last_dim_with_indices(log_probs, targets)

    # Sum over time to get the log_prob of the sequence
    scores = tf.reduce_sum(log_probs, axis=1)

    return {"outputs": targets, "scores": scores}
@registry.register_model
class TransformerEncoder(t2t_model.T2TModel):
  """Transformer, encoder only."""

  def body(self, features):
    """Runs the transformer encoder over features["inputs"].

    Args:
      features: a map of string to model features; must contain "inputs"
        and "target_space_id".

    Returns:
      Encoder output with a singleton axis 2 restored (4-D tensor), to
      match the layout T2TModel expects from `body`.
    """
    hparams = self._hparams
    inputs = features["inputs"]
    target_space = features["target_space_id"]

    inputs = common_layers.flatten4d3d(inputs)

    (encoder_input, encoder_self_attention_bias, _) = (
        transformer_prepare_encoder(inputs, target_space, hparams))

    encoder_input = tf.nn.dropout(encoder_input,
                                  1.0 - hparams.layer_prepostprocess_dropout)
    encoder_output = transformer_encoder(
        encoder_input,
        encoder_self_attention_bias,
        hparams,
        nonpadding=features_to_nonpadding(features, "inputs"))
    encoder_output = tf.expand_dims(encoder_output, 2)

    return encoder_output
@registry.register_model
class TransformerRegressor(TransformerEncoder):
  """Transformer inheriting from Encoder, for the regression problem.

  Final result is a tensor that has a shape of (?, 1, 1, 1).
  """

  def top(self, body_output, features):
    """Reduces body_output to a single scalar value per example."""
    with tf.variable_scope("reg_top_ffn"):
      # Mean-pool over the time/space axes, then project to one unit.
      pooled = tf.reduce_mean(body_output, axis=[1, 2], keepdims=True)
      return tf.layers.dense(pooled, 1, name="model_top")
def features_to_nonpadding(features, inputs_or_targets="inputs"):
  """Returns a float nonpadding mask from segmentation features, if any.

  Args:
    features: a map of string to model features, or None/empty.
    inputs_or_targets: which side's segmentation to look up ("inputs" or
      "targets").

  Returns:
    A float Tensor that is 1.0 at nonpadding positions, or None when no
    segmentation feature is available.
  """
  segmentation_key = inputs_or_targets + "_segmentation"
  if not features or segmentation_key not in features:
    return None
  return tf.minimum(tf.to_float(features[segmentation_key]), 1.0)
def transformer_prepare_decoder(targets, hparams, features=None, pad=None):
  """Prepare one shard of the model for the decoder.

  Args:
    targets: a Tensor.
    hparams: run hyperparameters
    features: optionally pass the entire features dictionary as well. This is
      needed now for "packed" datasets.
    pad: vector to use for padding when shifting targets right

  Returns:
    decoder_input: a Tensor, bottom of decoder stack
    decoder_self_attention_bias: a bias tensor for use in decoder self-attention
  """
  if hparams.causal_decoder_self_attention:
    # Causal attention.
    if hparams.prepend_mode == "prepend_inputs_full_attention":
      decoder_self_attention_bias = (
          common_attention.attention_bias_prepend_inputs_full_attention(
              common_attention.embedding_to_padding(targets)))
    else:
      decoder_self_attention_bias = (
          common_attention.attention_bias_lower_triangle(
              common_layers.shape_list(targets)[1]))
  else:
    # Full attention.
    decoder_padding = common_attention.embedding_to_padding(targets)
    decoder_self_attention_bias = (
        common_attention.attention_bias_ignore_padding(decoder_padding))

  if features and "targets_segmentation" in features:
    # "Packed" dataset - keep the examples from seeing each other.
    targets_segmentation = features["targets_segmentation"]
    targets_position = features["targets_position"]
    decoder_self_attention_bias += common_attention.attention_bias_same_segment(
        targets_segmentation, targets_segmentation)
  else:
    targets_position = None
  if hparams.proximity_bias:
    decoder_self_attention_bias += common_attention.attention_bias_proximal(
        common_layers.shape_list(targets)[1])
  # Shift targets right by one so that position i sees only outputs < i.
  decoder_input = common_layers.shift_right_3d(targets, pad)
  if hparams.pos == "timing":
    if targets_position is not None:
      # Packed datasets carry explicit per-example positions.
      decoder_input = common_attention.add_timing_signal_1d_given_position(
          decoder_input, targets_position)
    else:
      decoder_input = common_attention.add_timing_signal_1d(decoder_input)
  elif hparams.pos == "emb":
    decoder_input = common_attention.add_positional_embedding(
        decoder_input, hparams.max_length, "targets_positional_embedding",
        targets_position)

  if hparams.activation_dtype == "bfloat16":
    decoder_self_attention_bias = tf.cast(decoder_self_attention_bias,
                                          tf.bfloat16)
  return (decoder_input, decoder_self_attention_bias)
def transformer_decoder_layer(decoder_input,
                              decoder_self_attention_bias,
                              layer_idx,
                              hparams,
                              encoder_output=None,
                              encoder_decoder_attention_bias=None,
                              cache=None,
                              decode_loop_step=None,
                              nonpadding=None,
                              save_weights_to=None,
                              make_image_summary=False,
                              losses=None,
                              layer_collection=None,
                              recurrent_memory_by_layer=None,
                              chunk_number=None):
  """A single transformer decoder layer.

  Applies, in order: self-attention, (optional) encoder-decoder attention
  when encoder_output is given, and a feed-forward sublayer; each sublayer
  is wrapped in layer_preprocess/layer_postprocess.

  Args:
    decoder_input: a Tensor, input to this layer.
    decoder_self_attention_bias: bias Tensor for self-attention.
    layer_idx: int, index of this layer (selects cache entry and scope name).
    hparams: hyperparameters for the model.
    encoder_output: optional encoder output Tensor (None for decoder-only).
    encoder_decoder_attention_bias: bias Tensor for enc-dec attention.
    cache: dict of per-layer decoding caches, or None.
    decode_loop_step: optional int, decoding step (TPU inference only).
    nonpadding: optional nonpadding mask for the ffn conv padding.
    save_weights_to: optional dict for capturing attention weights.
    make_image_summary: whether to emit an attention image summary.
    losses: optional list onto which to append extra training losses.
    layer_collection: optional KFAC LayerCollection.
    recurrent_memory_by_layer: optional dict mapping layer names to
      recurrent memory instances.
    chunk_number: optional int Tensor used by the recurrent memory.

  Returns:
    The layer output Tensor.
  """
  x = decoder_input
  layer = layer_idx
  layer_name = "layer_%d" % layer
  layer_cache = cache[layer_name] if cache is not None else None

  attention_dropout_broadcast_dims = (
      common_layers.comma_separated_string_to_integer_list(
          getattr(hparams, "attention_dropout_broadcast_dims", "")))

  if recurrent_memory_by_layer is not None:
    recurrent_memory = recurrent_memory_by_layer[layer_name]
  else:
    recurrent_memory = None

  # Area attention is only enabled for the first num_area_layers layers.
  if layer < hparams.get("num_area_layers", 0):
    max_area_width = hparams.get("max_area_width", 1)
    max_area_height = hparams.get("max_area_height", 1)
    memory_height = hparams.get("max_area_height", 1)
  else:
    max_area_width = 1
    max_area_height = 1
    memory_height = 1
  with tf.variable_scope(layer_name):
    with tf.variable_scope("self_attention"):
      y = common_attention.multihead_attention(
          common_layers.layer_preprocess(
              x, hparams, layer_collection=layer_collection),
          None,
          decoder_self_attention_bias,
          hparams.attention_key_channels or hparams.hidden_size,
          hparams.attention_value_channels or hparams.hidden_size,
          hparams.hidden_size,
          hparams.num_heads,
          hparams.attention_dropout,
          attention_type=hparams.self_attention_type,
          max_relative_position=hparams.max_relative_position,
          heads_share_relative_embedding=(
              hparams.heads_share_relative_embedding),
          add_relative_to_values=hparams.add_relative_to_values,
          save_weights_to=save_weights_to,
          cache=layer_cache,
          make_image_summary=make_image_summary,
          dropout_broadcast_dims=attention_dropout_broadcast_dims,
          max_length=hparams.get("max_length"),
          decode_loop_step=decode_loop_step,
          vars_3d=hparams.get("attention_variables_3d"),
          activation_dtype=hparams.get("activation_dtype", "float32"),
          weight_dtype=hparams.get("weight_dtype", "float32"),
          layer_collection=layer_collection,
          recurrent_memory=recurrent_memory,
          chunk_number=chunk_number,
          hard_attention_k=hparams.get("hard_attention_k", 0),
          gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0),
          max_area_width=max_area_width,
          max_area_height=max_area_height,
          memory_height=memory_height,
          area_key_mode=hparams.get("area_key_mode", "none"),
          area_value_mode=hparams.get("area_value_mode", "none"),
          training=(hparams.get(
              "mode",
              tf.estimator.ModeKeys.TRAIN) == tf.estimator.ModeKeys.TRAIN))
      x = common_layers.layer_postprocess(x, y, hparams)
    if encoder_output is not None:
      with tf.variable_scope("encdec_attention"):
        y = common_attention.multihead_attention(
            common_layers.layer_preprocess(
                x, hparams, layer_collection=layer_collection),
            encoder_output,
            encoder_decoder_attention_bias,
            hparams.attention_key_channels or hparams.hidden_size,
            hparams.attention_value_channels or hparams.hidden_size,
            hparams.hidden_size,
            hparams.num_heads,
            hparams.attention_dropout,
            max_relative_position=hparams.max_relative_position,
            heads_share_relative_embedding=(
                hparams.heads_share_relative_embedding),
            add_relative_to_values=hparams.add_relative_to_values,
            save_weights_to=save_weights_to,
            cache=layer_cache,
            make_image_summary=make_image_summary,
            dropout_broadcast_dims=attention_dropout_broadcast_dims,
            max_length=hparams.get("max_length"),
            vars_3d=hparams.get("attention_variables_3d"),
            activation_dtype=hparams.get("activation_dtype", "float32"),
            weight_dtype=hparams.get("weight_dtype", "float32"),
            layer_collection=layer_collection,
            hard_attention_k=hparams.get("hard_attention_k", 0),
            gumbel_noise_weight=hparams.get("gumbel_noise_weight", 0.0),
            max_area_width=max_area_width,
            max_area_height=max_area_height,
            memory_height=memory_height,
            area_key_mode=hparams.get("area_key_mode", "none"),
            area_value_mode=hparams.get("area_value_mode", "none"),
            training=(hparams.get(
                "mode",
                tf.estimator.ModeKeys.TRAIN) == tf.estimator.ModeKeys.TRAIN))
        x = common_layers.layer_postprocess(x, y, hparams)
    with tf.variable_scope("ffn"):
      y = transformer_ffn_layer(
          common_layers.layer_preprocess(
              x, hparams, layer_collection=layer_collection),
          hparams,
          conv_padding="LEFT",
          nonpadding_mask=nonpadding,
          losses=losses,
          cache=layer_cache,
          decode_loop_step=decode_loop_step,
          layer_collection=layer_collection)
      x = common_layers.layer_postprocess(x, y, hparams)
    return x
def transformer_decoder(decoder_input,
                        encoder_output,
                        decoder_self_attention_bias,
                        encoder_decoder_attention_bias,
                        hparams,
                        cache=None,
                        decode_loop_step=None,
                        name="decoder",
                        nonpadding=None,
                        save_weights_to=None,
                        make_image_summary=True,
                        losses=None,
                        layer_collection=None,
                        recurrent_memory_by_layer=None,
                        chunk_number=None):
  """A stack of transformer layers.
  Args:
    decoder_input: a Tensor
    encoder_output: a Tensor
    decoder_self_attention_bias: bias Tensor for self-attention (see
      common_attention.attention_bias())
    encoder_decoder_attention_bias: bias Tensor for encoder-decoder attention
      (see common_attention.attention_bias())
    hparams: hyperparameters for model
    cache: dict, containing tensors which are the results of previous
      attentions, used for fast decoding.
    decode_loop_step: An integer, step number of the decoding loop. Only used
      for inference on TPU.
    name: a string
    nonpadding: optional Tensor with shape [batch_size, encoder_length]
      indicating what positions are not padding.  This is used to mask out
      padding in convolutional layers.  We generally only need this mask for
      "packed" datasets, because for ordinary datasets, no padding is ever
      followed by nonpadding.
    save_weights_to: an optional dictionary to capture attention weights for
      visualization; the weights tensor will be appended there under a string
      key created from the variable scope (including name).
    make_image_summary: Whether to make an attention image summary.
    losses: optional list onto which to append extra training losses
    layer_collection: A tensorflow_kfac.LayerCollection. Only used by the KFAC
      optimizer. Default is None.
    recurrent_memory_by_layer: Optional dict, mapping layer names to instances
      of transformer_memory.RecurrentMemory. Default is None.
    chunk_number: an optional integer Tensor with shape [batch] used to operate
      the recurrent_memory.
  Returns:
    y: a Tensors
  """
  x = decoder_input
  # MLPerf compliance logging of the model hyperparameters; no effect on the
  # computation itself.
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
      value=hparams.num_decoder_layers or hparams.num_hidden_layers,
      hparams=hparams)
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,
      value=hparams.attention_dropout,
      hparams=hparams)
  mlperf_log.transformer_print(
      key=mlperf_log.MODEL_HP_ATTENTION_DENSE,
      value={
          "use_bias": "false",
          "num_heads": hparams.num_heads,
          "hidden_size": hparams.hidden_size
      },
      hparams=hparams)
  with tf.variable_scope(name):
    # Stack the decoder layers; num_decoder_layers == 0 falls back on
    # num_hidden_layers (see transformer_base_v1).
    for layer_idx in range(hparams.num_decoder_layers or
                           hparams.num_hidden_layers):
      x = transformer_decoder_layer(
          x,
          decoder_self_attention_bias,
          layer_idx,
          hparams,
          encoder_decoder_attention_bias=encoder_decoder_attention_bias,
          encoder_output=encoder_output,
          cache=cache,
          decode_loop_step=decode_loop_step,
          nonpadding=nonpadding,
          save_weights_to=save_weights_to,
          make_image_summary=make_image_summary,
          losses=losses,
          layer_collection=layer_collection,
          recurrent_memory_by_layer=recurrent_memory_by_layer,
          chunk_number=chunk_number
          )
    # if normalization is done in layer_preprocess, then it should also be done
    # on the output, since the output can grow very large, being the sum of
    # a whole stack of unnormalized layer outputs.
    mlperf_log.transformer_print(
        key=mlperf_log.MODEL_HP_NORM,
        value={"hidden_size": hparams.hidden_size})
    return common_layers.layer_preprocess(
        x, hparams, layer_collection=layer_collection)
@registry.register_model
class TransformerMemory(Transformer):
  """Transformer language model with memory across chunks."""
  # TODO(kitaev): consider overriding set_mode to swap out recurrent memory when
  # switching between training and evaluation.
  def __init__(self, *args, **kwargs):
    """Builds one recurrent memory instance per decoder layer.

    The memories are keyed by layer name ("layer_0", "layer_1", ...) and
    later threaded through decoding via recurrent_memory_by_layer.
    """
    super(TransformerMemory, self).__init__(*args, **kwargs)
    hparams = self._hparams
    self.recurrent_memory_by_layer = {}
    for layer in range(hparams.num_decoder_layers or hparams.num_hidden_layers):
      layer_name = "layer_%d" % layer
      if hparams.memory_type == "neural_memory":
        # batch_size here is in sequences, not tokens; assumes
        # hparams.batch_size is a token count — TODO confirm with callers.
        memory = transformer_memory.TransformerMemory(
            batch_size=int(hparams.batch_size / hparams.max_length),
            key_depth=hparams.hidden_size,
            val_depth=hparams.hidden_size,
            memory_size=hparams.split_targets_chunk_length,
            sharpen_factor=1.,
            name=layer_name + "/recurrent_memory")
      elif hparams.memory_type == "transformer_xl":
        # Transformer-XL style cache of recent tokens.
        memory = transformer_memory.RecentTokensMemory(
            layer_name + "/recurrent_memory", hparams)
      else:
        raise ValueError("Unsupported memory type: %s" % hparams.memory_type)
      self.recurrent_memory_by_layer[layer_name] = memory
  @property
  def has_input(self):
    """False when hparams marks the model unconditional; else defer to base."""
    if hasattr(self._hparams, "unconditional") and self._hparams.unconditional:
      return False
    return super(TransformerMemory, self).has_input
  def _beam_decode(self, features, decode_length, beam_size, top_beams, alpha,
                   use_tpu=False):
    """Overrides beam search: only the slow version works with memory."""
    return self._beam_decode_slow(features, decode_length, beam_size,
                                  top_beams, alpha, use_tpu)
@registry.register_hparams
def transformer_base_v1():
  """Original (v1) Transformer base hyperparameters."""
  hp = common_hparams.basic_params1()
  hp.norm_type = "layer"
  hp.hidden_size = 512
  hp.batch_size = 4096
  hp.max_length = 256
  hp.clip_grad_norm = 0.  # 0 disables gradient clipping
  hp.optimizer_adam_epsilon = 1e-9
  hp.learning_rate_schedule = "legacy"
  hp.learning_rate_decay_scheme = "noam"
  hp.learning_rate = 0.1
  hp.learning_rate_warmup_steps = 4000
  hp.initializer_gain = 1.0
  hp.num_hidden_layers = 6
  hp.initializer = "uniform_unit_scaling"
  hp.weight_decay = 0.0
  hp.optimizer_adam_beta1 = 0.9
  hp.optimizer_adam_beta2 = 0.98
  hp.num_sampled_classes = 0
  hp.label_smoothing = 0.1
  hp.shared_embedding_and_softmax_weights = True
  hp.symbol_modality_num_shards = 16
  # Hyperparameters not present in basic_params1 are registered here.
  hp.add_hparam("filter_size", 2048)
  # Layer counts: zero means "fall back on hp.num_hidden_layers".
  hp.add_hparam("num_encoder_layers", 0)
  hp.add_hparam("num_decoder_layers", 0)
  # Attention-related settings.
  hp.add_hparam("num_heads", 8)
  hp.add_hparam("attention_key_channels", 0)
  hp.add_hparam("attention_value_channels", 0)
  hp.add_hparam("ffn_layer", "dense_relu_dense")
  hp.add_hparam("parameter_attention_key_channels", 0)
  hp.add_hparam("parameter_attention_value_channels", 0)
  # Every hyperparameter whose name ends in "dropout" is automatically
  # forced to 0.0 outside of training mode.
  hp.add_hparam("attention_dropout", 0.0)
  hp.add_hparam("attention_dropout_broadcast_dims", "")
  hp.add_hparam("relu_dropout", 0.0)
  hp.add_hparam("relu_dropout_broadcast_dims", "")
  hp.add_hparam("pos", "timing")  # position encoding: timing, none
  hp.add_hparam("nbr_decoder_problems", 1)
  hp.add_hparam("proximity_bias", False)
  hp.add_hparam("causal_decoder_self_attention", True)
  hp.add_hparam("use_pad_remover", True)
  hp.add_hparam("self_attention_type", "dot_product")
  hp.add_hparam("conv_first_kernel", 3)
  hp.add_hparam("attention_variables_3d", False)
  hp.add_hparam("use_target_space_embedding", True)
  # Only consulted when ffn_layer == "local_moe_tpu".
  hp.add_hparam("moe_overhead_train", 1.0)
  hp.add_hparam("moe_overhead_eval", 2.0)
  hp.moe_num_experts = 16
  hp.moe_loss_coef = 1e-3
  # When non-empty, replaces the problem name in metrics.py, which lets
  # tooling compare experiments side by side under one metric name.
  hp.add_hparam("overload_eval_metric_name", "")
  # Masked attention makes the transformer encoder unidirectional.
  hp.add_hparam("unidirectional_encoder", False)
  # Hard (top-k) attention.
  hp.add_hparam("hard_attention_k", 0)
  hp.add_hparam("gumbel_noise_weight", 0.0)
  return hp
@registry.register_hparams
def transformer_base_v2():
  """v2 base: pre-norm layers, dropout 0.1, longer warmup."""
  hp = transformer_base_v1()
  hp.layer_preprocess_sequence = "n"
  hp.layer_postprocess_sequence = "da"
  hp.layer_prepostprocess_dropout = 0.1
  hp.attention_dropout = 0.1
  hp.relu_dropout = 0.1
  hp.learning_rate_warmup_steps = 8000
  hp.learning_rate = 0.2
  return hp
@registry.register_hparams
def transformer_base_vq_ada_32ex_packed():
  """Hyperparameters for lm1b packed following tpu params, with VQ gating."""
  hp = transformer_base_v2()
  expert_utils.update_hparams_for_vq_gating(hp)
  hp.moe_num_experts = 32
  hp.gating_type = "vq"
  # Effective batch of 16 sequences, since each sequence is 256 tokens long.
  hp.batch_size = 5072
  hp.ffn_layer = "local_moe"
  hp.shared_embedding_and_softmax_weights = False
  hp.learning_rate_warmup_steps = 10000
  # languagemodel_lm1b32k_packed: one epoch is 27200 steps at batch size 128.
  hp.learning_rate_decay_steps = 27200
  hp.num_heads = 4
  hp.num_blocks = 1
  hp.moe_k = 1
  hp.num_decoder_layers = 6
  hp.label_smoothing = 0.
  hp.layer_prepostprocess_dropout = 0.1
  hp.layer_postprocess_sequence = "dan"
  hp.layer_preprocess_sequence = "none"
  hp.weight_decay = 1e-06
  hp.attention_dropout = 0.1
  hp.optimizer = "Adafactor"
  hp.learning_rate_schedule = "linear_warmup*rsqrt_decay*linear_decay"
  hp.activation_dtype = "float32"
  hp.learning_rate = 0.1
  hp.learning_rate_constant = 1.0
  return hp
@registry.register_hparams
def transformer_topk_16_packed():
  """VQ-ada packed base with top-k gating over 16 experts."""
  hp = transformer_base_vq_ada_32ex_packed()
  hp.gating_type = "topk"
  hp.moe_num_experts = 16
  hp.moe_k = 2
  return hp
@registry.register_hparams
def transformer_base_vq1_16_nb1_packed_nda_b01_scales():
  """VQ packed base: 16 experts, k=1, beta 0.1, scales, n/da processing."""
  hp = transformer_base_vq_ada_32ex_packed()
  hp.use_scales = int(True)
  hp.moe_num_experts = 16
  hp.moe_k = 1
  hp.beta = 0.1
  hp.layer_preprocess_sequence = "n"
  hp.layer_postprocess_sequence = "da"
  hp.ema = False
  return hp
@registry.register_hparams
def transformer_base_vq1_16_nb1_packed_dan_b01_scales():
  """VQ packed base: 16 experts, k=1, beta 0.1, scales (dan postprocess)."""
  hp = transformer_base_vq_ada_32ex_packed()
  hp.use_scales = int(True)
  hp.moe_num_experts = 16
  hp.moe_k = 1
  hp.beta = 0.1
  hp.ema = False
  return hp
@registry.register_hparams
def transformer_base_vq1_16_nb1_packed_nda_b01_scales_dialog():
  """Dialog variant of the VQ nda_b01_scales config (longer sequences)."""
  hp = transformer_base_vq1_16_nb1_packed_nda_b01_scales()
  hp.batch_size = 2048
  hp.max_length = 1024
  hp.filter_size = 3072
  return hp
@registry.register_hparams
def transformer_ada_lmpackedbase():
  """VQ-ada packed base with a plain dense-relu-dense FFN (no MoE)."""
  hp = transformer_base_vq_ada_32ex_packed()
  hp.ffn_layer = "dense_relu_dense"
  return hp
@registry.register_hparams
def transformer_ada_lmpackedbase_dialog():
  """Dialog variant of ada packed base: longer sequences, dense FFN."""
  hp = transformer_base_vq_ada_32ex_packed()
  hp.max_length = 1024
  hp.ffn_layer = "dense_relu_dense"
  hp.batch_size = 4096
  return hp
@registry.register_hparams
def transformer_ada_lmpackedbase_relative():
  """Same as transformer_ada_lmpackedbase.

  NOTE(review): despite the name, this does not enable relative attention;
  it is byte-identical to transformer_ada_lmpackedbase.
  """
  hp = transformer_base_vq_ada_32ex_packed()
  hp.ffn_layer = "dense_relu_dense"
  return hp
@registry.register_hparams
def transformer_base_v3():
  """Base parameters for Transformer model (v3).

  Update parameters here, then occasionally cut a versioned set, e.g.
  transformer_base_v2.
  """
  hp = transformer_base_v2()
  hp.optimizer_adam_beta2 = 0.997
  # Newer learning-rate schedule syntax; equivalent to the v2 schedule.
  hp.learning_rate_schedule = (
      "constant*linear_warmup*rsqrt_decay*rsqrt_hidden_size")
  hp.learning_rate_constant = 2.0
  return hp
@registry.register_hparams
def transformer_base():
  """Base parameters for Transformer model (alias of the latest version)."""
  return transformer_base_v3()
@registry.register_hparams
def transformer_big():
  """HParams for transformer big model on WMT."""
  hp = transformer_base()
  hp.hidden_size = 1024
  hp.filter_size = 4096
  # Batch size is halved from 4096 so the model fits on a 12 GB GPU
  # (e.g. an NVIDIA TITAN V).
  hp.batch_size = 2048
  hp.num_heads = 16
  hp.layer_prepostprocess_dropout = 0.3
  return hp
@registry.register_hparams
def transformer_tall():
  """Hparams for transformer on LM for pretraining/finetuning/mixing."""
  hp = transformer_base()
  hp.batch_size = 2048
  hp.hidden_size = 768
  hp.filter_size = 3072
  hp.num_hidden_layers = 12
  hp.num_heads = 12
  hp.label_smoothing = 0.0
  hp.max_length = 1024
  hp.eval_drop_long_sequences = True
  hp.multiproblem_mixing_schedule = "pretrain"
  hp.multiproblem_vocab_size = 65536
  hp.clip_grad_norm = 1.0
  return hp
@registry.register_hparams
def transformer_tall_finetune_tied():
  """Tied means fine-tune CNN/DM summarization as LM."""
  hp = transformer_tall()
  hp.multiproblem_max_input_length = 750
  hp.multiproblem_max_target_length = 100
  hp.multiproblem_schedule_max_examples = 0
  hp.learning_rate_schedule = "linear_warmup*constant*cosdecay"
  hp.learning_rate_constant = 5e-5
  hp.learning_rate_warmup_steps = 100
  # Train for learning_rate_decay_steps or fewer.
  hp.learning_rate_decay_steps = 80000
  hp.multiproblem_target_eval_only = True
  hp.multiproblem_reweight_label_loss = True
  hp.multiproblem_label_weight = 1.0
  hp.optimizer = "true_adam"
  return hp
@registry.register_hparams
def transformer_tall_train_tied():
  """Tied means train CNN/DM summarization as LM."""
  hp = transformer_tall()
  hp.multiproblem_max_input_length = 750
  hp.multiproblem_max_target_length = 100
  hp.multiproblem_schedule_max_examples = 0
  hp.learning_rate_schedule = "linear_warmup*constant*cosdecay"
  hp.learning_rate_constant = 2e-4
  hp.learning_rate_warmup_steps = 8000
  # Train for learning_rate_decay_steps or fewer.
  hp.learning_rate_decay_steps = 150000
  hp.multiproblem_target_eval_only = True
  hp.multiproblem_reweight_label_loss = True
  hp.multiproblem_label_weight = 1.0
  hp.optimizer = "true_adam"
  return hp
@registry.register_hparams
def transformer_tall_finetune_uniencdec():
  """Fine-tune CNN/DM with a unidirectional encoder and decoder."""
  hp = transformer_tall()
  hp.max_input_seq_length = 750
  hp.max_target_seq_length = 100
  hp.optimizer = "true_adam"
  hp.learning_rate_schedule = "linear_warmup*constant*cosdecay"
  hp.learning_rate_decay_steps = 80000
  hp.learning_rate_constant = 5e-5
  hp.learning_rate_warmup_steps = 100
  hp.unidirectional_encoder = True
  return hp
@registry.register_hparams
def transformer_tall_train_uniencdec():
  """Train CNN/DM with a unidirectional encoder and decoder."""
  hp = transformer_tall()
  hp.max_input_seq_length = 750
  hp.max_target_seq_length = 100
  hp.optimizer = "true_adam"
  hp.learning_rate_schedule = "linear_warmup*constant*cosdecay"
  hp.learning_rate_decay_steps = 150000
  hp.learning_rate_constant = 2e-4
  hp.unidirectional_encoder = True
  return hp
@registry.register_hparams
def transformer_tall_finetune_textclass():
  """Hparams for transformer on LM for finetuning on text class problems."""
  hp = transformer_tall()
  hp.learning_rate_constant = 6.25e-5
  hp.learning_rate_schedule = "linear_warmup*constant*linear_decay"
  hp.multiproblem_schedule_max_examples = 0
  hp.multiproblem_target_eval_only = True
  hp.learning_rate_warmup_steps = 50
  # Train for learning_rate_decay_steps or fewer.
  hp.learning_rate_decay_steps = 25000
  hp.multiproblem_reweight_label_loss = True
  hp.multiproblem_label_weight = 0.95
  return hp
@registry.register_hparams
def transformer_tall_pretrain_lm():
  """Hparams for transformer on LM pretraining (with 64k vocab)."""
  hp = transformer_tall()
  hp.learning_rate_constant = 2e-4
  hp.learning_rate_schedule = "linear_warmup*constant*cosdecay"
  hp.optimizer = "adam_w"
  hp.optimizer_adam_beta1 = 0.9
  hp.optimizer_adam_beta2 = 0.999
  hp.optimizer_adam_epsilon = 1e-8
  # When pretraining only the LM, make max examples much larger than the
  # number of train steps (an order of magnitude or more).
  hp.multiproblem_schedule_max_examples = 5e8
  # Train for learning_rate_decay_steps or fewer.
  hp.learning_rate_decay_steps = 5000000
  return hp
@registry.register_hparams
def transformer_tall_pretrain_lm_tpu_adafactor():
  """Hparams for transformer on LM pretraining (with 64k vocab) on TPU."""
  hp = transformer_tall_pretrain_lm()
  update_hparams_for_tpu(hp)
  hp.max_length = 1024
  # Multi-problem on TPU needs batch size in absolute examples.
  hp.batch_size = 8
  hp.multiproblem_vocab_size = 2**16
  return hp
@registry.register_hparams
def transformer_tall_pretrain_lm_tpu_adafactor_large():
  """Hparams for transformer on LM pretraining on TPU, large model."""
  hp = transformer_tall_pretrain_lm_tpu_adafactor()
  hp.hidden_size = 1024
  hp.num_heads = 16
  hp.filter_size = 32768  # max fitting in 16G memory is 49152, batch 2
  hp.batch_size = 4
  hp.multiproblem_mixing_schedule = "constant"
  # Task order: lm/en-de/en-fr/en-ro/de-en/fr-en/ro-en/cnndm/mnli/squad.
  hp.multiproblem_per_task_threshold = "320,80,160,1,80,160,2,20,10,5"
  return hp
@registry.register_hparams
def transformer_tall_pretrain_lm_tpu():
  """Hparams for transformer on LM pretraining on TPU with AdamW."""
  hp = transformer_tall_pretrain_lm_tpu_adafactor()
  # update_hparams_for_tpu resets the optimizer, so re-set it here.
  hp.learning_rate_constant = 2e-4
  hp.learning_rate_schedule = ("linear_warmup * constant * cosdecay")
  hp.optimizer = "adam_w"
  return hp
@registry.register_hparams
def transformer_tall_big():
  """Hparams for transformer on LM+MNLI (18 layers)."""
  hp = transformer_tall()
  hp.num_hidden_layers = 18
  return hp
@registry.register_hparams
def transformer_big_single_gpu():
  """HParams for transformer big model for single GPU."""
  hp = transformer_big()
  hp.layer_prepostprocess_dropout = 0.1
  hp.learning_rate_warmup_steps = 16000
  return hp
@registry.register_hparams
def transformer_base_single_gpu():
  """HParams for transformer base model for single GPU."""
  hp = transformer_base()
  hp.batch_size = 1024
  hp.learning_rate_schedule = "constant*linear_warmup*rsqrt_decay"
  hp.learning_rate_constant = 0.1
  hp.learning_rate_warmup_steps = 16000
  return hp
@registry.register_hparams
def transformer_base_multistep8():
  """HParams for simulating 8 GPUs with MultistepAdam optimizer."""
  hp = transformer_base()
  hp.optimizer = "multistep_adam"
  hp.optimizer_multistep_accumulate_steps = 8
  return hp
@registry.register_hparams
def transformer_parsing_base():
  """HParams for parsing on WSJ only."""
  hp = transformer_base()
  hp.attention_dropout = 0.2
  hp.layer_prepostprocess_dropout = 0.2
  hp.max_length = 512
  hp.learning_rate_warmup_steps = 16000
  hp.hidden_size = 1024
  hp.learning_rate = 0.05
  hp.shared_embedding_and_softmax_weights = False
  return hp
@registry.register_hparams
def transformer_parsing_big():
  """HParams for parsing on WSJ semi-supervised."""
  hp = transformer_big()
  hp.max_length = 512
  hp.shared_source_target_embedding = False
  hp.learning_rate_warmup_steps = 4000
  hp.layer_prepostprocess_dropout = 0.1
  hp.batch_size = 2048
  hp.learning_rate = 0.05
  return hp
@registry.register_hparams
def transformer_parsing_ice():
  """HParams for parsing and tagging Icelandic text."""
  hp = transformer_base_single_gpu()
  hp.batch_size = 4096
  hp.shared_embedding_and_softmax_weights = False
  return hp
@registry.register_hparams
def transformer_tiny():
  """Tiny transformer: 2 layers, hidden 128, for quick experiments."""
  hp = transformer_base()
  hp.num_hidden_layers = 2
  hp.hidden_size = 128
  hp.filter_size = 512
  hp.num_heads = 4
  return hp
@registry.register_hparams
def transformer_test():
  """Minimal transformer for unit tests (2 layers, hidden 16)."""
  hp = transformer_base()
  hp.num_hidden_layers = 2
  hp.hidden_size = 16
  hp.filter_size = 8
  hp.num_heads = 2
  return hp
@registry.register_hparams
def transformer_small():
  """Small transformer: 2 layers, hidden 256."""
  hp = transformer_base()
  hp.num_hidden_layers = 2
  hp.hidden_size = 256
  hp.filter_size = 1024
  hp.num_heads = 4
  return hp
@registry.register_hparams
def transformer_l2():
  """Transformer base with 2 hidden layers."""
  hp = transformer_base()
  hp.num_hidden_layers = 2
  return hp
@registry.register_hparams
def transformer_l4():
  """Transformer base with 4 hidden layers."""
  hp = transformer_base()
  hp.num_hidden_layers = 4
  return hp
@registry.register_hparams
def transformer_l8():
  """Transformer base with 8 hidden layers."""
  hp = transformer_base()
  hp.num_hidden_layers = 8
  return hp
@registry.register_hparams
def transformer_l10():
  """Transformer base with 10 hidden layers."""
  hp = transformer_base()
  hp.num_hidden_layers = 10
  return hp
@registry.register_hparams
def transformer_h1():
  """Transformer base with a single attention head."""
  hp = transformer_base()
  hp.num_heads = 1
  return hp
@registry.register_hparams
def transformer_h4():
  """Transformer base with 4 attention heads."""
  hp = transformer_base()
  hp.num_heads = 4
  return hp
@registry.register_hparams
def transformer_h16():
  """Transformer base with 16 attention heads."""
  hp = transformer_base()
  hp.num_heads = 16
  return hp
@registry.register_hparams
def transformer_h32():
  """Transformer base with 32 attention heads."""
  hp = transformer_base()
  hp.num_heads = 32
  return hp
@registry.register_hparams
def transformer_k128():
  """Transformer base with 128 attention key channels."""
  hp = transformer_base()
  hp.attention_key_channels = 128
  return hp
@registry.register_hparams
def transformer_k256():
  """Transformer base with 256 attention key channels."""
  hp = transformer_base()
  hp.attention_key_channels = 256
  return hp
@registry.register_hparams
def transformer_ff1024():
  """Transformer base with FFN filter size 1024."""
  hp = transformer_base()
  hp.filter_size = 1024
  return hp
@registry.register_hparams
def transformer_ff4096():
  """Transformer base with FFN filter size 4096."""
  hp = transformer_base()
  hp.filter_size = 4096
  return hp
@registry.register_hparams
def transformer_dr0():
  """Transformer base without layer pre/postprocess dropout."""
  hp = transformer_base()
  hp.layer_prepostprocess_dropout = 0.0
  return hp
@registry.register_hparams
def transformer_dr2():
  """Transformer base with layer pre/postprocess dropout 0.2."""
  hp = transformer_base()
  hp.layer_prepostprocess_dropout = 0.2
  return hp
@registry.register_hparams
def transformer_ls0():
  """Transformer base without label smoothing."""
  hp = transformer_base()
  hp.label_smoothing = 0.0
  return hp
@registry.register_hparams
def transformer_ls2():
  """Transformer base with label smoothing 0.2."""
  hp = transformer_base()
  hp.label_smoothing = 0.2
  return hp
@registry.register_hparams
def transformer_hs256():
  """Transformer base with hidden size 256."""
  hp = transformer_base()
  hp.hidden_size = 256
  return hp
@registry.register_hparams
def transformer_hs1024():
  """Transformer base with hidden size 1024."""
  hp = transformer_base()
  hp.hidden_size = 1024
  return hp
@registry.register_hparams
def transformer_big_dr1():
  """Big-model dimensions on top of base, with dropout 0.1."""
  hp = transformer_base()
  hp.hidden_size = 1024
  hp.filter_size = 4096
  hp.num_heads = 16
  hp.layer_prepostprocess_dropout = 0.1
  return hp
@registry.register_hparams
def transformer_big_enfr():
  """Big model for En-Fr: unshared embeddings, wider FFN."""
  hp = transformer_big_dr1()
  hp.shared_embedding_and_softmax_weights = False
  hp.filter_size = 8192
  hp.layer_prepostprocess_dropout = 0.1
  return hp
@registry.register_hparams
def transformer_big_enfr_tpu():
  """En-Fr big model adapted for TPU."""
  hp = transformer_big_enfr()
  # Fewer heads keep matrix dimensions at least 128, which is faster on TPU.
  hp.num_heads = 8
  update_hparams_for_tpu(hp)
  return hp
@registry.register_hparams
def transformer_big_dr2():
  """Big dr1 model with dropout raised to 0.2."""
  hp = transformer_big_dr1()
  hp.layer_prepostprocess_dropout = 0.2
  return hp
@registry.register_hparams
def transformer_parameter_attention_a():
  """Base model with a parameter-attention FFN layer (variant a)."""
  hp = transformer_base()
  hp.ffn_layer = "parameter_attention"
  hp.filter_size = 1536
  return hp
@registry.register_hparams
def transformer_parameter_attention_b():
  """Base model with a parameter-attention FFN layer (variant b)."""
  hp = transformer_base()
  hp.ffn_layer = "parameter_attention"
  hp.filter_size = 512
  hp.parameter_attention_key_channels = 1024
  hp.parameter_attention_value_channels = 1024
  hp.num_heads = 16
  return hp
@registry.register_hparams
def transformer_prepend_v2():
  """v2 base with inputs prepended to targets under masked attention."""
  hp = transformer_base_v2()
  hp.prepend_mode = "prepend_inputs_masked_attention"
  hp.max_length = 0
  return hp
@registry.register_hparams
def transformer_prepend_v1():
  """v1 base with inputs prepended to targets under masked attention."""
  hp = transformer_base_v1()
  hp.prepend_mode = "prepend_inputs_masked_attention"
  hp.max_length = 0
  return hp
@registry.register_hparams
def transformer_prepend():
  """Default prepend-mode hparams (alias of the v2 variant)."""
  return transformer_prepend_v2()
@registry.register_ranged_hparams
def transformer_base_range(rhp):
"""Small range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("learning_rate_warmup_steps",
[1000, 2000, 4000, 8000, 16000])
rhp.set_float("initializer_gain", 0.5, 2.0)
rhp.set_float("optimizer_adam_beta1", 0.85, 0.95)
rhp.set_float("optimizer_adam_beta2", 0.97, 0.99)
rhp.set_float("weight_decay", 0.0, 1e-4)
@registry.register_hparams
def transformer_relative():
  """Use relative position embeddings instead of absolute position encodings."""
  hp = transformer_base()
  hp.pos = None
  hp.self_attention_type = "dot_product_relative"
  hp.max_relative_position = 20
  return hp
@registry.register_hparams
def transformer_relative_tiny():
  """Tiny model (2 layers, hidden 128) with relative attention."""
  hp = transformer_relative()
  hp.num_hidden_layers = 2
  hp.hidden_size = 128
  hp.filter_size = 512
  hp.num_heads = 4
  return hp
@registry.register_hparams
def transformer_relative_big():
  """Big model with relative position attention."""
  hp = transformer_big()
  hp.pos = None
  hp.self_attention_type = "dot_product_relative"
  hp.max_relative_position = 20
  return hp
@registry.register_hparams
def transformer_timeseries():
  """Small model tuned for timeseries problems."""
  hp = transformer_small()
  hp.batch_size = 256
  hp.learning_rate_warmup_steps = 2000
  return hp
@registry.register_hparams
def transformer_mlperf_tpu():
  """HParams for Transformer model on TPU for MLPerf on TPU 2x2."""
  hp = transformer_base_v3()
  hp.mlperf_mode = True
  hp.symbol_modality_num_shards = 1
  hp.max_length = 256  # ignored when using "_packed" problems
  hp.batch_size = 2048  # per-chip batch size matches the reference model
  hp.hidden_size = 1024
  hp.filter_size = 4096
  hp.num_heads = 16
  # Broadcast dropout noise to save memory.
  hp.attention_dropout_broadcast_dims = "0,1"  # batch, heads
  hp.relu_dropout_broadcast_dims = "1"  # length
  hp.layer_prepostprocess_dropout_broadcast_dims = "1"  # length
  return hp
def update_hparams_for_tpu(hparams):
  """Change hparams in place to be compatible with TPU training.

  Args:
    hparams: an hparams object; mutated in place.

  Returns:
    The same hparams object, for convenient chaining.
  """
  # Adafactor uses less memory than Adam; switch to it together with its
  # recommended learning-rate scheme.
  hparams.optimizer = "Adafactor"
  hparams.learning_rate_schedule = "rsqrt_decay"
  hparams.learning_rate_warmup_steps = 10000
  # A single modality shard avoids an expensive concat on TPU (>1 shards
  # mainly helps parameter distribution on multi-GPU machines).
  hparams.symbol_modality_num_shards = 1
  # Adaptive batch sizes and sequence lengths are not supported on TPU:
  # every batch has the same sequence length and batch size, longer
  # sequences are dropped and shorter ones padded.  Problems with
  # pre-combined examples ("_packed" problems, fixed lengths) ignore this
  # value; for variable-length problems it is the maximum sequence length.
  hparams.max_length = 64
  # TPUs have less memory than GPUs, so cap the batch size.
  hparams.batch_size = min(hparams.batch_size, 2048)
  # Broadcasting the dropout noise saves memory during training.
  hparams.attention_dropout_broadcast_dims = "0,1"  # batch, heads
  hparams.relu_dropout_broadcast_dims = "1"  # length
  hparams.layer_prepostprocess_dropout_broadcast_dims = "1"  # length
  return hparams
@registry.register_hparams
def transformer_tpu():
  """HParams for Transformer model on TPU."""
  hp = transformer_base()
  update_hparams_for_tpu(hp)
  return hp
@registry.register_hparams
def transformer_timeseries_tpu():
  """HParams for running Transformer model on timeseries on TPU."""
  hp = transformer_timeseries()
  update_hparams_for_tpu(hp)
  hp.batch_size = 256  # restore the value set in transformer_timeseries
  return hp
@registry.register_hparams
def transformer_tpu_bf16_activation():
  """HParams for Transformer model with BF16 activation on TPU."""
  hp = transformer_tpu()
  hp.activation_dtype = "bfloat16"
  return hp
@registry.register_hparams
def transformer_fairseq_fp16_activation_big():
  """Hparams intended to mirror those used in arxiv.org/pdf/1806.00187.pdf."""
  hp = transformer_big()
  hp.activation_dtype = "float16"
  hp.batch_size = 3584
  return hp
@registry.register_hparams
def transformer_packed_tpu():
  """Deprecated alias for transformer_tpu()."""
  return transformer_tpu()
@registry.register_hparams
def transformer_big_tpu():
  """Big model adapted for TPU."""
  hp = transformer_big()
  update_hparams_for_tpu(hp)
  return hp
@registry.register_hparams
def transformer_tiny_tpu():
  """Tiny model adapted for TPU."""
  hp = transformer_tiny()
  update_hparams_for_tpu(hp)
  return hp
@registry.register_ranged_hparams
def transformer_tiny_tpu_range(rhp):
"""Small range of hyperparameters."""
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_float("weight_decay", 0.0, 2.0)
@registry.register_ranged_hparams
def transformer_tpu_range(rhp):
"""Small range of hyperparameters."""
# After starting from base, set intervals for some parameters.
rhp.set_float("learning_rate", 0.3, 3.0, scale=rhp.LOG_SCALE)
rhp.set_discrete("learning_rate_warmup_steps",
[1000, 2000, 4000, 8000, 16000])
rhp.set_float("initializer_gain", 0.5, 2.0)
rhp.set_float("optimizer_adam_beta1", 0.85, 0.95)
rhp.set_float("optimizer_adam_beta2", 0.97, 0.99)
rhp.set_float("weight_decay", 0.0, 2.0)
@registry.register_hparams
def transformer_small_tpu():
  """TPU-friendly version of transformer_small.
  Returns:
    an hparams object.
  """
  hp = transformer_small()
  update_hparams_for_tpu(hp)
  return hp
@registry.register_hparams
def transformer_clean():
  """v2 base with no dropout, no label smoothing, and no max_length."""
  hp = transformer_base_v2()
  hp.label_smoothing = 0.0
  hp.layer_prepostprocess_dropout = 0.0
  hp.attention_dropout = 0.0
  hp.relu_dropout = 0.0
  hp.max_length = 0
  return hp
@registry.register_hparams
def transformer_clean_big():
  """Clean config scaled to big-model dimensions."""
  hp = transformer_clean()
  hp.hidden_size = 1024
  hp.filter_size = 4096
  return hp
@registry.register_hparams
def transformer_clean_big_tpu():
  """Clean big config adapted for TPU."""
  hp = transformer_clean_big()
  update_hparams_for_tpu(hp)
  return hp
@registry.register_hparams
def transformer_tpu_with_conv():
  """Cut down on the number of heads, and use convs instead."""
  hp = transformer_tpu()
  hp.num_heads = 4  # heads are expensive on TPUs
  hp.ffn_layer = "conv_relu_conv"
  return hp
@registry.register_hparams
def transformer_lm_tpu_0():
  """HParams for training languagemodel_lm1b8k on tpu.  92M Params."""
  hp = transformer_clean_big()
  update_hparams_for_tpu(hp)
  hp.num_heads = 4  # heads are expensive on TPUs
  hp.batch_size = 4096
  hp.shared_embedding_and_softmax_weights = False
  hp.layer_prepostprocess_dropout = 0.1
  return hp
@registry.register_hparams
def transformer_lm_tpu_1():
  """HParams for training languagemodel_lm1b8k on tpu.  335M Params."""
  hp = transformer_lm_tpu_0()
  hp.hidden_size = 2048
  hp.filter_size = 8192
  return hp
@registry.register_hparams
def transformer_librispeech_v1():
  """HParams for training ASR model on LibriSpeech V1."""
  hp = transformer_base()
  hp.num_heads = 4
  hp.filter_size = 1024
  hp.hidden_size = 256
  hp.num_encoder_layers = 5
  hp.num_decoder_layers = 3
  hp.learning_rate = 0.15
  hp.batch_size = 6000000
  librispeech.set_librispeech_length_hparams(hp)
  return hp
@registry.register_hparams
def transformer_librispeech_v2():
  """HParams for training ASR model on LibriSpeech V2."""
  hp = transformer_base()
  hp.max_length = 1240000
  hp.max_input_seq_length = 1550
  hp.max_target_seq_length = 350
  hp.batch_size = 16
  hp.num_decoder_layers = 4
  hp.num_encoder_layers = 6
  hp.hidden_size = 384
  hp.learning_rate = 0.15
  hp.daisy_chain_variables = False
  hp.filter_size = 1536
  hp.num_heads = 2
  hp.ffn_layer = "conv_relu_conv"
  hp.conv_first_kernel = 9
  hp.weight_decay = 0
  hp.layer_prepostprocess_dropout = 0.2
  hp.relu_dropout = 0.2
  return hp
@registry.register_hparams
def transformer_librispeech_tpu_v1():
  """HParams for training ASR model on Librispeech on TPU v1."""
  hp = transformer_librispeech_v1()
  update_hparams_for_tpu(hp)
  hp.batch_size = 16
  # Re-apply length limits, since update_hparams_for_tpu changes them.
  librispeech.set_librispeech_length_hparams(hp)
  return hp
@registry.register_hparams
def transformer_librispeech_tpu_v2():
  """HParams for training ASR model on Librispeech on TPU v2."""
  hp = transformer_librispeech_v2()
  update_hparams_for_tpu(hp)
  hp.batch_size = 16
  # Re-apply length limits, since update_hparams_for_tpu changes them.
  librispeech.set_librispeech_length_hparams(hp)
  return hp
@registry.register_hparams
def transformer_librispeech():
  """HParams for training ASR model on Librispeech; alias for the v2 set."""
  return transformer_librispeech_v2()
@registry.register_hparams
def transformer_librispeech_tpu():
  """HParams for training ASR model on Librispeech on TPU; alias for v2."""
  return transformer_librispeech_tpu_v2()
@registry.register_hparams
def transformer_common_voice():
  """HParams for training ASR model on Mozilla Common Voice.

  Reuses the Librispeech settings unchanged.
  """
  return transformer_librispeech()
@registry.register_hparams
def transformer_common_voice_tpu():
  """HParams for training ASR model on Mozilla Common Voice on TPU."""
  hp = transformer_librispeech_tpu()
  hp.batch_size = 8  # Smaller batch than the Librispeech TPU default.
  return hp
@registry.register_hparams
def transformer_supervised_attention():
  """HParams for supervised attention problems."""
  hp = transformer_base()
  # Loss used to match attention against supervision: KL-divergence or MSE.
  hp.add_hparam("expected_attention_loss_type", "kl_divergence")
  # Weight of the encoder-decoder expected attention loss term.
  hp.add_hparam("expected_attention_loss_multiplier", 1.0)
  return hp
@registry.register_hparams
def transformer_tpu_1b():
  """Hparams for machine translation with ~1.1B parameters."""
  hp = transformer_tpu()
  hp.hidden_size = 2048
  hp.filter_size = 8192
  hp.num_hidden_layers = 8
  hp.batch_size = 1024  # Smaller batch size to avoid OOM.
  # bfloat16 activations and weights halve the memory footprint.
  hp.activation_dtype = "bfloat16"
  hp.weight_dtype = "bfloat16"
  # Maximize parameter count relative to computation by not sharing the
  # embedding and softmax weights.
  hp.shared_embedding_and_softmax_weights = False
  return hp
@registry.register_hparams
def transformer_wikitext103_l4k_v0():
  """HParams for training languagemodel_wikitext103_l4k."""
  hp = transformer_big()
  # Adafactor uses less memory than Adam; use its recommended LR scheme.
  hp.optimizer = "Adafactor"
  hp.learning_rate_schedule = "rsqrt_decay"
  hp.learning_rate_warmup_steps = 10000
  # Model shape for 4K-token sequences.
  hp.num_heads = 4
  hp.max_length = 4096
  hp.batch_size = 4096
  hp.shared_embedding_and_softmax_weights = False
  hp.num_hidden_layers = 8
  # Regularization.
  hp.attention_dropout = 0.1
  hp.layer_prepostprocess_dropout = 0.2
  hp.relu_dropout = 0.1
  hp.label_smoothing = 0.0
  # Broadcasting dropout noise over some dims saves memory during training.
  hp.attention_dropout_broadcast_dims = "0,1"  # batch, heads
  hp.relu_dropout_broadcast_dims = "1"  # length
  hp.layer_prepostprocess_dropout_broadcast_dims = "1"  # length
  # One shard avoids an expensive concat on TPU; >1 shards would only help
  # parameter distribution on multi-GPU machines.
  hp.symbol_modality_num_shards = 1
  return hp
@registry.register_hparams
def transformer_wikitext103_l4k_memory_v0():
  """HParams for training languagemodel_wikitext103_l4k with memory."""
  hp = transformer_wikitext103_l4k_v0()
  hp.split_targets_chunk_length = 64
  hp.split_targets_max_chunks = 64
  hp.split_targets_strided_training = True
  hp.add_hparam("memory_type", "transformer_xl")
  # The hparams specify batch size *before* chunking, but we want a
  # consistent 4K batch size *after* chunking to fully utilize the hardware.
  target_tokens_per_batch = 4096
  hp.batch_size = int(target_tokens_per_batch * (
      hp.max_length / hp.split_targets_chunk_length))  # 262144
  # Relative attention replaces absolute position signals.
  hp.pos = None
  hp.self_attention_type = "dot_product_relative"
  hp.max_relative_position = 2 * hp.split_targets_chunk_length
  hp.add_hparam("unconditional", True)
  hp.add_hparam("recurrent_memory_batch_size", 0)  # 0 = try to guess
  # By default, cache one chunk only (like Transformer-XL).
  hp.add_hparam("num_memory_items", hp.split_targets_chunk_length)
  return hp
@registry.register_hparams
def transformer_wikitext103_l16k_memory_v0():
  """HParams for training languagemodel_wikitext103_l16k with memory."""
  hp = transformer_wikitext103_l4k_memory_v0()
  hp.max_length = 16384
  hp.split_targets_chunk_length = 64
  num_chunks = int(hp.max_length / hp.split_targets_chunk_length)
  hp.split_targets_max_chunks = num_chunks
  # Keep a consistent 4K token batch *after* chunking (see l4k_memory_v0).
  target_tokens_per_batch = 4096
  hp.batch_size = int(target_tokens_per_batch * num_chunks)
  hp.max_relative_position = 2 * hp.split_targets_chunk_length
  return hp
@registry.register_hparams
def transformer_cifar10_memory_v0():
  """HParams for training image_cifar10_plain_gen_flat_rev with memory."""
  hp = transformer_wikitext103_l4k_memory_v0()
  hp.num_hidden_layers = 6
  hp.max_length = 32 * 32 * 3  # One flattened 32x32 RGB image.
  hp.split_targets_chunk_length = 64 * 3
  num_chunks = int(hp.max_length / hp.split_targets_chunk_length)
  hp.split_targets_max_chunks = num_chunks
  hp.num_memory_items = 128 * 3
  # Since this is an image problem, batch size refers to examples (not
  # tokens), so aim for a fixed number of images per batch.
  target_images_per_batch = 4
  hp.batch_size = int(target_images_per_batch * num_chunks)
  # The recurrent memory needs to know the actual batch size (in sequences).
  hp.recurrent_memory_batch_size = hp.batch_size
  hp.max_relative_position = (
      hp.num_memory_items + hp.split_targets_chunk_length)
  return hp
@registry.register_hparams
def transformer_imagenet64_memory_v0():
  """HParams for training image_imagenet64_gen_flat_rev with memory."""
  hp = transformer_cifar10_memory_v0()
  hp.max_length = 64 * 64 * 3  # One flattened 64x64 RGB image.
  hp.split_targets_chunk_length = 64 * 3
  num_chunks = int(hp.max_length / hp.split_targets_chunk_length)
  hp.split_targets_max_chunks = num_chunks
  hp.num_memory_items = 128 * 3
  # Since this is an image problem, batch size refers to examples (not
  # tokens), so aim for a fixed number of images per batch.
  target_images_per_batch = 2
  hp.batch_size = int(target_images_per_batch * num_chunks)
  # The recurrent memory needs to know the actual batch size (in sequences).
  hp.recurrent_memory_batch_size = hp.batch_size
  hp.max_relative_position = 3072
  return hp
| 36.476107 | 80 | 0.708923 |
42b1ef9d63ec549b06a487045f423884eee58cd8 | 48,667 | py | Python | sdk/python/pulumi_azure_native/sql/v20200801preview/extended_server_blob_auditing_policy.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/sql/v20200801preview/extended_server_blob_auditing_policy.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/sql/v20200801preview/extended_server_blob_auditing_policy.py | sebtelko/pulumi-azure-native | 711ec021b5c73da05611c56c8a35adb0ce3244e4 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from ._enums import *
# Public API of this module.
__all__ = ['ExtendedServerBlobAuditingPolicyArgs', 'ExtendedServerBlobAuditingPolicy']
@pulumi.input_type
class ExtendedServerBlobAuditingPolicyArgs:
    """Input properties for an ExtendedServerBlobAuditingPolicy resource."""

    def __init__(__self__, *,
                 resource_group_name: pulumi.Input[str],
                 server_name: pulumi.Input[str],
                 state: pulumi.Input['BlobAuditingPolicyState'],
                 audit_actions_and_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 blob_auditing_policy_name: Optional[pulumi.Input[str]] = None,
                 is_azure_monitor_target_enabled: Optional[pulumi.Input[bool]] = None,
                 is_devops_audit_enabled: Optional[pulumi.Input[bool]] = None,
                 is_storage_secondary_key_in_use: Optional[pulumi.Input[bool]] = None,
                 predicate_expression: Optional[pulumi.Input[str]] = None,
                 queue_delay_ms: Optional[pulumi.Input[int]] = None,
                 retention_days: Optional[pulumi.Input[int]] = None,
                 storage_account_access_key: Optional[pulumi.Input[str]] = None,
                 storage_account_subscription_id: Optional[pulumi.Input[str]] = None,
                 storage_endpoint: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a ExtendedServerBlobAuditingPolicy resource.

        :param pulumi.Input[str] resource_group_name: The name of the resource group that
               contains the resource (from the Azure Resource Manager API or the portal).
        :param pulumi.Input[str] server_name: The name of the server.
        :param pulumi.Input['BlobAuditingPolicyState'] state: State of the audit. If state
               is Enabled, storageEndpoint or isAzureMonitorTargetEnabled is required.
        :param pulumi.Input[Sequence[pulumi.Input[str]]] audit_actions_and_groups:
               Actions-Groups and Actions to audit. The recommended default combination is
               BATCH_COMPLETED_GROUP, SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP and
               FAILED_DATABASE_AUTHENTICATION_GROUP. For the full list of supported
               groups and actions, see [Database-Level Audit Action
               Groups](https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level-audit-action-groups).
        :param pulumi.Input[str] blob_auditing_policy_name: The name of the blob auditing
               policy.
        :param pulumi.Input[bool] is_azure_monitor_target_enabled: Whether audit events
               are sent to Azure Monitor. Requires 'State' to be 'Enabled' and
               Diagnostic Settings with the 'SQLSecurityAuditEvents' logs category on the
               'master' database. See [Diagnostic Settings REST
               API](https://go.microsoft.com/fwlink/?linkid=2033207).
        :param pulumi.Input[bool] is_devops_audit_enabled: Whether devops logs are sent
               to Azure Monitor. Requires 'State' 'Enabled',
               'IsAzureMonitorTargetEnabled' true, and Diagnostic Settings with the
               'DevOpsOperationsAudit' logs category on the master database.
        :param pulumi.Input[bool] is_storage_secondary_key_in_use: Whether
               storageAccountAccessKey value is the storage's secondary key.
        :param pulumi.Input[str] predicate_expression: Condition of the where clause when
               creating an audit.
        :param pulumi.Input[int] queue_delay_ms: Maximum time in milliseconds before
               audit actions are forced to be processed (minimum 1000, maximum
               2,147,483,647).
        :param pulumi.Input[int] retention_days: Number of days to keep the audit logs in
               the storage account.
        :param pulumi.Input[str] storage_account_access_key: Identifier key of the
               auditing storage account. If state is Enabled and storageEndpoint is
               specified but this key is not, the SQL server system-assigned managed
               identity is used to access the storage. See [Auditing to storage using
               Managed Identity
               authentication](https://go.microsoft.com/fwlink/?linkid=2114355).
        :param pulumi.Input[str] storage_account_subscription_id: The blob storage
               subscription Id.
        :param pulumi.Input[str] storage_endpoint: The blob storage endpoint (e.g.
               https://MyAccount.blob.core.windows.net). If state is Enabled,
               storageEndpoint or isAzureMonitorTargetEnabled is required.
        """
        # The three required arguments are always stored.
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "server_name", server_name)
        pulumi.set(__self__, "state", state)
        # Optional arguments are stored only when explicitly provided.
        optional_args = {
            "audit_actions_and_groups": audit_actions_and_groups,
            "blob_auditing_policy_name": blob_auditing_policy_name,
            "is_azure_monitor_target_enabled": is_azure_monitor_target_enabled,
            "is_devops_audit_enabled": is_devops_audit_enabled,
            "is_storage_secondary_key_in_use": is_storage_secondary_key_in_use,
            "predicate_expression": predicate_expression,
            "queue_delay_ms": queue_delay_ms,
            "retention_days": retention_days,
            "storage_account_access_key": storage_account_access_key,
            "storage_account_subscription_id": storage_account_subscription_id,
            "storage_endpoint": storage_endpoint,
        }
        for arg_name, arg_value in optional_args.items():
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="serverName")
    def server_name(self) -> pulumi.Input[str]:
        """
        The name of the server.
        """
        return pulumi.get(self, "server_name")

    @server_name.setter
    def server_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "server_name", value)

    @property
    @pulumi.getter
    def state(self) -> pulumi.Input['BlobAuditingPolicyState']:
        """
        State of the audit. If state is Enabled, storageEndpoint or isAzureMonitorTargetEnabled is required.
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: pulumi.Input['BlobAuditingPolicyState']):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter(name="auditActionsAndGroups")
    def audit_actions_and_groups(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Actions-Groups and Actions to audit. The recommended default combination is
        BATCH_COMPLETED_GROUP, SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP and
        FAILED_DATABASE_AUTHENTICATION_GROUP. For the full list of supported groups and
        actions, see [Database-Level Audit Action Groups](https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level-audit-action-groups).
        """
        return pulumi.get(self, "audit_actions_and_groups")

    @audit_actions_and_groups.setter
    def audit_actions_and_groups(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "audit_actions_and_groups", value)

    @property
    @pulumi.getter(name="blobAuditingPolicyName")
    def blob_auditing_policy_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the blob auditing policy.
        """
        return pulumi.get(self, "blob_auditing_policy_name")

    @blob_auditing_policy_name.setter
    def blob_auditing_policy_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "blob_auditing_policy_name", value)

    @property
    @pulumi.getter(name="isAzureMonitorTargetEnabled")
    def is_azure_monitor_target_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether audit events are sent to Azure Monitor. Requires 'State' to be 'Enabled'
        and Diagnostic Settings with the 'SQLSecurityAuditEvents' logs category on the
        'master' database. See [Diagnostic Settings REST API](https://go.microsoft.com/fwlink/?linkid=2033207)
        or [Diagnostic Settings PowerShell](https://go.microsoft.com/fwlink/?linkid=2033043).
        """
        return pulumi.get(self, "is_azure_monitor_target_enabled")

    @is_azure_monitor_target_enabled.setter
    def is_azure_monitor_target_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_azure_monitor_target_enabled", value)

    @property
    @pulumi.getter(name="isDevopsAuditEnabled")
    def is_devops_audit_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether devops logs are sent to Azure Monitor. Requires 'State' 'Enabled',
        'IsAzureMonitorTargetEnabled' true, and Diagnostic Settings with the
        'DevOpsOperationsAudit' logs category on the master database. See
        [Diagnostic Settings REST API](https://go.microsoft.com/fwlink/?linkid=2033207)
        or [Diagnostic Settings PowerShell](https://go.microsoft.com/fwlink/?linkid=2033043).
        """
        return pulumi.get(self, "is_devops_audit_enabled")

    @is_devops_audit_enabled.setter
    def is_devops_audit_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_devops_audit_enabled", value)

    @property
    @pulumi.getter(name="isStorageSecondaryKeyInUse")
    def is_storage_secondary_key_in_use(self) -> Optional[pulumi.Input[bool]]:
        """
        Whether storageAccountAccessKey value is the storage's secondary key.
        """
        return pulumi.get(self, "is_storage_secondary_key_in_use")

    @is_storage_secondary_key_in_use.setter
    def is_storage_secondary_key_in_use(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_storage_secondary_key_in_use", value)

    @property
    @pulumi.getter(name="predicateExpression")
    def predicate_expression(self) -> Optional[pulumi.Input[str]]:
        """
        Condition of the where clause when creating an audit.
        """
        return pulumi.get(self, "predicate_expression")

    @predicate_expression.setter
    def predicate_expression(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "predicate_expression", value)

    @property
    @pulumi.getter(name="queueDelayMs")
    def queue_delay_ms(self) -> Optional[pulumi.Input[int]]:
        """
        Maximum time in milliseconds before audit actions are forced to be processed.
        The default minimum value is 1000 (1 second); the maximum is 2,147,483,647.
        """
        return pulumi.get(self, "queue_delay_ms")

    @queue_delay_ms.setter
    def queue_delay_ms(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "queue_delay_ms", value)

    @property
    @pulumi.getter(name="retentionDays")
    def retention_days(self) -> Optional[pulumi.Input[int]]:
        """
        Number of days to keep the audit logs in the storage account.
        """
        return pulumi.get(self, "retention_days")

    @retention_days.setter
    def retention_days(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "retention_days", value)

    @property
    @pulumi.getter(name="storageAccountAccessKey")
    def storage_account_access_key(self) -> Optional[pulumi.Input[str]]:
        """
        Identifier key of the auditing storage account. If state is Enabled and
        storageEndpoint is specified but this key is not, the SQL server
        system-assigned managed identity is used to access the storage. See
        [Auditing to storage using Managed Identity authentication](https://go.microsoft.com/fwlink/?linkid=2114355).
        """
        return pulumi.get(self, "storage_account_access_key")

    @storage_account_access_key.setter
    def storage_account_access_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_account_access_key", value)

    @property
    @pulumi.getter(name="storageAccountSubscriptionId")
    def storage_account_subscription_id(self) -> Optional[pulumi.Input[str]]:
        """
        The blob storage subscription Id.
        """
        return pulumi.get(self, "storage_account_subscription_id")

    @storage_account_subscription_id.setter
    def storage_account_subscription_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_account_subscription_id", value)

    @property
    @pulumi.getter(name="storageEndpoint")
    def storage_endpoint(self) -> Optional[pulumi.Input[str]]:
        """
        The blob storage endpoint (e.g. https://MyAccount.blob.core.windows.net). If
        state is Enabled, storageEndpoint or isAzureMonitorTargetEnabled is required.
        """
        return pulumi.get(self, "storage_endpoint")

    @storage_endpoint.setter
    def storage_endpoint(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_endpoint", value)
class ExtendedServerBlobAuditingPolicy(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
audit_actions_and_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
blob_auditing_policy_name: Optional[pulumi.Input[str]] = None,
is_azure_monitor_target_enabled: Optional[pulumi.Input[bool]] = None,
is_devops_audit_enabled: Optional[pulumi.Input[bool]] = None,
is_storage_secondary_key_in_use: Optional[pulumi.Input[bool]] = None,
predicate_expression: Optional[pulumi.Input[str]] = None,
queue_delay_ms: Optional[pulumi.Input[int]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
retention_days: Optional[pulumi.Input[int]] = None,
server_name: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input['BlobAuditingPolicyState']] = None,
storage_account_access_key: Optional[pulumi.Input[str]] = None,
storage_account_subscription_id: Optional[pulumi.Input[str]] = None,
storage_endpoint: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
An extended server blob auditing policy.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Sequence[pulumi.Input[str]]] audit_actions_and_groups: Specifies the Actions-Groups and Actions to audit.
The recommended set of action groups to use is the following combination - this will audit all the queries and stored procedures executed against the database, as well as successful and failed logins:
BATCH_COMPLETED_GROUP,
SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP,
FAILED_DATABASE_AUTHENTICATION_GROUP.
This above combination is also the set that is configured by default when enabling auditing from the Azure portal.
The supported action groups to audit are (note: choose only specific groups that cover your auditing needs. Using unnecessary groups could lead to very large quantities of audit records):
APPLICATION_ROLE_CHANGE_PASSWORD_GROUP
BACKUP_RESTORE_GROUP
DATABASE_LOGOUT_GROUP
DATABASE_OBJECT_CHANGE_GROUP
DATABASE_OBJECT_OWNERSHIP_CHANGE_GROUP
DATABASE_OBJECT_PERMISSION_CHANGE_GROUP
DATABASE_OPERATION_GROUP
DATABASE_PERMISSION_CHANGE_GROUP
DATABASE_PRINCIPAL_CHANGE_GROUP
DATABASE_PRINCIPAL_IMPERSONATION_GROUP
DATABASE_ROLE_MEMBER_CHANGE_GROUP
FAILED_DATABASE_AUTHENTICATION_GROUP
SCHEMA_OBJECT_ACCESS_GROUP
SCHEMA_OBJECT_CHANGE_GROUP
SCHEMA_OBJECT_OWNERSHIP_CHANGE_GROUP
SCHEMA_OBJECT_PERMISSION_CHANGE_GROUP
SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP
USER_CHANGE_PASSWORD_GROUP
BATCH_STARTED_GROUP
BATCH_COMPLETED_GROUP
These are groups that cover all sql statements and stored procedures executed against the database, and should not be used in combination with other groups as this will result in duplicate audit logs.
For more information, see [Database-Level Audit Action Groups](https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level-audit-action-groups).
For Database auditing policy, specific Actions can also be specified (note that Actions cannot be specified for Server auditing policy). The supported actions to audit are:
SELECT
UPDATE
INSERT
DELETE
EXECUTE
RECEIVE
REFERENCES
The general form for defining an action to be audited is:
{action} ON {object} BY {principal}
Note that <object> in the above format can refer to an object like a table, view, or stored procedure, or an entire database or schema. For the latter cases, the forms DATABASE::{db_name} and SCHEMA::{schema_name} are used, respectively.
For example:
SELECT on dbo.myTable by public
SELECT on DATABASE::myDatabase by public
SELECT on SCHEMA::mySchema by public
For more information, see [Database-Level Audit Actions](https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level-audit-actions)
:param pulumi.Input[str] blob_auditing_policy_name: The name of the blob auditing policy.
:param pulumi.Input[bool] is_azure_monitor_target_enabled: Specifies whether audit events are sent to Azure Monitor.
In order to send the events to Azure Monitor, specify 'State' as 'Enabled' and 'IsAzureMonitorTargetEnabled' as true.
When using REST API to configure auditing, Diagnostic Settings with 'SQLSecurityAuditEvents' diagnostic logs category on the database should be also created.
Note that for server level audit you should use the 'master' database as {databaseName}.
Diagnostic Settings URI format:
PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/providers/microsoft.insights/diagnosticSettings/{settingsName}?api-version=2017-05-01-preview
For more information, see [Diagnostic Settings REST API](https://go.microsoft.com/fwlink/?linkid=2033207)
or [Diagnostic Settings PowerShell](https://go.microsoft.com/fwlink/?linkid=2033043)
:param pulumi.Input[bool] is_devops_audit_enabled: Specifies the state of devops audit. If state is Enabled, devops logs will be sent to Azure Monitor.
In order to send the events to Azure Monitor, specify 'State' as 'Enabled', 'IsAzureMonitorTargetEnabled' as true and 'IsDevopsAuditEnabled' as true
When using REST API to configure auditing, Diagnostic Settings with 'DevOpsOperationsAudit' diagnostic logs category on the master database should also be created.
Diagnostic Settings URI format:
PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Sql/servers/{serverName}/databases/master/providers/microsoft.insights/diagnosticSettings/{settingsName}?api-version=2017-05-01-preview
For more information, see [Diagnostic Settings REST API](https://go.microsoft.com/fwlink/?linkid=2033207)
or [Diagnostic Settings PowerShell](https://go.microsoft.com/fwlink/?linkid=2033043)
:param pulumi.Input[bool] is_storage_secondary_key_in_use: Specifies whether storageAccountAccessKey value is the storage's secondary key.
:param pulumi.Input[str] predicate_expression: Specifies condition of where clause when creating an audit.
:param pulumi.Input[int] queue_delay_ms: Specifies the amount of time in milliseconds that can elapse before audit actions are forced to be processed.
The default minimum value is 1000 (1 second). The maximum is 2,147,483,647.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[int] retention_days: Specifies the number of days to keep in the audit logs in the storage account.
:param pulumi.Input[str] server_name: The name of the server.
:param pulumi.Input['BlobAuditingPolicyState'] state: Specifies the state of the audit. If state is Enabled, storageEndpoint or isAzureMonitorTargetEnabled are required.
:param pulumi.Input[str] storage_account_access_key: Specifies the identifier key of the auditing storage account.
If state is Enabled and storageEndpoint is specified, not specifying the storageAccountAccessKey will use SQL server system-assigned managed identity to access the storage.
Prerequisites for using managed identity authentication:
1. Assign SQL Server a system-assigned managed identity in Azure Active Directory (AAD).
2. Grant SQL Server identity access to the storage account by adding 'Storage Blob Data Contributor' RBAC role to the server identity.
For more information, see [Auditing to storage using Managed Identity authentication](https://go.microsoft.com/fwlink/?linkid=2114355)
:param pulumi.Input[str] storage_account_subscription_id: Specifies the blob storage subscription Id.
:param pulumi.Input[str] storage_endpoint: Specifies the blob storage endpoint (e.g. https://MyAccount.blob.core.windows.net). If state is Enabled, storageEndpoint or isAzureMonitorTargetEnabled is required.
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: ExtendedServerBlobAuditingPolicyArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        An extended server blob auditing policy.

        Typing-only overload: accepts all resource inputs bundled into a single
        args object. The runtime dispatch happens in the untyped __init__ below.

        :param str resource_name: The name of the resource.
        :param ExtendedServerBlobAuditingPolicyArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher between the two typed overloads above: either a
        # positional ExtendedServerBlobAuditingPolicyArgs bundle or plain
        # keyword arguments. _utilities.get_resource_args_opts decides which.
        resource_args, opts = _utilities.get_resource_args_opts(ExtendedServerBlobAuditingPolicyArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 audit_actions_and_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 blob_auditing_policy_name: Optional[pulumi.Input[str]] = None,
                 is_azure_monitor_target_enabled: Optional[pulumi.Input[bool]] = None,
                 is_devops_audit_enabled: Optional[pulumi.Input[bool]] = None,
                 is_storage_secondary_key_in_use: Optional[pulumi.Input[bool]] = None,
                 predicate_expression: Optional[pulumi.Input[str]] = None,
                 queue_delay_ms: Optional[pulumi.Input[int]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 retention_days: Optional[pulumi.Input[int]] = None,
                 server_name: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input['BlobAuditingPolicyState']] = None,
                 storage_account_access_key: Optional[pulumi.Input[str]] = None,
                 storage_account_subscription_id: Optional[pulumi.Input[str]] = None,
                 storage_endpoint: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both typed __init__ overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from keyword args.
            # (When opts.id is set, __props__ is supplied by get() instead.)
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = ExtendedServerBlobAuditingPolicyArgs.__new__(ExtendedServerBlobAuditingPolicyArgs)
            __props__.__dict__["audit_actions_and_groups"] = audit_actions_and_groups
            __props__.__dict__["blob_auditing_policy_name"] = blob_auditing_policy_name
            __props__.__dict__["is_azure_monitor_target_enabled"] = is_azure_monitor_target_enabled
            __props__.__dict__["is_devops_audit_enabled"] = is_devops_audit_enabled
            __props__.__dict__["is_storage_secondary_key_in_use"] = is_storage_secondary_key_in_use
            __props__.__dict__["predicate_expression"] = predicate_expression
            __props__.__dict__["queue_delay_ms"] = queue_delay_ms
            # resource_group_name, server_name and state are required unless the
            # engine is rehydrating an existing resource (opts.urn is set).
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["retention_days"] = retention_days
            if server_name is None and not opts.urn:
                raise TypeError("Missing required property 'server_name'")
            __props__.__dict__["server_name"] = server_name
            if state is None and not opts.urn:
                raise TypeError("Missing required property 'state'")
            __props__.__dict__["state"] = state
            __props__.__dict__["storage_account_access_key"] = storage_account_access_key
            __props__.__dict__["storage_account_subscription_id"] = storage_account_subscription_id
            __props__.__dict__["storage_endpoint"] = storage_endpoint
            # Output-only properties; the provider fills these in after creation.
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Aliases keep states created under older/renamed type tokens resolvable.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:ExtendedServerBlobAuditingPolicy"), pulumi.Alias(type_="azure-native:sql:ExtendedServerBlobAuditingPolicy"), pulumi.Alias(type_="azure-nextgen:sql:ExtendedServerBlobAuditingPolicy"), pulumi.Alias(type_="azure-native:sql/v20170301preview:ExtendedServerBlobAuditingPolicy"), pulumi.Alias(type_="azure-nextgen:sql/v20170301preview:ExtendedServerBlobAuditingPolicy"), pulumi.Alias(type_="azure-native:sql/v20200202preview:ExtendedServerBlobAuditingPolicy"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:ExtendedServerBlobAuditingPolicy"), pulumi.Alias(type_="azure-native:sql/v20201101preview:ExtendedServerBlobAuditingPolicy"), pulumi.Alias(type_="azure-nextgen:sql/v20201101preview:ExtendedServerBlobAuditingPolicy")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(ExtendedServerBlobAuditingPolicy, __self__).__init__(
            'azure-native:sql/v20200801preview:ExtendedServerBlobAuditingPolicy',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'ExtendedServerBlobAuditingPolicy':
        """
        Get an existing ExtendedServerBlobAuditingPolicy resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # All properties start unset (None); passing opts.id makes the engine
        # read the live resource and populate them.
        __props__ = ExtendedServerBlobAuditingPolicyArgs.__new__(ExtendedServerBlobAuditingPolicyArgs)
        __props__.__dict__["audit_actions_and_groups"] = None
        __props__.__dict__["is_azure_monitor_target_enabled"] = None
        __props__.__dict__["is_devops_audit_enabled"] = None
        __props__.__dict__["is_storage_secondary_key_in_use"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["predicate_expression"] = None
        __props__.__dict__["queue_delay_ms"] = None
        __props__.__dict__["retention_days"] = None
        __props__.__dict__["state"] = None
        __props__.__dict__["storage_account_subscription_id"] = None
        __props__.__dict__["storage_endpoint"] = None
        __props__.__dict__["type"] = None
        return ExtendedServerBlobAuditingPolicy(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter(name="auditActionsAndGroups")
    def audit_actions_and_groups(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        Specifies the Actions-Groups and Actions to audit.
        The recommended set of action groups to use is the following combination - this will audit all the queries and stored procedures executed against the database, as well as successful and failed logins:
        BATCH_COMPLETED_GROUP,
        SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP,
        FAILED_DATABASE_AUTHENTICATION_GROUP.
        This above combination is also the set that is configured by default when enabling auditing from the Azure portal.
        The supported action groups to audit are (note: choose only specific groups that cover your auditing needs. Using unnecessary groups could lead to very large quantities of audit records):
        APPLICATION_ROLE_CHANGE_PASSWORD_GROUP
        BACKUP_RESTORE_GROUP
        DATABASE_LOGOUT_GROUP
        DATABASE_OBJECT_CHANGE_GROUP
        DATABASE_OBJECT_OWNERSHIP_CHANGE_GROUP
        DATABASE_OBJECT_PERMISSION_CHANGE_GROUP
        DATABASE_OPERATION_GROUP
        DATABASE_PERMISSION_CHANGE_GROUP
        DATABASE_PRINCIPAL_CHANGE_GROUP
        DATABASE_PRINCIPAL_IMPERSONATION_GROUP
        DATABASE_ROLE_MEMBER_CHANGE_GROUP
        FAILED_DATABASE_AUTHENTICATION_GROUP
        SCHEMA_OBJECT_ACCESS_GROUP
        SCHEMA_OBJECT_CHANGE_GROUP
        SCHEMA_OBJECT_OWNERSHIP_CHANGE_GROUP
        SCHEMA_OBJECT_PERMISSION_CHANGE_GROUP
        SUCCESSFUL_DATABASE_AUTHENTICATION_GROUP
        USER_CHANGE_PASSWORD_GROUP
        BATCH_STARTED_GROUP
        BATCH_COMPLETED_GROUP
        These are groups that cover all sql statements and stored procedures executed against the database, and should not be used in combination with other groups as this will result in duplicate audit logs.
        For more information, see [Database-Level Audit Action Groups](https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level-audit-action-groups).
        For Database auditing policy, specific Actions can also be specified (note that Actions cannot be specified for Server auditing policy). The supported actions to audit are:
        SELECT
        UPDATE
        INSERT
        DELETE
        EXECUTE
        RECEIVE
        REFERENCES
        The general form for defining an action to be audited is:
        {action} ON {object} BY {principal}
        Note that <object> in the above format can refer to an object like a table, view, or stored procedure, or an entire database or schema. For the latter cases, the forms DATABASE::{db_name} and SCHEMA::{schema_name} are used, respectively.
        For example:
        SELECT on dbo.myTable by public
        SELECT on DATABASE::myDatabase by public
        SELECT on SCHEMA::mySchema by public
        For more information, see [Database-Level Audit Actions](https://docs.microsoft.com/en-us/sql/relational-databases/security/auditing/sql-server-audit-action-groups-and-actions#database-level-audit-actions)
        """
        # Python name maps to the API property "auditActionsAndGroups".
        return pulumi.get(self, "audit_actions_and_groups")
    @property
    @pulumi.getter(name="isAzureMonitorTargetEnabled")
    def is_azure_monitor_target_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies whether audit events are sent to Azure Monitor.
        In order to send the events to Azure Monitor, specify 'State' as 'Enabled' and 'IsAzureMonitorTargetEnabled' as true.
        When using REST API to configure auditing, Diagnostic Settings with 'SQLSecurityAuditEvents' diagnostic logs category on the database should be also created.
        Note that for server level audit you should use the 'master' database as {databaseName}.
        Diagnostic Settings URI format:
        PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Sql/servers/{serverName}/databases/{databaseName}/providers/microsoft.insights/diagnosticSettings/{settingsName}?api-version=2017-05-01-preview
        For more information, see [Diagnostic Settings REST API](https://go.microsoft.com/fwlink/?linkid=2033207)
        or [Diagnostic Settings PowerShell](https://go.microsoft.com/fwlink/?linkid=2033043)
        """
        # Python name maps to the API property "isAzureMonitorTargetEnabled".
        return pulumi.get(self, "is_azure_monitor_target_enabled")
    @property
    @pulumi.getter(name="isDevopsAuditEnabled")
    def is_devops_audit_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Specifies the state of devops audit. If state is Enabled, devops logs will be sent to Azure Monitor.
        In order to send the events to Azure Monitor, specify 'State' as 'Enabled', 'IsAzureMonitorTargetEnabled' as true and 'IsDevopsAuditEnabled' as true
        When using REST API to configure auditing, Diagnostic Settings with 'DevOpsOperationsAudit' diagnostic logs category on the master database should also be created.
        Diagnostic Settings URI format:
        PUT https://management.azure.com/subscriptions/{subscriptionId}/resourceGroups/{resourceGroup}/providers/Microsoft.Sql/servers/{serverName}/databases/master/providers/microsoft.insights/diagnosticSettings/{settingsName}?api-version=2017-05-01-preview
        For more information, see [Diagnostic Settings REST API](https://go.microsoft.com/fwlink/?linkid=2033207)
        or [Diagnostic Settings PowerShell](https://go.microsoft.com/fwlink/?linkid=2033043)
        """
        # Python name maps to the API property "isDevopsAuditEnabled".
        return pulumi.get(self, "is_devops_audit_enabled")
@property
@pulumi.getter(name="isStorageSecondaryKeyInUse")
def is_storage_secondary_key_in_use(self) -> pulumi.Output[Optional[bool]]:
"""
Specifies whether storageAccountAccessKey value is the storage's secondary key.
"""
return pulumi.get(self, "is_storage_secondary_key_in_use")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="predicateExpression")
def predicate_expression(self) -> pulumi.Output[Optional[str]]:
"""
Specifies condition of where clause when creating an audit.
"""
return pulumi.get(self, "predicate_expression")
@property
@pulumi.getter(name="queueDelayMs")
def queue_delay_ms(self) -> pulumi.Output[Optional[int]]:
"""
Specifies the amount of time in milliseconds that can elapse before audit actions are forced to be processed.
The default minimum value is 1000 (1 second). The maximum is 2,147,483,647.
"""
return pulumi.get(self, "queue_delay_ms")
@property
@pulumi.getter(name="retentionDays")
def retention_days(self) -> pulumi.Output[Optional[int]]:
"""
Specifies the number of days to keep in the audit logs in the storage account.
"""
return pulumi.get(self, "retention_days")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
"""
Specifies the state of the audit. If state is Enabled, storageEndpoint or isAzureMonitorTargetEnabled are required.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="storageAccountSubscriptionId")
def storage_account_subscription_id(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the blob storage subscription Id.
"""
return pulumi.get(self, "storage_account_subscription_id")
@property
@pulumi.getter(name="storageEndpoint")
def storage_endpoint(self) -> pulumi.Output[Optional[str]]:
"""
Specifies the blob storage endpoint (e.g. https://MyAccount.blob.core.windows.net). If state is Enabled, storageEndpoint or isAzureMonitorTargetEnabled is required.
"""
return pulumi.get(self, "storage_endpoint")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
| 60.231436 | 834 | 0.693797 |
61076ede9d82da1c83bf483faaf799ff5d473d2a | 4,226 | py | Python | audio/learner.py | mogwai/fastai-audio | 15bf9dfead9a869f575615564ad846b890527ea3 | [
"MIT"
] | 157 | 2019-05-31T06:14:58.000Z | 2020-09-02T20:26:41.000Z | audio/learner.py | mogwai/fastai-audio | 15bf9dfead9a869f575615564ad846b890527ea3 | [
"MIT"
] | 31 | 2019-07-08T18:43:15.000Z | 2020-09-04T12:15:45.000Z | audio/learner.py | mogwai/fastai_audio | 15bf9dfead9a869f575615564ad846b890527ea3 | [
"MIT"
] | 60 | 2019-06-03T20:32:23.000Z | 2020-09-02T20:26:43.000Z | from .data import *
from torch.nn import Conv2d, Sequential, Module
def adapt_conv(conv: Conv2d, n_channels: int, pretrained: bool = False,
               init: Optional[Callable] = None, padding_mode: str = 'zeros'):
    '''Create a new layer that adapts `conv` to accept `n_channels` inputs.
    Copies existing weights if `pretrained` or initialises them with `init`.'''
    if conv.in_channels == n_channels: return conv # No need to adapt
    # Clone the geometry of the existing conv so only in_channels changes.
    args = {n: getattr(conv, n) for n in [
        'kernel_size', 'stride', 'padding', 'dilation', 'groups']}
    bias = conv.bias is not None
    if 'padding_mode' in Conv2d.__constants__: # Padding mode added in PyTorch 1.1
        # NOTE(review): with the default padding_mode='zeros' this argument is
        # never None, so conv.padding_mode is never actually used here — confirm
        # whether the default was meant to be None.
        args['padding_mode'] = ifnone(padding_mode, conv.padding_mode)
    new_conv = Conv2d(n_channels, conv.out_channels, bias=bias, **args)
    if pretrained:
        exp_shape = (conv.out_channels, conv.in_channels, *conv.kernel_size)
        assert conv.weight.shape == exp_shape, f"Unexpected weights shape, expected {exp_shape}, got {conv.weight.shape}."
        # Broadcast the weights of the original first input channel across all
        # of the new input channels (RHS has in-dim 1, LHS has n_channels).
        new_conv.weight.data[...] = conv.weight.data[:, 0:1, :, :]
        if bias: new_conv.bias.data = conv.bias.data
    elif init: init_default(new_conv, init)
    # Keep the new layer on the same device as the original weights.
    new_conv.to(conv.weight.device)
    return new_conv
def adapt_model(model: Union[Module, Sequential], n_channels: int, name: str = 'conv1',
                pretrained: bool = False, init: Optional[Callable] = None, padding_mode: str = 'zeros'):
    '''Adapt a convolutional model to `n_channels` inputs and copy weights if `pretrained` or initialise with `init`.'''
    # Find direct parent of first conv layer. Could be either a Sequential or a custom Module (but not the Conv itself)
    # Note the check order matters: Conv2d is itself a Module, so the
    # "not Conv2d" clause stops the descent one level above the conv.
    while (isinstance(model, Sequential) and
           isinstance(model[0], (Sequential, Module)) and
           not isinstance(model[0], Conv2d)):
        model = model[0]
    # `update` writes the replacement conv back into its parent container.
    if isinstance(model, Sequential) and isinstance(model[0], Conv2d):
        conv1 = model[0]
        def update(conv): model[0] = conv
    elif isinstance(model, Module) and hasattr(model, name):
        conv1 = getattr(model, name)
        update = partial(setattr, model, name)
    else: raise TypeError(f"Could not locate first convolution layer. If it is a named layer then pass it's name, otherwise use adapt_conv.")
    update(adapt_conv(conv1, n_channels, pretrained=pretrained,
                      init=init, padding_mode=padding_mode))
# Thanks to github.com/thomasbrandon/ for the audio_learner, adapt_conv_and adapt_model functions
def audio_learner(data: AudioDataBunch, base_arch: Callable = models.resnet18, metrics=accuracy,
                  cut: Union[int, Callable] = None, pretrained: bool = False, lin_ftrs: Optional[Collection[int]] = None,
                  ps: Floats = 0.5, custom_head: Optional[nn.Module] = None, split_on: Optional[SplitFuncOrIdxList] = None,
                  bn_final: bool = False, init=nn.init.kaiming_normal_, concat_pool: bool = True,
                  padding_mode: str = 'zeros', **kwargs: Any) -> Learner:
    '''Create a learner to apply a CNN model to audio spectrograms.

    Builds a standard fastai cnn_learner, then adapts its first conv layer to
    the number of spectrogram channels in `data` and unfreezes the whole model.
    '''
    learn = cnn_learner(data, base_arch, cut=cut, metrics=metrics, pretrained=pretrained, lin_ftrs=lin_ftrs, ps=ps,
                        custom_head=custom_head, split_on=split_on, bn_final=bn_final, init=init,
                        concat_pool=concat_pool, **kwargs)
    # Spectrograms rarely have 3 channels, so rewrite the input conv to match.
    channels = _calc_channels(data)
    adapt_model(learn.model, channels, pretrained=pretrained,
                init=init, padding_mode=padding_mode)
    learn.unfreeze()  # Model shouldn't be frozen, unlike vision
    return learn
# This will have to be updated in future for multichannel but is a quick fix for now
def _calc_channels(data: AudioDataBunch):
channels = data.train_ds[0][0].nchannels * \
3 if data.config.delta else data.train_ds[0][0].nchannels
return channels
def audio_predict(learn, item: AudioItem):
    '''Applies preprocessing to an AudioItem before predicting its class.

    Accepts either an AudioItem (its path is used) or a path directly, wraps it
    in an AudioList configured like the learner's training data, and returns
    `learn.predict(...)` on the opened item.

    Fix vs. original: removed the stray `print("Debugging", item)` left over
    from debugging, which wrote to stdout on every prediction.
    '''
    if isinstance(item, AudioItem):
        item = item.path
    al = AudioList([item], path=item, config=learn.data.x.config)
    ai = AudioList.open(al, item)
    return learn.predict(ai)
| 52.825 | 141 | 0.677709 |
16680523cacd4cd3a6fa61fe3d07af16022c96d3 | 519 | py | Python | content_n_exceptions/ex120_word_counts.py | Alyssonmach/learning-python-with-codes | e5ef70f3b56712e98449b3053eb34416b8025cb1 | [
"MIT"
] | 3 | 2020-11-28T08:26:54.000Z | 2020-12-23T18:37:37.000Z | content_n_exceptions/ex120_word_counts.py | Alyssonmach/learning-python-with-codes | e5ef70f3b56712e98449b3053eb34416b8025cb1 | [
"MIT"
] | 1 | 2020-10-22T02:05:11.000Z | 2020-10-22T02:05:53.000Z | content_n_exceptions/ex120_word_counts.py | Alyssonmach/learning-python-with-codes | e5ef70f3b56712e98449b3053eb34416b8025cb1 | [
"MIT"
def count_words(filename):
    """Count the approximate number of words in a file.

    Prints a one-line report and returns the word count, or prints an apology
    and returns None when the file does not exist. Word boundaries are
    whatever str.split() treats as whitespace, hence "approximate".

    Fix vs. original: the computed count was printed but discarded; it is now
    also returned (backward compatible — previous callers ignored the None).
    """
    try:
        with open(filename, 'r', encoding='utf-8') as f_obj:
            contents = f_obj.read()
    except FileNotFoundError:
        msg = "Sorry, the file " + filename + " does not exist."
        print(msg)
        return None
    # Count approximate number of words in the file.
    words = contents.split()
    num_words = len(words)
    print("The file " + filename + " has about " + str(num_words) + " words.")
    return num_words
dc3a6cae86db732d7d467b4fd5f18711e2aad777 | 10,078 | py | Python | src/modeling/predict_lyft.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | [
"MIT"
] | 44 | 2020-12-09T06:15:15.000Z | 2022-03-31T02:37:47.000Z | src/modeling/predict_lyft.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | [
"MIT"
] | null | null | null | src/modeling/predict_lyft.py | pfnet-research/kaggle-lyft-motion-prediction-4th-place-solution | 0bc51075db31a747eeebb7f4775a3cd26ad5f870 | [
"MIT"
] | 7 | 2020-12-09T10:08:32.000Z | 2021-08-17T01:53:51.000Z | import argparse
from distutils.util import strtobool
import numpy as np
import torch
from pathlib import Path
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Subset
from l5kit.evaluation import write_pred_csv
from l5kit.data import LocalDataManager, ChunkedDataset
from l5kit.dataset import AgentDataset
import sys
import os
from tqdm import tqdm
sys.path.append(os.pardir)
sys.path.append(os.path.join(os.pardir, os.pardir))
from lib.transforms.augmentation import _agent_type_onehot
from modeling.load_flag import Flags
from lib.rasterization.rasterizer_builder import build_custom_rasterizer
from modeling.builder import build_multi_predictor, build_multi_mode_deep_ensemble
from lib.functions.transform import transform_points_batch
# from lib.utils.dotdict import DotDict
from lib.utils.yaml_utils import save_yaml, load_yaml
from src.lib.nn.models.single.lyft_model import LyftModel
from lib.nn.models.deep_ensemble.lyft_multi_deep_ensemble_predictor import LyftMultiDeepEnsemblePredictor
from lib.sampling.agent_sampling_changing_yaw import create_generate_agent_sample_changing_yaw_partial
# Referred https://www.kaggle.com/pestipeti/pytorch-baseline-inference
def run_prediction(predictor, data_loader,
                   convert_world_from_agent: bool = False,
                   feat_mode: str = "none"):
    """Run inference over `data_loader` and collect predictions batch by batch.

    Returns (timestamps, track_ids, coords, confs) as concatenated numpy
    arrays. When `convert_world_from_agent` is True, predicted agent-frame
    coordinates are transformed into world-frame offsets (required from
    l5kit >= 1.1.0). When `feat_mode == "agent_type"`, a one-hot agent-type
    feature built from label probabilities is passed alongside the image.

    NOTE(review): relies on a module-global `device`, which is only assigned
    inside the `__main__` block of this script — confirm before reusing.
    """
    predictor.eval()
    pred_coords_list = []
    confidences_list = []
    timestamps_list = []
    track_id_list = []
    with torch.no_grad():
        dataiter = tqdm(data_loader)
        for data in dataiter:
            image = data["image"].to(device)
            # target_availabilities = data["target_availabilities"].to(device)
            # targets = data["target_positions"].to(device)
            if feat_mode == "agent_type":
                x_feat = torch.tensor([
                    _agent_type_onehot(lp) for lp in data["label_probabilities"].cpu().numpy()
                ]).to(device)
                outputs = predictor(image, x_feat)
            else:
                outputs = predictor(image)
            if isinstance(predictor, LyftMultiDeepEnsemblePredictor):
                # Ensemble: each member yields (pred, conf); stack along the
                # mode axis and average the confidences across members.
                assert len(predictor.predictors) == len(outputs)
                pred = torch.cat([p for p, _ in outputs], dim=1)
                confidences = torch.cat([c for _, c in outputs], dim=1) / len(outputs)
            else:
                pred, confidences = outputs
            if convert_world_from_agent:
                # https://github.com/lyft/l5kit/blob/master/examples/agent_motion_prediction/agent_motion_prediction.ipynb
                # convert agent coordinates into world offsets
                agents_coords = pred  # (bs, num_modes, future_len, 2=xy)
                dtype = pred.dtype
                world_from_agents = data["world_from_agent"].type(dtype).to(device)  # (bs, 3, 3)
                centroids = data["centroid"].type(dtype).to(device)  # (bs, 2)
                bs, num_modes, future_len, cdim = agents_coords.shape
                # Flatten (bs, modes, steps) into one batch dim so every point
                # can be transformed with its per-sample 3x3 matrix at once.
                agents_coords = agents_coords.reshape(bs * num_modes * future_len, cdim)
                transf_matrix = world_from_agents[:, None, None, :, :].expand(bs, num_modes, future_len, 3, 3).reshape(
                    bs * num_modes * future_len, 3, 3)
                centroids = centroids[:, :2]
                centroids = centroids[:, None, None, :].expand(bs, num_modes, future_len, 2).reshape(
                    bs * num_modes * future_len, 2)
                # World offsets = transformed points minus the agent centroid.
                pred = transform_points_batch(agents_coords, transf_matrix) - centroids
                pred = pred.view(bs, num_modes, future_len, cdim)
            pred_coords_list.append(pred.cpu().numpy())
            confidences_list.append(confidences.cpu().numpy().copy())
            timestamps_list.append(data["timestamp"].numpy().copy())
            track_id_list.append(data["track_id"].numpy().copy())
    timestamps = np.concatenate(timestamps_list)
    track_ids = np.concatenate(track_id_list)
    coords = np.concatenate(pred_coords_list)
    confs = np.concatenate(confidences_list)
    return timestamps, track_ids, coords, confs
def predict_and_save(predictor, test_loader, convert_world_from_agent, out_dir, model_mode, feat_mode: str = "none"):
    """Run inference with `predictor` and write results under `out_dir`.

    Always writes a compressed .npz with raw arrays; additionally writes the
    Kaggle submission.csv when exactly 3 modes were predicted (the format
    expected by l5kit's write_pred_csv).

    NOTE(review): uses the module-global `debug_str`, which is only defined
    inside the `__main__` block below — this function cannot be called before
    that assignment. Confirm before importing it from elsewhere.
    """
    # --- Inference ---
    timestamps, track_ids, coords, confs = run_prediction(predictor, test_loader, convert_world_from_agent, feat_mode)
    num_modes = confs.shape[-1]
    prediction_out_dir = out_dir / f"prediction_{model_mode}{debug_str}"
    os.makedirs(str(prediction_out_dir), exist_ok=True)
    if num_modes == 3:
        csv_path = prediction_out_dir / "submission.csv"
        write_pred_csv(
            csv_path,
            timestamps=timestamps,
            track_ids=track_ids,
            coords=coords,
            confs=confs)
        print(f"Saved to {csv_path}")
    # --- Save to npz format, for future analysis purpose ---
    npz_path = prediction_out_dir / "submission.npz"
    np.savez_compressed(
        npz_path,
        timestamps=timestamps,
        track_ids=track_ids,
        coords=coords,
        confs=confs
    )
    print(f"Saved to {npz_path}")
def parse():
    """Define and evaluate this script's command-line interface; return the parsed args."""
    ap = argparse.ArgumentParser(description='')
    add = ap.add_argument
    add('--out', '-o', default='results/tmp',
        help='Directory to output the result')
    add('--model_mode', type=str, default='ema', help='')
    add('--yaw_delta', type=float, default=None, help="Use `yaw - yaw_delta`")
    add('--convert_world_from_agent', '-c', type=strtobool, default='true',
        help='Convert agent coord to world or not. Should be True from l5kit==1.1.0')
    add('--debug', '-d', type=strtobool, default='false', help='')
    return ap.parse_args()
if __name__ == '__main__':
    # Script entry point: load the training-run config from --out, rebuild the
    # test dataset/rasterizer, restore predictor weights, and write predictions.
    args = parse()
    out_dir = Path(args.out)
    debug = args.debug
    flags_dict = load_yaml(out_dir / 'flags.yaml')
    cfg = load_yaml(out_dir / 'cfg.yaml')
    # flags = DotDict(flags_dict)
    flags = Flags()
    flags.update(flags_dict)
    print(f"flags: {flags_dict}")
    # set env variable for data
    # Not use flags.l5kit_data_folder, but use fixed test data.
    l5kit_data_folder = "../../input/lyft-motion-prediction-autonomous-vehicles"
    os.environ["L5KIT_DATA_FOLDER"] = l5kit_data_folder
    dm = LocalDataManager(None)
    print("Load dataset...")
    default_test_cfg = {
        'key': 'scenes/test.zarr',
        'batch_size': 32,
        'shuffle': False,
        'num_workers': 4
    }
    test_cfg = cfg.get("test_data_loader", default_test_cfg)
    # Rasterizer
    rasterizer = build_custom_rasterizer(cfg, dm, eval=True)
    test_path = test_cfg["key"]
    print(f"Loading from {test_path}")
    test_zarr = ChunkedDataset(dm.require(test_path)).open()
    print("test_zarr", type(test_zarr))
    test_mask = np.load(f"{l5kit_data_folder}/scenes/mask.npz")["arr_0"]
    test_agent_dataset = AgentDataset(cfg, test_zarr, rasterizer, agents_mask=test_mask)
    if args.yaw_delta is not None:
        # Test-time augmentation: swap in a sampling function that rotates yaw.
        assert flags.override_sample_function_name == ""
        assert hasattr(test_agent_dataset, "sample_function")
        test_agent_dataset.sample_function = create_generate_agent_sample_changing_yaw_partial(
            cfg, rasterizer, args.yaw_delta
        )
    test_dataset = test_agent_dataset
    if debug:
        # Only use 100 dataset for fast check...
        test_dataset = Subset(test_dataset, np.arange(100))
    test_loader = DataLoader(
        test_dataset,
        shuffle=test_cfg["shuffle"],
        batch_size=test_cfg["batch_size"],
        num_workers=test_cfg["num_workers"],
        pin_memory=True,
    )
    print(test_agent_dataset)
    print("# AgentDataset test:", len(test_agent_dataset))
    print("# ActualDataset test:", len(test_dataset))
    in_channels, height, width = test_agent_dataset[0]["image"].shape  # get input image shape
    print("in_channels", in_channels, "height", height, "width", width)
    # ==== INIT MODEL
    # `device` is read as a module global by run_prediction above.
    device = torch.device(flags.device)
    if flags.pred_mode == "single":
        predictor = LyftModel(cfg)
    elif flags.pred_mode == "multi":
        predictor = build_multi_predictor(cfg, flags, device, in_channels)
    elif flags.pred_mode == "multi_deep_ensemble":
        predictor = build_multi_mode_deep_ensemble(cfg, flags, device, in_channels)
    else:
        raise ValueError(f"[ERROR] Unexpected value flags.pred_mode={flags.pred_mode}")
    model_mode = args.model_mode
    # `debug_str` is read as a module global by predict_and_save above.
    debug_str = "_debug" if debug else ""
    if model_mode == "original":
        pt_path = out_dir/"predictor.pt"
    elif model_mode == "ema":
        pt_path = out_dir/"predictor_ema.pt"
    elif model_mode == "cycle0":
        pt_path = out_dir/"snapshot_0th_cycle.pt"
    else:
        raise ValueError(f"[ERROR] Unexpected value model_mode={model_mode}")
    print(f"model_mode={model_mode}, Loading from {pt_path}")
    try:
        predictor.load_state_dict(torch.load(str(pt_path)))
    except RuntimeError:
        # Checkpoints saved before the MultiPredictor refactoring only contain
        # the base model's weights; retry loading into that submodule.
        print("Load from predictor failed, loading from predictor.base_model...")
        predictor.base_model.load_state_dict(torch.load(str(pt_path)))
    # Use this instead for old code, before MultiPredictor refactoring.
    # predictor.base_model.load_state_dict(torch.load(str(pt_path)))
    predictor.to(device)
    if args.yaw_delta is not None:
        save_dir = out_dir / str(args.yaw_delta)
    else:
        save_dir = out_dir
    if isinstance(predictor, LyftMultiDeepEnsemblePredictor):
        # Save each ensemble member's predictions in its own subdirectory.
        for k, name in enumerate(predictor.names):
            print(f"Predicting {name}...")
            predict_and_save(
                predictor.get_kth_predictor(k),
                test_loader,
                args.convert_world_from_agent,
                save_dir / name,
                model_mode
            )
            pass
    else:
        predict_and_save(predictor, test_loader, args.convert_world_from_agent, save_dir, model_mode, flags.feat_mode)
| 39.833992 | 122 | 0.661242 |
1627d49baed3ca1909354a5da9f55ca5423b15c5 | 268,494 | py | Python | gluon/tools.py | pcwalden/web2py | 2329bd3c4f3121e61efb4d4d36dbdb8ae6c6c90f | [
"BSD-3-Clause"
] | null | null | null | gluon/tools.py | pcwalden/web2py | 2329bd3c4f3121e61efb4d4d36dbdb8ae6c6c90f | [
"BSD-3-Clause"
] | null | null | null | gluon/tools.py | pcwalden/web2py | 2329bd3c4f3121e61efb4d4d36dbdb8ae6c6c90f | [
"BSD-3-Clause"
] | null | null | null | #!/bin/python
# -*- coding: utf-8 -*-
"""
| This file is part of the web2py Web Framework
| Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
Auth, Mail, PluginManager and various utilities
------------------------------------------------
"""
import base64
from functools import reduce
from gluon._compat import pickle, thread, urllib2, Cookie, StringIO, urlencode
from gluon._compat import configparser, MIMEBase, MIMEMultipart, MIMEText, Header
from gluon._compat import Encoders, Charset, long, urllib_quote, iteritems
from gluon._compat import to_bytes, to_native, add_charset, string_types
from gluon._compat import charset_QP, basestring, unicodeT, to_unicode
from gluon._compat import urllib2, urlopen
import datetime
import logging
import sys
import glob
import os
import re
import time
import fnmatch
import traceback
import smtplib
import email.utils
import random
import hmac
import hashlib
import json
from email import message_from_string
from gluon.authapi import AuthAPI
from gluon.contenttype import contenttype
from gluon.storage import Storage, StorageList, Settings, Messages
from gluon.utils import web2py_uuid, compare
from gluon.fileutils import read_file, check_credentials
from gluon import *
from gluon.contrib.autolinks import expand_one
from gluon.contrib.markmin.markmin2html import replace_at_urls
from gluon.contrib.markmin.markmin2html import replace_autolinks
from gluon.contrib.markmin.markmin2html import replace_components
from pydal.objects import Row, Set, Query
import gluon.serializers as serializers
Table = DAL.Table
Field = DAL.Field
__all__ = ['Mail', 'Auth', 'Recaptcha2', 'Crud', 'Service', 'Wiki',
'PluginManager', 'fetch', 'geocode', 'reverse_geocode', 'prettydate']
# mind there are two loggers here (logger and crud.settings.logger)!
logger = logging.getLogger("web2py")


def DEFAULT():
    """Sentinel compared by identity (``x is DEFAULT``) to mean "argument not supplied".

    Calling it returns None, preserving the behaviour of the original
    ``DEFAULT = lambda: None``; converted to a ``def`` per PEP 8 (E731).
    """
    return None
def getarg(position, default=None):
    """Return current.request.args[position] when that index exists, else `default`.

    Negative positions count from the end, mirroring Python list indexing.
    """
    request_args = current.request.args
    n = len(request_args)
    in_range = (-n <= position < 0) or (0 <= position < n)
    return request_args[position] if in_range else default
def callback(actions, form, tablename=None):
    """Invoke the callback(s) in `actions`, passing each one `form`.

    `actions` may be a single callable, a list/tuple of callables, or — when
    `tablename` is given — a dict mapping table names to either of the above.
    A falsy `actions` is a no-op.
    """
    if not actions:
        return
    if tablename and isinstance(actions, dict):
        actions = actions.get(tablename, [])
    if not isinstance(actions, (list, tuple)):
        actions = [actions]
    for action in actions:
        action(form)
def validators(*a):
    """Flatten the given validators (one level deep) into a single list.

    Each argument that is a list or tuple contributes its elements; every
    other argument is appended as-is.
    """
    flat = []
    for entry in a:
        if isinstance(entry, (list, tuple)):
            flat.extend(entry)
        else:
            flat.append(entry)
    return flat
def call_or_redirect(f, *args):
    """Redirect to `f(*args)` when `f` is callable, otherwise to `f` itself."""
    target = f(*args) if callable(f) else f
    redirect(target)
def replace_id(url, form):
    """Substitute '[id]' in `url` with the form's record id.

    Absolute targets (starting with '/' or 'http') are returned directly;
    anything else is routed through URL().
    """
    if not url:
        return URL(url)
    expanded = url.replace('[id]', str(form.vars.id))
    if expanded.startswith('/') or expanded.startswith('http'):
        return expanded
    return URL(expanded)
REGEX_OPEN_REDIRECT = re.compile(r"^(\w+)?[:]?(/$|//.*|/\\.*|[~]/.*)")


def prevent_open_redirect(url):
    """Sanitize a user-supplied redirect target against open-redirect attacks.

    Prevents an attacker from adding an arbitrary url after the _next
    variable in the request. Returns `url` when it is safe to redirect to,
    otherwise None. Absolute/scheme-relative forms (e.g. '//host/...',
    'http://host/...') are allowed only when their host component equals the
    current request's host; plain relative paths pass through unchanged.

    Fix vs. original: removed the stray debug ``print(host)`` that wrote to
    stdout on every call; the host lookup is now done only in the branch
    that needs it.
    """
    if not url:
        return None
    if not REGEX_OPEN_REDIRECT.match(url):
        # Plain relative path: safe as-is.
        return url
    # Potentially absolute target: allow only when it points back at us.
    host = current.request.env.http_host
    parts = url.split('/')
    if len(parts) > 2 and parts[2] == host:
        return url
    return None
class Mail(object):
    """
    Class for configuring and sending emails with alternative text / html
    body, multiple attachments and encryption support

    Works with SMTP and Google App Engine.

    Args:
        server: SMTP server address in address:port notation
        sender: sender email address
        login: sender login name and password in login:password notation
            or None if no authentication is required
        tls: enables/disables encryption (True by default)

    In Google App Engine use ::

        server='gae'

    For sake of backward compatibility all fields are optional and default
    to None, however, to be able to send emails at least server and sender
    must be specified. They are available under following fields::

        mail.settings.server
        mail.settings.sender
        mail.settings.login
        mail.settings.timeout = 60 # seconds (default)

    When server is 'logging', email is logged but not sent (debug mode).

    Optionally you can use PGP encryption or X509::

        mail.settings.cipher_type = None    # 'gpg' (needs python-pyme) or 'x509' (smime)
        mail.settings.gpg_home = None       # GNUPGHOME for gnupg
        mail.settings.sign = True           # sign the message
        mail.settings.sign_passphrase = None
        mail.settings.encrypt = True        # encrypt the message
        mail.settings.x509_sign_keyfile = None      # signer's key file / PEM string
        mail.settings.x509_sign_certfile = None     # signer's cert file / PEM string
        mail.settings.x509_sign_chainfile = None    # optional all-in-one CA chain
        mail.settings.x509_nocerts = False          # True = no attached certificate
        mail.settings.x509_crypt_certfiles = None   # cert file(s)/string(s) to encrypt with

    Examples:
        Create Mail object with authentication data for remote server::

            mail = Mail('example.com:25', 'me@example.com', 'me:password')

    Notice for GAE users:
        attachments have an automatic content_id='attachment-i' where i is
        a progressive number; in this way they can be referenced from the
        HTML as <img src="cid:attachment-0" /> etc.
    """

    class Attachment(MIMEBase):
        """
        Email attachment

        Args:
            payload: path to file or file-like object with read() method
            filename: name of the attachment stored in message; if set to
                None, it will be fetched from payload path; file-like
                object payload must have explicit filename specified
            content_id: id of the attachment; automatically contained within
                `<` and `>`
            content_type: content type of the attachment; if set to None,
                it will be fetched from filename using gluon.contenttype
                module
            encoding: encoding of all strings passed to this function (except
                attachment body)

        Content ID is used to identify attachments within the html body;
        e.g. an attached image with content ID 'photo' may be embedded in
        the html message as `<img src="cid:photo" />`.
        """

        def __init__(self,
                     payload,
                     filename=None,
                     content_id=None,
                     content_type=None,
                     encoding='utf-8'):
            # a string payload is interpreted as a path on disk;
            # anything else must be a file-like object with read()
            if isinstance(payload, str):
                if filename is None:
                    filename = os.path.basename(payload)
                payload = read_file(payload, 'rb')
            else:
                if filename is None:
                    raise Exception('Missing attachment name')
                payload = payload.read()
            if content_type is None:
                content_type = contenttype(filename)
            # kept around for the GAE send path (mail.Attachment)
            self.my_filename = filename
            self.my_payload = payload
            MIMEBase.__init__(self, *content_type.split('/', 1))
            self.set_payload(payload)
            self.add_header('Content-Disposition', 'attachment', filename=filename)
            if content_id is not None:
                self['Content-Id'] = '<%s>' % to_native(content_id, encoding)
            Encoders.encode_base64(self)

    def __init__(self, server=None, sender=None, login=None, tls=True):
        """Initialize the mailer settings; see the class docstring."""
        settings = self.settings = Settings()
        settings.server = server
        settings.sender = sender
        settings.login = login
        settings.tls = tls
        settings.timeout = 5  # seconds
        settings.hostname = None
        settings.ssl = False
        settings.cipher_type = None
        settings.gpg_home = None
        settings.sign = True
        settings.sign_passphrase = None
        settings.encrypt = True
        settings.x509_sign_keyfile = None
        settings.x509_sign_certfile = None
        settings.x509_sign_chainfile = None
        settings.x509_nocerts = False
        settings.x509_crypt_certfiles = None
        settings.debug = False
        settings.lock_keys = True
        # outcome of the last send(): backend result and error (or None)
        self.result = {}
        self.error = None

    def send(self,
             to,
             subject='[no subject]',
             message='[no message]',
             attachments=None,
             cc=None,
             bcc=None,
             reply_to=None,
             sender=None,
             encoding='utf-8',
             raw=False,
             headers=None,
             from_address=None,
             cipher_type=None,
             sign=None,
             sign_passphrase=None,
             encrypt=None,
             x509_sign_keyfile=None,
             x509_sign_chainfile=None,
             x509_sign_certfile=None,
             x509_crypt_certfiles=None,
             x509_nocerts=None
             ):
        """
        Sends an email using data specified in constructor

        Args:
            to: list or tuple of receiver addresses; will also accept single
                object
            subject: subject of the email
            message: email body text; a 2-list/2-tuple is (plain text, html),
                anything else is a single plain-text source. Each source may
                be None (ignored), a string, or a file-like object whose
                read() method supplies the content
            attachments: list or tuple of Mail.Attachment objects; will also
                accept single object
            cc: list or tuple of carbon copy receiver addresses; will also
                accept single object
            bcc: list or tuple of blind carbon copy receiver addresses; will
                also accept single object
            reply_to: address to which reply should be composed
            encoding: encoding of all strings passed to this method (including
                message bodies)
            raw: when True the message is sent verbatim (no mime assembly)
            headers: dictionary of headers to refine the headers just before
                sending mail, e.g. `{'X-Mailer' : 'web2py mailer'}`
                (default None means no extra headers)
            from_address: address to appear in the 'From:' header, this is not
                the envelope sender. If not specified the sender will be used
            cipher_type: 'gpg' (needs python-pyme/gpgme) or 'x509' (smime);
                None disables encryption/signing
            sign: sign the message (True or False)
            sign_passphrase: passphrase for key signing
            encrypt: encrypt the message (True or False). Defaults to True
            x509_sign_keyfile: signer's private key filename or PEM string
            x509_sign_certfile: signer's certificate filename or PEM string
            x509_sign_chainfile: optional all-in-one CA chain file/PEM string
            x509_nocerts: if True then no attached certificate in mail
            x509_crypt_certfiles: certificate file(s) or PEM string(s) to
                encrypt the messages with; single value or a list

        Returns:
            True on success, False on failure.

            Before return, method updates two object's fields:

            - self.result: return value of smtplib.SMTP.sendmail() or GAE's
              mail.send_mail() method
            - self.error: Exception message or None if above was successful
        """
        # We don't want to use base64 encoding for unicode mail
        add_charset('utf-8', charset_QP, charset_QP, 'utf-8')

        def encode_header(key):
            # RFC 2047-encode a header only when it carries non-ascii chars
            if [c for c in key if 32 > ord(c) or ord(c) > 127]:
                return Header(key.encode('utf-8'), 'utf-8')
            else:
                return key

        # encoded or raw text
        def encoded_or_raw(text):
            if raw:
                text = encode_header(text)
            return text

        if headers is None:
            # avoid a mutable default argument on the signature
            headers = {}
        sender = sender or self.settings.sender
        if not isinstance(self.settings.server, str):
            raise Exception('Server address not specified')
        if not isinstance(sender, str):
            raise Exception('Sender address not specified')
        if not raw and attachments:
            # Use multipart/mixed if there is attachments
            payload_in = MIMEMultipart('mixed')
        elif raw:
            # no encoding configuration for raw messages
            if not isinstance(message, basestring):
                message = message.read()
            if isinstance(message, unicodeT):
                text = message.encode('utf-8')
            elif not encoding == 'utf-8':
                text = message.decode(encoding).encode('utf-8')
            else:
                text = message
            # No charset passed to avoid transport encoding
            # NOTE: some unicode encoded strings will produce
            # unreadable mail contents.
            payload_in = MIMEText(text)
        if to:
            if not isinstance(to, (list, tuple)):
                to = [to]
        else:
            raise Exception('Target receiver address not specified')
        if reply_to:
            if not isinstance(reply_to, (list, tuple)):
                reply_to = [reply_to]
        if cc:
            if not isinstance(cc, (list, tuple)):
                cc = [cc]
        if bcc:
            if not isinstance(bcc, (list, tuple)):
                bcc = [bcc]
        if message is None:
            text = html = None
        elif isinstance(message, (list, tuple)):
            text, html = message
        elif message.strip().startswith('<html') and \
                message.strip().endswith('</html>'):
            # on GAE html-only messages still need a text body
            text = self.settings.server == 'gae' and message or None
            html = message
        else:
            text = message
            html = None
        if (text is not None or html is not None) and (not raw):
            if text is not None:
                if not isinstance(text, basestring):
                    text = text.read()
                if isinstance(text, unicodeT):
                    text = text.encode('utf-8')
                elif not encoding == 'utf-8':
                    text = text.decode(encoding).encode('utf-8')
            if html is not None:
                if not isinstance(html, basestring):
                    html = html.read()
                if isinstance(html, unicodeT):
                    html = html.encode('utf-8')
                elif not encoding == 'utf-8':
                    html = html.decode(encoding).encode('utf-8')
            # Construct mime part only if needed
            if text is not None and html:
                # We have text and html we need multipart/alternative
                attachment = MIMEMultipart('alternative')
                attachment.attach(MIMEText(text, _charset='utf-8'))
                attachment.attach(MIMEText(html, 'html', _charset='utf-8'))
            elif text is not None:
                attachment = MIMEText(text, _charset='utf-8')
            elif html:
                attachment = MIMEText(html, 'html', _charset='utf-8')
            if attachments:
                # If there is attachments put text and html into
                # multipart/mixed
                payload_in.attach(attachment)
            else:
                # No attachments no multipart/mixed
                payload_in = attachment
        if (attachments is None) or raw:
            pass
        elif isinstance(attachments, (list, tuple)):
            for attachment in attachments:
                payload_in.attach(attachment)
        else:
            payload_in.attach(attachments)
            attachments = [attachments]
        #######################################################
        #                      CIPHER                         #
        #######################################################
        cipher_type = cipher_type or self.settings.cipher_type
        sign = sign if sign is not None else self.settings.sign
        sign_passphrase = sign_passphrase or self.settings.sign_passphrase
        encrypt = encrypt if encrypt is not None else self.settings.encrypt
        #######################################################
        #                       GPGME                         #
        #######################################################
        if cipher_type == 'gpg':
            if self.settings.gpg_home:
                # Set GNUPGHOME environment variable to set home of gnupg
                import os
                os.environ['GNUPGHOME'] = self.settings.gpg_home
            if not sign and not encrypt:
                self.error = "No sign and no encrypt is set but cipher type to gpg"
                return False
            # need a python-pyme package and gpgme lib
            from pyme import core, errors
            from pyme.constants.sig import mode
            ############################################
            #                   sign                   #
            ############################################
            if sign:
                core.check_version(None)
                # str.replace: py2's string.replace() helper does not exist
                # in Python 3 and would raise AttributeError here
                pin = payload_in.as_string().replace('\n', '\r\n')
                plain = core.Data(pin)
                sig = core.Data()
                c = core.Context()
                c.set_armor(1)
                c.signers_clear()
                # search for signing key for From:
                for sigkey in c.op_keylist_all(sender, 1):
                    if sigkey.can_sign:
                        c.signers_add(sigkey)
                if not c.signers_enum(0):
                    self.error = 'No key for signing [%s]' % sender
                    return False
                c.set_passphrase_cb(lambda x, y, z: sign_passphrase)
                try:
                    # make a signature
                    c.op_sign(plain, sig, mode.DETACH)
                    sig.seek(0, 0)
                    # make it part of the email
                    payload = MIMEMultipart('signed',
                                            boundary=None,
                                            _subparts=None,
                                            **dict(micalg="pgp-sha1",
                                                   protocol="application/pgp-signature"))
                    # insert the origin payload
                    payload.attach(payload_in)
                    # insert the detached signature
                    p = MIMEBase("application", 'pgp-signature')
                    p.set_payload(sig.read())
                    payload.attach(p)
                    # it's just a trick to handle the no encryption case
                    payload_in = payload
                except errors.GPGMEError as ex:
                    self.error = "GPG error: %s" % ex.getstring()
                    return False
            ############################################
            #                  encrypt                 #
            ############################################
            if encrypt:
                core.check_version(None)
                plain = core.Data(payload_in.as_string())
                cipher = core.Data()
                c = core.Context()
                c.set_armor(1)
                # collect the public keys for encryption
                recipients = []
                rec = to[:]
                if cc:
                    rec.extend(cc)
                if bcc:
                    rec.extend(bcc)
                for addr in rec:
                    c.op_keylist_start(addr, 0)
                    r = c.op_keylist_next()
                    if r is None:
                        self.error = 'No key for [%s]' % addr
                        return False
                    recipients.append(r)
                try:
                    # make the encryption
                    c.op_encrypt(recipients, 1, plain, cipher)
                    cipher.seek(0, 0)
                    # make it a part of the email
                    payload = MIMEMultipart('encrypted',
                                            boundary=None,
                                            _subparts=None,
                                            **dict(protocol="application/pgp-encrypted"))
                    p = MIMEBase("application", 'pgp-encrypted')
                    p.set_payload("Version: 1\r\n")
                    payload.attach(p)
                    p = MIMEBase("application", 'octet-stream')
                    p.set_payload(cipher.read())
                    payload.attach(p)
                except errors.GPGMEError as ex:
                    self.error = "GPG error: %s" % ex.getstring()
                    return False
        #######################################################
        #                       X.509                         #
        #######################################################
        elif cipher_type == 'x509':
            if not sign and not encrypt:
                self.error = "No sign and no encrypt is set but cipher type to x509"
                return False
            import os
            x509_sign_keyfile = x509_sign_keyfile or self.settings.x509_sign_keyfile
            x509_sign_chainfile = x509_sign_chainfile or self.settings.x509_sign_chainfile
            x509_sign_certfile = x509_sign_certfile or self.settings.x509_sign_certfile or \
                x509_sign_keyfile or self.settings.x509_sign_certfile
            # crypt certfiles could be a string or a list
            x509_crypt_certfiles = x509_crypt_certfiles or self.settings.x509_crypt_certfiles
            x509_nocerts = x509_nocerts or\
                self.settings.x509_nocerts
            # need m2crypto
            try:
                from M2Crypto import BIO, SMIME, X509
            except Exception as e:
                self.error = "Can't load M2Crypto module"
                return False
            msg_bio = BIO.MemoryBuffer(payload_in.as_string())
            s = SMIME.SMIME()
            # SIGN
            if sign:
                # key for signing
                try:
                    keyfile_bio = BIO.openfile(x509_sign_keyfile)\
                        if os.path.isfile(x509_sign_keyfile)\
                        else BIO.MemoryBuffer(x509_sign_keyfile)
                    sign_certfile_bio = BIO.openfile(x509_sign_certfile)\
                        if os.path.isfile(x509_sign_certfile)\
                        else BIO.MemoryBuffer(x509_sign_certfile)
                    s.load_key_bio(keyfile_bio, sign_certfile_bio,
                                   callback=lambda x: sign_passphrase)
                    if x509_sign_chainfile:
                        sk = X509.X509_Stack()
                        chain = X509.load_cert(x509_sign_chainfile)\
                            if os.path.isfile(x509_sign_chainfile)\
                            else X509.load_cert_string(x509_sign_chainfile)
                        sk.push(chain)
                        s.set_x509_stack(sk)
                except Exception as e:
                    self.error = "Something went wrong on certificate / private key loading: <%s>" % str(e)
                    return False
                try:
                    if x509_nocerts:
                        flags = SMIME.PKCS7_NOCERTS
                    else:
                        flags = 0
                    if not encrypt:
                        flags += SMIME.PKCS7_DETACHED
                    p7 = s.sign(msg_bio, flags=flags)
                    # Recreate msg_bio because sign() has consumed it.
                    msg_bio = BIO.MemoryBuffer(payload_in.as_string())
                except Exception as e:
                    self.error = "Something went wrong on signing: <%s> %s" % (
                        str(e), str(flags))
                    return False
            # ENCRYPT
            if encrypt:
                try:
                    sk = X509.X509_Stack()
                    if not isinstance(x509_crypt_certfiles, (list, tuple)):
                        x509_crypt_certfiles = [x509_crypt_certfiles]
                    # make an encryption cert's stack
                    for crypt_certfile in x509_crypt_certfiles:
                        certfile = X509.load_cert(crypt_certfile)\
                            if os.path.isfile(crypt_certfile)\
                            else X509.load_cert_string(crypt_certfile)
                        sk.push(certfile)
                        s.set_x509_stack(sk)
                    s.set_cipher(SMIME.Cipher('des_ede3_cbc'))
                    tmp_bio = BIO.MemoryBuffer()
                    if sign:
                        s.write(tmp_bio, p7)
                    else:
                        tmp_bio.write(payload_in.as_string())
                    p7 = s.encrypt(tmp_bio)
                except Exception as e:
                    self.error = "Something went wrong on encrypting: <%s>" % str(e)
                    return False
            # Final stage in sign and encryption
            out = BIO.MemoryBuffer()
            if encrypt:
                s.write(out, p7)
            else:
                if sign:
                    s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED)
                else:
                    out.write('\r\n')
                    out.write(payload_in.as_string())
            out.close()
            st = str(out.read())
            payload = message_from_string(st)
        else:
            # no cryptography process as usual
            payload = payload_in
        if from_address:
            payload['From'] = encoded_or_raw(to_unicode(from_address, encoding))
        else:
            payload['From'] = encoded_or_raw(to_unicode(sender, encoding))
        # keep the original recipient list: `to` is extended with cc/bcc
        # below to build the envelope, but GAE needs only the To: addresses
        origTo = to[:]
        if to:
            payload['To'] = encoded_or_raw(to_unicode(', '.join(to), encoding))
        if reply_to:
            payload['Reply-To'] = encoded_or_raw(to_unicode(', '.join(reply_to), encoding))
        if cc:
            payload['Cc'] = encoded_or_raw(to_unicode(', '.join(cc), encoding))
            to.extend(cc)
        if bcc:
            to.extend(bcc)
        payload['Subject'] = encoded_or_raw(to_unicode(subject, encoding))
        payload['Date'] = email.utils.formatdate()
        for k, v in iteritems(headers):
            payload[k] = encoded_or_raw(to_unicode(v, encoding))
        result = {}
        try:
            if self.settings.server == 'logging':
                entry = 'email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' % \
                        ('-' * 40, sender, ', '.join(to), subject, text or html, '-' * 40)
                logger.warning(entry)
            elif self.settings.server.startswith('logging:'):
                entry = 'email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' % \
                        ('-' * 40, sender, ', '.join(to), subject, text or html, '-' * 40)
                # close the log file deterministically instead of leaking
                # the handle until garbage collection
                with open(self.settings.server[8:], 'a') as log_file:
                    log_file.write(entry)
            elif self.settings.server == 'gae':
                xcc = dict()
                if cc:
                    xcc['cc'] = cc
                if bcc:
                    xcc['bcc'] = bcc
                if reply_to:
                    xcc['reply_to'] = reply_to
                from google.appengine.api import mail
                attachments = attachments and [mail.Attachment(
                    a.my_filename,
                    a.my_payload,
                    content_id='<attachment-%s>' % k
                ) for k, a in enumerate(attachments) if not raw]
                if attachments:
                    result = mail.send_mail(
                        sender=sender, to=origTo,
                        subject=to_unicode(subject, encoding),
                        body=to_unicode(text or '', encoding),
                        html=html,
                        attachments=attachments, **xcc)
                elif html and (not raw):
                    result = mail.send_mail(
                        sender=sender, to=origTo,
                        subject=to_unicode(subject, encoding), body=to_unicode(text or '', encoding), html=html, **xcc)
                else:
                    result = mail.send_mail(
                        sender=sender, to=origTo,
                        subject=to_unicode(subject, encoding), body=to_unicode(text or '', encoding), **xcc)
            elif self.settings.server == 'aws':
                import boto3
                from botocore.exceptions import ClientError
                client = boto3.client('ses')
                try:
                    # named raw_message so it does not shadow the `raw` argument
                    raw_message = {'Data': payload.as_string()}
                    client.send_raw_email(RawMessage=raw_message,
                                          Source=sender,
                                          Destinations=to)
                    return True
                except ClientError as e:
                    # we should log this error:
                    # print e.response['Error']['Message']
                    return False
            else:
                smtp_args = self.settings.server.split(':')
                kwargs = dict(timeout=self.settings.timeout)
                func = smtplib.SMTP_SSL if self.settings.ssl else smtplib.SMTP
                server = func(*smtp_args, **kwargs)
                try:
                    if self.settings.tls and not self.settings.ssl:
                        server.ehlo(self.settings.hostname)
                        server.starttls()
                        server.ehlo(self.settings.hostname)
                    if self.settings.login:
                        server.login(*self.settings.login.split(':', 1))
                    result = server.sendmail(sender, to, payload.as_string())
                finally:
                    # do not want to hide errors raising some exception here
                    try:
                        server.quit()
                    except smtplib.SMTPException:
                        # ensure to close any socket with SMTP server
                        try:
                            server.close()
                        except Exception:
                            pass
        except Exception as e:
            logger.warning('Mail.send failure:%s' % e)
            self.result = result
            self.error = e
            return False
        self.result = result
        self.error = None
        return True
class Recaptcha2(DIV):
    """
    Experimental:
    Creates a DIV holding the newer Recaptcha from Google (v2)

    Args:
        request : the request. If not passed, uses current request
        public_key : the public key Google gave you
        private_key : the private key Google gave you
        error_message : the error message to show if verification fails
        label : the label to use
        options (dict) : takes these parameters

            - hl
            - theme
            - type
            - tabindex
            - callback
            - expired-callback

            see https://developers.google.com/recaptcha/docs/display for docs about those

        comment : the comment

    Examples:
        Use as::

            form = FORM(Recaptcha2(public_key='...', private_key='...'))

        or::

            form = SQLFORM(...)
            form.append(Recaptcha2(public_key='...', private_key='...'))

        to protect the login page instead, use::

            from gluon.tools import Recaptcha2
            auth.settings.captcha = Recaptcha2(request, public_key='...', private_key='...')
    """

    API_URI = 'https://www.google.com/recaptcha/api.js'
    VERIFY_SERVER = 'https://www.google.com/recaptcha/api/siteverify'

    def __init__(self,
                 request=None,
                 public_key='',
                 private_key='',
                 error_message='invalid',
                 label='Verify:',
                 options=None,
                 comment='',
                 ):
        request = request or current.request
        self.request_vars = request and request.vars or current.request.vars
        self.remote_addr = request.env.remote_addr
        self.public_key = public_key
        self.private_key = private_key
        self.errors = Storage()
        self.error_message = error_message
        # DIV plumbing: start with no children and no attributes
        self.components = []
        self.attributes = {}
        self.label = label
        self.options = options or {}
        self.comment = comment

    def _validate(self):
        """Verify the posted response token against Google's siteverify API.

        Returns True on success; on failure records error_message under
        self.errors['captcha'] and returns False.
        """
        recaptcha_response_field = self.request_vars.pop('g-recaptcha-response', None)
        remoteip = self.remote_addr
        if not recaptcha_response_field:
            self.errors['captcha'] = self.error_message
            return False
        params = urlencode({
            'secret': self.private_key,
            'remoteip': remoteip,
            'response': recaptcha_response_field,
        }).encode('utf-8')
        request = urllib2.Request(
            url=self.VERIFY_SERVER,
            data=to_bytes(params),
            headers={'Content-type': 'application/x-www-form-urlencoded',
                     'User-agent': 'reCAPTCHA Python'})
        httpresp = urlopen(request)
        content = httpresp.read()
        httpresp.close()
        try:
            response_dict = json.loads(to_native(content))
        except (ValueError, TypeError):
            # was a bare `except:`; only malformed/undecodable responses
            # should be treated as a failed captcha
            self.errors['captcha'] = self.error_message
            return False
        if response_dict.get('success', False):
            self.request_vars.captcha = ''
            return True
        else:
            self.errors['captcha'] = self.error_message
            return False

    def xml(self):
        """Render the captcha: api script, widget div and noscript fallback."""
        api_uri = self.API_URI
        hl = self.options.pop('hl', None)
        if hl:
            api_uri = self.API_URI + '?hl=%s' % hl
        public_key = self.public_key
        self.options['sitekey'] = public_key
        captcha = DIV(
            SCRIPT(_src=api_uri, _async='', _defer=''),
            DIV(_class="g-recaptcha", data=self.options),
            TAG.noscript(XML("""
<div style="width: 302px; height: 352px;">
<div style="width: 302px; height: 352px; position: relative;">
  <div style="width: 302px; height: 352px; position: absolute;">
    <iframe src="https://www.google.com/recaptcha/api/fallback?k=%(public_key)s"
            frameborder="0" scrolling="no"
            style="width: 302px; height:352px; border-style: none;">
    </iframe>
  </div>
  <div style="width: 250px; height: 80px; position: absolute; border-style: none;
          bottom: 21px; left: 25px; margin: 0px; padding: 0px; right: 25px;">
    <textarea id="g-recaptcha-response" name="g-recaptcha-response"
              class="g-recaptcha-response"
              style="width: 250px; height: 80px; border: 1px solid #c1c1c1;
              margin: 0px; padding: 0px; resize: none;" value="">
    </textarea>
  </div>
</div>
</div>""" % dict(public_key=public_key))
            )
        )
        if not self.errors.captcha:
            return XML(captcha).xml()
        else:
            captcha.append(DIV(self.errors['captcha'], _class='error'))
            return XML(captcha).xml()
# this should only be used for captcha and perhaps not even for that
def addrow(form, a, b, c, style, _id, position=-1):
    """Insert a (label *a*, widget *b*, comment *c*) row into *form* at
    *position*, laid out according to the formstyle name in *style*.

    Unknown styles fall through to the classic table layout. Only used
    internally, e.g. to inject the captcha row into auth forms.
    """
    if style == "divs":
        form[0].insert(position, DIV(DIV(LABEL(a), _class='w2p_fl'),
                                     DIV(b, _class='w2p_fw'),
                                     DIV(c, _class='w2p_fc'),
                                     _id=_id))
    elif style == "table2cols":
        # label+comment on one row, the widget spanning the next row
        form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
                                    TD(c, _class='w2p_fc')))
        form[0].insert(position + 1, TR(TD(b, _class='w2p_fw'),
                                        _colspan=2, _id=_id))
    elif style == "ul":
        form[0].insert(position, LI(DIV(LABEL(a), _class='w2p_fl'),
                                    DIV(b, _class='w2p_fw'),
                                    DIV(c, _class='w2p_fc'),
                                    _id=_id))
    elif style == "bootstrap":
        form[0].insert(position, DIV(LABEL(a, _class='control-label'),
                                     DIV(b, SPAN(c, _class='inline-help'),
                                         _class='controls'),
                                     _class='control-group', _id=_id))
    elif style in ("bootstrap3_inline", "bootstrap4_inline"):
        form[0].insert(position, DIV(LABEL(a, _class='control-label col-sm-3'),
                                     DIV(b, SPAN(c, _class='help-block'),
                                         _class='col-sm-9'),
                                     _class='form-group row', _id=_id))
    elif style in ("bootstrap3_stacked", "bootstrap4_stacked"):
        form[0].insert(position, DIV(LABEL(a, _class='control-label'),
                                     b, SPAN(c, _class='help-block'),
                                     _class='form-group row', _id=_id))
    else:
        # default: classic three-cell table row
        form[0].insert(position, TR(TD(LABEL(a), _class='w2p_fl'),
                                    TD(b, _class='w2p_fw'),
                                    TD(c, _class='w2p_fc'), _id=_id))
class AuthJWT(object):
"""
Experimental!
Args:
- secret_key: the secret. Without salting, an attacker knowing this can impersonate
any user
- algorithm : uses as they are in the JWT specs, HS256, HS384 or HS512 basically means
signing with HMAC with a 256, 284 or 512bit hash
- verify_expiration : verifies the expiration checking the exp claim
- leeway: allow n seconds of skew when checking for token expiration
- expiration : how many seconds a token may be valid
- allow_refresh: enable the machinery to get a refreshed token passing a not-already-expired
token
- refresh_expiration_delta: to avoid continous refresh of the token
- header_prefix : self-explanatory. "JWT" and "Bearer" seems to be the emerging standards
- jwt_add_header: a dict holding additional mappings to the header. by default only alg and typ are filled
- user_param: the name of the parameter holding the username when requesting a token. Can be useful, e.g, for
email-based authentication, with "email" as a parameter
- pass_param: same as above, but for the password
- realm: self-explanatory
- salt: can be static or a function that takes the payload as an argument.
Example:
def mysalt(payload):
return payload['hmac_key'].split('-')[0]
- additional_payload: can be a dict to merge with the payload or a function that takes
the payload as input and returns the modified payload
Example:
def myadditional_payload(payload):
payload['my_name_is'] = 'bond,james bond'
return payload
- before_authorization: can be a callable that takes the deserialized token (a dict) as input.
Gets called right after signature verification but before the actual
authorization takes place. It may be use to cast
the extra auth_user fields to their actual types.
You can raise with HTTP a proper error message
Example:
def mybefore_authorization(tokend):
if not tokend['my_name_is'] == 'bond,james bond':
raise HTTP(400, 'Invalid JWT my_name_is claim')
- max_header_length: check max length to avoid load()ing unusually large tokens (could mean crafted, e.g. in a DDoS.)
Basic Usage:
in models (or the controller needing it)
myjwt = AuthJWT(auth, secret_key='secret')
in the controller issuing tokens
def login_and_take_token():
return myjwt.jwt_token_manager()
A call then to /app/controller/login_and_take_token with username and password returns the token
A call to /app/controller/login_and_take_token with the original token returns the refreshed token
To protect a function with JWT
@myjwt.allows_jwt()
@auth.requires_login()
def protected():
return '%s$%s' % (request.now, auth.user_id)
To inject optional auth info into the action with JWT
@myjwt.allows_jwt()
def unprotected():
if auth.user:
return '%s$%s' % (request.now, auth.user_id)
return "No auth info!"
"""
    def __init__(self,
                 auth,
                 secret_key,
                 algorithm='HS256',
                 verify_expiration=True,
                 leeway=30,
                 expiration=60 * 5,
                 allow_refresh=True,
                 refresh_expiration_delta=60 * 60,
                 header_prefix='Bearer',
                 jwt_add_header=None,
                 user_param='username',
                 pass_param='password',
                 realm='Login required',
                 salt=None,
                 additional_payload=None,
                 before_authorization=None,
                 max_header_length=4 * 1024,
                 ):
        """Store the configuration and precompute the encoded JOSE header.

        See the class docstring for the meaning of every argument.
        """
        self.secret_key = secret_key
        self.auth = auth
        self.algorithm = algorithm
        # only symmetric HMAC-SHA2 algorithms are supported
        if self.algorithm not in ('HS256', 'HS384', 'HS512'):
            raise NotImplementedError('Algorithm %s not allowed' % algorithm)
        self.verify_expiration = verify_expiration
        self.leeway = leeway
        self.expiration = expiration
        self.allow_refresh = allow_refresh
        self.refresh_expiration_delta = refresh_expiration_delta
        self.header_prefix = header_prefix
        self.jwt_add_header = jwt_add_header or {}
        # the JOSE header is identical for every token issued by this
        # instance, so base64url-encode it once and cache the result
        base_header = {'alg': self.algorithm, 'typ': 'JWT'}
        for k, v in iteritems(self.jwt_add_header):
            base_header[k] = v
        self.cached_b64h = self.jwt_b64e(json.dumps(base_header))
        digestmod_mapping = {
            'HS256': hashlib.sha256,
            'HS384': hashlib.sha384,
            'HS512': hashlib.sha512
        }
        self.digestmod = digestmod_mapping[algorithm]
        self.user_param = user_param
        self.pass_param = pass_param
        self.realm = realm
        self.salt = salt
        self.additional_payload = additional_payload
        self.before_authorization = before_authorization
        self.max_header_length = max_header_length
        # last raw token received — populated elsewhere; TODO(review): confirm
        self.recvd_token = None
@staticmethod
def jwt_b64e(string):
string = to_bytes(string)
return base64.urlsafe_b64encode(string).strip(b'=')
@staticmethod
def jwt_b64d(string):
"""base64 decodes a single bytestring (and is tolerant to getting
called with a unicode string).
The result is also a bytestring.
"""
string = to_bytes(string, 'ascii', 'ignore')
return base64.urlsafe_b64decode(string + b'=' * (-len(string) % 4))
def generate_token(self, payload):
secret = to_bytes(self.secret_key)
if self.salt:
if callable(self.salt):
secret = "%s$%s" % (secret, self.salt(payload))
else:
secret = "%s$%s" % (secret, self.salt)
if isinstance(secret, unicodeT):
secret = secret.encode('ascii', 'ignore')
b64h = self.cached_b64h
b64p = self.jwt_b64e(serializers.json(payload))
jbody = b64h + b'.' + b64p
mauth = hmac.new(key=secret, msg=jbody, digestmod=self.digestmod)
jsign = self.jwt_b64e(mauth.digest())
return to_native(jbody + b'.' + jsign)
def verify_signature(self, body, signature, secret):
mauth = hmac.new(key=secret, msg=body, digestmod=self.digestmod)
return compare(self.jwt_b64e(mauth.digest()), signature)
    def load_token(self, token):
        """Parse, verify and deserialize *token*, returning the payload dict.

        Raises HTTP 400 on a malformed header, an invalid signature, or an
        expired token (when verify_expiration is enabled).
        """
        token = to_bytes(token, 'utf-8', 'strict')
        body, sig = token.rsplit(b'.', 1)
        b64h, b64b = body.split(b'.', 1)
        if b64h != self.cached_b64h:
            # header not the same
            raise HTTP(400, 'Invalid JWT Header')
        secret = self.secret_key
        # payload must be decoded before signature checking because a
        # callable salt derives the secret from the payload itself
        tokend = serializers.loads_json(to_native(self.jwt_b64d(b64b)))
        if self.salt:
            if callable(self.salt):
                secret = "%s$%s" % (secret, self.salt(tokend))
            else:
                secret = "%s$%s" % (secret, self.salt)
        secret = to_bytes(secret, 'ascii', 'ignore')
        if not self.verify_signature(body, sig, secret):
            # signature verification failed
            raise HTTP(400, 'Token signature is invalid')
        if self.verify_expiration:
            now = time.mktime(datetime.datetime.utcnow().timetuple())
            # leeway absorbs clock skew between issuer and verifier
            if tokend['exp'] + self.leeway < now:
                raise HTTP(400, 'Token is expired')
        if callable(self.before_authorization):
            self.before_authorization(tokend)
        return tokend
def serialize_auth_session(self, session_auth):
"""
As bad as it sounds, as long as this is rarely used (vs using the token)
this is the faster method, even if we ditch session in jwt_token_manager().
We (mis)use the heavy default auth mechanism to avoid any further computation,
while sticking to a somewhat-stable Auth API.
"""
# TODO: Check the following comment
# is the following safe or should we use
# calendar.timegm(datetime.datetime.utcnow().timetuple())
# result seem to be the same (seconds since epoch, in UTC)
now = time.mktime(datetime.datetime.utcnow().timetuple())
expires = now + self.expiration
payload = dict(
hmac_key=session_auth['hmac_key'],
user_groups=session_auth['user_groups'],
user=session_auth['user'].as_dict(),
iat=now,
exp=expires
)
return payload
def refresh_token(self, orig_payload):
now = time.mktime(datetime.datetime.utcnow().timetuple())
if self.verify_expiration:
orig_exp = orig_payload['exp']
if orig_exp + self.leeway < now:
# token already expired, can't be used for refresh
raise HTTP(400, 'Token already expired')
orig_iat = orig_payload.get('orig_iat') or orig_payload['iat']
if orig_iat + self.refresh_expiration_delta < now:
# refreshed too long ago
raise HTTP(400, 'Token issued too long ago')
expires = now + self.expiration
orig_payload.update(
orig_iat=orig_iat,
iat=now,
exp=expires,
hmac_key=web2py_uuid()
)
self.alter_payload(orig_payload)
return orig_payload
def alter_payload(self, payload):
if self.additional_payload:
if callable(self.additional_payload):
payload = self.additional_payload(payload)
elif isinstance(self.additional_payload, dict):
payload.update(self.additional_payload)
return payload
    def jwt_token_manager(self, token_param='_token'):
        """
        The part that issues (and refreshes) tokens.
        Used in a controller, given myjwt is the instantiated class, as
        @myjwt.allow_jwt(required=False, verify_expiration=False)
        def api_auth():
            return myjwt.jwt_token_manager()
        Then, a call to /app/c/api_auth with username and password
        returns a token, while /app/c/api_auth with the current token
        issues another token (expired, but within grace time)
        """
        request = current.request
        response = current.response
        session = current.session
        # forget and unlock response
        session.forget(response)
        valid_user = None
        ret = None
        token = None
        try:
            token = self.recvd_token or self.get_jwt_token_from_request(token_param)
        except HTTP:
            # no token supplied: fall through to username/password login
            pass
        if token:
            # refresh path: verify the presented token, re-issue it
            if not self.allow_refresh:
                raise HTTP(403, 'Refreshing token is not allowed')
            tokend = self.load_token(token)
            # verification can fail here
            refreshed = self.refresh_token(tokend)
            ret = {'token': self.generate_token(refreshed)}
        elif self.user_param in request.vars and self.pass_param in request.vars:
            # login path: credentials supplied in the request vars
            username = request.vars[self.user_param]
            password = request.vars[self.pass_param]
            valid_user = self.auth.login_bare(username, password)
        else:
            # fall back to an already-authenticated session user
            # NOTE(review): login_user is invoked even when auth.user is
            # None — confirm this is intended.
            valid_user = self.auth.user
            self.auth.login_user(valid_user)
        if valid_user:
            payload = self.serialize_auth_session(session.auth)
            self.alter_payload(payload)
            ret = {'token': self.generate_token(payload)}
        elif ret is None:
            raise HTTP(401,
                       'Not Authorized - need to be logged in, to pass a token '
                       'for refresh or username and password for login',
                       **{'WWW-Authenticate': 'JWT realm="%s"' % self.realm})
        response.headers['Content-Type'] = 'application/json'
        return serializers.json(ret)
def inject_token(self, tokend):
"""
The real deal, not touching the db but still logging-in the user
"""
self.auth.user = Storage(tokend['user'])
self.auth.user_groups = tokend['user_groups']
self.auth.hmac_key = tokend['hmac_key']
def get_jwt_token_from_request(self, token_param='_token'):
"""
The method that extracts and validates the token, either
from the header or the _token var
token_param: request.vars attribute with the token used only if the http authorization header is not present.
"""
token = None
token_in_header = current.request.env.http_authorization
if token_in_header:
parts = token_in_header.split()
if parts[0].lower() != self.header_prefix.lower():
raise HTTP(400, 'Invalid JWT header')
elif len(parts) == 1:
raise HTTP(400, 'Invalid JWT header, missing token')
elif len(parts) > 2:
raise HTTP(400, 'Invalid JWT header, token contains spaces')
token = parts[1]
else:
token = current.request.vars.get(token_param)
if token is None:
raise HTTP(400, 'JWT header not found and JWT parameter {} missing in request'.format(token_param))
self.recvd_token = token
return token
    def allows_jwt(self, otherwise=None, required=True, verify_expiration=True, token_param='_token'):
        """
        The decorator that takes care of injecting auth info in the decorated action.
        Works w/o resorting to session.
        Args:
            required: the token is mandatory (either in request.var._token or in the HTTP header Authorization Bearer)
            verify_expiration: allows to bypass expiration check. Useful to manage token renewal.
            token_param: request.vars attribute with the token used only if the http authorization header is not present (default: "_token").
        """
        def decorator(action):
            def f(*args, **kwargs):
                try:
                    token = self.get_jwt_token_from_request(token_param=token_param)
                except HTTP as e:
                    if required:
                        raise e
                    token = None
                # NOTE(review): a token at or above max_header_length is
                # silently ignored, so the action then runs without any
                # injected auth — confirm this is the intended behavior.
                if token and len(token) < self.max_header_length:
                    old_verify_expiration = self.verify_expiration
                    try:
                        # NOTE(review): temporarily mutates shared state on
                        # self; not safe if this instance is shared across
                        # concurrent requests — confirm deployment model.
                        self.verify_expiration = verify_expiration
                        tokend = self.load_token(token)
                    except ValueError:
                        raise HTTP(400, 'Invalid JWT header, wrong token format')
                    finally:
                        self.verify_expiration = old_verify_expiration
                    self.inject_token(tokend)
                return action(*args, **kwargs)
            # preserve the wrapped action's metadata (manual functools.wraps)
            f.__doc__ = action.__doc__
            f.__name__ = action.__name__
            f.__dict__.update(action.__dict__)
            return f
        return decorator
class Auth(AuthAPI):
default_settings = dict(AuthAPI.default_settings,
allow_basic_login=False,
allow_basic_login_only=False,
allow_delete_accounts=False,
alternate_requires_registration=False,
auth_manager_role=None,
auth_two_factor_enabled=False,
auth_two_factor_tries_left=3,
bulk_register_enabled=False,
captcha=None,
cas_maps=None,
client_side=True,
formstyle=None,
hideerror=False,
label_separator=None,
login_after_password_change=True,
login_after_registration=False,
login_captcha=None,
login_specify_error=False,
long_expiration=3600 * 30 * 24, # one month
mailer=None,
manager_actions={},
multi_login=False,
on_failed_authentication=lambda x: redirect(x),
pre_registration_div=None,
prevent_open_redirect_attacks=True,
prevent_password_reset_attacks=True,
profile_fields=None,
register_captcha=None,
register_fields=None,
register_verify_password=True,
remember_me_form=True,
reset_password_requires_verification=False,
retrieve_password_captcha=None,
retrieve_username_captcha=None,
showid=False,
table_cas=None,
table_cas_name='auth_cas',
table_event=None,
table_group=None,
table_membership=None,
table_permission=None,
table_token_name='auth_token',
table_user=None,
two_factor_authentication_group=None,
update_fields=['email'],
wiki=Settings()
)
# ## these are messages that can be customized
default_messages = dict(AuthAPI.default_messages,
access_denied='Insufficient privileges',
bulk_invite_body='You have been invited to join %(site)s, click %(link)s to complete '
'the process',
bulk_invite_subject='Invitation to join %(site)s',
delete_label='Check to delete',
email_sent='Email sent',
email_verified='Email verified',
function_disabled='Function disabled',
impersonate_log='User %(id)s is impersonating %(other_id)s',
invalid_reset_password='Invalid reset password',
invalid_two_factor_code='Incorrect code. {0} more attempt(s) remaining.',
is_empty="Cannot be empty",
label_client_ip='Client IP',
label_description='Description',
label_email='E-mail',
label_first_name='First name',
label_group_id='Group ID',
label_last_name='Last name',
label_name='Name',
label_origin='Origin',
label_password='Password',
label_record_id='Record ID',
label_registration_id='Registration identifier',
label_registration_key='Registration key',
label_remember_me="Remember me (for 30 days)",
label_reset_password_key='Reset Password key',
label_role='Role',
label_table_name='Object or table name',
label_time_stamp='Timestamp',
label_two_factor='Authentication code',
label_user_id='User ID',
label_username='Username',
login_button='Log In',
login_disabled='Login disabled by administrator',
new_password='New password',
new_password_sent='A new password was emailed to you',
old_password='Old password',
password_change_button='Change password',
password_reset_button='Request reset password',
profile_save_button='Apply changes',
register_button='Sign Up',
reset_password='Click on the link %(link)s to reset your password',
reset_password_log='User %(id)s Password reset',
reset_password_subject='Password reset',
retrieve_password='Your password is: %(password)s',
retrieve_password_log='User %(id)s Password retrieved',
retrieve_password_subject='Password retrieve',
retrieve_two_factor_code='Your temporary login code is {0}',
retrieve_two_factor_code_subject='Two-step Login Authentication Code',
retrieve_username='Your username is: %(username)s',
retrieve_username_log='User %(id)s Username retrieved',
retrieve_username_subject='Username retrieve',
submit_button='Submit',
two_factor_comment='This code was emailed to you and is required for login.',
unable_send_email='Unable to send email',
username_sent='Your username was emailed to you',
verify_email='Welcome %(username)s! Click on the link %(link)s to verify your email',
verify_email_log='User %(id)s Verification email sent',
verify_email_subject='Email verification',
verify_password='Verify Password',
verify_password_comment='please input your password again'
)
"""
Class for authentication, authorization, role based access control.
Includes:
- registration and profile
- login and logout
- username and password retrieval
- event logging
- role creation and assignment
- user defined group/role based permission
Args:
environment: is there for legacy but unused (awful)
db: has to be the database where to create tables for authentication
mailer: `Mail(...)` or None (no mailer) or True (make a mailer)
hmac_key: can be a hmac_key or hmac_key=Auth.get_or_create_key()
controller: (where is the user action?)
cas_provider: (delegate authentication to the URL, CAS2)
Authentication Example::
from gluon.contrib.utils import *
mail=Mail()
mail.settings.server='smtp.gmail.com:587'
mail.settings.sender='you@somewhere.com'
mail.settings.login='username:password'
auth=Auth(db)
auth.settings.mailer=mail
# auth.settings....=...
auth.define_tables()
def authentication():
return dict(form=auth())
Exposes:
- `http://.../{application}/{controller}/authentication/login`
- `http://.../{application}/{controller}/authentication/logout`
- `http://.../{application}/{controller}/authentication/register`
- `http://.../{application}/{controller}/authentication/verify_email`
- `http://.../{application}/{controller}/authentication/retrieve_username`
- `http://.../{application}/{controller}/authentication/retrieve_password`
- `http://.../{application}/{controller}/authentication/reset_password`
- `http://.../{application}/{controller}/authentication/profile`
- `http://.../{application}/{controller}/authentication/change_password`
On registration a group with role=new_user.id is created
and user is given membership of this group.
You can create a group with::
group_id=auth.add_group('Manager', 'can access the manage action')
auth.add_permission(group_id, 'access to manage')
Here "access to manage" is just a user defined string.
You can give access to a user::
auth.add_membership(group_id, user_id)
If user id is omitted, the logged in user is assumed
Then you can decorate any action::
@auth.requires_permission('access to manage')
def manage():
return dict()
You can restrict a permission to a specific table::
auth.add_permission(group_id, 'edit', db.sometable)
@auth.requires_permission('edit', db.sometable)
Or to a specific record::
auth.add_permission(group_id, 'edit', db.sometable, 45)
@auth.requires_permission('edit', db.sometable, 45)
If authorization is not granted calls::
auth.settings.on_failed_authorization
Other options::
auth.settings.mailer=None
auth.settings.expiration=3600 # seconds
...
### these are messages that can be customized
...
"""
@staticmethod
def get_or_create_key(filename=None, alg='sha512'):
request = current.request
if not filename:
filename = os.path.join(request.folder, 'private', 'auth.key')
if os.path.exists(filename):
key = open(filename, 'r').read().strip()
else:
key = alg + ':' + web2py_uuid()
open(filename, 'w').write(key)
return key
def url(self, f=None, args=None, vars=None, scheme=False):
if args is None:
args = []
if vars is None:
vars = {}
host = scheme and self.settings.host
return URL(c=self.settings.controller,
f=f, args=args, vars=vars, scheme=scheme, host=host)
def here(self):
return URL(args=current.request.args, vars=current.request.get_vars)
def select_host(self, host, host_names=None):
"""
checks that host is valid, i.e. in the list of glob host_names
if the host is missing, then is it selects the first entry from host_names
read more here: https://github.com/web2py/web2py/issues/1196
"""
if host:
if host_names:
for item in host_names:
if fnmatch.fnmatch(host, item):
break
else:
raise HTTP(403, "Invalid Hostname")
elif host_names:
host = host_names[0]
else:
host = 'localhost'
return host
    def __init__(self, environment=None, db=None, mailer=True,
                 hmac_key=None, controller='default', function='user',
                 cas_provider=None, signature=True, secure=False,
                 csrf_prevention=True, propagate_extension=None,
                 url_index=None, jwt=None, host_names=None):
        """Bind the db, restore any authenticated user from the session
        (expiring stale sessions), and build the default ``settings`` and
        ``messages`` registries. See the class-level reference string for
        the meaning of the arguments.
        """
        # next two lines for backward compatibility
        if not db and environment and isinstance(environment, DAL):
            db = environment
        self.db = db
        self.environment = current
        self.csrf_prevention = csrf_prevention
        request = current.request
        session = current.session
        auth = session.auth
        self.user_groups = auth and auth.user_groups or {}
        if secure:
            request.requires_https()
        now = request.now
        # if we have auth info in the session:
        #   - not yet expired: keep the user logged in
        #   - expired: clear auth info AND renew the whole session
        # otherwise only clear any leftover auth info
        if auth:
            delta = datetime.timedelta(days=0, seconds=auth.expiration)
            if auth.last_visit and auth.last_visit + delta > now:
                self.user = auth.user
                # this is a trick to speed up sessions to avoid many writes
                if (now - auth.last_visit).seconds > (auth.expiration // 10):
                    auth.last_visit = now
            else:
                self.user = None
                if session.auth:
                    del session.auth
                session.renew(clear_session=True)
        else:
            self.user = None
            if session.auth:
                del session.auth
        # ## what happens after login?
        url_index = url_index or URL(controller, 'index')
        url_login = URL(controller, function, args='login',
                        extension=propagate_extension)
        # ## what happens after registration?
        settings = self.settings = Settings()
        settings.update(Auth.default_settings)
        # validate/normalize the Host header (open-redirect hardening)
        host = self.select_host(request.env.http_host, host_names)
        settings.update(
            cas_domains=[host],
            enable_tokens=False,
            cas_provider=cas_provider,
            cas_actions=dict(login='login',
                             validate='validate',
                             servicevalidate='serviceValidate',
                             proxyvalidate='proxyValidate',
                             logout='logout'),
            cas_create_user=True,
            extra_fields={},
            actions_disabled=[],
            controller=controller,
            function=function,
            login_url=url_login,
            logged_url=URL(controller, function, args='profile'),
            download_url=URL(controller, 'download'),
            mailer=(mailer is True) and Mail() or mailer,
            on_failed_authorization=URL(controller, function, args='not_authorized'),
            login_next=url_index,
            login_onvalidation=[],
            login_onaccept=[],
            login_onfail=[],
            login_methods=[self],
            login_form=self,
            logout_next=url_index,
            logout_onlogout=None,
            register_next=url_index,
            register_onvalidation=[],
            register_onaccept=[],
            verify_email_next=url_login,
            verify_email_onaccept=[],
            profile_next=url_index,
            profile_onvalidation=[],
            profile_onaccept=[],
            retrieve_username_next=url_index,
            retrieve_password_next=url_index,
            request_reset_password_next=url_login,
            reset_password_next=url_index,
            change_password_next=url_index,
            change_password_onvalidation=[],
            change_password_onaccept=[],
            retrieve_password_onvalidation=[],
            request_reset_password_onvalidation=[],
            request_reset_password_onaccept=[],
            reset_password_onvalidation=[],
            reset_password_onaccept=[],
            hmac_key=hmac_key,
            formstyle=current.response.formstyle,
            label_separator=current.response.form_label_separator,
            two_factor_methods=[],
            two_factor_onvalidation=[],
            host=host,
        )
        settings.lock_keys = True
        # ## these are messages that can be customized
        messages = self.messages = Messages(current.T)
        messages.update(Auth.default_messages)
        messages.update(ajax_failed_authentication=
                        DIV(H4('NOT AUTHORIZED'),
                            'Please ',
                            A('login',
                              _href=self.settings.login_url +
                              ('?_next=' + urllib_quote(current.request.env.http_web2py_component_location))
                              if current.request.env.http_web2py_component_location else ''),
                            ' to view this content.',
                            _class='not-authorized alert alert-block'))
        messages.lock_keys = True
        # for "remember me" option
        response = current.response
        if auth and auth.remember_me:
            # when user wants to be logged in for longer
            response.session_cookie_expires = auth.expiration
        if signature:
            self.define_signature()
        else:
            self.signature = None
        # optional JWT support (configured via the jwt dict of kwargs)
        self.jwt_handler = jwt and AuthJWT(self, **jwt)
def get_vars_next(self):
next = current.request.vars._next
if isinstance(next, (list, tuple)):
next = next[0]
if next and self.settings.prevent_open_redirect_attacks:
return prevent_open_redirect(next)
return next or None
    def table_cas(self):
        """Return the CAS ticket table (resolved lazily by name)."""
        return self.db[self.settings.table_cas_name]
    def table_token(self):
        """Return the auth token table (resolved lazily by name)."""
        return self.db[self.settings.table_token_name]
    def _HTTP(self, *a, **b):
        """
        only used in lambda: self._HTTP(404)
        (raising HTTP is a statement, so it cannot appear in a lambda body)
        """
        raise HTTP(*a, **b)
    def __call__(self):
        """
        Dispatch request.args[0] to the matching auth action
        (login/logout/register/... or the CAS provider endpoints).
        Example:
            Use as::
                def authentication():
                    return dict(form=auth())
        """
        request = current.request
        args = request.args
        if not args:
            # no action given: default to the login form
            redirect(self.url(args='login', vars=request.vars))
        elif args[0] in self.settings.actions_disabled:
            raise HTTP(404)
        if args[0] in ('login', 'logout', 'register', 'verify_email',
                       'retrieve_username', 'retrieve_password',
                       'reset_password', 'request_reset_password',
                       'change_password', 'profile', 'groups',
                       'impersonate', 'not_authorized', 'confirm_registration',
                       'bulk_register', 'manage_tokens', 'jwt'):
            # 'impersonate' optionally takes the target user id as args[1]
            if len(request.args) >= 2 and args[0] == 'impersonate':
                return getattr(self, args[0])(request.args[1])
            else:
                return getattr(self, args[0])()
        elif args[0] == 'cas' and not self.settings.cas_provider:
            # acting as a CAS *server* (only when not delegating to one)
            if args(1) == self.settings.cas_actions['login']:
                return self.cas_login(version=2)
            elif args(1) == self.settings.cas_actions['validate']:
                return self.cas_validate(version=1)
            elif args(1) == self.settings.cas_actions['servicevalidate']:
                return self.cas_validate(version=2, proxy=False)
            elif args(1) == self.settings.cas_actions['proxyvalidate']:
                return self.cas_validate(version=2, proxy=True)
            elif (args(1) == 'p3'
                  and args(2) == self.settings.cas_actions['servicevalidate']):
                return self.cas_validate(version=3, proxy=False)
            elif (args(1) == 'p3'
                  and args(2) == self.settings.cas_actions['proxyvalidate']):
                return self.cas_validate(version=3, proxy=True)
            elif args(1) == self.settings.cas_actions['logout']:
                return self.logout(next=request.vars.service or DEFAULT)
        else:
            raise HTTP(404)
    def navbar(self, prefix='Welcome', action=None,
               separators=(' [ ', ' | ', ' ] '), user_identifier=DEFAULT,
               referrer_actions=DEFAULT, mode='default'):
        """ Navbar with support for more templates
        This uses some code from the old navbar.
        Args:
            mode: one of 'default' (plain SPAN), 'asmenu' (MENU tuples),
                'dropdown' (bootstrap3 markup) or 'bare' (plain dict of hrefs)
        """
        items = []  # Hold all menu items in a list
        self.bar = ''  # The final navbar markup, set by one of the mode handlers below
        T = current.T
        referrer_actions = [] if not referrer_actions else referrer_actions
        if not action:
            action = self.url(self.settings.function)
        request = current.request
        # propagate a _next back-link unless we are already on the target
        if URL() == action:
            next = ''
        else:
            next = '?_next=' + urllib_quote(URL(args=request.args,
                                                vars=request.get_vars))
        href = lambda function: \
            '%s/%s%s' % (action, function, next if referrer_actions is DEFAULT or function in referrer_actions else '')
        if isinstance(prefix, str):
            prefix = T(prefix)
        if prefix:
            prefix = prefix.strip() + ' '
        def Anr(*a, **b):
            # anchor with rel="nofollow" (keeps crawlers off auth actions)
            b['_rel'] = 'nofollow'
            return A(*a, **b)
        if self.user_id:  # User is logged in
            logout_next = self.settings.logout_next
            items.append({'name': T('Log Out'),
                          'href': '%s/logout?_next=%s' % (action,
                                                          urllib_quote(logout_next)),
                          'icon': 'icon-off'})
            if 'profile' not in self.settings.actions_disabled:
                items.append({'name': T('Profile'), 'href': href('profile'),
                              'icon': 'icon-user'})
            if 'change_password' not in self.settings.actions_disabled:
                items.append({'name': T('Password'),
                              'href': href('change_password'),
                              'icon': 'icon-lock'})
            if user_identifier is DEFAULT:
                user_identifier = '%(first_name)s'
            if callable(user_identifier):
                user_identifier = user_identifier(self.user)
            elif ((isinstance(user_identifier, str) or
                   type(user_identifier).__name__ == 'lazyT') and
                  re.search(r'%\(.+\)s', user_identifier)):
                # treat the identifier as a %-template over the user row
                user_identifier = user_identifier % self.user
            if not user_identifier:
                user_identifier = ''
        else:  # User is not logged in
            items.append({'name': T('Log In'), 'href': href('login'),
                          'icon': 'icon-off'})
            if 'register' not in self.settings.actions_disabled:
                items.append({'name': T('Sign Up'), 'href': href('register'),
                              'icon': 'icon-user'})
            if 'request_reset_password' not in self.settings.actions_disabled:
                items.append({'name': T('Lost password?'),
                              'href': href('request_reset_password'),
                              'icon': 'icon-lock'})
            if self.settings.use_username and 'retrieve_username' not in self.settings.actions_disabled:
                items.append({'name': T('Forgot username?'),
                              'href': href('retrieve_username'),
                              'icon': 'icon-edit'})
        def menu():  # For inclusion in MENU
            self.bar = [(items[0]['name'], False, items[0]['href'], [])]
            del items[0]
            for item in items:
                self.bar[0][3].append((item['name'], False, item['href']))
        def bootstrap3():  # Default web2py scaffolding
            def rename(icon): return icon + ' ' + icon.replace('icon', 'glyphicon')
            self.bar = UL(LI(Anr(I(_class=rename('icon ' + items[0]['icon'])),
                                 ' ' + items[0]['name'],
                                 _href=items[0]['href'])), _class='dropdown-menu')
            del items[0]
            for item in items:
                self.bar.insert(-1, LI(Anr(I(_class=rename('icon ' + item['icon'])),
                                           ' ' + item['name'],
                                           _href=item['href'])))
            self.bar.insert(-1, LI('', _class='divider'))
            if self.user_id:
                self.bar = LI(Anr(prefix, user_identifier,
                                  _href='#', _class="dropdown-toggle",
                                  data={'toggle': 'dropdown'}),
                              self.bar, _class='dropdown')
            else:
                self.bar = LI(Anr(T('Log In'),
                                  _href='#', _class="dropdown-toggle",
                                  data={'toggle': 'dropdown'}), self.bar,
                              _class='dropdown')
        def bare():
            """ In order to do advanced customization we only need the
            prefix, the user_identifier and the href attribute of items
            Examples:
                Use as::
                # in module custom_layout.py
                from gluon import *
                def navbar(auth_navbar):
                    bar = auth_navbar
                    user = bar["user"]
                    if not user:
                        btn_login = A(current.T("Login"),
                                      _href=bar["login"],
                                      _class="btn btn-success",
                                      _rel="nofollow")
                        btn_register = A(current.T("Sign up"),
                                         _href=bar["register"],
                                         _class="btn btn-primary",
                                         _rel="nofollow")
                        return DIV(btn_register, btn_login, _class="btn-group")
                    else:
                        toggletext = "%s back %s" % (bar["prefix"], user)
                        toggle = A(toggletext,
                                   _href="#",
                                   _class="dropdown-toggle",
                                   _rel="nofollow",
                                   **{"_data-toggle": "dropdown"})
                        li_profile = LI(A(I(_class="icon-user"), ' ',
                                          current.T("Account details"),
                                          _href=bar["profile"], _rel="nofollow"))
                        li_custom = LI(A(I(_class="icon-book"), ' ',
                                         current.T("My Agenda"),
                                         _href="#", rel="nofollow"))
                        li_logout = LI(A(I(_class="icon-off"), ' ',
                                         current.T("logout"),
                                         _href=bar["logout"], _rel="nofollow"))
                        dropdown = UL(li_profile,
                                      li_custom,
                                      LI('', _class="divider"),
                                      li_logout,
                                      _class="dropdown-menu", _role="menu")
                        return LI(toggle, dropdown, _class="dropdown")
                # in models db.py
                import custom_layout as custom
                # in layout.html
                <ul id="navbar" class="nav pull-right">
                    {{='auth' in globals() and \
                    custom.navbar(auth.navbar(mode='bare')) or ''}}</ul>
            """
            # map each item's translated label back to a stable key
            bare = {'prefix': prefix, 'user': user_identifier if self.user_id else None}
            for i in items:
                if i['name'] == T('Log In'):
                    k = 'login'
                elif i['name'] == T('Sign Up'):
                    k = 'register'
                elif i['name'] == T('Lost password?'):
                    k = 'request_reset_password'
                elif i['name'] == T('Forgot username?'):
                    k = 'retrieve_username'
                elif i['name'] == T('Log Out'):
                    k = 'logout'
                elif i['name'] == T('Profile'):
                    k = 'profile'
                elif i['name'] == T('Password'):
                    k = 'change_password'
                bare[k] = i['href']
            self.bar = bare
        options = {'asmenu': menu,
                   'dropdown': bootstrap3,
                   'bare': bare
                   }  # Define custom modes.
        if mode in options and callable(options[mode]):
            options[mode]()
        else:
            # 'default' mode: plain SPAN with textual separators
            s1, s2, s3 = separators
            if self.user_id:
                self.bar = SPAN(prefix, user_identifier, s1,
                                Anr(items[0]['name'],
                                    _href=items[0]['href']), s3,
                                _class='auth_navbar')
            else:
                self.bar = SPAN(s1, Anr(items[0]['name'],
                                        _href=items[0]['href']), s3,
                                _class='auth_navbar')
            for item in items[1:]:
                self.bar.insert(-1, s2)
                self.bar.insert(-1, Anr(item['name'], _href=item['href']))
        return self.bar
    def enable_record_versioning(self,
                                 tables,
                                 archive_db=None,
                                 archive_names='%(tablename)s_archive',
                                 current_record='current_record',
                                 current_record_label=None):
        """
        Used to enable full record versioning (including auth tables)::
            auth = Auth(db)
            auth.define_tables(signature=True)
            # define our own tables
            db.define_table('mything',Field('name'),auth.signature)
            auth.enable_record_versioning(tables=db)
        tables can be the db (all table) or a list of tables.
        only tables with modified_by and modified_on fields (as created
        by auth.signature) will have versioning. Old record versions will be
        in table 'mything_archive' automatically defined.
        when you enable enable_record_versioning, records are never
        deleted but marked with is_active=False.
        enable_record_versioning enables a common_filter for
        every table that filters out records with is_active = False
        Note:
            If you use auth.enable_record_versioning,
            do not use auth.archive or you will end up with duplicates.
            auth.archive does explicitly what enable_record_versioning
            does automatically.
        """
        current_record_label = current_record_label or current.T(
            current_record.replace('_', ' ').title())
        for table in tables:
            fieldnames = table.fields()
            # only version signature-enabled tables, and never twice
            if 'id' in fieldnames and 'modified_on' in fieldnames and current_record not in fieldnames:
                table._enable_record_versioning(archive_db=archive_db,
                                                archive_name=archive_names,
                                                current_record=current_record,
                                                current_record_label=current_record_label)
    def define_tables(self, username=None, signature=None, enable_tokens=False,
                      migrate=None, fake_migrate=None):
        """
        To be called unless tables are defined manually
        Examples:
            Use as::
                # defines all needed tables and table files
                # 'myprefix_auth_user.table', ...
                auth.define_tables(migrate='myprefix_')
                # defines all needed tables without migration/table files
                auth.define_tables(migrate=False)
        """
        db = self.db
        if migrate is None:
            migrate = db._migrate
        if fake_migrate is None:
            fake_migrate = db._fake_migrate
        settings = self.settings
        settings.enable_tokens = enable_tokens
        # the base API defines user/group/membership/permission/event tables
        signature_list = \
            super(Auth, self).define_tables(username, signature, migrate, fake_migrate)._table_signature_list
        now = current.request.now
        reference_table_user = 'reference %s' % settings.table_user_name
        if settings.cas_domains:
            if settings.table_cas_name not in db.tables:
                db.define_table(
                    settings.table_cas_name,
                    Field('user_id', reference_table_user, default=None,
                          label=self.messages.label_user_id),
                    Field('created_on', 'datetime', default=now),
                    Field('service', requires=IS_URL()),
                    Field('ticket'),
                    Field('renew', 'boolean', default=False),
                    *settings.extra_fields.get(settings.table_cas_name, []),
                    **dict(
                        migrate=self._get_migrate(
                            settings.table_cas_name, migrate),
                        fake_migrate=fake_migrate))
        if settings.enable_tokens:
            extra_fields = settings.extra_fields.get(
                settings.table_token_name, []) + signature_list
            if settings.table_token_name not in db.tables:
                db.define_table(
                    settings.table_token_name,
                    Field('user_id', reference_table_user, default=None,
                          label=self.messages.label_user_id),
                    Field('expires_on', 'datetime', default=datetime.datetime(2999, 12, 31)),
                    Field('token', writable=False, default=web2py_uuid, unique=True),
                    *extra_fields,
                    **dict(migrate=self._get_migrate(settings.table_token_name, migrate),
                           fake_migrate=fake_migrate))
        if not db._lazy_tables:
            # eagerly resolve the table objects when lazy tables are off
            settings.table_user = db[settings.table_user_name]
            settings.table_group = db[settings.table_group_name]
            settings.table_membership = db[settings.table_membership_name]
            settings.table_permission = db[settings.table_permission_name]
            settings.table_event = db[settings.table_event_name]
            if settings.cas_domains:
                settings.table_cas = db[settings.table_cas_name]
        if settings.cas_provider:  # THIS IS NOT LAZY
            # delegate authentication to an external CAS provider: local
            # account-management actions are disabled
            settings.actions_disabled = \
                ['profile', 'register', 'change_password',
                 'request_reset_password', 'retrieve_username']
            from gluon.contrib.login_methods.cas_auth import CasAuth
            maps = settings.cas_maps
            if not maps:
                table_user = self.table_user()
                maps = dict((name, lambda v, n=name: v.get(n, None)) for name in
                            table_user.fields if name != 'id'
                            and table_user[name].readable)
                maps['registration_id'] = \
                    lambda v, p=settings.cas_provider: '%s/%s' % (p, v['user'])
            actions = [settings.cas_actions['login'],
                       settings.cas_actions['servicevalidate'],
                       settings.cas_actions['logout']]
            settings.login_form = CasAuth(
                casversion=2,
                urlbase=settings.cas_provider,
                actions=actions,
                maps=maps)
        return self
    def get_or_create_user(self, keys, update_fields=['email'],
                           login=True, get=True):
        """
        Used for alternate login methods:
        If the user exists already then password is updated.
        If the user doesn't yet exist, then they are created.

        Args:
            keys: dict of candidate identifying fields/values from the
                external provider (registration_id, username, email, ...)
            update_fields: fields copied from *keys* onto an existing user
                (NOTE(review): mutable default, but it is only iterated,
                never mutated, so the usual shared-default hazard does not
                bite here)
            login: when True, set self.user and fire register_onaccept
            get: when False, return None instead of updating an existing
                user (used by register_bare to avoid overwrites)
        """
        table_user = self.table_user()
        create_user = self.settings.cas_create_user
        user = None
        checks = []
        # make a guess about who this user is
        guess_fields = ['registration_id', 'username', 'email']
        if self.settings.login_userfield:
            guess_fields.append(self.settings.login_userfield)
        for fieldname in guess_fields:
            if fieldname in table_user.fields() and \
                    keys.get(fieldname, None):
                checks.append(fieldname)
                value = keys[fieldname]
                user = table_user(**{fieldname: value})
                if user:
                    break
        if not checks:
            return None
        if 'registration_id' not in keys:
            keys['registration_id'] = keys[checks[0]]
        # if we think we found the user but registration_id does not match,
        # make new user
        if 'registration_id' in checks \
                and user \
                and user.registration_id \
                and ('registration_id' not in keys or user.registration_id != str(keys['registration_id'])):
            user = None  # THINK MORE ABOUT THIS? DO WE TRUST OPENID PROVIDER?
        if user:
            if not get:
                # added for register_bare to avoid overwriting users
                return None
            update_keys = dict(registration_id=keys['registration_id'])
            for key in update_fields:
                if key in keys:
                    update_keys[key] = keys[key]
            user.update_record(**update_keys)
        elif checks:
            if create_user is False:
                # Remove current open session a send message
                self.logout(next=None, onlogout=None, log=None)
                raise HTTP(403, "Forbidden. User need to be created first.")
            if 'first_name' not in keys and 'first_name' in table_user.fields:
                # fall back to username, then to the email's local part
                guess = keys.get('email', 'anonymous').split('@')[0]
                keys['first_name'] = keys.get('username', guess)
            vars = table_user._filter_fields(keys)
            user_id = table_user.insert(**vars)
            user = table_user[user_id]
            if self.settings.create_user_groups:
                group_id = self.add_group(self.settings.create_user_groups % user)
                self.add_membership(group_id, user_id)
            if self.settings.everybody_group_id:
                self.add_membership(self.settings.everybody_group_id, user_id)
            if login:
                self.user = user
            if self.settings.register_onaccept:
                callback(self.settings.register_onaccept, Storage(vars=user))
        return user
def basic(self, basic_auth_realm=False):
"""
Performs basic login.
Args:
basic_auth_realm: optional basic http authentication realm. Can take
str or unicode or function or callable or boolean.
reads current.request.env.http_authorization
and returns basic_allowed,basic_accepted,user.
if basic_auth_realm is defined is a callable it's return value
is used to set the basic authentication realm, if it's a string
its content is used instead. Otherwise basic authentication realm
is set to the application name.
If basic_auth_realm is None or False (the default) the behavior
is to skip sending any challenge.
"""
if not self.settings.allow_basic_login:
return (False, False, False)
basic = current.request.env.http_authorization
if basic_auth_realm:
if callable(basic_auth_realm):
basic_auth_realm = basic_auth_realm()
elif isinstance(basic_auth_realm, string_types):
basic_realm = to_unicode(basic_auth_realm)
elif basic_auth_realm is True:
basic_realm = '' + current.request.application
http_401 = HTTP(401, 'Not Authorized', **{'WWW-Authenticate': 'Basic realm="' + basic_realm + '"'})
if not basic or not basic[:6].lower() == 'basic ':
if basic_auth_realm:
raise http_401
return (True, False, False)
(username, sep, password) = base64.b64decode(basic[6:]).partition(b':')
is_valid_user = sep and self.login_bare(username, password)
if not is_valid_user and basic_auth_realm:
raise http_401
return (True, True, is_valid_user)
def _get_login_settings(self):
table_user = self.table_user()
userfield = self.settings.login_userfield or ('username' \
if self.settings.login_userfield or 'username' \
in table_user.fields else 'email')
passfield = self.settings.password_field
return Storage({'table_user': table_user,
'userfield': userfield,
'passfield': passfield})
    def login_bare(self, username, password):
        """
        Logins user as specified by username (or email) and password.

        Returns the user record on success, otherwise False.
        The raw ``password`` is passed through the password field's
        validator (which hashes it) before comparison.
        """
        settings = self._get_login_settings()
        user = settings.table_user(**{settings.userfield: username})
        if user and user.get(settings.passfield, False):
            # hash the supplied password with the field validator so it can
            # be compared against the stored hash
            password = settings.table_user[
                settings.passfield].validate(password)[0]
            # only log in users with an empty registration_key
            # (i.e. not pending/disabled/blocked)
            if ((user.registration_key is None or
                 not user.registration_key.strip()) and
                password == user[settings.passfield]):
                self.login_user(user)
                return user
        else:
            # user not in database try other login methods
            for login_method in self.settings.login_methods:
                if login_method != self and login_method(username, password):
                    # NOTE(review): `user` is falsy here (None, or a record
                    # without a password), so this returns a falsy value even
                    # when the alternate method succeeded — looks intentional
                    # upstream, but verify before relying on the return value.
                    self.user = user
                    return user
        return False
    def register_bare(self, **fields):
        """
        Registers a user as specified by username (or email)
        and a raw password.

        Returns the new user record, or False when the user could not be
        created (e.g. a duplicate record already exists).

        Raises:
            ValueError: when the userfield is missing or empty.
        """
        settings = self._get_login_settings()
        # users can register_bare even if no password is provided,
        # in this case they will have to reset their password to login
        if fields.get(settings.passfield):
            # run the raw password through the field validator so the
            # stored value is the hash, not the plaintext
            fields[settings.passfield] = \
                settings.table_user[settings.passfield].validate(fields[settings.passfield], None)[0]
        if not fields.get(settings.userfield):
            raise ValueError('register_bare: userfield not provided or invalid')
        # get=False makes get_or_create_user return None instead of
        # updating an existing user, so duplicates are not overwritten
        user = self.get_or_create_user(fields, login=False, get=False,
                                       update_fields=self.settings.update_fields)
        if not user:
            # get or create did not create a user (it ignores duplicate records)
            return False
        return user
    def cas_login(self,
                  next=DEFAULT,
                  onvalidation=DEFAULT,
                  onaccept=DEFAULT,
                  log=DEFAULT,
                  version=2,
                  ):
        """
        CAS provider endpoint: authenticate the user (via the normal login
        form if needed) and redirect back to the requesting service with a
        service ticket ("ST-...") appended to its URL.

        Only services whose host is listed in settings.cas_domains are
        allowed. The requested service URL is remembered in the session so
        it survives the round-trip through the login form.
        """
        request = current.request
        response = current.response
        session = current.session
        db, table = self.db, self.table_cas()
        session._cas_service = request.vars.service or session._cas_service
        if request.env.http_host not in self.settings.cas_domains or \
                not session._cas_service:
            raise HTTP(403, 'not authorized')

        def allow_access(interactivelogin=False):
            # issue (or reuse) a service ticket for this user+service pair
            row = table(service=session._cas_service, user_id=self.user.id)
            if row:
                ticket = row.ticket
            else:
                ticket = 'ST-' + web2py_uuid()
                table.insert(service=session._cas_service,
                             user_id=self.user.id,
                             ticket=ticket,
                             created_on=request.now,
                             renew=interactivelogin)
            service = session._cas_service
            query_sep = '&' if '?' in service else '?'
            del session._cas_service
            if 'warn' in request.vars and not interactivelogin:
                # CAS "warn" mode: show a confirmation link and auto-refresh
                # instead of redirecting immediately
                response.headers[
                    'refresh'] = "5;URL=%s" % service + query_sep + "ticket=" + ticket
                return A("Continue to %s" % service,
                         _href=service + query_sep + "ticket=" + ticket)
            else:
                redirect(service + query_sep + "ticket=" + ticket)
        if self.is_logged_in() and 'renew' not in request.vars:
            return allow_access()
        elif not self.is_logged_in() and 'gateway' in request.vars:
            # CAS "gateway" mode: do not prompt, just bounce back unauthenticated
            redirect(session._cas_service)

        def cas_onaccept(form, onaccept=onaccept):
            if onaccept is not DEFAULT:
                onaccept(form)
            return allow_access(interactivelogin=True)
        return self.login(next, onvalidation, cas_onaccept, log)
    def cas_validate(self, version=2, proxy=False):
        """
        CAS provider endpoint: validate a service ticket previously issued
        by cas_login and respond with the user's identity.

        version 1 answers in the plain-text "yes/no" format; versions 2
        and 3 answer with a cas:serviceResponse XML document. The response
        is always delivered by raising HTTP(200).

        NOTE(review): the ``proxy`` argument is accepted but not used in
        this body — presumably reserved for CAS proxy tickets; confirm.
        """
        request = current.request
        db, table = self.db, self.table_cas()
        current.response.headers['Content-Type'] = 'text'
        ticket = request.vars.ticket
        renew = 'renew' in request.vars
        row = table(ticket=ticket)
        success = False
        if row:
            # NOTE(review): the ternary binds looser than `or`, so this is
            # `(login_userfield or 'username') if 'username' in table.fields
            # else 'email'` — probably intended, but worth confirming.
            userfield = self.settings.login_userfield or 'username' \
                if 'username' in table.fields else 'email'
            # If ticket is a service Ticket and RENEW flag respected
            if ticket[0:3] == 'ST-' and \
                    not ((row.renew and renew) ^ renew):
                user = self.table_user()(row.user_id)
                # service tickets are single-use: consume it now
                row.delete_record()
                success = True

        def build_response(body):
            # wrap the body in the standard cas:serviceResponse envelope
            xml_body = to_native(TAG['cas:serviceResponse'](
                body, **{'_xmlns:cas': 'http://www.yale.edu/tp/cas'}).xml())
            return '<?xml version="1.0" encoding="UTF-8"?>\n' + xml_body
        if success:
            if version == 1:
                message = 'yes\n%s' % user[userfield]
            elif version == 3:
                # CAS 3.0: user plus attributes in a cas:attributes element
                username = user.get('username', user[userfield])
                message = build_response(
                    TAG['cas:authenticationSuccess'](
                        TAG['cas:user'](username),
                        TAG['cas:attributes'](
                            *[TAG['cas:' + field.name](user[field.name])
                              for field in self.table_user()
                              if field.readable])))
            else:  # assume version 2
                username = user.get('username', user[userfield])
                message = build_response(
                    TAG['cas:authenticationSuccess'](
                        TAG['cas:user'](username),
                        *[TAG['cas:' + field.name](user[field.name])
                          for field in self.table_user()
                          if field.readable]))
        else:
            if version == 1:
                message = 'no\n'
            elif row:
                message = build_response(TAG['cas:authenticationFailure']())
            else:
                message = build_response(
                    TAG['cas:authenticationFailure'](
                        'Ticket %s not recognized' % ticket,
                        _code='INVALID TICKET'))
        raise HTTP(200, message)
def _reset_two_factor_auth(self, session):
"""
When two-step authentication is enabled, this function is used to
clear the session after successfully completing second challenge
or when the maximum number of tries allowed has expired.
"""
session.auth_two_factor_user = None
session.auth_two_factor = None
session.auth_two_factor_enabled = False
# Set the number of attempts. It should be more than 1.
session.auth_two_factor_tries_left = self.settings.auth_two_factor_tries_left
    def when_is_logged_in_bypass_next_in_url(self, next, session):
        """
        Redirect an already-authenticated user straight to ``next``.

        Avoids asking for credentials again when a page whose URL contains
        "user/login?_next=NEXT_COMPONENT" is reloaded while the user is
        already logged in. Does nothing when the user is not logged in.
        """
        if self.is_logged_in():
            # drop the stored _next so it is not reused by a later login
            if next == session._auth_next:
                del session._auth_next
            redirect(next, client_side=self.settings.client_side)
    def login(self,
              next=DEFAULT,
              onvalidation=DEFAULT,
              onaccept=DEFAULT,
              log=DEFAULT,
              ):
        """
        Returns a login form.

        Handles three flows: the local username/password form, delegation
        to an external (CAS-style) settings.login_form, and the optional
        second step of two-factor authentication. Successful logins
        redirect to ``next``; otherwise the relevant form is returned.
        """
        settings = self.settings
        request = current.request
        response = current.response
        session = current.session

        # use session for federated login
        snext = self.get_vars_next()

        if snext:
            session._auth_next = snext
        elif session._auth_next:
            snext = session._auth_next
        # pass

        if next is DEFAULT:
            # important for security
            next = settings.login_next
            if callable(next):
                next = next()
            user_next = snext
            if user_next:
                external = user_next.split('://')
                if external[0].lower() in ['http', 'https', 'ftp']:
                    # absolute URLs are only honored for whitelisted CAS domains
                    host_next = user_next.split('//', 1)[-1].split('/')[0]
                    if host_next in settings.cas_domains:
                        next = user_next
                else:
                    next = user_next

        # Avoid asking unnecessary user credentials when user is logged in
        self.when_is_logged_in_bypass_next_in_url(next=next, session=session)

        # Moved here to avoid unnecessary execution in case of redirection to next in case of logged in user
        table_user = self.table_user()
        if 'username' in table_user.fields or \
                not settings.login_email_validate:
            tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty)
            if not settings.username_case_sensitive:
                tmpvalidator = [IS_LOWER(), tmpvalidator]
        else:
            tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email)
            if not settings.email_case_sensitive:
                tmpvalidator = [IS_LOWER(), tmpvalidator]

        passfield = settings.password_field
        try:
            # disable the min-length check: we are validating an existing
            # password, not setting a new one
            table_user[passfield].requires[-1].min_length = 0
        except:
            pass

        if onvalidation is DEFAULT:
            onvalidation = settings.login_onvalidation
        if onaccept is DEFAULT:
            onaccept = settings.login_onaccept
        if log is DEFAULT:
            log = self.messages['login_log']

        onfail = settings.login_onfail

        user = None  # default

        # Setup the default field used for the form
        multi_login = False
        if self.settings.login_userfield:
            username = self.settings.login_userfield
        else:
            if 'username' in table_user.fields:
                username = 'username'
            else:
                username = 'email'
            if self.settings.multi_login:
                multi_login = True
        old_requires = table_user[username].requires
        table_user[username].requires = tmpvalidator

        # If two-factor authentication is enabled, and the maximum
        # number of tries allowed is used up, reset the session to
        # pre-login state with two-factor auth
        if session.auth_two_factor_enabled and session.auth_two_factor_tries_left < 1:
            # Exceeded maximum allowed tries for this code. Require user to enter
            # username and password again.
            user = None
            accepted_form = False
            self._reset_two_factor_auth(session)
            # Redirect to the default 'next' page without logging
            # in. If that page requires login, user will be redirected
            # back to the main login form
            redirect(next, client_side=settings.client_side)

        # Before showing the default login form, check whether
        # we are already on the second step of two-step authentication.
        # If we are, then skip this login form and use the form for the
        # second challenge instead.
        # Note to devs: The code inside the if-block is unchanged from the
        # previous version of this file, other than for indentation inside
        # to put it inside the if-block
        if session.auth_two_factor_user is None:

            if settings.remember_me_form:
                extra_fields = [
                    Field('remember_me', 'boolean', default=False,
                          label=self.messages.label_remember_me)]
            else:
                extra_fields = []

            # do we use our own login form, or from a central source?
            if settings.login_form == self:
                form = SQLFORM(table_user,
                               fields=[username, passfield],
                               hidden=dict(_next=next),
                               showid=settings.showid,
                               submit_button=self.messages.login_button,
                               delete_label=self.messages.delete_label,
                               formstyle=settings.formstyle,
                               separator=settings.label_separator,
                               extra_fields=extra_fields,
                               )

                captcha = settings.login_captcha or \
                    (settings.login_captcha is not False and settings.captcha)
                if captcha:
                    addrow(form, captcha.label, captcha, captcha.comment,
                           settings.formstyle, 'captcha__row')
                accepted_form = False
                specific_error = self.messages.invalid_user

                if form.accepts(request, session if self.csrf_prevention else None,
                                formname='login', dbio=False,
                                onvalidation=onvalidation,
                                hideerror=settings.hideerror):

                    accepted_form = True
                    # check for username in db
                    entered_username = form.vars[username]
                    if multi_login and '@' in entered_username:
                        # if '@' in username check for email, not username
                        user = table_user(email=entered_username)
                    else:
                        user = table_user(**{username: entered_username})
                    if user:
                        # user in db, check if registration pending or disabled
                        specific_error = self.messages.invalid_password
                        temp_user = user
                        if (temp_user.registration_key or '').startswith('pending'):
                            response.flash = self.messages.registration_pending
                            return form
                        elif temp_user.registration_key in ('disabled', 'blocked'):
                            response.flash = self.messages.login_disabled
                            return form
                        elif (temp_user.registration_key is not None and temp_user.registration_key.strip()):
                            response.flash = \
                                self.messages.registration_verifying
                            return form
                        # try alternate logins 1st as these have the
                        # current version of the password
                        user = None
                        for login_method in settings.login_methods:
                            if login_method != self and \
                                    login_method(request.vars[username],
                                                 request.vars[passfield]):
                                if self not in settings.login_methods:
                                    # do not store password in db
                                    form.vars[passfield] = None
                                user = self.get_or_create_user(
                                    form.vars, settings.update_fields)
                                break
                        if not user:
                            # alternates have failed, maybe because service inaccessible
                            if settings.login_methods[0] == self:
                                # try logging in locally using cached credentials
                                if form.vars.get(passfield, '') == temp_user[passfield]:
                                    # success
                                    user = temp_user
                    else:
                        # user not in db
                        if not settings.alternate_requires_registration:
                            # we're allowed to auto-register users from external systems
                            for login_method in settings.login_methods:
                                if login_method != self and \
                                        login_method(request.vars[username],
                                                     request.vars[passfield]):
                                    if self not in settings.login_methods:
                                        # do not store password in db
                                        form.vars[passfield] = None
                                    user = self.get_or_create_user(
                                        form.vars, settings.update_fields)
                                    break
                    if not user:
                        self.log_event(self.messages['login_failed_log'],
                                       request.post_vars)
                        # invalid login
                        session.flash = specific_error if self.settings.login_specify_error else self.messages.invalid_login
                        callback(onfail, None)
                        redirect(self.url(args=request.args, vars=request.get_vars), client_side=settings.client_side)

            else:  # use a central authentication server
                cas = settings.login_form
                cas_user = cas.get_user()

                if cas_user:
                    cas_user[passfield] = None
                    user = self.get_or_create_user(
                        table_user._filter_fields(cas_user),
                        settings.update_fields)
                elif hasattr(cas, 'login_form'):
                    return cas.login_form()
                else:
                    # we need to pass through login again before going on
                    next = self.url(settings.function, args='login')
                    redirect(cas.login_url(next),
                             client_side=settings.client_side)

        # Extra login logic for two-factor authentication
        #################################################
        # If the 'user' variable has a value, this means that the first
        # authentication step was successful (i.e. user provided correct
        # username and password at the first challenge).
        # Check if this user is signed up for two-factor authentication
        # If auth.settings.auth_two_factor_enabled it will enable two factor
        # for all the app. Another way to enable two factor is that the user
        # must be part of a group that is called auth.settings.two_factor_authentication_group
        if user and self.settings.auth_two_factor_enabled is True:
            session.auth_two_factor_enabled = True
        elif user and self.settings.two_factor_authentication_group:
            role = self.settings.two_factor_authentication_group
            session.auth_two_factor_enabled = self.has_membership(user_id=user.id, role=role)
        # challenge
        if session.auth_two_factor_enabled:
            form = SQLFORM.factory(
                Field('authentication_code',
                      label=self.messages.label_two_factor,
                      required=True,
                      comment=self.messages.two_factor_comment),
                hidden=dict(_next=next),
                formstyle=settings.formstyle,
                separator=settings.label_separator
            )
            # accepted_form is used by some default web2py code later in the
            # function that handles running specified functions before redirect
            # Set it to False until the challenge form is accepted.
            accepted_form = False
            # Handle the case when a user has submitted the login/password
            # form successfully, and the password has been validated, but
            # the two-factor form has not been displayed or validated yet.
            if session.auth_two_factor_user is None and user is not None:
                session.auth_two_factor_user = user  # store the validated user and associate with this session
                session.auth_two_factor = random.randint(100000, 999999)
                session.auth_two_factor_tries_left = self.settings.auth_two_factor_tries_left
                # Set the way we generate the code or we send the code. For example using SMS...
                two_factor_methods = self.settings.two_factor_methods

                if not two_factor_methods:
                    # TODO: Add some error checking to handle cases where email cannot be sent
                    self.settings.mailer.send(
                        to=user.email,
                        subject=self.messages.retrieve_two_factor_code_subject,
                        message=self.messages.retrieve_two_factor_code.format(session.auth_two_factor))
                else:
                    # Check for all method. It is possible to have multiples
                    for two_factor_method in two_factor_methods:
                        try:
                            # By default we use session.auth_two_factor generated before.
                            session.auth_two_factor = two_factor_method(user, session.auth_two_factor)
                        except:
                            pass
                        else:
                            break

            if form.accepts(request, session if self.csrf_prevention else None,
                            formname='login', dbio=False,
                            onvalidation=onvalidation,
                            hideerror=settings.hideerror):
                accepted_form = True

                """
                The lists is executed after form validation for each of the corresponding action.
                For example, in your model:

                In your models copy and paste:

                # Before define tables, we add some extra field to auth_user
                auth.settings.extra_fields['auth_user'] = [
                    Field('motp_secret', 'password', length=512, default='', label='MOTP Secret'),
                    Field('motp_pin', 'string', length=128, default='', label='MOTP PIN')]

                OFFSET = 60  # Be sure is the same in your OTP Client

                # Set session.auth_two_factor to None. Because the code is generated by external app.
                # This will avoid to use the default setting and send a code by email.
                def _set_two_factor(user, auth_two_factor):
                    return None

                def verify_otp(user, otp):
                    import time
                    from hashlib import md5
                    epoch_time = int(time.time())
                    time_start = int(str(epoch_time - OFFSET)[:-1])
                    time_end = int(str(epoch_time + OFFSET)[:-1])
                    for t in range(time_start - 1, time_end + 1):
                        to_hash = str(t) + user.motp_secret + user.motp_pin
                        hash = md5(to_hash).hexdigest()[:6]
                        if otp == hash:
                            return hash

                auth.settings.auth_two_factor_enabled = True
                auth.messages.two_factor_comment = "Verify your OTP Client for the code."
                auth.settings.two_factor_methods = [lambda user,
                                                    auth_two_factor: _set_two_factor(user, auth_two_factor)]
                auth.settings.two_factor_onvalidation = [lambda user, otp: verify_otp(user, otp)]
                """
                if self.settings.two_factor_onvalidation:

                    for two_factor_onvalidation in self.settings.two_factor_onvalidation:
                        try:
                            session.auth_two_factor = \
                                two_factor_onvalidation(session.auth_two_factor_user, form.vars['authentication_code'])
                        except:
                            pass
                        else:
                            break

                if form.vars['authentication_code'] == str(session.auth_two_factor):
                    # Handle the case when the two-factor form has been successfully validated
                    # and the user was previously stored (the current user should be None because
                    # in this case, the previous username/password login form should not be displayed.
                    # This will allow the code after the 2-factor authentication block to proceed as
                    # normal.
                    if user is None or user == session.auth_two_factor_user:
                        user = session.auth_two_factor_user
                    # For security, because the username stored in the
                    # session somehow does not match the just validated
                    # user. Should not be possible without session stealing
                    # which is hard with SSL.
                    elif user != session.auth_two_factor_user:
                        user = None
                    # Either way, the user and code associated with this session should
                    # be removed. This handles cases where the session login may have
                    # expired but browser window is open, so the old session key and
                    # session username will still exist
                    self._reset_two_factor_auth(session)
                else:
                    session.auth_two_factor_tries_left -= 1
                    # If the number of retries are higher than auth_two_factor_tries_left
                    # Require user to enter username and password again.
                    if session.auth_two_factor_enabled and session.auth_two_factor_tries_left < 1:
                        # Exceeded maximum allowed tries for this code. Require user to enter
                        # username and password again.
                        user = None
                        accepted_form = False
                        self._reset_two_factor_auth(session)
                        # Redirect to the default 'next' page without logging
                        # in. If that page requires login, user will be redirected
                        # back to the main login form
                        redirect(next, client_side=settings.client_side)
                    response.flash = self.messages.invalid_two_factor_code.format(session.auth_two_factor_tries_left)
                    return form
            else:
                return form
        # End login logic for two-factor authentication

        # process authenticated users
        if user:
            user = Row(table_user._filter_fields(user, id=True))
            # process authenticated users
            # user wants to be logged in for longer
            self.login_user(user)
            session.auth.expiration = \
                request.post_vars.remember_me and \
                settings.long_expiration or \
                settings.expiration
            session.auth.remember_me = 'remember_me' in request.post_vars
            self.log_event(log, user)
            session.flash = self.messages.logged_in

        # how to continue
        if settings.login_form == self:
            if accepted_form:
                callback(onaccept, form)
                if next == session._auth_next:
                    session._auth_next = None
                next = replace_id(next, form)
                redirect(next, client_side=settings.client_side)

            table_user[username].requires = old_requires
            return form
        elif user:
            callback(onaccept, None)

        if next == session._auth_next:
            del session._auth_next
        redirect(next, client_side=settings.client_side)
    def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT):
        """
        Logouts and redirects to login.

        Args:
            next: URL to redirect to afterwards (None skips the redirect)
            onlogout: callback invoked with the current user before logout
            log: message template recorded via log_event
        """
        # Clear out 2-step authentication information if user logs
        # out. This information is also cleared on successful login.
        self._reset_two_factor_auth(current.session)

        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.logout_next
        if onlogout is DEFAULT:
            onlogout = self.settings.logout_onlogout
        if onlogout:
            onlogout(self.user)
        if log is DEFAULT:
            log = self.messages['logout_log']
        if self.user:
            self.log_event(log, self.user)
        if self.settings.login_form != self:
            # delegated (CAS-style) login: let the provider log out too
            cas = self.settings.login_form
            cas_user = cas.get_user()
            if cas_user:
                next = cas.logout_url(next)

        current.session.auth = None
        self.user = None
        if self.settings.renew_session_onlogout:
            current.session.renew(clear_session=not self.settings.keep_session_onlogout)
        current.session.flash = self.messages.logged_out
        if next is not None:
            redirect(next)
def logout_bare(self):
self.logout(next=None, onlogout=None, log=None)
    def register(self,
                 next=DEFAULT,
                 onvalidation=DEFAULT,
                 onaccept=DEFAULT,
                 log=DEFAULT,
                 ):
        """
        Returns a registration form.

        Depending on settings this may: enforce username/email uniqueness,
        add a "confirm password" field, send a verification email, mark
        the account pending approval, and/or log the new user in right
        away.
        """
        table_user = self.table_user()
        request = current.request
        response = current.response
        session = current.session
        if self.is_logged_in():
            redirect(self.settings.logged_url,
                     client_side=self.settings.client_side)
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.register_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.register_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.register_onaccept
        if log is DEFAULT:
            log = self.messages['register_log']

        table_user = self.table_user()
        if self.settings.login_userfield:
            username = self.settings.login_userfield
        elif 'username' in table_user.fields:
            username = 'username'
        else:
            username = 'email'

        # Ensure the username field is unique.
        unique_validator = IS_NOT_IN_DB(self.db, table_user[username])
        if not table_user[username].requires:
            table_user[username].requires = unique_validator
        elif isinstance(table_user[username].requires, (list, tuple)):
            if not any([isinstance(validator, IS_NOT_IN_DB) for validator in
                        table_user[username].requires]):
                if isinstance(table_user[username].requires, list):
                    table_user[username].requires.append(unique_validator)
                else:
                    table_user[username].requires += (unique_validator, )
        elif not isinstance(table_user[username].requires, IS_NOT_IN_DB):
            table_user[username].requires = [table_user[username].requires,
                                             unique_validator]

        passfield = self.settings.password_field
        formstyle = self.settings.formstyle
        try:  # Make sure we have our original minimum length as other auth forms change it
            table_user[passfield].requires[-1].min_length = self.settings.password_min_length
        except:
            pass

        if self.settings.register_verify_password:
            if self.settings.register_fields is None:
                self.settings.register_fields = [f.name for f in table_user if f.writable and not f.compute]
                k = self.settings.register_fields.index(passfield)
                self.settings.register_fields.insert(k + 1, "password_two")
            extra_fields = [
                Field("password_two", "password",
                      requires=IS_EQUAL_TO(request.post_vars.get(passfield, None),
                                           error_message=self.messages.mismatched_password),
                      label=current.T("Confirm Password"))]
        else:
            extra_fields = []
        form = SQLFORM(table_user,
                       fields=self.settings.register_fields,
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.register_button,
                       delete_label=self.messages.delete_label,
                       formstyle=formstyle,
                       separator=self.settings.label_separator,
                       extra_fields=extra_fields
                       )
        captcha = self.settings.register_captcha or self.settings.captcha
        if captcha:
            addrow(form, captcha.label, captcha,
                   captcha.comment, self.settings.formstyle, 'captcha__row')

        # Add a message if specified
        if self.settings.pre_registration_div:
            addrow(form, '',
                   DIV(_id="pre-reg", *self.settings.pre_registration_div),
                   '', formstyle, '')

        # a 'pending-' prefixed key signals that approval is required
        key = web2py_uuid()
        if self.settings.registration_requires_approval:
            key = 'pending-' + key
        table_user.registration_key.default = key
        if form.accepts(request, session if self.csrf_prevention else None,
                        formname='register',
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            description = self.messages.group_description % form.vars
            if self.settings.create_user_groups:
                # one personal group per new user
                group_id = self.add_group(self.settings.create_user_groups % form.vars, description)
                self.add_membership(group_id, form.vars.id)
            if self.settings.everybody_group_id:
                self.add_membership(self.settings.everybody_group_id, form.vars.id)
            if self.settings.registration_requires_verification:
                link = self.url(
                    self.settings.function, args=('verify_email', key), scheme=True)
                d = dict(form.vars)
                d.update(dict(key=key, link=link, username=form.vars[username],
                              firstname=form.vars['firstname'],
                              lastname=form.vars['lastname']))
                if not (self.settings.mailer and self.settings.mailer.send(
                        to=form.vars.email,
                        subject=self.messages.verify_email_subject,
                        message=self.messages.verify_email % d)):
                    # email failed: undo the insert so the user can retry
                    self.db.rollback()
                    response.flash = self.messages.unable_send_email
                    return form
                session.flash = self.messages.email_sent
            if self.settings.registration_requires_approval and \
               not self.settings.registration_requires_verification:
                table_user[form.vars.id] = dict(registration_key='pending')
                session.flash = self.messages.registration_pending
            elif (not self.settings.registration_requires_verification or self.settings.login_after_registration):
                if not self.settings.registration_requires_verification:
                    table_user[form.vars.id] = dict(registration_key='')
                session.flash = self.messages.registration_successful
                user = table_user(**{username: form.vars[username]})
                self.login_user(user)
                session.flash = self.messages.logged_in
            self.log_event(log, form.vars)
            callback(onaccept, form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next, client_side=self.settings.client_side)

        return form
    def verify_email(self,
                     next=DEFAULT,
                     onaccept=DEFAULT,
                     log=DEFAULT,
                     ):
        """
        Action used to verify the registration email.

        The verification key is taken from the last URL argument; an
        unknown key redirects to the login page. Depending on settings,
        the account becomes 'pending' (awaiting approval) or fully active.
        """
        key = getarg(-1)
        table_user = self.table_user()
        user = table_user(registration_key=key)
        if not user:
            redirect(self.settings.login_url)
        if self.settings.registration_requires_approval:
            user.update_record(registration_key='pending')
            current.session.flash = self.messages.registration_pending
        else:
            user.update_record(registration_key='')
            current.session.flash = self.messages.email_verified
        # make sure session has same user.registration_key as db record
        if current.session.auth and current.session.auth.user:
            current.session.auth.user.registration_key = user.registration_key
        if log is DEFAULT:
            log = self.messages['verify_email_log']
        if next is DEFAULT:
            next = self.settings.verify_email_next
        if onaccept is DEFAULT:
            onaccept = self.settings.verify_email_onaccept
        self.log_event(log, user)
        callback(onaccept, user)
        redirect(next)
    def retrieve_username(self,
                          next=DEFAULT,
                          onvalidation=DEFAULT,
                          onaccept=DEFAULT,
                          log=DEFAULT,
                          ):
        """
        Returns a form to retrieve the user username
        (only if there is a username field).

        On submit, emails every username registered under the given email
        address. Raises HTTP(404) when the table has no username field.
        """
        table_user = self.table_user()
        if 'username' not in table_user.fields:
            raise HTTP(404)
        request = current.request
        response = current.response
        session = current.session
        captcha = self.settings.retrieve_username_captcha or \
            (self.settings.retrieve_username_captcha is not False and self.settings.captcha)
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.retrieve_username_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.retrieve_username_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.retrieve_username_onaccept
        if log is DEFAULT:
            log = self.messages['retrieve_username_log']
        # temporarily require the email to exist in the table; restored below
        old_requires = table_user.email.requires
        table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
                                              error_message=self.messages.invalid_email)]
        form = SQLFORM(table_user,
                       fields=['email'],
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.submit_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if captcha:
            addrow(form, captcha.label, captcha,
                   captcha.comment, self.settings.formstyle, 'captcha__row')

        if form.accepts(request, session if self.csrf_prevention else None,
                        formname='retrieve_username', dbio=False,
                        onvalidation=onvalidation, hideerror=self.settings.hideerror):
            users = table_user._db(table_user.email == form.vars.email).select()
            if not users:
                current.session.flash = \
                    self.messages.invalid_email
                redirect(self.url(args=request.args))
            # multiple accounts may share one email address
            username = ', '.join(u.username for u in users)
            self.settings.mailer.send(to=form.vars.email,
                                      subject=self.messages.retrieve_username_subject,
                                      message=self.messages.retrieve_username % dict(username=username))
            session.flash = self.messages.email_sent
            for user in users:
                self.log_event(log, user)
            callback(onaccept, form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        table_user.email.requires = old_requires
        return form
def random_password(self):
import string
import random
password = ''
specials = r'!#$*'
for i in range(0, 3):
password += random.choice(string.ascii_lowercase)
password += random.choice(string.ascii_uppercase)
password += random.choice(string.digits)
password += random.choice(specials)
return ''.join(random.sample(password, len(password)))
    def reset_password_deprecated(self,
                                  next=DEFAULT,
                                  onvalidation=DEFAULT,
                                  onaccept=DEFAULT,
                                  log=DEFAULT,
                                  ):
        """
        Returns a form to reset the user password (deprecated).

        Generates a fresh random password, stores its hash, and emails
        the plaintext to the user. Kept for backward compatibility;
        prefer the key-based reset flow.
        """
        table_user = self.table_user()
        request = current.request
        response = current.response
        session = current.session
        if not self.settings.mailer:
            response.flash = self.messages.function_disabled
            return ''
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.retrieve_password_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.retrieve_password_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.retrieve_password_onaccept
        if log is DEFAULT:
            log = self.messages['retrieve_password_log']
        # temporarily require the email to exist in the table; restored below
        old_requires = table_user.email.requires
        table_user.email.requires = [IS_IN_DB(self.db, table_user.email,
                                              error_message=self.messages.invalid_email)]
        form = SQLFORM(table_user,
                       fields=['email'],
                       hidden=dict(_next=next),
                       showid=self.settings.showid,
                       submit_button=self.messages.submit_button,
                       delete_label=self.messages.delete_label,
                       formstyle=self.settings.formstyle,
                       separator=self.settings.label_separator
                       )
        if form.accepts(request, session if self.csrf_prevention else None,
                        formname='retrieve_password', dbio=False,
                        onvalidation=onvalidation, hideerror=self.settings.hideerror):
            user = table_user(email=form.vars.email)
            if not user:
                current.session.flash = \
                    self.messages.invalid_email
                redirect(self.url(args=request.args))
            key = user.registration_key
            if key in ('pending', 'disabled', 'blocked') or (key or '').startswith('pending'):
                # unverified/blocked accounts cannot reset their password
                current.session.flash = \
                    self.messages.registration_pending
                redirect(self.url(args=request.args))
            password = self.random_password()
            passfield = self.settings.password_field
            d = {
                passfield: str(table_user[passfield].validate(password)[0]),
                'registration_key': ''
            }
            user.update_record(**d)
            if self.settings.mailer and \
               self.settings.mailer.send(to=form.vars.email,
                                         subject=self.messages.retrieve_password_subject,
                                         message=self.messages.retrieve_password % dict(password=password)):
                session.flash = self.messages.email_sent
            else:
                session.flash = self.messages.unable_send_email
            self.log_event(log, user)
            callback(onaccept, form)
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next)
        table_user.email.requires = old_requires
        return form
    def confirm_registration(self,
                             next=DEFAULT,
                             onvalidation=DEFAULT,
                             onaccept=DEFAULT,
                             log=DEFAULT,
                             ):
        """
        Returns a form to confirm user registration.

        The confirmation key (created by email_registration, in the form
        '<unix-timestamp>-<uuid>') is taken from request vars/args; keys
        older than 24 hours, or matching no user, redirect away with an
        "invalid reset password" flash.

        Args:
            next: redirect target (DEFAULT resolves to
                settings.reset_password_next)
            onvalidation, onaccept, log: accepted for signature symmetry
                with the other Auth actions but not used by this method
        """
        table_user = self.table_user()
        request = current.request
        # response = current.response
        session = current.session
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.reset_password_next
        if self.settings.prevent_password_reset_attacks:
            # take the key off the URL: stash it in the session and
            # redirect to a key-less URL before showing the form
            key = request.vars.key
            if not key and len(request.args) > 1:
                key = request.args[-1]
            if key:
                session._reset_password_key = key
                if next:
                    redirect_vars = {'_next': next}
                else:
                    redirect_vars = {}
                redirect(self.url(args='confirm_registration',
                                  vars=redirect_vars))
            else:
                key = session._reset_password_key
        else:
            key = request.vars.key or getarg(-1)
        try:
            # key prefix is a unix timestamp: expire after 24 hours
            t0 = int(key.split('-')[0])
            if time.time() - t0 > 60 * 60 * 24:
                raise Exception
            user = table_user(reset_password_key=key)
            if not user:
                raise Exception
        except Exception as e:
            session.flash = self.messages.invalid_reset_password
            redirect(next, client_side=self.settings.client_side)
        passfield = self.settings.password_field
        # ask for names and a verified password; on success both
        # registration_key and reset_password_key are cleared, which
        # activates the account
        form = SQLFORM.factory(
            Field('first_name',
                  label='First Name',
                  required=True),
            Field('last_name',
                  label='Last Name',
                  required=True),
            Field('new_password', 'password',
                  label=self.messages.new_password,
                  requires=self.table_user()[passfield].requires),
            Field('new_password2', 'password',
                  label=self.messages.verify_password,
                  requires=[IS_EXPR('value==%s' % repr(request.vars.new_password),
                                    self.messages.mismatched_password)]),
            submit_button='Confirm Registration',
            hidden=dict(_next=next),
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator
        )
        if form.process().accepted:
            user.update_record(
                **{passfield: str(form.vars.new_password),
                   'first_name': str(form.vars.first_name),
                   'last_name': str(form.vars.last_name),
                   'registration_key': '',
                   'reset_password_key': ''})
            session.flash = self.messages.password_changed
            if self.settings.login_after_password_change:
                self.login_user(user)
            redirect(next, client_side=self.settings.client_side)
        return form
def email_registration(self, subject, body, user):
"""
Sends and email invitation to a user informing they have been registered with the application
"""
reset_password_key = str(int(time.time())) + '-' + web2py_uuid()
link = self.url(self.settings.function,
args=('confirm_registration',), vars={'key': reset_password_key},
scheme=True)
d = dict(user)
d.update(dict(key=reset_password_key, link=link, site=current.request.env.http_host))
if self.settings.mailer and self.settings.mailer.send(
to=user.email,
subject=subject % d,
message=body % d):
user.update_record(reset_password_key=reset_password_key)
return True
return False
def bulk_register(self, max_emails=100):
"""
Creates a form for ther user to send invites to other users to join
"""
if not self.user:
redirect(self.settings.login_url)
if not self.settings.bulk_register_enabled:
return HTTP(404)
form = SQLFORM.factory(
Field('subject', 'string', default=self.messages.bulk_invite_subject, requires=IS_NOT_EMPTY()),
Field('emails', 'text', requires=IS_NOT_EMPTY()),
Field('message', 'text', default=self.messages.bulk_invite_body, requires=IS_NOT_EMPTY()),
formstyle=self.settings.formstyle)
if form.process().accepted:
emails = re.compile('[^\s\'"@<>,;:]+\@[^\s\'"@<>,;:]+').findall(form.vars.emails)
# send the invitations
emails_sent = []
emails_fail = []
emails_exist = []
for email in emails[:max_emails]:
if self.table_user()(email=email):
emails_exist.append(email)
else:
user = self.register_bare(email=email)
if self.email_registration(form.vars.subject, form.vars.message, user):
emails_sent.append(email)
else:
emails_fail.append(email)
emails_fail += emails[max_emails:]
form = DIV(H4('Emails sent'), UL(*[A(x, _href='mailto:' + x) for x in emails_sent]),
H4('Emails failed'), UL(*[A(x, _href='mailto:' + x) for x in emails_fail]),
H4('Emails existing'), UL(*[A(x, _href='mailto:' + x) for x in emails_exist]))
return form
def manage_tokens(self):
if not self.user:
redirect(self.settings.login_url)
table_token = self.table_token()
table_token.user_id.writable = False
table_token.user_id.default = self.user.id
table_token.token.writable = False
if current.request.args(1) == 'new':
table_token.token.readable = False
form = SQLFORM.grid(table_token, args=['manage_tokens'])
return form
    def reset_password(self,
                       next=DEFAULT,
                       onvalidation=DEFAULT,
                       onaccept=DEFAULT,
                       log=DEFAULT,
                       ):
        """
        Returns a form to reset the user password.

        The reset key ('<unix-timestamp>-<uuid>', emailed by
        email_reset_password) must be younger than 24 hours and match a
        user record; pending/disabled/blocked accounts are rejected.

        Args:
            next: redirect target (DEFAULT resolves to
                settings.reset_password_next)
            onvalidation: form validation callback(s)
            onaccept: callback(s) run after a successful reset
            log: accepted for signature symmetry but unused by this method
        """
        table_user = self.table_user()
        request = current.request
        # response = current.response
        session = current.session
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.reset_password_next
        if self.settings.prevent_password_reset_attacks:
            # move the key from the URL into the session, then redirect
            # to a key-less URL before rendering the form
            key = request.vars.key
            if key:
                session._reset_password_key = key
                redirect(self.url(args='reset_password'))
            else:
                key = session._reset_password_key
        else:
            key = request.vars.key
        try:
            # key prefix is a unix timestamp: expire after 24 hours
            t0 = int(key.split('-')[0])
            if time.time() - t0 > 60 * 60 * 24:
                raise Exception
            user = table_user(reset_password_key=key)
            if not user:
                raise Exception
        except Exception:
            session.flash = self.messages.invalid_reset_password
            redirect(next, client_side=self.settings.client_side)
        # reject accounts that are not active yet (or blocked)
        key = user.registration_key
        if key in ('pending', 'disabled', 'blocked') or (key or '').startswith('pending'):
            session.flash = self.messages.registration_pending
            redirect(next, client_side=self.settings.client_side)
        if onvalidation is DEFAULT:
            onvalidation = self.settings.reset_password_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.reset_password_onaccept
        passfield = self.settings.password_field
        form = SQLFORM.factory(
            Field('new_password', 'password',
                  label=self.messages.new_password,
                  requires=self.table_user()[passfield].requires),
            Field('new_password2', 'password',
                  label=self.messages.verify_password,
                  requires=[IS_EXPR('value==%s' % repr(request.vars.new_password),
                                    self.messages.mismatched_password)]),
            submit_button=self.messages.password_reset_button,
            hidden=dict(_next=next),
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator
        )
        if form.accepts(request, session, onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            # clearing both keys activates the account and invalidates
            # the reset link
            user.update_record(
                **{passfield: str(form.vars.new_password),
                   'registration_key': '',
                   'reset_password_key': ''})
            session.flash = self.messages.password_changed
            if self.settings.login_after_password_change:
                self.login_user(user)
            callback(onaccept, form)
            redirect(next, client_side=self.settings.client_side)
        return form
def request_reset_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
Returns a form to reset the user password
"""
table_user = self.table_user()
request = current.request
response = current.response
session = current.session
captcha = self.settings.retrieve_password_captcha or \
(self.settings.retrieve_password_captcha is not False and self.settings.captcha)
if next is DEFAULT:
next = self.get_vars_next() or self.settings.request_reset_password_next
if not self.settings.mailer:
response.flash = self.messages.function_disabled
return ''
if onvalidation is DEFAULT:
onvalidation = self.settings.request_reset_password_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.request_reset_password_onaccept
if log is DEFAULT:
log = self.messages['reset_password_log']
userfield = self.settings.login_userfield or 'username' \
if self.settings.login_userfield or 'username' \
in table_user.fields else 'email'
if userfield == 'email':
table_user.email.requires = [
IS_EMAIL(error_message=self.messages.invalid_email),
IS_IN_DB(self.db, table_user.email,
error_message=self.messages.invalid_email)]
if not self.settings.email_case_sensitive:
table_user.email.requires.insert(0, IS_LOWER())
elif userfield == 'username':
table_user.username.requires = [
IS_IN_DB(self.db, table_user.username,
error_message=self.messages.invalid_username)]
if not self.settings.username_case_sensitive:
table_user.username.requires.insert(0, IS_LOWER())
form = SQLFORM(table_user,
fields=[userfield],
hidden=dict(_next=next),
showid=self.settings.showid,
submit_button=self.messages.password_reset_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if captcha:
addrow(form, captcha.label, captcha,
captcha.comment, self.settings.formstyle, 'captcha__row')
if form.accepts(request, session if self.csrf_prevention else None,
formname='reset_password', dbio=False,
onvalidation=onvalidation,
hideerror=self.settings.hideerror):
user = table_user(**{userfield: form.vars.get(userfield)})
key = user.registration_key
if not user:
session.flash = self.messages['invalid_%s' % userfield]
redirect(self.url(args=request.args),
client_side=self.settings.client_side)
elif key in ('pending', 'disabled', 'blocked') or (key or '').startswith('pending'):
session.flash = self.messages.registration_pending
redirect(self.url(args=request.args),
client_side=self.settings.client_side)
if self.email_reset_password(user):
session.flash = self.messages.email_sent
else:
session.flash = self.messages.unable_send_email
self.log_event(log, user)
callback(onaccept, form)
if not next:
next = self.url(args=request.args)
else:
next = replace_id(next, form)
redirect(next, client_side=self.settings.client_side)
# old_requires = table_user.email.requires
return form
def email_reset_password(self, user):
reset_password_key = str(int(time.time())) + '-' + web2py_uuid()
link = self.url(self.settings.function,
args=('reset_password',), vars={'key': reset_password_key},
scheme=True)
d = dict(user)
d.update(dict(key=reset_password_key, link=link))
if self.settings.mailer and self.settings.mailer.send(
to=user.email,
subject=self.messages.reset_password_subject,
message=self.messages.reset_password % d):
user.update_record(reset_password_key=reset_password_key)
return True
return False
def retrieve_password(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
if self.settings.reset_password_requires_verification:
return self.request_reset_password(next, onvalidation, onaccept, log)
else:
return self.reset_password_deprecated(next, onvalidation, onaccept, log)
    def change_password(self,
                        next=DEFAULT,
                        onvalidation=DEFAULT,
                        onaccept=DEFAULT,
                        log=DEFAULT,
                        ):
        """
        Returns a form that lets the user change password.

        Requires a logged-in user; alternate login forms (e.g. CAS) that
        define change_password_url are redirected to instead.

        Args:
            next: redirect target on success (DEFAULT resolves to
                settings.change_password_next)
            onvalidation: form validation callback(s)
            onaccept: callback(s) run after a successful change
            log: event-log message (DEFAULT: messages.change_password_log)
        """
        if not self.is_logged_in():
            redirect(self.settings.login_url,
                     client_side=self.settings.client_side)
        # Go to external link to change the password
        if self.settings.login_form != self:
            cas = self.settings.login_form
            # To prevent error if change_password_url function is not defined in alternate login
            if hasattr(cas, 'change_password_url'):
                next = cas.change_password_url(next)
                if next is not None:
                    redirect(next)
        db = self.db
        table_user = self.table_user()
        s = db(table_user.id == self.user.id)
        request = current.request
        session = current.session
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.change_password_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.change_password_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.change_password_onaccept
        if log is DEFAULT:
            log = self.messages['change_password_log']
        passfield = self.settings.password_field
        requires = table_user[passfield].requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        # the old-password field only needs the CRYPT validator(s)
        requires = [t for t in requires if isinstance(t, CRYPT)]
        if requires:
            requires[0] = CRYPT(**requires[0].__dict__)  # Copy the existing CRYPT attributes
            requires[0].min_length = 0  # But do not enforce minimum length for the old password
        form = SQLFORM.factory(
            Field('old_password', 'password', requires=requires,
                  label=self.messages.old_password),
            Field('new_password', 'password',
                  label=self.messages.new_password,
                  requires=table_user[passfield].requires),
            Field('new_password2', 'password',
                  label=self.messages.verify_password,
                  requires=[IS_EXPR('value==%s' % repr(request.vars.new_password),
                                    self.messages.mismatched_password)]),
            submit_button=self.messages.password_change_button,
            hidden=dict(_next=next),
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator
        )
        if form.accepts(request, session,
                        formname='change_password',
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            current_user = s.select(limitby=(0, 1), orderby_on_limitby=False).first()
            # old_password went through the CRYPT validator above, so this
            # compares credential values, not plaintext
            if not form.vars['old_password'] == current_user[passfield]:
                form.errors['old_password'] = self.messages.invalid_password
            else:
                d = {passfield: str(form.vars.new_password)}
                s.update(**d)
                session.flash = self.messages.password_changed
                self.log_event(log, self.user)
                callback(onaccept, form)
                if not next:
                    next = self.url(args=request.args)
                else:
                    next = replace_id(next, form)
                redirect(next, client_side=self.settings.client_side)
        return form
    def profile(self,
                next=DEFAULT,
                onvalidation=DEFAULT,
                onaccept=DEFAULT,
                log=DEFAULT,
                ):
        """
        Returns a form that lets the user change his/her profile.

        Password and email fields are made read-only. When
        settings.allow_delete_accounts is set the form can delete the
        account, in which case the user is logged out.

        Args:
            next: redirect target on success (DEFAULT resolves to
                settings.profile_next)
            onvalidation: form validation callback(s)
            onaccept: callback(s) run after a successful update
            log: event-log message (DEFAULT: messages.profile_log)
        """
        table_user = self.table_user()
        if not self.is_logged_in():
            redirect(self.settings.login_url,
                     client_side=self.settings.client_side)
        passfield = self.settings.password_field
        table_user[passfield].writable = False
        table_user['email'].writable = False
        request = current.request
        session = current.session
        if next is DEFAULT:
            next = self.get_vars_next() or self.settings.profile_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.profile_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.profile_onaccept
        if log is DEFAULT:
            log = self.messages['profile_log']
        form = SQLFORM(
            table_user,
            self.user.id,
            fields=self.settings.profile_fields,
            hidden=dict(_next=next),
            showid=self.settings.showid,
            submit_button=self.messages.profile_save_button,
            delete_label=self.messages.delete_label,
            upload=self.settings.download_url,
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator,
            deletable=self.settings.allow_delete_accounts,
        )
        if form.accepts(request, session,
                        formname='profile',
                        onvalidation=onvalidation,
                        hideerror=self.settings.hideerror):
            extra_fields = self.settings.extra_fields.get(self.settings.table_user_name, [])
            if not form.deleted:
                if any(f.compute for f in extra_fields):
                    # computed extra fields: re-read the row so the
                    # session copy includes the computed values
                    user = table_user[self.user.id]
                    self._update_session_user(user)
                    self.update_groups()
                else:
                    # no computed fields: update the session copy in place
                    self.user.update(table_user._filter_fields(form.vars))
                session.flash = self.messages.profile_updated
                self.log_event(log, self.user)
                callback(onaccept, form)
            if form.deleted:
                return self.logout()
            if not next:
                next = self.url(args=request.args)
            else:
                next = replace_id(next, form)
            redirect(next, client_side=self.settings.client_side)
        return form
def run_login_onaccept(self):
onaccept = self.settings.login_onaccept
if onaccept:
form = Storage(dict(vars=self.user))
if not isinstance(onaccept, (list, tuple)):
onaccept = [onaccept]
for callback in onaccept:
callback(form)
def jwt(self):
"""
To use JWT authentication:
1) instantiate auth with::
auth = Auth(db, jwt = {'secret_key':'secret'})
where 'secret' is your own secret string.
2) Decorate functions that require login but should accept the JWT token credentials::
@auth.allows_jwt()
@auth.requires_login()
def myapi(): return 'hello %s' % auth.user.email
Notice jwt is allowed but not required. if user is logged in, myapi is accessible.
3) Use it!
Now API users can obtain a token with
http://.../app/default/user/jwt?username=...&password=....
(returns json object with a token attribute)
API users can refresh an existing token with
http://.../app/default/user/jwt?token=...
they can authenticate themselves when calling http:/.../myapi by injecting a header
Authorization: Bearer <the jwt token>
Any additional attributes in the jwt argument of Auth() below::
auth = Auth(db, jwt = {...})
are passed to the constructor of class AuthJWT. Look there for documentation.
"""
if not self.jwt_handler:
raise HTTP(401, "Not authorized")
else:
rtn = self.jwt_handler.jwt_token_manager()
raise HTTP(200, rtn, cookies=None, **current.response.headers)
def is_impersonating(self):
return self.is_logged_in() and 'impersonator' in current.session.auth
    def impersonate(self, user_id=DEFAULT):
        """
        To use this make a POST to
        `http://..../impersonate request.post_vars.user_id=<id>`
        Set request.post_vars.user_id to 0 to restore original user.

        requires impersonator is logged in and::

            has_permission('impersonate', 'auth_user', user_id)

        Returns a user_id request form when called without arguments and
        no POST data, a read-only form of the impersonated user on
        success, or None.
        """
        request = current.request
        session = current.session
        auth = session.auth
        table_user = self.table_user()
        if not self.is_logged_in():
            raise HTTP(401, "Not Authorized")
        current_id = auth.user.id
        requested_id = user_id
        user = None
        if user_id is DEFAULT:
            user_id = current.request.post_vars.user_id
        if user_id and user_id != self.user.id and user_id != '0':
            if not self.has_permission('impersonate',
                                       self.table_user(),
                                       user_id):
                raise HTTP(403, "Forbidden")
            user = table_user(user_id)
            if not user:
                raise HTTP(401, "Not Authorized")
            # snapshot the impersonator's whole session so it can be
            # restored verbatim when impersonation ends
            auth.impersonator = pickle.dumps(session, pickle.HIGHEST_PROTOCOL)
            auth.user.update(
                table_user._filter_fields(user, True))
            self.user = auth.user
            self.update_groups()
            log = self.messages['impersonate_log']
            self.log_event(log, dict(id=current_id, other_id=auth.user.id))
            self.run_login_onaccept()
        elif user_id in (0, '0'):
            # user_id == 0 ends impersonation: restore the saved session
            if self.is_impersonating():
                session.clear()
                session.update(pickle.loads(auth.impersonator))
                self.user = session.auth.user
            self.update_groups()
            self.run_login_onaccept()
            return None
        if requested_id is DEFAULT and not request.post_vars:
            return SQLFORM.factory(Field('user_id', 'integer'))
        elif not user:
            return None
        else:
            return SQLFORM(table_user, user.id, readonly=True)
def groups(self):
"""
Displays the groups and their roles for the logged in user
"""
if not self.is_logged_in():
redirect(self.settings.login_url)
table_membership = self.table_membership()
memberships = self.db(
table_membership.user_id == self.user.id).select()
table = TABLE()
for membership in memberships:
table_group = self.table_group()
groups = self.db(table_group.id == membership.group_id).select()
if groups:
group = groups[0]
table.append(TR(H3(group.role, '(%s)' % group.id)))
table.append(TR(P(group.description)))
if not memberships:
return None
return table
def not_authorized(self):
"""
You can change the view for this page to make it look as you like
"""
if current.request.ajax:
raise HTTP(403, 'ACCESS DENIED')
return self.messages.access_denied
def allows_jwt(self, otherwise=None):
if not self.jwt_handler:
raise HTTP(401, "Not authorized")
else:
return self.jwt_handler.allows_jwt(otherwise=otherwise)
    def requires(self, condition, requires_login=True, otherwise=None):
        """
        Decorator that prevents access to action if not logged in
        or if `condition` (a boolean or callable returning one) fails.

        Args:
            condition: authorization check, evaluated after authentication
            requires_login: boolean or callable; when truthy the caller
                must be authenticated (HTTP basic auth is tried first)
            otherwise: URL or callable used on failed authentication
                instead of the default login redirect
        """
        def decorator(action):
            def f(*a, **b):
                # try HTTP basic auth first; fall back to the session user
                basic_allowed, basic_accepted, user = self.basic()
                user = user or self.user
                login_required = requires_login
                if callable(login_required):
                    login_required = login_required()
                if login_required:
                    if not user:
                        if current.request.ajax:
                            # ajax callers get a 401, not a redirect
                            raise HTTP(401, self.messages.ajax_failed_authentication)
                        elif otherwise is not None:
                            if callable(otherwise):
                                return otherwise()
                            redirect(otherwise)
                        elif self.settings.allow_basic_login_only or \
                                basic_accepted or current.request.is_restful:
                            raise HTTP(403, "Not authorized")
                        else:
                            # send the visitor to the login page, keeping
                            # the original destination in _next
                            next = self.here()
                            current.session.flash = current.response.flash
                            return call_or_redirect(self.settings.on_failed_authentication,
                                                    self.settings.login_url + '?_next=' + urllib_quote(next))
                if callable(condition):
                    flag = condition()
                else:
                    flag = condition
                if not flag:
                    current.session.flash = self.messages.access_denied
                    return call_or_redirect(
                        self.settings.on_failed_authorization)
                return action(*a, **b)
            # preserve the wrapped action's metadata (manual functools.wraps)
            f.__doc__ = action.__doc__
            f.__name__ = action.__name__
            f.__dict__.update(action.__dict__)
            return f
        return decorator
def requires_login(self, otherwise=None):
"""
Decorator that prevents access to action if not logged in
"""
return self.requires(True, otherwise=otherwise)
    def requires_login_or_token(self, otherwise=None):
        """
        Like requires_login but, when settings.enable_tokens is True,
        first tries to authenticate the request from an API token taken
        from the `web2py-user-token` header or the `_token` var.
        """
        if self.settings.enable_tokens is True:
            user = None
            request = current.request
            token = request.env.http_web2py_user_token or request.vars._token
            table_token = self.table_token()
            table_user = self.table_user()
            from gluon.settings import global_settings
            if global_settings.web2py_runtime_gae:
                # GAE has no joins: resolve token and user separately
                row = table_token(token=token)
                if row:
                    user = table_user(row.user_id)
            else:
                # single query joining token to its user
                row = self.db(table_token.token == token)(table_user.id == table_token.user_id).select().first()
                if row:
                    user = row[table_user._tablename]
            if user:
                self.login_user(user)
        return self.requires(True, otherwise=otherwise)
def requires_membership(self, role=None, group_id=None, otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of group_id.
If role is provided instead of group_id then the
group_id is calculated.
"""
def has_membership(self=self, group_id=group_id, role=role):
return self.has_membership(group_id=group_id, role=role)
return self.requires(has_membership, otherwise=otherwise)
def requires_permission(self, name, table_name='', record_id=0,
otherwise=None):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of any group (role) that
has 'name' access to 'table_name', 'record_id'.
"""
def has_permission(self=self, name=name, table_name=table_name, record_id=record_id):
return self.has_permission(name, table_name, record_id)
return self.requires(has_permission, otherwise=otherwise)
def requires_signature(self, otherwise=None, hash_vars=True, hash_extension=True):
"""
Decorator that prevents access to action if not logged in or
if user logged in is not a member of group_id.
If role is provided instead of group_id then the
group_id is calculated.
"""
def verify():
return URL.verify(current.request, user_signature=True, hash_vars=hash_vars, hash_extension=True)
return self.requires(verify, otherwise)
    def accessible_query(self, name, table, user_id=None):
        """
        Returns a query with all accessible records for user_id or
        the current logged in user
        this method does not work on GAE because uses JOIN and IN

        Example:
            Use as::

                db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL)

        """
        if not user_id:
            user_id = self.user_id
        db = self.db
        if isinstance(table, str) and table in self.db.tables():
            table = self.db[table]
        elif isinstance(table, (Set, Query)):
            # experimental: build a chained query for all tables
            if isinstance(table, Set):
                cquery = table.query
            else:
                cquery = table
            tablenames = db._adapter.tables(cquery)
            for tablename in tablenames:
                cquery &= self.accessible_query(name, tablename, user_id=user_id)
            return cquery
        # a blanket permission on the whole table (record_id 0) grants all rows
        if not isinstance(table, str) and \
                self.has_permission(name, table, 0, user_id):
            return table.id > 0
        membership = self.table_membership()
        permission = self.table_permission()
        # rows whose id appears in a per-record permission granted to any
        # of the user's groups
        query = table.id.belongs(
            db(membership.user_id == user_id)
            (membership.group_id == permission.group_id)
            (permission.name == name)
            (permission.table_name == table)
            ._select(permission.record_id))
        if self.settings.everybody_group_id:
            # also include rows granted to the "everybody" group
            query |= table.id.belongs(
                db(permission.group_id == self.settings.everybody_group_id)
                (permission.name == name)
                (permission.table_name == table)
                ._select(permission.record_id))
        return query
    @staticmethod
    def archive(form,
                archive_table=None,
                current_record='current_record',
                archive_current=False,
                fields=None):
        """
        If you have a table (db.mytable) that needs full revision history you
        can just do::

            form = crud.update(db.mytable, myrecord, onaccept=auth.archive)

        or::

            form = SQLFORM(db.mytable, myrecord).process(onaccept=auth.archive)

        crud.archive will define a new table "mytable_archive" and store
        a copy of the current record (if archive_current=True)
        or a copy of the previous record (if archive_current=False)
        in the newly created table including a reference
        to the current record.

        fields allows to specify extra fields that need to be archived.

        If you want to access such table you need to define it yourself
        in a model::

            db.define_table('mytable_archive',
                            Field('current_record', db.mytable),
                            db.mytable)

        Notice such table includes all fields of db.mytable plus one: current_record.
        crud.archive does not timestamp the stored record unless your original table
        has a fields like::

            db.define_table(...,
                            Field('saved_on', 'datetime',
                                  default=request.now, update=request.now, writable=False),
                            Field('saved_by', auth.user,
                                  default=auth.user_id, update=auth.user_id, writable=False),

        there is nothing special about these fields since they are filled before
        the record is archived.

        If you want to change the archive table name and the name of the reference field
        you can do, for example::

            db.define_table('myhistory',
                            Field('parent_record', db.mytable), db.mytable)

        and use it as::

            form = crud.update(db.mytable, myrecord,
                               onaccept=lambda form:crud.archive(form,
                                                                 archive_table=db.myhistory,
                                                                 current_record='parent_record'))
        """
        # nothing to archive when creating a record (unless archiving the
        # submitted values themselves)
        if not archive_current and not form.record:
            return None
        table = form.table
        if not archive_table:
            archive_table_name = '%s_archive' % table
            if archive_table_name not in table._db:
                # define <table>_archive on the fly: a reference back to
                # the live record plus a clone of every field (unique
                # dropped so several revisions of one row can coexist)
                table._db.define_table(
                    archive_table_name,
                    Field(current_record, table),
                    *[field.clone(unique=False) for field in table])
            archive_table = table._db[archive_table_name]
        new_record = {current_record: form.vars.id}
        for fieldname in archive_table.fields:
            if fieldname not in ['id', current_record]:
                if archive_current and fieldname in form.vars:
                    # archive the submitted (current) values
                    new_record[fieldname] = form.vars[fieldname]
                elif form.record and fieldname in form.record:
                    # archive the previous values
                    new_record[fieldname] = form.record[fieldname]
        if fields:
            new_record.update(fields)
        id = archive_table.insert(**new_record)
        return id
    def wiki(self,
             slug=None,
             env=None,
             render='markmin',
             manage_permissions=False,
             force_prefix='',
             restrict_search=False,
             resolve=True,
             extra=None,
             menu_groups=None,
             templates=None,
             migrate=True,
             controller=None,
             function=None,
             force_render=False,
             groups=None):
        """
        Returns the rendered wiki content for the current request (or for
        `slug`), lazily creating the Wiki engine on first call.

        When controller/function are given, resolve is forced to False so
        the wiki can be set up without processing the request; callers are
        then expected to route to it themselves.
        """
        if controller and function:
            resolve = False
        if not hasattr(self, '_wiki'):
            # first call: build and cache the Wiki engine
            self._wiki = Wiki(self, render=render,
                              manage_permissions=manage_permissions,
                              force_prefix=force_prefix,
                              restrict_search=restrict_search,
                              env=env, extra=extra or {},
                              menu_groups=menu_groups,
                              templates=templates,
                              migrate=migrate,
                              controller=controller,
                              function=function,
                              groups=groups)
        else:
            # subsequent calls may refresh extra helpers and environment
            self._wiki.settings.extra = extra or {}
            self._wiki.env.update(env or {})
        # if resolve is set to True, process request as wiki call
        # resolve=False allows initial setup without wiki redirection
        wiki = None
        if resolve:
            if slug:
                wiki = self._wiki.read(slug, force_render)
                if isinstance(wiki, dict) and 'content' in wiki:
                    # We don't want to return a dict object, just the wiki
                    wiki = wiki['content']
            else:
                wiki = self._wiki()
            if isinstance(wiki, basestring):
                # mark rendered strings as safe HTML
                wiki = XML(wiki)
            return wiki
def wikimenu(self):
"""To be used in menu.py for app wide wiki menus"""
if (hasattr(self, "_wiki") and
self._wiki.settings.controller and
self._wiki.settings.function):
self._wiki.automenu()
class Crud(object):  # pragma: no cover
    """
    Generic create/read/update/delete/select/search actions for DAL
    tables, exposed through a single controller function.
    """

    # default flash and event-log messages; overridable via crud.messages
    default_messages = dict(
        submit_button='Submit',
        delete_label='Check to delete',
        record_created='Record Created',
        record_updated='Record Updated',
        record_deleted='Record Deleted',
        update_log='Record %(id)s updated',
        create_log='Record %(id)s created',
        read_log='Record %(id)s read',
        delete_log='Record %(id)s deleted',
    )
def url(self, f=None, args=None, vars=None):
"""
This should point to the controller that exposes
download and crud
"""
if args is None:
args = []
if vars is None:
vars = {}
return URL(c=self.settings.controller, f=f, args=args, vars=vars)
    def __init__(self, environment, db=None, controller='default'):
        """
        Args:
            environment: legacy positional slot; may carry the DAL when
                `db` is omitted (self.environment itself is always bound
                to `current`)
            db: the DAL instance to operate on
            controller: name of the controller exposing crud/download

        Raises:
            SyntaxError: when no DAL is supplied in either argument.
        """
        self.db = db
        if not db and environment and isinstance(environment, DAL):
            self.db = environment
        elif not db:
            raise SyntaxError("must pass db as first or second argument")
        self.environment = current
        settings = self.settings = Settings()
        settings.auth = None
        settings.logger = None
        # per-action redirect targets (None means "stay on the form")
        settings.create_next = None
        settings.update_next = None
        settings.controller = controller
        settings.delete_next = self.url()
        settings.download_url = self.url('download')
        # per-table callback lists for each action
        settings.create_onvalidation = StorageList()
        settings.update_onvalidation = StorageList()
        settings.delete_onvalidation = StorageList()
        settings.create_onaccept = StorageList()
        settings.update_onaccept = StorageList()
        settings.update_ondelete = StorageList()
        settings.delete_onaccept = StorageList()
        settings.update_deletable = True
        # form rendering defaults
        settings.showid = False
        settings.keepvalues = False
        settings.create_captcha = None
        settings.update_captcha = None
        settings.captcha = None
        settings.formstyle = 'table3cols'
        settings.label_separator = ': '
        settings.hideerror = False
        settings.detect_record_change = True
        settings.hmac_key = None
        settings.lock_keys = True
        messages = self.messages = Messages(current.T)
        messages.update(Crud.default_messages)
        messages.lock_keys = True
    def __call__(self):
        """
        Dispatches request.args to the matching crud action:
        tables / create / select / search / read / update / delete.

        Raises:
            HTTP(404): unknown action or unknown table name.
        """
        args = current.request.args
        if len(args) < 1:
            raise HTTP(404)
        elif args[0] == 'tables':
            return self.tables()
        elif len(args) > 1 and not args(1) in self.db.tables:
            raise HTTP(404)
        # NOTE(review): args(1) is None when only the action is given;
        # self.db[None] would then fail for the actions below -- confirm
        # callers always supply a table name
        table = self.db[args(1)]
        if args[0] == 'create':
            return self.create(table)
        elif args[0] == 'select':
            return self.select(table, linkto=self.url(args='read'))
        elif args[0] == 'search':
            form, rows = self.search(table, linkto=self.url(args='read'))
            return DIV(form, SQLTABLE(rows))
        elif args[0] == 'read':
            return self.read(table, args(2))
        elif args[0] == 'update':
            return self.update(table, args(2))
        elif args[0] == 'delete':
            return self.delete(table, args(2))
        else:
            raise HTTP(404)
def log_event(self, message, vars):
if self.settings.logger:
self.settings.logger.log_event(message, vars, origin='crud')
def has_permission(self, name, table, record=0):
if not self.settings.auth:
return True
try:
record_id = record.id
except:
record_id = record
return self.settings.auth.has_permission(name, str(table), record_id)
def tables(self):
return TABLE(*[TR(A(name,
_href=self.url(args=('select', name))))
for name in self.db.tables])
@staticmethod
def archive(form, archive_table=None, current_record='current_record'):
return Auth.archive(form, archive_table=archive_table,
current_record=current_record)
    def update(self,
               table,
               record,
               next=DEFAULT,
               onvalidation=DEFAULT,
               onaccept=DEFAULT,
               ondelete=DEFAULT,
               log=DEFAULT,
               message=DEFAULT,
               deletable=DEFAULT,
               formname=DEFAULT,
               **attributes
               ):
        """
        Returns an update form for `record` in `table`; also the engine
        behind self.create, which calls it with record=None.

        Args:
            table: Table instance or table name
            record: record id/row to edit, or None/0 to create
            next: redirect target on success ('[id]' substituted via
                replace_id); DEFAULT resolves from _next vars or settings
            onvalidation/onaccept/ondelete: callbacks (DEFAULT resolves
                to the corresponding settings entries)
            log: event-log message
            message: success flash message
            deletable: whether the form offers a delete checkbox
            formname: overrides the computed form name
            attributes: forwarded to SQLFORM (hidden _next is injected)

        Raises:
            HTTP(404) for an unknown table or non-numeric record id;
            HTTP(200)/HTTP(401) serve as success/error responses for
            non-html extensions.
        """
        if not (isinstance(table, Table) or table in self.db.tables) \
                or (isinstance(record, str) and not str(record).isdigit()):
            raise HTTP(404)
        if not isinstance(table, Table):
            table = self.db[table]
        try:
            record_id = record.id
        except:
            record_id = record or 0
        # updating needs 'update' permission; creating needs 'create'
        if record_id and not self.has_permission('update', table, record_id):
            redirect(self.settings.auth.settings.on_failed_authorization)
        if not record_id and not self.has_permission('create', table, record_id):
            redirect(self.settings.auth.settings.on_failed_authorization)
        request = current.request
        response = current.response
        session = current.session
        # a .json request may carry the field values as a JSON payload
        if request.extension == 'json' and request.vars.json:
            request.vars.update(json.loads(request.vars.json))
        if next is DEFAULT:
            next = prevent_open_redirect(request.get_vars._next) \
                or prevent_open_redirect(request.post_vars._next) \
                or self.settings.update_next
        if onvalidation is DEFAULT:
            onvalidation = self.settings.update_onvalidation
        if onaccept is DEFAULT:
            onaccept = self.settings.update_onaccept
        if ondelete is DEFAULT:
            ondelete = self.settings.update_ondelete
        if log is DEFAULT:
            log = self.messages['update_log']
        if deletable is DEFAULT:
            deletable = self.settings.update_deletable
        if message is DEFAULT:
            message = self.messages.record_updated
        if 'hidden' not in attributes:
            attributes['hidden'] = {}
        attributes['hidden']['_next'] = next
        form = SQLFORM(
            table,
            record,
            showid=self.settings.showid,
            submit_button=self.messages.submit_button,
            delete_label=self.messages.delete_label,
            deletable=deletable,
            upload=self.settings.download_url,
            formstyle=self.settings.formstyle,
            separator=self.settings.label_separator,
            **attributes  # contains hidden
        )
        self.accepted = False
        self.deleted = False
        # update forms get the update captcha, create forms the create one
        captcha = self.settings.update_captcha or self.settings.captcha
        if record and captcha:
            addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row')
        captcha = self.settings.create_captcha or self.settings.captcha
        if not record and captcha:
            addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle, 'captcha__row')
        # non-html (API) requests skip session/CSRF form tracking
        if request.extension not in ('html', 'load'):
            (_session, _formname) = (None, None)
        else:
            (_session, _formname) = (
                session, '%s/%s' % (table._tablename, form.record_id))
        if formname is not DEFAULT:
            _formname = formname
        keepvalues = self.settings.keepvalues
        if request.vars.delete_this_record:
            keepvalues = False
        # per-table callbacks: pick the list registered for this table
        if isinstance(onvalidation, StorageList):
            onvalidation = onvalidation.get(table._tablename, [])
        if form.accepts(request, _session, formname=_formname,
                        onvalidation=onvalidation, keepvalues=keepvalues,
                        hideerror=self.settings.hideerror,
                        detect_record_change=self.settings.detect_record_change):
            self.accepted = True
            response.flash = message
            if log:
                self.log_event(log, form.vars)
            if request.vars.delete_this_record:
                self.deleted = True
                message = self.messages.record_deleted
                callback(ondelete, form, table._tablename)
            response.flash = message
            callback(onaccept, form, table._tablename)
            if request.extension not in ('html', 'load'):
                raise HTTP(200, 'RECORD CREATED/UPDATED')
            if isinstance(next, (list, tuple)):  # fix issue with 2.6
                next = next[0]
            if next:  # Only redirect when explicit
                next = replace_id(next, form)
                session.flash = response.flash
                redirect(next)
        elif request.extension not in ('html', 'load'):
            raise HTTP(401, serializers.json(dict(errors=form.errors)))
        return form
def create(self,
           table,
           next=DEFAULT,
           onvalidation=DEFAULT,
           onaccept=DEFAULT,
           log=DEFAULT,
           message=DEFAULT,
           formname=DEFAULT,
           **attributes
           ):
    """
    Return a record-creation form for `table`.

    Thin shortcut around update(): creating a record is the same as
    updating a non-existent (None) record with deletion disabled.
    Any argument left as DEFAULT falls back to the corresponding
    `create_*` setting/message.
    """
    settings, messages = self.settings, self.messages
    if next is DEFAULT:
        next = settings.create_next
    if onvalidation is DEFAULT:
        onvalidation = settings.create_onvalidation
    if onaccept is DEFAULT:
        onaccept = settings.create_onaccept
    if log is DEFAULT:
        log = messages['create_log']
    if message is DEFAULT:
        message = messages.record_created
    # record=None means "new record"; deletable=False hides the delete checkbox
    return self.update(table, None,
                       next=next,
                       onvalidation=onvalidation,
                       onaccept=onaccept,
                       log=log,
                       message=message,
                       deletable=False,
                       formname=formname,
                       **attributes)
def read(self, table, record):
    """
    Return a read-only SQLFORM for one record of `table`.

    Raises HTTP(404) for an unknown table or a non-numeric string
    record id; redirects when the user lacks 'read' permission.
    For non-HTML requests the readable fields are returned instead
    of the form.
    """
    is_table = isinstance(table, Table)
    bad_record = isinstance(record, str) and not str(record).isdigit()
    if not (is_table or table in self.db.tables) or bad_record:
        raise HTTP(404)
    if not is_table:
        table = self.db[table]
    if not self.has_permission('read', table, record):
        redirect(self.settings.auth.settings.on_failed_authorization)
    form = SQLFORM(table,
                   record,
                   readonly=True,
                   comments=False,
                   upload=self.settings.download_url,
                   showid=self.settings.showid,
                   formstyle=self.settings.formstyle,
                   separator=self.settings.label_separator)
    if current.request.extension in ('html', 'load'):
        return form
    # API-style request: return the raw (readable) field values
    return table._filter_fields(form.record, id=True)
def delete(self,
           table,
           record_id,
           next=DEFAULT,
           message=DEFAULT,
           ):
    """
    Delete `record_id` from `table` (after an authorization check),
    flash `message` and redirect to `next`.

    The delete_onvalidation / delete_onaccept callbacks run around the
    actual deletion; a missing record simply redirects without flash.
    """
    if not (isinstance(table, Table) or table in self.db.tables):
        raise HTTP(404)
    if not isinstance(table, Table):
        table = self.db[table]
    if not self.has_permission('delete', table, record_id):
        redirect(self.settings.auth.settings.on_failed_authorization)
    request = current.request
    session = current.session
    if next is DEFAULT:
        # prefer a sanitized _next from the request, else the configured default
        next = (prevent_open_redirect(request.get_vars._next)
                or prevent_open_redirect(request.post_vars._next)
                or self.settings.delete_next)
    if message is DEFAULT:
        message = self.messages.record_deleted
    record = table[record_id]
    if record:
        callback(self.settings.delete_onvalidation, record)
        del table[record_id]
        callback(self.settings.delete_onaccept, record, table._tablename)
        session.flash = message
    redirect(next)
def rows(self,
         table,
         query=None,
         fields=None,
         orderby=None,
         limitby=None,
         ):
    """
    Select and return rows from `table` after a 'select' permission
    check.

    `query` defaults to all records (table.id > 0); `fields` defaults
    to the table's readable fields and may mix field objects with
    field-name strings.
    """
    if not (isinstance(table, Table) or table in self.db.tables):
        raise HTTP(404)
    if not self.has_permission('select', table):
        redirect(self.settings.auth.settings.on_failed_authorization)
    # if record_id and not self.has_permission('select', table):
    #     redirect(self.settings.auth.settings.on_failed_authorization)
    if not isinstance(table, Table):
        table = self.db[table]
    if not query:
        query = table.id > 0
    if fields:
        # resolve any field names given as strings
        columns = [table[f] if isinstance(f, str) else f for f in fields]
    else:
        columns = [f for f in table if f.readable]
    return self.db(query).select(*columns, **dict(orderby=orderby,
                                                  limitby=limitby))
def select(self,
           table,
           query=None,
           fields=None,
           orderby=None,
           limitby=None,
           headers=None,
           **attr
           ):
    """
    Render rows of `table` as an SQLTABLE (or a plain list for
    non-HTML requests).  Returns None when the query matches nothing,
    which renders nicer than an empty table.
    """
    headers = headers or {}
    rows = self.rows(table, query, fields, orderby, limitby)
    if not rows:
        return None  # nicer than an empty table
    if 'upload' not in attr:
        attr['upload'] = self.url('download')
    if current.request.extension not in ('html', 'load'):
        return rows.as_list()
    if not headers:
        if isinstance(table, str):
            table = self.db[table]
        # default column headers: field label keyed by "table.field"
        headers = {str(k): k.label for k in table}
    return SQLTABLE(rows, headers=headers, **attr)
def get_format(self, field):
    """
    Return the display column name for a reference field.

    Looks up the referenced table's `_format` string; for a simple
    "%(column)s" format the bare column name is returned (the
    surrounding "%(" and ")s" are stripped), otherwise the field's
    own name is used as a fallback.
    """
    referenced_table = field._db[field.type[10:]]  # strip 'reference ' prefix
    fmt = referenced_table.get('_format', None)
    if fmt and isinstance(fmt, str):
        return fmt[2:-2]
    return field.name
def get_query(self, field, op, value, refsearch=False):
    """
    Build a search condition on `field` for operator `op` and `value`.

    Returns:
        - a DAL query expression when refsearch is False;
        - a row predicate (``row -> bool``) when refsearch is True,
          used for reference fields: the comparison is applied to the
          referenced record's display column (see get_format());
        - None for an unrecognized operator or on any error while
          building the condition.
    """
    try:
        if refsearch:
            format = self.get_format(field)
        if op == 'equals':
            if not refsearch:
                return field == value
            else:
                return lambda row: row[field.name][format] == value
        elif op == 'not equal':
            if not refsearch:
                return field != value
            else:
                return lambda row: row[field.name][format] != value
        elif op == 'greater than':
            if not refsearch:
                return field > value
            else:
                return lambda row: row[field.name][format] > value
        elif op == 'less than':
            if not refsearch:
                return field < value
            else:
                return lambda row: row[field.name][format] < value
        elif op == 'starts with':
            if not refsearch:
                return field.like(value + '%')
            else:
                return lambda row: str(row[field.name][format]).startswith(value)
        elif op == 'ends with':
            if not refsearch:
                return field.like('%' + value)
            else:
                return lambda row: str(row[field.name][format]).endswith(value)
        elif op == 'contains':
            if not refsearch:
                return field.like('%' + value + '%')
            else:
                return lambda row: value in row[field.name][format]
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; any failure still yields None (no condition)
        return None
def search(self, *tables, **args):
    """
    Creates a search form and its results for a table.

    Only the first table in `tables` is searched.  Recognized keyword
    arguments: fields, validate, queries, query_labels, field_labels,
    zero, query, showall, chkall, and any of the select() options
    (orderby, groupby, left, distinct, limitby, cache).

    Returns a (form, results) tuple; `results` is None when the final
    select fails, or [] when no field was selected for searching.

    Examples:
        Use as::
            form, results = crud.search(db.test,
               queries = ['equals', 'not equal', 'contains'],
               query_labels={'equals':'Equals',
                             'not equal':'Not equal'},
               fields = ['id','children'],
               field_labels = {
                   'id':'ID','children':'Children'},
               zero='Please choose',
               query = (db.test.id > 0)&(db.test.id != 3) )
    """
    table = tables[0]
    fields = args.get('fields', table.fields)
    validate = args.get('validate', True)
    request = current.request
    db = self.db
    if not (isinstance(table, Table) or table in db.tables):
        raise HTTP(404)
    # forward recognized select() options untouched
    attributes = {}
    for key in ('orderby', 'groupby', 'left', 'distinct', 'limitby', 'cache'):
        if key in args:
            attributes[key] = args[key]
    tbl = TABLE()
    selected = []
    refsearch = []   # row predicates for 'reference ' fields (see get_query)
    results = []
    showall = args.get('showall', False)
    if showall:
        selected = fields
    # chkall pre-ticks every field's checkbox via the request vars
    chkall = args.get('chkall', False)
    if chkall:
        for f in fields:
            request.vars['chk%s' % f] = 'on'
    ops = args.get('queries', [])
    zero = args.get('zero', '')
    if not ops:
        ops = ['equals', 'not equal', 'greater than',
               'less than', 'starts with',
               'ends with', 'contains']
    ops.insert(0, zero)   # the "no operator selected" placeholder
    query_labels = args.get('query_labels', {})
    query = args.get('query', table.id > 0)
    field_labels = args.get('field_labels', {})
    # one TR per readable field: [checkbox, label, operator select, value input]
    for field in fields:
        field = table[field]
        if not field.readable:
            continue
        fieldname = field.name
        chkval = request.vars.get('chk' + fieldname, None)
        txtval = request.vars.get('txt' + fieldname, None)
        opval = request.vars.get('op' + fieldname, None)
        row = TR(TD(INPUT(_type="checkbox", _name="chk" + fieldname,
                          _disabled=(field.type == 'id'),
                          value=(field.type == 'id' or chkval == 'on'))),
                 TD(field_labels.get(fieldname, field.label)),
                 TD(SELECT([OPTION(query_labels.get(op, op),
                                   _value=op) for op in ops],
                           _name="op" + fieldname,
                           value=opval)),
                 TD(INPUT(_type="text", _name="txt" + fieldname,
                          _value=txtval, _id='txt' + fieldname,
                          _class=str(field.type))))
        tbl.append(row)
        # on submission, fold each checked field's condition into `query`
        # (the id field is always considered checked)
        if request.post_vars and (chkval or field.type == 'id'):
            if txtval and opval != '':
                if field.type[0:10] == 'reference ':
                    # reference fields are filtered post-select via predicates
                    refsearch.append(self.get_query(field, opval, txtval, refsearch=True))
                elif validate:
                    value, error = field.validate(txtval)
                    if not error:
                        # TODO deal with 'starts with', 'ends with', 'contains' on GAE
                        query &= self.get_query(field, opval, value)
                    else:
                        # attach the validation error to the value cell
                        row[3].append(DIV(error, _class='error'))
                else:
                    query &= self.get_query(field, opval, txtval)
            selected.append(field)
    form = FORM(tbl, INPUT(_type="submit"))
    if selected:
        try:
            results = db(query).select(*selected, **attributes)
            # apply the reference-field predicates on the result set
            for r in refsearch:
                results = results.find(r)
        except:  # TODO: hmmm, we should do better here
            results = None
    return form, results
# Install a global urllib2 opener with cookie handling, used by the
# non-GAE branch of fetch() below.
urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor()))
def fetch(url, data=None, headers=None,
          cookie=None,
          user_agent='Mozilla/5.0'):
    """
    Fetch `url` and return the response body.

    POSTs `data` (a dict, form-encoded) when given, otherwise GETs.
    On Google App Engine the urlfetch API is used and redirects are
    followed manually so that `cookie` can be carried along; elsewhere
    a plain urllib2 request is issued.

    Args:
        url: address to fetch
        data: optional dict of form fields; its presence switches to POST
        headers: optional dict of extra HTTP headers
        cookie: optional SimpleCookie sent with the request (and, on the
            GAE path, updated from Set-Cookie response headers)
        user_agent: value for the User-agent header (sent when truthy)
    """
    # FIX: the default used to be `cookie=Cookie.SimpleCookie()`, a mutable
    # default evaluated once at import time — every caller silently shared
    # (and mutated) one cookie jar; allocate a fresh jar per call instead
    if cookie is None:
        cookie = Cookie.SimpleCookie()
    headers = headers or {}
    if data is not None:
        data = urlencode(data)
    if user_agent:
        headers['User-agent'] = user_agent
    headers['Cookie'] = ' '.join(
        ['%s=%s;' % (c.key, c.value) for c in cookie.values()])
    try:
        from google.appengine.api import urlfetch
    except ImportError:
        req = urllib2.Request(url, data, headers)
        html = urlopen(req).read()
    else:
        method = ((data is None) and urlfetch.GET) or urlfetch.POST
        while url is not None:
            response = urlfetch.fetch(url=url, payload=data,
                                      method=method, headers=headers,
                                      allow_truncated=False, follow_redirects=False,
                                      deadline=10)
            # next request will be a get, so no need to send the data again
            data = None
            method = urlfetch.GET
            # load cookies from the response
            cookie.load(response.headers.get('set-cookie', ''))
            url = response.headers.get('location')
            html = response.content
    return html
# Extracts the first <lat>/<lng> pair nested inside <geometry><location>
# from the XML returned by the Google geocoding API (used by geocode()).
regex_geocode = \
    re.compile(r"""<geometry>[\W]*?<location>[\W]*?<lat>(?P<la>[^<]*)</lat>[\W]*?<lng>(?P<lo>[^<]*)</lng>[\W]*?</location>""")
def geocode(address):
    """
    Resolve a street address to a (latitude, longitude) tuple via the
    Google geocoding XML API.  Best-effort: returns (0.0, 0.0) on any
    failure (network error, no match, unparsable response).
    """
    try:
        quoted = urllib_quote(address)
        xml_text = fetch(
            'http://maps.googleapis.com/maps/api/geocode/xml?sensor=false&address=%s' % quoted)
        match = regex_geocode.search(xml_text)
        return (float(match.group('la')), float(match.group('lo')))
    except:
        return (0.0, 0.0)
def reverse_geocode(lat, lng, lang=None):
    """Try to get an approximate address for a given latitude, longitude.

    Uses the Google geocoding JSON API; `lang` defaults to the current
    request's accepted language.  Returns '' on any failure.
    """
    if not lang:
        lang = current.T.accepted_language
    api = 'http://maps.googleapis.com/maps/api/geocode/json?latlng=%(lat)s,%(lng)s&language=%(lang)s'
    try:
        decoded = json.loads(fetch(api % locals()))
        return decoded['results'][0]['formatted_address']
    except:
        return ''
def universal_caller(f, *a, **b):
    """
    Call `f` with a mix of positional and named arguments, matching
    them against f's signature.

    Positional values in `a` are assigned in order; remaining required
    arguments may be supplied by name through `b`.  Raises HTTP 404
    when the required (no-default) arguments cannot all be satisfied.
    """
    c = f.__code__.co_argcount
    n = f.__code__.co_varnames[:c]
    defaults = f.__defaults__ or []
    # FIX: the original computed n[0:-len(defaults)], which for a function
    # with NO defaults is n[0:0] == () — every argument was then treated as
    # optional and missing-argument detection was skipped (the call failed
    # with a TypeError instead of the intended HTTP 404)
    split = c - len(defaults)
    pos_args = n[:split]      # required arguments (no default value)
    named_args = n[split:]    # optional arguments (have defaults)
    arg_dict = {}
    # Fill the arg_dict with name and value for the submitted, positional values
    for pos_index, pos_val in enumerate(a[:c]):
        arg_dict[n[pos_index]] = pos_val  # n[pos_index] is the name of the argument
    # There might be pos_args left, that are sent as named_values. Gather them as well.
    # If an argument already is populated with values we simply replace them.
    for arg_name in pos_args[len(arg_dict):]:
        if arg_name in b:
            arg_dict[arg_name] = b[arg_name]
    if len(arg_dict) >= len(pos_args):
        # All the positional arguments are found. The function may now be called.
        # However, we need to update the arg_dict with the values from the named arguments as well.
        for arg_name in named_args:
            if arg_name in b:
                arg_dict[arg_name] = b[arg_name]
        return f(**arg_dict)
    # Raise an error, the function cannot be called.
    raise HTTP(404, "Object does not exist")
class Service(object):
    """
    Exposes python functions as web services over several protocols:
    run/csv/xml/rss/json, JSON-RPC 1.1/2.0, XML-RPC, AMF-RPC and SOAP.

    Functions are registered with the decorator methods below and
    dispatched by calling the instance from a controller `call()`
    action; see __call__ for the URL layout.
    """

    def __init__(self, environment=None, check_args=False):
        # `environment` is accepted for backward compatibility and unused.
        # When check_args is True, submitted arguments are matched against
        # the target function's signature via universal_caller().
        self.check_args = check_args
        # one {function_name: callable} registry per supported protocol
        self.run_procedures = {}
        self.csv_procedures = {}
        self.xml_procedures = {}
        self.rss_procedures = {}
        self.json_procedures = {}
        self.jsonrpc_procedures = {}
        self.jsonrpc2_procedures = {}
        self.xmlrpc_procedures = {}
        self.amfrpc_procedures = {}
        self.amfrpc3_procedures = {}
        self.soap_procedures = {}

    def run(self, f):
        """
        Example:
            Use as::
                service = Service()
                @service.run
                def myfunction(a, b):
                    return a + b
                def call():
                    return service()
            Then call it with::
                wget http://..../app/default/call/run/myfunction?a=3&b=4
        """
        self.run_procedures[f.__name__] = f
        return f

    def csv(self, f):
        """
        Example:
            Use as::
                service = Service()
                @service.csv
                def myfunction(a, b):
                    return a + b
                def call():
                    return service()
            Then call it with::
                wget http://..../app/default/call/csv/myfunction?a=3&b=4
        """
        self.csv_procedures[f.__name__] = f
        return f

    def xml(self, f):
        """
        Example:
            Use as::
                service = Service()
                @service.xml
                def myfunction(a, b):
                    return a + b
                def call():
                    return service()
            Then call it with::
                wget http://..../app/default/call/xml/myfunction?a=3&b=4
        """
        self.xml_procedures[f.__name__] = f
        return f

    def rss(self, f):
        """
        Example:
            Use as::
                service = Service()
                @service.rss
                def myfunction():
                    return dict(title=..., link=..., description=...,
                                created_on=..., entries=[dict(title=..., link=...,
                                description=..., created_on=...])
                def call():
                    return service()
            Then call it with:
                wget http://..../app/default/call/rss/myfunction
        """
        self.rss_procedures[f.__name__] = f
        return f

    def json(self, f):
        """
        Example:
            Use as::
                service = Service()
                @service.json
                def myfunction(a, b):
                    return [{a: b}]
                def call():
                    return service()
            Then call it with:;
                wget http://..../app/default/call/json/myfunction?a=hello&b=world
        """
        self.json_procedures[f.__name__] = f
        return f

    def jsonrpc(self, f):
        """
        Example:
            Use as::
                service = Service()
                @service.jsonrpc
                def myfunction(a, b):
                    return a + b
                def call():
                    return service()
            Then call it with:
                wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world
        """
        self.jsonrpc_procedures[f.__name__] = f
        return f

    def jsonrpc2(self, f):
        """
        Example:
            Use as::
                service = Service()
                @service.jsonrpc2
                def myfunction(a, b):
                    return a + b
                def call():
                    return service()
            Then call it with:
                wget --post-data '{"jsonrpc": "2.0",
                                   "id": 1,
                                   "method": "myfunction",
                                   "params": {"a": 1, "b": 2}}' http://..../app/default/call/jsonrpc2
        """
        self.jsonrpc2_procedures[f.__name__] = f
        return f

    def xmlrpc(self, f):
        """
        Example:
            Use as::
                service = Service()
                @service.xmlrpc
                def myfunction(a, b):
                    return a + b
                def call():
                    return service()
            The call it with:
                wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world
        """
        self.xmlrpc_procedures[f.__name__] = f
        return f

    def amfrpc(self, f):
        """
        Example:
            Use as::
                service = Service()
                @service.amfrpc
                def myfunction(a, b):
                    return a + b
                def call():
                    return service()
            Then call it with::
                wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world
        """
        self.amfrpc_procedures[f.__name__] = f
        return f

    def amfrpc3(self, domain='default'):
        """
        Example:
            Use as::
                service = Service()
                @service.amfrpc3('domain')
                def myfunction(a, b):
                    return a + b
                def call():
                    return service()
            Then call it with:
                wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world
        """
        if not isinstance(domain, str):
            raise SyntaxError("AMF3 requires a domain for function")

        def _amfrpc3(f):
            # functions are registered under "domain.name"
            if domain:
                self.amfrpc3_procedures[domain + '.' + f.__name__] = f
            else:
                self.amfrpc3_procedures[f.__name__] = f
            return f
        return _amfrpc3

    def soap(self, name=None, returns=None, args=None, doc=None, response_element_name=None):
        """
        Example:
            Use as::
                service = Service()
                @service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,})
                def myfunction(a, b):
                    return a + b
                def call():
                    return service()
            Then call it with::
                from gluon.contrib.pysimplesoap.client import SoapClient
                client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL")
                response = client.MyFunction(a=1,b=2)
                return response['result']
            It also exposes online generated documentation and xml example messages
            at `http://..../app/default/call/soap`
        """
        def _soap(f):
            # stored as a 5-tuple consumed by serve_soap()
            self.soap_procedures[name or f.__name__] = f, returns, args, doc, response_element_name
            return f
        return _soap

    def serve_run(self, args=None):
        # plain-text dispatch: /call/run/<name>?<vars>
        request = current.request
        if not args:
            args = request.args
        if args and args[0] in self.run_procedures:
            return str(self.call_service_function(self.run_procedures[args[0]],
                                                  *args[1:], **dict(request.vars)))
        self.error()

    def serve_csv(self, args=None):
        request = current.request
        response = current.response
        response.headers['Content-Type'] = 'text/x-csv'
        if not args:
            args = request.args

        def none_exception(value):
            # normalize cell values for CSV output (dates, None, unicode)
            if isinstance(value, unicodeT):
                return value.encode('utf8')
            if hasattr(value, 'isoformat'):
                return value.isoformat()[:19].replace('T', ' ')
            if value is None:
                return '<NULL>'
            return value
        if args and args[0] in self.csv_procedures:
            import types
            r = self.call_service_function(self.csv_procedures[args[0]],
                                           *args[1:], **dict(request.vars))
            s = StringIO()
            if hasattr(r, 'export_to_csv_file'):
                # DAL Rows know how to export themselves
                r.export_to_csv_file(s)
            elif r and not isinstance(r, types.GeneratorType) and isinstance(r[0], (dict, Storage)):
                # sequence of dicts: first row's keys become the header
                import csv
                writer = csv.writer(s)
                writer.writerow(list(r[0].keys()))
                for line in r:
                    writer.writerow([none_exception(v)
                                     for v in line.values()])
            else:
                # generic iterable of rows
                import csv
                writer = csv.writer(s)
                for line in r:
                    writer.writerow(line)
            return s.getvalue()
        self.error()

    def serve_xml(self, args=None):
        request = current.request
        response = current.response
        response.headers['Content-Type'] = 'text/xml'
        if not args:
            args = request.args
        if args and args[0] in self.xml_procedures:
            s = self.call_service_function(self.xml_procedures[args[0]],
                                           *args[1:], **dict(request.vars))
            if hasattr(s, 'as_list'):
                s = s.as_list()
            return serializers.xml(s, quote=False)
        self.error()

    def serve_rss(self, args=None):
        request = current.request
        response = current.response
        if not args:
            args = request.args
        if args and args[0] in self.rss_procedures:
            feed = self.call_service_function(self.rss_procedures[args[0]],
                                              *args[1:], **dict(request.vars))
        else:
            self.error()  # raises HTTP(404)
        response.headers['Content-Type'] = 'application/rss+xml'
        return serializers.rss(feed)

    def serve_json(self, args=None):
        request = current.request
        response = current.response
        response.headers['Content-Type'] = 'application/json; charset=utf-8'
        if not args:
            args = request.args
        d = dict(request.vars)
        if args and args[0] in self.json_procedures:
            s = self.call_service_function(self.json_procedures[args[0]], *args[1:], **d)
            if hasattr(s, 'as_list'):
                s = s.as_list()
            return response.json(s)
        self.error()

    class JsonRpcException(Exception):
        # raised by service functions to report a JSON-RPC error code/info
        def __init__(self, code, info):
            jrpc_error = Service.jsonrpc_errors.get(code)
            if jrpc_error:
                self.message, self.description = jrpc_error
            self.code, self.info = code, info

    # jsonrpc 2.0 error types. records the following structure {code: (message,meaning)}
    jsonrpc_errors = {
        -32700: ("Parse error. Invalid JSON was received by the server.",
                 "An error occurred on the server while parsing the JSON text."),
        -32600: ("Invalid Request", "The JSON sent is not a valid Request object."),
        -32601: ("Method not found", "The method does not exist / is not available."),
        -32602: ("Invalid params", "Invalid method parameter(s)."),
        -32603: ("Internal error", "Internal JSON-RPC error."),
        -32099: ("Server error", "Reserved for implementation-defined server-errors.")}

    def serve_jsonrpc(self):
        # JSON-RPC 1.1; requests carrying a "jsonrpc" member are handed
        # over to serve_jsonrpc2()
        def return_response(id, result):
            return serializers.json({'version': '1.1', 'id': id, 'result': result, 'error': None})

        def return_error(id, code, message, data=None):
            error = {'name': 'JSONRPCError',
                     'code': code, 'message': message}
            if data is not None:
                error['data'] = data
            return serializers.json({'id': id,
                                     'version': '1.1',
                                     'error': error,
                                     })
        request = current.request
        response = current.response
        response.headers['Content-Type'] = 'application/json; charset=utf-8'
        methods = self.jsonrpc_procedures
        data = json.loads(request.body.read())
        jsonrpc_2 = data.get('jsonrpc')
        if jsonrpc_2:  # hand over to version 2 of the protocol
            return self.serve_jsonrpc2(data)
        id, method, params = data.get('id'), data.get('method'), data.get('params', [])
        if id is None:
            return return_error(0, 100, 'missing id')
        if method not in methods:
            return return_error(id, 100, 'method "%s" does not exist' % method)
        try:
            if isinstance(params, dict):
                s = methods[method](**params)
            else:
                s = methods[method](*params)
            if hasattr(s, 'as_list'):
                s = s.as_list()
            return return_response(id, s)
        except Service.JsonRpcException as e:
            return return_error(id, e.code, e.info)
        except:
            etype, eval, etb = sys.exc_info()
            message = '%s: %s' % (etype.__name__, eval)
            # only expose tracebacks to local requests
            data = request.is_local and traceback.format_tb(etb)
            logger.warning('jsonrpc exception %s\n%s' % (message, traceback.format_tb(etb)))
            return return_error(id, 100, message, data)

    def serve_jsonrpc2(self, data=None, batch_element=False):
        def return_response(id, result):
            if not must_respond:
                return None
            return serializers.json({'jsonrpc': '2.0', 'id': id, 'result': result})

        def return_error(id, code, message=None, data=None):
            error = {'code': code}
            if code in Service.jsonrpc_errors:
                error['message'] = Service.jsonrpc_errors[code][0]
                error['data'] = Service.jsonrpc_errors[code][1]
            if message is not None:
                error['message'] = message
            if data is not None:
                error['data'] = data
            return serializers.json({'jsonrpc': '2.0', 'id': id, 'error': error})

        def validate(data):
            """
            Validate request as defined in: http://www.jsonrpc.org/specification#request_object.
            Args:
                data(str): The json object.
            Returns:
                - True -- if successful
                - False -- if no error should be reported (i.e. data is missing 'id' member)
            Raises:
                JsonRPCException
            """
            iparms = set(data.keys())
            mandatory_args = set(['jsonrpc', 'method'])
            missing_args = mandatory_args - iparms
            if missing_args:
                raise Service.JsonRpcException(-32600, 'Missing arguments %s.' % list(missing_args))
            if data['jsonrpc'] != '2.0':
                raise Service.JsonRpcException(-32603, 'Unsupported jsonrpc version "%s"' % data['jsonrpc'])
            if 'id' not in iparms:
                return False  # notification: execute but do not respond
            return True

        request = current.request
        response = current.response
        if not data:
            response.headers['Content-Type'] = 'application/json; charset=utf-8'
            try:
                data = json.loads(request.body.read())
            except ValueError:  # decoding error in json lib
                return return_error(None, -32700)
        # Batch handling
        if isinstance(data, list) and not batch_element:
            retlist = []
            for c in data:
                retstr = self.serve_jsonrpc2(c, batch_element=True)
                if retstr:  # do not add empty responses
                    retlist.append(retstr)
            if len(retlist) == 0:  # return nothing
                return ''
            else:
                return "[" + ','.join(retlist) + "]"
        # FIX: previously `methods = self.jsonrpc2_procedures` aliased the
        # registry, so the update() below permanently merged the jsonrpc
        # procedures into the jsonrpc2 registry on first call; copy instead
        methods = dict(self.jsonrpc2_procedures)
        methods.update(self.jsonrpc_procedures)
        try:
            must_respond = validate(data)
        except Service.JsonRpcException as e:
            return return_error(None, e.code, e.info)
        id, method, params = data.get('id'), data['method'], data.get('params', '')
        if method not in methods:
            return return_error(id, -32601, data='Method "%s" does not exist' % method)
        try:
            if isinstance(params, dict):
                s = methods[method](**params)
            else:
                s = methods[method](*params)
            if hasattr(s, 'as_list'):
                s = s.as_list()
            if must_respond:
                return return_response(id, s)
            else:
                return ''
        except HTTP as e:
            raise e
        except Service.JsonRpcException as e:
            return return_error(id, e.code, e.info)
        except:
            etype, eval, etb = sys.exc_info()
            data = '%s: %s\n' % (etype.__name__, eval) + str(request.is_local and traceback.format_tb(etb))
            logger.warning('%s: %s\n%s' % (etype.__name__, eval, traceback.format_tb(etb)))
            return return_error(id, -32099, data=data)

    def serve_xmlrpc(self):
        request = current.request
        response = current.response
        services = list(self.xmlrpc_procedures.values())
        return response.xmlrpc(request, services)

    def serve_amfrpc(self, version=0):
        # version 0 serves amfrpc_procedures; version 3 serves
        # amfrpc3_procedures (domain-qualified names)
        try:
            import pyamf
            import pyamf.remoting.gateway
        except:
            return "pyamf not installed or not in Python sys.path"
        request = current.request
        response = current.response
        if version == 3:
            services = self.amfrpc3_procedures
            base_gateway = pyamf.remoting.gateway.BaseGateway(services)
            pyamf_request = pyamf.remoting.decode(request.body)
        else:
            services = self.amfrpc_procedures
            base_gateway = pyamf.remoting.gateway.BaseGateway(services)
            context = pyamf.get_context(pyamf.AMF0)
            pyamf_request = pyamf.remoting.decode(request.body, context)
        pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion)
        for name, message in pyamf_request:
            pyamf_response[name] = base_gateway.getProcessor(message)(message)
        response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE
        if version == 3:
            return pyamf.remoting.encode(pyamf_response).getvalue()
        else:
            return pyamf.remoting.encode(pyamf_response, context).getvalue()

    def serve_soap(self, version="1.1"):
        try:
            from gluon.contrib.pysimplesoap.server import SoapDispatcher
        except:
            return "pysimplesoap not installed in contrib"
        request = current.request
        response = current.response
        procedures = self.soap_procedures
        location = "%s://%s%s" % (request.env.wsgi_url_scheme,
                                  request.env.http_host,
                                  URL(r=request, f="call/soap", vars={}))
        namespace = 'namespace' in response and response.namespace or location
        documentation = response.description or ''
        dispatcher = SoapDispatcher(
            name=response.title,
            location=location,
            action=location,  # SOAPAction
            namespace=namespace,
            prefix='pys',
            documentation=documentation,
            ns=True)
        for method, (function, returns, args, doc, resp_elem_name) in iteritems(procedures):
            dispatcher.register_function(method, function, returns, args, doc, resp_elem_name)
        if request.env.request_method == 'POST':
            fault = {}
            # Process normal Soap Operation
            response.headers['Content-Type'] = 'text/xml'
            xml = dispatcher.dispatch(request.body.read(), fault=fault)
            if fault:
                # May want to consider populating a ticket here...
                response.status = 500
            # return the soap response
            return xml
        elif 'WSDL' in request.vars:
            # Return Web Service Description
            response.headers['Content-Type'] = 'text/xml'
            return dispatcher.wsdl()
        elif 'op' in request.vars:
            # Return method help webpage
            response.headers['Content-Type'] = 'text/html'
            method = request.vars['op']
            sample_req_xml, sample_res_xml, doc = dispatcher.help(method)
            body = [H1("Welcome to Web2Py SOAP webservice gateway"),
                    A("See all webservice operations",
                      _href=URL(r=request, f="call/soap", vars={})),
                    H2(method),
                    P(doc),
                    UL(LI("Location: %s" % dispatcher.location),
                       LI("Namespace: %s" % dispatcher.namespace),
                       LI("SoapAction: %s" % dispatcher.action),
                       ),
                    H3("Sample SOAP XML Request Message:"),
                    CODE(sample_req_xml, language="xml"),
                    H3("Sample SOAP XML Response Message:"),
                    CODE(sample_res_xml, language="xml"),
                    ]
            return {'body': body}
        else:
            # Return general help and method list webpage
            response.headers['Content-Type'] = 'text/html'
            body = [H1("Welcome to Web2Py SOAP webservice gateway"),
                    P(response.description),
                    P("The following operations are available"),
                    A("See WSDL for webservice description",
                      _href=URL(r=request, f="call/soap", vars={"WSDL": None})),
                    UL([LI(A("%s: %s" % (method, doc or ''),
                             _href=URL(r=request, f="call/soap", vars={'op': method})))
                        for method, doc in dispatcher.list_methods()]),
                    ]
            return {'body': body}

    def __call__(self):
        """
        Registers services with::
            service = Service()
            @service.run
            @service.rss
            @service.json
            @service.jsonrpc
            @service.xmlrpc
            @service.amfrpc
            @service.amfrpc3('domain')
            @service.soap('Method', returns={'Result':int}, args={'a':int,'b':int,})
        Exposes services with::
            def call():
                return service()
        You can call services with::
            http://..../app/default/call/run?[parameters]
            http://..../app/default/call/rss?[parameters]
            http://..../app/default/call/json?[parameters]
            http://..../app/default/call/jsonrpc
            http://..../app/default/call/xmlrpc
            http://..../app/default/call/amfrpc
            http://..../app/default/call/amfrpc3
            http://..../app/default/call/soap
        """
        request = current.request
        if len(request.args) < 1:
            raise HTTP(404, "Not Found")
        arg0 = request.args(0)
        if arg0 == 'run':
            return self.serve_run(request.args[1:])
        elif arg0 == 'rss':
            return self.serve_rss(request.args[1:])
        elif arg0 == 'csv':
            return self.serve_csv(request.args[1:])
        elif arg0 == 'xml':
            return self.serve_xml(request.args[1:])
        elif arg0 == 'json':
            return self.serve_json(request.args[1:])
        elif arg0 == 'jsonrpc':
            return self.serve_jsonrpc()
        elif arg0 == 'jsonrpc2':
            return self.serve_jsonrpc2()
        elif arg0 == 'xmlrpc':
            return self.serve_xmlrpc()
        elif arg0 == 'amfrpc':
            return self.serve_amfrpc()
        elif arg0 == 'amfrpc3':
            return self.serve_amfrpc(3)
        elif arg0 == 'soap':
            return self.serve_soap()
        else:
            self.error()

    def error(self):
        raise HTTP(404, "Object does not exist")

    # we make this a method so that subclasses can override it if they want to do more specific argument-checking
    # but the default implmentation is the simplest: just pass the arguments we got, with no checking
    def call_service_function(self, f, *a, **b):
        if self.check_args:
            return universal_caller(f, *a, **b)
        else:
            return f(*a, **b)
def completion(callback):
    """
    Executes a task on completion of the called action.

    Example:
        Use as::
            from gluon.tools import completion
            @completion(lambda d: logging.info(repr(d)))
            def index():
                return dict(message='hello')

    The callback receives the decorated function's return value (None
    if it raised) and runs in a new thread, after the action finishes.
    """
    def _completion(f):
        def __completion(*a, **b):
            result = None
            try:
                result = f(*a, **b)
                return result
            finally:
                # fire-and-forget: hand the result to the callback in a new thread
                thread.start_new_thread(callback, (result,))
        return __completion
    return _completion
def prettydate(d, T=lambda x: x, utc=False):
    """
    Return a human-friendly age string for `d` relative to now, e.g.
    '3 days ago', '1 hour from now'.

    Accepts a datetime or a date; returns '' for a falsy value and
    '[invalid date]' for anything else.  `T` is the translator applied
    to the template before %-interpolation; `utc` compares against
    utcnow() instead of now().
    """
    now = datetime.datetime.utcnow() if utc else datetime.datetime.now()
    if isinstance(d, datetime.datetime):
        delta = now - d
    elif isinstance(d, datetime.date):
        delta = now.date() - d
    elif not d:
        return ''
    else:
        return '[invalid date]'
    if delta.days < 0:
        suffix = ' from now'
        delta = -delta
    else:
        suffix = ' ago'
    days, secs = delta.days, delta.seconds
    # cascade from the coarsest unit down; singular forms get their own template
    if days >= 2 * 365:
        return T('%d years' + suffix) % int(days // 365)
    if days >= 365:
        return T('1 year' + suffix)
    if days >= 60:
        return T('%d months' + suffix) % int(days // 30)
    if days >= 27:  # 4 weeks ugly
        return T('1 month' + suffix)
    if days >= 14:
        return T('%d weeks' + suffix) % int(days // 7)
    if days >= 7:
        return T('1 week' + suffix)
    if days > 1:
        return T('%d days' + suffix) % days
    if days == 1:
        return T('1 day' + suffix)
    if secs >= 2 * 60 * 60:
        return T('%d hours' + suffix) % int(secs // 3600)
    if secs >= 60 * 60:
        return T('1 hour' + suffix)
    if secs >= 2 * 60:
        return T('%d minutes' + suffix) % int(secs // 60)
    if secs >= 60:
        return T('1 minute' + suffix)
    if secs > 1:
        return T('%d seconds' + suffix) % secs
    if secs == 1:
        return T('1 second' + suffix)
    return T('now')
def test_thread_separation():
    # Doctest helper for PluginManager: demonstrates that instances created
    # in different threads do NOT share state (one singleton per thread).
    def f():
        # runs in a second thread: gets that thread's own PluginManager
        c = PluginManager()
        lock1.acquire()  # blocks until the main thread has set a.x and released lock1
        lock2.acquire()
        c.x = 7          # write to this thread's instance only
        lock1.release()
        lock2.release()
    lock1 = thread.allocate_lock()
    lock2 = thread.allocate_lock()
    lock1.acquire()      # hold lock1 so f() waits for the main thread
    thread.start_new_thread(f, ())
    a = PluginManager()
    a.x = 5
    lock1.release()      # let f() proceed
    lock2.acquire()      # synchronize before reading back
    # expected to still be 5: the other thread's c.x = 7 must not leak here
    return a.x
class PluginManager(object):
    """
    Plugin Manager is similar to a storage object but it is a single level
    singleton. This means that multiple instances within the same thread share
    the same attributes.
    Its constructor is also special. The first argument is the name of the
    plugin you are defining.
    The named arguments are parameters needed by the plugin with default values.
    If the parameters were previous defined, the old values are used.
    Example:
        in some general configuration file::
            plugins = PluginManager()
            plugins.me.param1=3
        within the plugin model::
            _ = PluginManager('me',param1=5,param2=6,param3=7)
        where the plugin is used::
            >>> print(plugins.me.param1)
            3
            >>> print(plugins.me.param2)
            6
            >>> plugins.me.param3 = 8
            >>> print(plugins.me.param3)
            8
    Here are some tests::
        >>> a=PluginManager()
        >>> a.x=6
        >>> b=PluginManager('check')
        >>> print(b.x)
        6
        >>> b=PluginManager() # reset settings
        >>> print(b.x)
        <Storage {}>
        >>> b.x=7
        >>> print(a.x)
        7
        >>> a.y.z=8
        >>> print(b.y.z)
        8
        >>> test_thread_separation()
        5
        >>> plugins=PluginManager('me',db='mydb')
        >>> print(plugins.me.db)
        mydb
        >>> print('me' in plugins)
        True
        >>> print(plugins.me.installed)
        True
    """
    # one singleton instance per thread id
    instances = {}
    # FIX: class-level lock guarding `instances`; the original allocated a
    # brand new lock on every __new__ call, which could never be contended
    # and therefore provided no mutual exclusion at all
    _instances_lock = thread.allocate_lock()

    def __new__(cls, *a, **b):
        id = thread.get_ident()
        lock = cls._instances_lock
        try:
            lock.acquire()
            try:
                return cls.instances[id]
            except KeyError:
                # FIX: object.__new__(cls, *a, **b) raises TypeError on
                # Python >= 3.3 when extra arguments are passed; the
                # arguments are consumed by __init__, not __new__
                instance = object.__new__(cls)
                cls.instances[id] = instance
                return instance
        finally:
            lock.release()

    def __init__(self, plugin=None, **defaults):
        # calling with no plugin name resets this thread's settings
        if not plugin:
            self.__dict__.clear()
        settings = self.__getattr__(plugin)
        settings.installed = True
        # keep previously-set values; only fill in missing defaults
        settings.update(
            (k, v) for k, v in defaults.items() if k not in settings)

    def __getattr__(self, key):
        # auto-vivify one Storage() of settings per plugin name
        if key not in self.__dict__:
            self.__dict__[key] = Storage()
        return self.__dict__[key]

    def keys(self):
        return list(self.__dict__.keys())

    def __contains__(self, key):
        return key in self.__dict__
class Expose(object):
def __init__(self, base=None, basename=None, extensions=None,
             allow_download=True, follow_symlink_out=False):
    """
    Examples:
        Use as::
            def static():
                return dict(files=Expose())
        or::
            def static():
                path = os.path.join(request.folder,'static','public')
                return dict(files=Expose(path,basename='public'))
    Args:
        extensions: an optional list of file extensions for filtering
            displayed files: e.g. `['.py', '.jpg']`
        allow_download: whether to allow downloading selected files
        follow_symlink_out: whether to follow symbolic links that points
            points outside of `base`.
            Warning: setting this to `True` might pose a security risk
                if you don't also have complete control over writing
                and file creation under `base`.
    """
    self.follow_symlink_out = follow_symlink_out
    self.base = self.normalize_path(
        base or os.path.join(current.request.folder, 'static'))
    self.basename = basename or current.request.function
    # FIX: self.base/self.basename used to be immediately reassigned with
    # os.path.realpath(...) / a duplicate basename computation, clobbering
    # normalize_path() and defeating the follow_symlink_out option; the
    # redundant assignments are removed
    if current.request.raw_args:
        self.args = [arg for arg in current.request.raw_args.split('/') if arg]
    else:
        self.args = [arg for arg in current.request.args if arg]
    filename = os.path.join(self.base, *self.args)
    if not os.path.exists(filename):
        raise HTTP(404, "FILE NOT FOUND")
    if not self.in_base(filename):
        raise HTTP(401, "NOT AUTHORIZED")
    if allow_download and not os.path.isdir(filename):
        current.response.headers['Content-Type'] = contenttype(filename)
        raise HTTP(200, open(filename, 'rb'), **current.response.headers)
    self.path = path = os.path.join(filename, '*')
    dirname_len = len(path) - 1
    # skip private entries and symlinks escaping the base directory
    allowed = [f for f in sorted(glob.glob(path))
               if not any([self.isprivate(f), self.issymlink_out(f)])]
    self.folders = [f[dirname_len:]
                    for f in allowed if os.path.isdir(f)]
    self.filenames = [f[dirname_len:]
                      for f in allowed if not os.path.isdir(f)]
    # an optional README in the directory is rendered above the listing
    if 'README' in self.filenames:
        with open(os.path.join(filename, 'README')) as f:
            readme = f.read()
        self.paragraph = MARKMIN(readme)
    else:
        self.paragraph = None
    if extensions:
        self.filenames = [f for f in self.filenames
                          if os.path.splitext(f)[-1] in extensions]
def breadcrumbs(self, basename):
path = []
span = SPAN()
span.append(A(basename, _href=URL()))
for arg in self.args:
span.append('/')
path.append(arg)
span.append(A(arg, _href=URL(args='/'.join(path))))
return span
def table_folders(self):
if self.folders:
return SPAN(H3('Folders'),
TABLE(*[TR(TD(A(folder, _href=URL(args=self.args + [folder]))))
for folder in self.folders], **dict(_class="table")))
return ''
@staticmethod
def __in_base(subdir, basedir, sep=os.path.sep):
"""True if subdir/ is under basedir/"""
s = lambda f: '%s%s' % (f.rstrip(sep), sep) # f -> f/
# The trailing '/' is for the case of '/foobar' in_base of '/foo':
# - becase '/foobar' starts with '/foo'
# - but '/foobar/' doesn't start with '/foo/'
return s(subdir).startswith(s(basedir))
def in_base(self, f):
"""True if f/ is under self.base/
Where f ans slef.base are normalized paths
"""
return self.__in_base(self.normalize_path(f), self.base)
def normalize_path(self, f):
if self.follow_symlink_out:
return os.path.normpath(f)
else:
return os.path.realpath(f)
def issymlink_out(self, f):
"""True if f is a symlink and is pointing outside of self.base"""
return os.path.islink(f) and not self.in_base(f)
@staticmethod
def isprivate(f):
# remove '/private' prefix to deal with symbolic links on OSX
if f.startswith('/private/'):
f = f[8:]
return 'private' in f or f.startswith('.') or f.endswith('~')
@staticmethod
def isimage(f):
return os.path.splitext(f)[-1].lower() in (
'.png', '.jpg', '.jpeg', '.gif', '.tiff')
def table_files(self, width=160):
if self.filenames:
return SPAN(H3('Files'),
TABLE(*[TR(TD(A(f, _href=URL(args=self.args + [f]))),
TD(IMG(_src=URL(args=self.args + [f]),
_style='max-width:%spx' % width)
if width and self.isimage(f) else ''))
for f in self.filenames], **dict(_class="table")))
return ''
def xml(self):
return DIV(
H2(self.breadcrumbs(self.basename)),
self.paragraph or '',
self.table_folders(),
self.table_files()).xml()
class Wiki(object):
    """Wiki engine: stores, renders and serves wiki pages via `auth.db`."""
    everybody = 'everybody'  # sentinel group name meaning unrestricted access
    rows_page = 25  # page size used by the '_recent' listing in __call__
    def markmin_base(self, body):
        """Render MARKMIN `body` to XML with this wiki's extras, URL
        expansion and @-autolink support."""
        return MARKMIN(body, extra=self.settings.extra,
                       url=True, environment=self.env,
                       autolinks=lambda link: expand_one(link, {})).xml()
def render_tags(self, tags):
return DIV(
_class='w2p_wiki_tags',
*[A(t.strip(), _href=URL(args='_search', vars=dict(q=t)))
for t in tags or [] if t.strip()])
def markmin_render(self, page):
return self.markmin_base(page.body) + self.render_tags(page.tags).xml()
def html_render(self, page):
html = page.body
# @///function -> http://..../function
html = replace_at_urls(html, URL)
# http://...jpg -> <img src="http://...jpg/> or embed
html = replace_autolinks(html, lambda link: expand_one(link, {}))
# @{component:name} -> <script>embed component name</script>
html = replace_components(html, self.env)
html = html + self.render_tags(page.tags).xml()
return html
@staticmethod
def component(text):
"""
In wiki docs allows `@{component:controller/function/args}`
which renders as a `LOAD(..., ajax=True)`
"""
items = text.split('/')
controller, function, args = items[0], items[1], items[2:]
return LOAD(controller, function, args=args, ajax=True).xml()
def get_renderer(self):
if isinstance(self.settings.render, basestring):
r = getattr(self, "%s_render" % self.settings.render)
elif callable(self.settings.render):
r = self.settings.render
elif isinstance(self.settings.render, dict):
def custom_render(page):
if page.render:
if page.render in self.settings.render.keys():
my_render = self.settings.render[page.render]
else:
my_render = getattr(self, "%s_render" % page.render)
else:
my_render = self.markmin_render
return my_render(page)
r = custom_render
else:
raise ValueError(
"Invalid render type %s" % type(self.settings.render))
return r
    def __init__(self, auth, env=None, render='markmin',
                 manage_permissions=False, force_prefix='',
                 restrict_search=False, extra=None,
                 menu_groups=None, templates=None, migrate=True,
                 controller=None, function=None, groups=None):
        settings = self.settings = auth.settings.wiki
        # NOTE: this string is a stray documentation block, not a real
        # docstring (it follows the first statement, so it is a no-op)
        """
        Args:
            render:
                - "markmin"
                - "html"
                - `<function>` : Sets a custom render function
                - `dict(html=<function>, markmin=...)`: dict(...) allows
                  multiple custom render functions
                - "multiple" : Is the same as `{}`. It enables per-record
                  formats using builtins
        """
        # collect the set of engine names offered by the 'render' field
        engines = set(['markmin', 'html'])
        show_engine = False
        if render == "multiple":
            render = {}
        if isinstance(render, dict):
            [engines.add(key) for key in render]
            show_engine = True
        settings.render = render
        perms = settings.manage_permissions = manage_permissions
        settings.force_prefix = force_prefix
        settings.restrict_search = restrict_search
        settings.extra = extra or {}
        settings.menu_groups = menu_groups
        settings.templates = templates
        settings.controller = controller
        settings.function = function
        settings.groups = list(auth.user_groups.values()) \
            if groups is None else groups
        db = auth.db
        self.env = env or {}
        self.env['component'] = Wiki.component
        self.auth = auth
        self.wiki_menu_items = None
        # force_prefix may contain %-placeholders filled from the user row
        if self.auth.user:
            self.settings.force_prefix = force_prefix % self.auth.user
        else:
            self.settings.force_prefix = force_prefix
        self.host = current.request.env.http_host
        # schema for the three wiki tables (pages, tags, media)
        table_definitions = [
            ('wiki_page', {
                'args': [
                    Field('slug',
                          requires=[IS_SLUG(),
                                    IS_NOT_IN_DB(db, 'wiki_page.slug')],
                          writable=False),
                    Field('title', length=255, unique=True),
                    Field('body', 'text', notnull=True),
                    Field('tags', 'list:string'),
                    Field('can_read', 'list:string',
                          writable=perms,
                          readable=perms,
                          default=[Wiki.everybody]),
                    Field('can_edit', 'list:string',
                          writable=perms, readable=perms,
                          default=[Wiki.everybody]),
                    Field('changelog'),
                    Field('html', 'text',
                          compute=self.get_renderer(),
                          readable=False, writable=False),
                    Field('render', default="markmin",
                          readable=show_engine,
                          writable=show_engine,
                          requires=IS_EMPTY_OR(
                              IS_IN_SET(engines))),
                    auth.signature],
                'vars': {'format': '%(title)s', 'migrate': migrate}}),
            ('wiki_tag', {
                'args': [
                    Field('name'),
                    Field('wiki_page', 'reference wiki_page'),
                    auth.signature],
                'vars':{'format': '%(title)s', 'migrate': migrate}}),
            ('wiki_media', {
                'args': [
                    Field('wiki_page', 'reference wiki_page'),
                    Field('title', required=True),
                    Field('filename', 'upload', required=True),
                    auth.signature],
                'vars': {'format': '%(title)s', 'migrate': migrate}}),
        ]
        # define only non-existent tables
        for key, value in table_definitions:
            args = []
            if key not in db.tables():
                # look for wiki_ extra fields in auth.settings
                extra_fields = auth.settings.extra_fields
                if extra_fields:
                    if key in extra_fields:
                        if extra_fields[key]:
                            for field in extra_fields[key]:
                                args.append(field)
                args += value['args']
                db.define_table(key, *args, **value['vars'])
        # default template query: public pages tagged 'template'
        if self.settings.templates is None and not self.settings.manage_permissions:
            self.settings.templates = \
                db.wiki_page.tags.contains('template') & db.wiki_page.can_read.contains('everybody')

        # keep wiki_tag rows in sync with each page's tags list
        def update_tags_insert(page, id, db=db):
            for tag in page.tags or []:
                tag = tag.strip().lower()
                if tag:
                    db.wiki_tag.insert(name=tag, wiki_page=id)

        def update_tags_update(dbset, page, db=db):
            page = dbset.select(limitby=(0, 1)).first()
            db(db.wiki_tag.wiki_page == page.id).delete()
            for tag in page.tags or []:
                tag = tag.strip().lower()
                if tag:
                    db.wiki_tag.insert(name=tag, wiki_page=page.id)
        db.wiki_page._after_insert.append(update_tags_insert)
        db.wiki_page._after_update.append(update_tags_update)
        # auto-enroll the logged-in user as wiki_editor on first use
        # (only when no explicit groups override was given)
        if (auth.user and
                check_credentials(current.request, gae_login=False) and
                'wiki_editor' not in auth.user_groups.values() and
                self.settings.groups == list(auth.user_groups.values())):
            group = db.auth_group(role='wiki_editor')
            gid = group.id if group else db.auth_group.insert(
                role='wiki_editor')
            auth.add_membership(gid)
        settings.lock_keys = True
    # WIKI ACCESS POLICY
    def not_authorized(self, page=None):
        """Abort the request with HTTP 401 Unauthorized."""
        raise HTTP(401)
def can_read(self, page):
if 'everybody' in page.can_read or not self.settings.manage_permissions:
return True
elif self.auth.user:
groups = self.settings.groups
if ('wiki_editor' in groups or
set(groups).intersection(set(page.can_read + page.can_edit)) or
page.created_by == self.auth.user.id):
return True
return False
def can_edit(self, page=None):
if not self.auth.user:
redirect(self.auth.settings.login_url)
groups = self.settings.groups
return ('wiki_editor' in groups or
(page is None and 'wiki_author' in groups) or
page is not None and (set(groups).intersection(set(page.can_edit)) or
page.created_by == self.auth.user.id))
def can_manage(self):
if not self.auth.user:
return False
groups = self.settings.groups
return 'wiki_editor' in groups
    def can_search(self):
        """Search is open to everyone (always True)."""
        return True
def can_see_menu(self):
if self.auth.user:
if self.settings.menu_groups is None:
return True
else:
groups = self.settings.groups
if any(t in self.settings.menu_groups for t in groups):
return True
return False
    # END POLICY
    def automenu(self):
        """adds the menu if not present"""
        # build the wiki menu lazily (once) and append it to response.menu
        if (not self.wiki_menu_items and self.settings.controller and self.settings.function):
            self.wiki_menu_items = self.menu(self.settings.controller,
                                             self.settings.function)
            current.response.menu += self.wiki_menu_items
    def __call__(self):
        """Dispatch the current request to the matching wiki action.

        args(0) selects the action: a numeric id serves media, a plain
        slug (or nothing -> 'index') reads a page, and '_'-prefixed
        values route to edit/editmedia/create/pages/search/recent/
        cloud/preview.
        """
        request = current.request
        settings = self.settings
        settings.controller = settings.controller or request.controller
        settings.function = settings.function or request.function
        self.automenu()
        zero = request.args(0) or 'index'
        if zero and zero.isdigit():
            return self.media(int(zero))
        elif not zero or not zero.startswith('_'):
            return self.read(zero)
        elif zero == '_edit':
            return self.edit(request.args(1) or 'index', request.args(2) or 0)
        elif zero == '_editmedia':
            return self.editmedia(request.args(1) or 'index')
        elif zero == '_create':
            return self.create()
        elif zero == '_pages':
            return self.pages()
        elif zero == '_search':
            return self.search()
        elif zero == '_recent':
            # recent pages by author args(1), paginated by rows_page
            ipage = int(request.vars.page or 0)
            query = self.auth.db.wiki_page.created_by == request.args(
                1, cast=int)
            return self.search(query=query,
                               orderby=~self.auth.db.wiki_page.created_on,
                               limitby=(ipage * self.rows_page,
                                        (ipage + 1) * self.rows_page),
                               )
        elif zero == '_cloud':
            return self.cloud()
        elif zero == '_preview':
            return self.preview(self.get_renderer())
    def first_paragraph(self, page):
        """Return the first non-heading paragraph of `page` (used as a
        preview by search), or '' when none is produced.

        NOTE(review): the guard reads `if not self.can_read(page)`, i.e.
        the preview is built only when can_read() is False -- this looks
        inverted; confirm the intended semantics before changing it.
        """
        if not self.can_read(page):
            mm = (page.body or '').replace('\r', '')
            ps = [p for p in mm.split('\n\n') if not p.startswith('#') and p.strip()]
            if ps:
                return ps[0]
        return ''
def fix_hostname(self, body):
return (body or '').replace('://HOSTNAME', '://%s' % self.host)
def read(self, slug, force_render=False):
if slug in '_cloud':
return self.cloud()
elif slug in '_search':
return self.search()
page = self.auth.db.wiki_page(slug=slug)
if page and (not self.can_read(page)):
return self.not_authorized(page)
if current.request.extension == 'html':
if not page:
url = URL(args=('_create', slug))
return dict(content=A('Create page "%s"' % slug, _href=url, _class="btn"))
else:
html = page.html if not force_render else self.get_renderer()(page)
content = XML(self.fix_hostname(html))
return dict(title=page.title,
slug=page.slug,
page=page,
content=content,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
elif current.request.extension == 'load':
return self.fix_hostname(page.html) if page else ''
else:
if not page:
raise HTTP(404)
else:
return dict(title=page.title,
slug=page.slug,
page=page,
content=page.body,
tags=page.tags,
created_on=page.created_on,
modified_on=page.modified_on)
    def edit(self, slug, from_template=0):
        """Edit (or create) the page `slug`; `from_template` is the id of a
        template page whose body seeds a brand-new page."""
        auth = self.auth
        db = auth.db
        page = db.wiki_page(slug=slug)
        if not self.can_edit(page):
            return self.not_authorized(page)
        title_guess = ' '.join(c.capitalize() for c in slug.split('-'))
        if not page:
            # new page: non-managers must respect the configured slug prefix
            if not (self.can_manage() or
                    slug.startswith(self.settings.force_prefix)):
                current.session.flash = 'slug must have "%s" prefix' \
                    % self.settings.force_prefix
                redirect(URL(args=('_create')))
            db.wiki_page.can_read.default = [Wiki.everybody]
            db.wiki_page.can_edit.default = [auth.user_group_role()]
            db.wiki_page.title.default = title_guess
            db.wiki_page.slug.default = slug
            if slug == 'wiki-menu':
                db.wiki_page.body.default = \
                    '- Menu Item > @////index\n- - Submenu > http://web2py.com'
            else:
                # seed the body from the chosen template, else a stub
                db.wiki_page.body.default = db(db.wiki_page.id == from_template).select(db.wiki_page.body)[0].body \
                    if int(from_template) > 0 else '## %s\n\npage content' % title_guess
        vars = current.request.post_vars
        if vars.body:
            # store host-independent links (reversed by fix_hostname on read)
            vars.body = vars.body.replace('://%s' % self.host, '://HOSTNAME')
        form = SQLFORM(db.wiki_page, page, deletable=True,
                       formstyle='table2cols', showid=False).process()
        if form.deleted:
            current.session.flash = 'page deleted'
            redirect(URL())
        elif form.accepted:
            current.session.flash = 'page created'
            redirect(URL(args=slug))
        # client-side preview/media toggles injected next to the form
        script = """
        jQuery(function() {
            if (!jQuery('#wiki_page_body').length) return;
            var pagecontent = jQuery('#wiki_page_body');
            pagecontent.css('font-family',
                            'Monaco,Menlo,Consolas,"Courier New",monospace');
            var prevbutton = jQuery('<button class="btn nopreview">Preview</button>');
            var preview = jQuery('<div id="preview"></div>').hide();
            var previewmedia = jQuery('<div id="previewmedia"></div>');
            var form = pagecontent.closest('form');
            preview.insertBefore(form);
            prevbutton.insertBefore(form);
            if(%(link_media)s) {
              var mediabutton = jQuery('<button class="btn nopreview">Media</button>');
              mediabutton.insertBefore(form);
              previewmedia.insertBefore(form);
              mediabutton.click(function() {
                if (mediabutton.hasClass('nopreview')) {
                  web2py_component('%(urlmedia)s', 'previewmedia');
                } else {
                  previewmedia.empty();
                }
                mediabutton.toggleClass('nopreview');
              });
            }
            prevbutton.click(function(e) {
                e.preventDefault();
                if (prevbutton.hasClass('nopreview')) {
                    prevbutton.addClass('preview').removeClass(
                        'nopreview').html('Edit Source');
                    try{var wiki_render = jQuery('#wiki_page_render').val()}
                    catch(e){var wiki_render = null;}
                    web2py_ajax_page('post', \
                        '%(url)s', {body: jQuery('#wiki_page_body').val(), \
                                    render: wiki_render}, 'preview');
                    form.fadeOut('fast', function() {preview.fadeIn()});
                } else {
                    prevbutton.addClass(
                        'nopreview').removeClass('preview').html('Preview');
                    preview.fadeOut('fast', function() {form.fadeIn()});
                }
            })
        })
        """ % dict(url=URL(args=('_preview', slug)), link_media=('true' if page else 'false'),
                   urlmedia=URL(extension='load',
                                args=('_editmedia', slug),
                                vars=dict(embedded=1)))
        return dict(content=TAG[''](form, SCRIPT(script)))
    def editmedia(self, slug):
        """Grid for managing the media attached to page `slug`; in embedded
        mode (opened from the editor) it offers copy-into-source links."""
        auth = self.auth
        db = auth.db
        page = db.wiki_page(slug=slug)
        if not (page and self.can_edit(page)):
            return self.not_authorized(page)
        # render each media id as the @//// shortcut usable in page bodies
        self.auth.db.wiki_media.id.represent = lambda id, row: \
            id if not row.filename else \
            SPAN('@////%i/%s.%s' % (id, IS_SLUG.urlify(row.title.split('.')[0]), row.filename.split('.')[-1]))
        self.auth.db.wiki_media.wiki_page.default = page.id
        self.auth.db.wiki_media.wiki_page.writable = False
        links = []
        csv = True
        create = True
        if current.request.vars.embedded:
            # embedded in the page editor: no csv/create, add copy links
            script = "var c = jQuery('#wiki_page_body'); c.val(c.val() + jQuery('%s').text()); return false;"
            fragment = self.auth.db.wiki_media.id.represent
            csv = False
            create = False
            links = [lambda row: A('copy into source', _href='#', _onclick=script % (fragment(row.id, row)))]
        content = SQLFORM.grid(
            self.auth.db.wiki_media.wiki_page == page.id,
            orderby=self.auth.db.wiki_media.title,
            links=links,
            csv=csv,
            create=create,
            args=['_editmedia', slug],
            user_signature=False)
        return dict(content=content)
    def create(self):
        """Form asking for a slug (and optional template) for a new page;
        on success redirects to the edit action for that slug."""
        if not self.can_edit():
            return self.not_authorized()
        db = self.auth.db
        slugs = db(db.wiki_page.id > 0).select(db.wiki_page.id, db.wiki_page.slug)
        options = [OPTION(row.slug, _value=row.id) for row in slugs]
        options.insert(0, OPTION('', _value=''))
        fields = [Field("slug", default=current.request.args(1) or
                        self.settings.force_prefix,
                        requires=(IS_SLUG(), IS_NOT_IN_DB(db, db.wiki_page.slug))), ]
        if self.settings.templates:
            fields.append(
                Field("from_template", "reference wiki_page",
                      requires=IS_EMPTY_OR(IS_IN_DB(db(self.settings.templates), db.wiki_page._id, '%(slug)s')),
                      comment=current.T("Choose Template or empty for new Page")))
        form = SQLFORM.factory(*fields, **dict(_class="well"))
        form.element("[type=submit]").attributes["_value"] = \
            current.T("Create Page from Slug")
        if form.process().accepted:
            form.vars.from_template = 0 if not form.vars.from_template else form.vars.from_template
            redirect(URL(args=('_edit', form.vars.slug, form.vars.from_template or 0)))  # added param
        return dict(content=form)
    def pages(self):
        """Manager-only grid listing all wiki pages with edit/media links."""
        if not self.can_manage():
            return self.not_authorized()
        # show slugs as the @//// shortcut and titles as links to the page
        self.auth.db.wiki_page.slug.represent = lambda slug, row: SPAN(
            '@////%s' % slug)
        self.auth.db.wiki_page.title.represent = lambda title, row: \
            A(title, _href=URL(args=row.slug))
        wiki_table = self.auth.db.wiki_page
        content = SQLFORM.grid(
            wiki_table,
            fields=[wiki_table.slug,
                    wiki_table.title, wiki_table.tags,
                    wiki_table.can_read, wiki_table.can_edit],
            links=[
                lambda row:
                A('edit', _href=URL(args=('_edit', row.slug)), _class='btn'),
                lambda row:
                A('media', _href=URL(args=('_editmedia', row.slug)), _class='btn')],
            details=False, editable=False, deletable=False, create=False,
            orderby=self.auth.db.wiki_page.title,
            args=['_pages'],
            user_signature=False)
        return dict(content=content)
    def media(self, id):
        """Stream the media record `id` (after a read-permission check on
        its page when permissions are managed), or raise HTTP 404."""
        request, response, db = current.request, current.response, self.auth.db
        media = db.wiki_media(id)
        if media:
            if self.settings.manage_permissions:
                page = db.wiki_page(media.wiki_page)
                if not self.can_read(page):
                    return self.not_authorized(page)
            request.args = [media.filename]
            m = response.download(request, db)
            current.session.forget()  # get rid of the cookie
            # mark the response as privately cacheable
            response.headers['Last-Modified'] = \
                request.utcnow.strftime("%a, %d %b %Y %H:%M:%S GMT")
            if 'Content-Disposition' in response.headers:
                del response.headers['Content-Disposition']
            response.headers['Pragma'] = 'cache'
            response.headers['Cache-Control'] = 'private'
            return m
        else:
            raise HTTP(404)
def menu(self, controller='default', function='index'):
db = self.auth.db
request = current.request
menu_page = db.wiki_page(slug='wiki-menu')
menu = []
if menu_page:
tree = {'': menu}
regex = re.compile('[\r\n\t]*(?P<base>(\s*\-\s*)+)(?P<title>\w.*?)\s+\>\s+(?P<link>\S+)')
for match in regex.finditer(self.fix_hostname(menu_page.body)):
base = match.group('base').replace(' ', '')
title = match.group('title')
link = match.group('link')
title_page = None
if link.startswith('@'):
items = link[2:].split('/')
if len(items) > 3:
title_page = items[3]
link = URL(a=items[0] or None, c=items[1] or controller,
f=items[2] or function, args=items[3:])
parent = tree.get(base[1:], tree[''])
subtree = []
tree[base] = subtree
parent.append((current.T(title),
request.args(0) == title_page,
link, subtree))
if self.can_see_menu():
submenu = []
menu.append((current.T('[Wiki]'), None, None, submenu))
if URL() == URL(controller, function):
if not str(request.args(0)).startswith('_'):
slug = request.args(0) or 'index'
mode = 1
elif request.args(0) == '_edit':
slug = request.args(1) or 'index'
mode = 2
elif request.args(0) == '_editmedia':
slug = request.args(1) or 'index'
mode = 3
else:
mode = 0
if mode in (2, 3):
submenu.append((current.T('View Page'), None,
URL(controller, function, args=slug)))
if mode in (1, 3):
submenu.append((current.T('Edit Page'), None,
URL(controller, function, args=('_edit', slug))))
if mode in (1, 2):
submenu.append((current.T('Edit Page Media'), None,
URL(controller, function, args=('_editmedia', slug))))
submenu.append((current.T('Create New Page'), None,
URL(controller, function, args=('_create'))))
# Moved next if to inside self.auth.user check
if self.can_manage():
submenu.append((current.T('Manage Pages'), None,
URL(controller, function, args=('_pages'))))
submenu.append((current.T('Edit Menu'), None,
URL(controller, function, args=('_edit', 'wiki-menu'))))
# Also moved inside self.auth.user check
submenu.append((current.T('Search Pages'), None,
URL(controller, function, args=('_search'))))
return menu
    def search(self, tags=None, query=None, cloud=True, preview=True,
               limitby=(0, 100), orderby=None):
        """Search pages by tag list or explicit DAL `query`.

        With neither given, renders the search form (and reads tags from
        request.vars.q). Returns html/load markup, a list of page dicts
        for other extensions, or the tag cloud when there is nothing to
        search for.
        """
        if not self.can_search():
            return self.not_authorized()
        request = current.request
        content = CAT()
        if tags is None and query is None:
            form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                              value=request.vars.q),
                        INPUT(_type="submit", _value=current.T('Search')),
                        _method='GET')
            content.append(DIV(form, _class='w2p_wiki_form'))
            if request.vars.q:
                # comma-separated, lowercased, blanks dropped
                tags = [v.strip() for v in request.vars.q.split(',')]
                tags = [v.lower() for v in tags if v]
        if tags or query is not None:
            db = self.auth.db
            count = db.wiki_tag.wiki_page.count()
            fields = [db.wiki_page.id, db.wiki_page.slug,
                      db.wiki_page.title, db.wiki_page.tags,
                      db.wiki_page.can_read, db.wiki_page.can_edit]
            if preview:
                fields.append(db.wiki_page.body)
            if query is None:
                # match by tag, or by title substring of the raw query
                query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                    (db.wiki_tag.name.belongs(tags))
                query = query | db.wiki_page.title.contains(request.vars.q)
            if self.settings.restrict_search and not self.can_manage():
                query = query & (db.wiki_page.created_by == self.auth.user_id)
            # order by tag-match count unless an explicit orderby was given
            pages = db(query).select(count,
                                     *fields, **dict(orderby=orderby or ~count,
                                                     groupby=reduce(lambda a, b: a | b, fields),
                                                     distinct=True,
                                                     limitby=limitby))
            if request.extension in ('html', 'load'):
                if not pages:
                    content.append(DIV(current.T("No results"),
                                       _class='w2p_wiki_form'))

                def link(t):
                    return A(t, _href=URL(args='_search', vars=dict(q=t)))
                items = [DIV(H3(A(p.wiki_page.title, _href=URL(
                    args=p.wiki_page.slug))),
                    MARKMIN(self.first_paragraph(p.wiki_page))
                    if preview else '',
                    DIV(_class='w2p_wiki_tags',
                        *[link(t.strip()) for t in
                          p.wiki_page.tags or [] if t.strip()]),
                    _class='w2p_wiki_search_item')
                    for p in pages]
                content.append(DIV(_class='w2p_wiki_pages', *items))
            else:
                # non-html extensions get plain dicts, no cloud
                cloud = False
                content = [p.wiki_page.as_dict() for p in pages]
        elif cloud:
            content.append(self.cloud()['content'])
        if request.extension == 'load':
            return content
        return dict(content=content)
    def cloud(self):
        """Tag cloud: the 20 most-used tags as search links, font-sized
        proportionally to their usage count."""
        db = self.auth.db
        count = db.wiki_tag.wiki_page.count(distinct=True)
        ids = db(db.wiki_tag).select(
            db.wiki_tag.name, count,
            distinct=True,
            groupby=db.wiki_tag.name,
            orderby=~count, limitby=(0, 20))
        if ids:
            # a = highest count, b = lowest count in the selection
            a, b = ids[0](count), ids[-1](count)

        def style(c):
            # scale font size linearly between 1.3em and 2.8em
            STYLE = 'padding:0 0.2em;line-height:%.2fem;font-size:%.2fem'
            size = (1.5 * (c - b) / max(a - b, 1) + 1.3)
            return STYLE % (1.3, size)
        items = []
        for item in ids:
            items.append(A(item.wiki_tag.name,
                           _style=style(item(count)),
                           _href=URL(args='_search',
                                     vars=dict(q=item.wiki_tag.name))))
            items.append(' ')
        return dict(content=DIV(_class='w2p_cloud', *items))
    def preview(self, render):
        """Render the posted body with `render` (used by the edit-page
        preview button via _preview)."""
        request = current.request
        # FIXME: This is an ugly hack to ensure a default render
        # engine if not specified (with multiple render engines)
        if 'render' not in request.post_vars:
            request.post_vars.render = None
        return render(request.post_vars)
class Config(object):
    """Thin wrapper around `configparser` for one section of an ini file,
    with the current session used as a fallback cache (see read/save)."""

    def __init__(
        self,
        filename,
        section,
        default_values=None
    ):
        """Load `filename`, creating `section` if it does not exist yet.

        `default_values` used to be a mutable `{}` default argument (a
        classic shared-state bug); None is now the sentinel instead.
        """
        self.config = configparser.ConfigParser(default_values or {})
        self.config.read(filename)
        if not self.config.has_section(section):
            self.config.add_section(section)
        self.section = section
        self.filename = filename

    def read(self):
        """Return the section's options as a dict, preferring any copy
        previously stashed in the session by a failed save()."""
        if not (isinstance(current.session['settings_%s' % self.section], dict)):
            settings = dict(self.config.items(self.section))
        else:
            settings = current.session['settings_%s' % self.section]
        return settings

    def save(self, options):
        """Set `(option, value)` pairs and write the file.

        Returns True on success; on failure the values are kept in the
        session (so read() still sees them) and False is returned.
        """
        for option, value in options:
            self.config.set(self.section, option, value)
        try:
            # `with` closes the handle even if write() fails
            # (the original leaked the file object)
            with open(self.filename, 'w') as config_file:
                self.config.write(config_file)
            result = True
        except Exception:
            current.session['settings_%s' % self.section] = dict(self.config.items(self.section))
            result = False
        return result
if __name__ == '__main__':
    import doctest
    # run the doctests embedded in this module's docstrings
    doctest.testmod()
| 41.665736 | 171 | 0.53599 |
682765589a45c42800bd9b53bb7ec7ff549d1fcd | 107 | py | Python | lang/py/cookbook/v2/source/cb2_17_8_exm_1.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_17_8_exm_1.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_17_8_exm_1.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | static PyObject*
empty5(PyObject* self, PyObject* args)
{
return Py_None; /* ***WRONG*** */
}
| 17.833333 | 46 | 0.579439 |
8262bcf4f09ca8a541f73e9573d1ff42211d08ed | 1,131 | py | Python | scripts/vrf_scripts/01_deploy_vrf.py | garyb9/chainlink-mix | c4458c99cd13f1ec6fbed84a05c2a9c683842eb0 | [
"MIT"
] | 2 | 2021-12-06T10:12:46.000Z | 2022-03-23T23:06:46.000Z | scripts/vrf_scripts/01_deploy_vrf.py | garyb9/chainlink-mix | c4458c99cd13f1ec6fbed84a05c2a9c683842eb0 | [
"MIT"
] | 2 | 2022-02-27T22:08:43.000Z | 2022-02-27T22:08:48.000Z | scripts/vrf_scripts/01_deploy_vrf.py | garyb9/chainlink-mix | c4458c99cd13f1ec6fbed84a05c2a9c683842eb0 | [
"MIT"
] | 2 | 2021-11-08T17:54:16.000Z | 2021-12-02T02:28:27.000Z | #!/usr/bin/python3
from brownie import VRFConsumer, VRFCoordinatorMock, LinkToken, config, network
from scripts.helpful_scripts import (
get_account,
get_verify_status,
LOCAL_BLOCKCHAIN_ENVIRONMENTS,
)
from scripts.deploy_mocks import deploy_mocks
def depoly_vrf():
    """Deploy a VRFConsumer contract on the active brownie network.

    On a local/dev network the Chainlink mocks are deployed first (if
    missing) and their addresses are used; otherwise the coordinator and
    LINK token addresses come from the network config.
    (Function name keeps the original 'depoly' typo: it is the public
    entry point invoked by main().)
    """
    account = get_account()
    # hoist: the active network is queried once instead of five times,
    # and the duplicate second `account = get_account()` call is removed
    active_network = network.show_active()
    print(f"On network {active_network}")
    keyhash = config["networks"][active_network]["keyhash"]
    fee = config["networks"][active_network]["fee"]
    if active_network in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
        if len(VRFCoordinatorMock) <= 0:
            deploy_mocks()
        vrf_coordinator = VRFCoordinatorMock[-1].address
        link_token = LinkToken[-1].address
    else:
        net_conf = config["networks"][active_network]
        vrf_coordinator = net_conf["vrf_coordinator"]
        link_token = net_conf["link_token"]
    return VRFConsumer.deploy(
        keyhash,
        vrf_coordinator,
        link_token,
        fee,
        {"from": account},
        publish_source=get_verify_status(),
    )
def main():
    # script entry point (presumably invoked via `brownie run` -- standard
    # brownie scripts/ convention)
    depoly_vrf()
| 29.763158 | 86 | 0.678161 |
e04a0eeae7612cc2beb93d2ca084fb1209c83800 | 1,221 | py | Python | aln2fasta.py | ba1/BioParsing | 8a0257d4765a7bc86fef7688762abbeaaf3cef07 | [
"MIT"
] | 1 | 2017-06-19T15:15:26.000Z | 2017-06-19T15:15:26.000Z | aln2fasta.py | ba1/BioParsing | 8a0257d4765a7bc86fef7688762abbeaaf3cef07 | [
"MIT"
] | null | null | null | aln2fasta.py | ba1/BioParsing | 8a0257d4765a7bc86fef7688762abbeaaf3cef07 | [
"MIT"
] | null | null | null | '''
Created on Oct 20, 2015
@author: bardya
'''
import os
import argparse
import sys
from Bio import SeqIO
def parse_args():
    """Parse command-line options: -i <aln file path> and --version."""
    parser = argparse.ArgumentParser(
        description='build multifasta files from aln files')
    parser.add_argument(
        '-i', dest='infilepath', metavar='<aln_file_path>',
        type=argparse.FileType('rt'), help='path to an aln file')
    parser.add_argument('--version', action='version', version='0.1')
    return parser.parse_args()
def makegapless(fasta_sequences):
    """Strip '-' gap characters from every record's sequence, in place,
    and return the same collection."""
    for record in fasta_sequences:
        record.seq = record.seq.ungap('-')
    return fasta_sequences
def getSubjectName(inputname):
    """Return the stem of names shaped exactly like 'stem.ext';
    implicitly returns None for any other shape (no dot, several dots)."""
    parts = inputname.split('.')
    if len(parts) == 2:
        return parts[0]
if __name__ == '__main__':
    args = parse_args()
    try:
        inputfile = open(args.infilepath.name, 'r')
        fasta_sequences = SeqIO.parse(inputfile,'fasta')
    except:
        # NOTE(review): bare except; if open() fails, `fasta_sequences`
        # stays unbound and the script crashes below anyway -- consider
        # catching (IOError, OSError) and exiting here instead.
        print('IOError occured')
    # NOTE(review): `seqname` is computed but never used below.
    seqname = getSubjectName(os.path.basename(args.infilepath.name))
#     import itertools
#     it1, it2 = itertools.tee(fasta_sequences, n=2)
    gapless_fasta_seqs = makegapless(list(fasta_sequences))
SeqIO.write(gapless_fasta_seqs, sys.stdout, 'fasta') | 27.133333 | 105 | 0.665848 |
14232426432fec2a2289cd9484a3bf23dd5c2c14 | 2,110 | py | Python | drone/9_games/42_43_44_run_on_course/droneapp/models/course.py | onselaydin/pytry | 314aa50b6f8535e275dc8a2edd0c21637fb5a745 | [
"Apache-2.0"
] | null | null | null | drone/9_games/42_43_44_run_on_course/droneapp/models/course.py | onselaydin/pytry | 314aa50b6f8535e275dc8a2edd0c21637fb5a745 | [
"Apache-2.0"
] | null | null | null | drone/9_games/42_43_44_run_on_course/droneapp/models/course.py | onselaydin/pytry | 314aa50b6f8535e275dc8a2edd0c21637fb5a745 | [
"Apache-2.0"
] | null | null | null | import logging
import time
from droneapp.models.base import Singleton
logger = logging.getLogger(__name__)
class BaseCourse(metaclass=Singleton):
    """Base class for a drone flight course.

    `run()` advances `status` by one tick and delegates the actual drone
    commands to the subclass's `_run()`. Uses the project-defined
    Singleton metaclass -- presumably one shared instance per subclass;
    confirm against droneapp.models.base.
    """
    def __init__(self, name, drone):
        self.name = name            # display name of the course
        self.status = 0             # tick counter driving _run() steps
        self.is_running = False     # set by start(), cleared by stop()
        self.start_time = None      # wall-clock time of start()
        self.elapsed = None         # seconds since start(), see update_elapsed()
        self.drone = drone          # drone controller the course commands
    def start(self):
        """Mark the course started and remember the start time."""
        self.start_time = time.time()
        self.is_running = True
    def stop(self):
        """Stop the course and reset status; False if it was not running."""
        if not self.is_running:
            return False
        self.is_running = False
        self.status = 0
    def update_elapsed(self):
        """Refresh and return the elapsed seconds, or None when stopped."""
        if not self.is_running:
            return None
        self.elapsed = time.time() - self.start_time
        return self.elapsed
    def _run(self):
        # subclasses implement one tick of the scripted course
        raise NotImplementedError
    def run(self):
        """One tick: bump status, run the course step, refresh elapsed."""
        if not self.is_running:
            return False
        self.status += 1
        self._run()
        self.update_elapsed()
class CourseA(BaseCourse):
    """Scripted course: takeoff, four clockwise 90-degree turns,
    a front and a back flip, then land at tick 50."""

    def _run(self):
        tick = self.status
        if tick == 1:
            self.drone.takeoff()
        if tick in (10, 15, 20, 25):
            self.drone.clockwise(90)
        if tick == 30:
            self.drone.flip_front()
        if tick == 40:
            self.drone.flip_back()
        if tick == 50:
            self.drone.land()
            self.stop()
class CourseB(BaseCourse):
    """Scripted course with a time-based shortcut: between 10 and 15
    seconds elapsed the status jumps to 45, skipping the side flips."""

    def _run(self):
        tick = self.status
        if tick == 1:
            self.drone.takeoff()
        if tick == 10:
            self.drone.flip_front()
        if tick == 20:
            self.drone.flip_back()
        # time-based jump; later checks must re-read self.status
        if self.elapsed and 10 < self.elapsed < 15:
            self.status = 45
        if self.status == 30:
            self.drone.flip_right()
        if self.status == 40:
            self.drone.flip_left()
        if self.status == 50:
            self.drone.land()
            self.stop()
def get_courses(drone):
    """Map selectable course ids to course instances for `drone`."""
    courses = {
        1: CourseA('Course A', drone),
        2: CourseB('Course B', drone),
    }
    return courses
| 21.530612 | 56 | 0.54218 |
174efd8a1027b52cf2bd0ee721dd8fce07a29924 | 1,474 | py | Python | nipype/interfaces/minc/tests/test_auto_Convert.py | vferat/nipype | 536c57da150d157dcb5c121af43aaeab71cdbd5f | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/minc/tests/test_auto_Convert.py | vferat/nipype | 536c57da150d157dcb5c121af43aaeab71cdbd5f | [
"Apache-2.0"
] | 2 | 2018-04-17T19:18:16.000Z | 2020-03-04T22:05:02.000Z | nipype/interfaces/minc/tests/test_auto_Convert.py | oesteban/nipype | c14f24eba1da08711bbb894e049ee858ed740096 | [
"Apache-2.0"
] | null | null | null | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..minc import Convert
def test_Convert_inputs():
    """Verify every expected trait metadata value on Convert's input spec."""
    # NOTE: this module is auto-generated by tools/checkspecs.py (DO NOT
    # EDIT); only comments are added here. The map mirrors the interface's
    # declared traits and their command-line metadata.
    input_map = dict(
        args=dict(argstr='%s', ),
        chunk=dict(argstr='-chunk %d', ),
        clobber=dict(
            argstr='-clobber',
            usedefault=True,
        ),
        compression=dict(argstr='-compress %s', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        input_file=dict(
            argstr='%s',
            extensions=None,
            mandatory=True,
            position=-2,
        ),
        output_file=dict(
            argstr='%s',
            extensions=None,
            genfile=True,
            hash_files=False,
            name_source=['input_file'],
            name_template='%s_convert_output.mnc',
            position=-1,
        ),
        template=dict(argstr='-template', ),
        two=dict(argstr='-2', ),
    )
    inputs = Convert.input_spec()
    # Each (trait, metakey) pair must match the generated expectation.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_Convert_outputs():
    """Verify every expected trait metadata value on Convert's output spec."""
    # Auto-generated expectation map (see module header); comments only.
    output_map = dict(output_file=dict(extensions=None, ), )
    outputs = Convert.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| 30.081633 | 67 | 0.559701 |
29a2cff9f7c4c0bbc20c0f1a93391ce66e31c224 | 623 | py | Python | words.py | arnavpshah/c0deex-python-2 | 4336ac93191cc1bd258936fa139ce64f536fd090 | [
"MIT"
] | null | null | null | words.py | arnavpshah/c0deex-python-2 | 4336ac93191cc1bd258936fa139ce64f536fd090 | [
"MIT"
] | null | null | null | words.py | arnavpshah/c0deex-python-2 | 4336ac93191cc1bd258936fa139ce64f536fd090 | [
"MIT"
] | null | null | null | '''Function to get random word from list'''
import random
WORDLIST = 'wordlist.txt'
def get_random_word(min_word_length):
"""Gets a random word without using extra memory """
num_words_processed = 0
curr_word = None
without opne(WORDLIST 'r') as f:
for word in f:
if '(' in word or ')' in word:
continue
word = wordstrip().lower()
if len(word) < min_word_length:
continue
num_words_processed += 1
if rando.randint(1, num_words_processed) == 1:
curr_word = word
return curr_word
| 27.086957 | 58 | 0.571429 |
87bd091723b49a59c33acd8b8d6c26f91371c084 | 1,325 | py | Python | custom_components/ge_home/entities/common/ge_water_heater.py | 87racer/ha_gehome | 8b8741aec9b07bc13c270c1377a5e68c149d6f91 | [
"MIT"
] | null | null | null | custom_components/ge_home/entities/common/ge_water_heater.py | 87racer/ha_gehome | 8b8741aec9b07bc13c270c1377a5e68c149d6f91 | [
"MIT"
] | null | null | null | custom_components/ge_home/entities/common/ge_water_heater.py | 87racer/ha_gehome | 8b8741aec9b07bc13c270c1377a5e68c149d6f91 | [
"MIT"
] | null | null | null | import abc
import logging
from typing import Any, Dict, List, Optional
from homeassistant.components.water_heater import WaterHeaterEntity
from homeassistant.const import (
TEMP_FAHRENHEIT,
TEMP_CELSIUS
)
from gehomesdk import ErdCode, ErdMeasurementUnits
from ...const import DOMAIN
from .ge_erd_entity import GeEntity
_LOGGER = logging.getLogger(__name__)
class GeWaterHeater(GeEntity, WaterHeaterEntity, metaclass=abc.ABCMeta):
    """Expose a GE appliance component as a Home Assistant water heater.

    Abstract base: concrete subclasses must provide ``heater_type``,
    ``operation_list`` and ``supported_features``.
    """

    @property
    def heater_type(self) -> str:
        """Short identifier for this heater flavour (subclass responsibility)."""
        raise NotImplementedError

    @property
    def operation_list(self) -> List[str]:
        """Operation modes offered by the device (subclass responsibility)."""
        raise NotImplementedError

    @property
    def supported_features(self):
        """Feature flags for Home Assistant (subclass responsibility)."""
        raise NotImplementedError

    @property
    def unique_id(self) -> str:
        """Stable entity id derived from the appliance serial/MAC."""
        return f"{DOMAIN}_{self.serial_or_mac}_{self.heater_type}"

    @property
    def name(self) -> Optional[str]:
        """Human-readable entity name."""
        return f"{self.serial_or_mac} {self.heater_type.title()}"

    @property
    def temperature_unit(self):
        """Temperature unit reported to Home Assistant.

        The GE API appears to always report Fahrenheit, so the ERD
        measurement-unit lookup is intentionally bypassed here.
        """
        return TEMP_FAHRENHEIT
| 28.804348 | 84 | 0.725283 |
3a97941960ade3f2eae4d4c1d0d4d8068010f244 | 5,448 | py | Python | src/skmultiflow/trees/numeric_attribute_regression_observer_multi_target.py | PGijsbers/scikit-multiflow | f18a433cb7247aee0c5e7028536cc05045eec88f | [
"BSD-3-Clause"
] | null | null | null | src/skmultiflow/trees/numeric_attribute_regression_observer_multi_target.py | PGijsbers/scikit-multiflow | f18a433cb7247aee0c5e7028536cc05045eec88f | [
"BSD-3-Clause"
] | null | null | null | src/skmultiflow/trees/numeric_attribute_regression_observer_multi_target.py | PGijsbers/scikit-multiflow | f18a433cb7247aee0c5e7028536cc05045eec88f | [
"BSD-3-Clause"
] | null | null | null | from skmultiflow.trees.attribute_class_observer import AttributeClassObserver
from skmultiflow.trees.numeric_attribute_binary_test import \
NumericAttributeBinaryTest
from skmultiflow.trees.attribute_split_suggestion import \
AttributeSplitSuggestion
class NumericAttributeRegressionObserverMultiTarget(AttributeClassObserver):
    """iSoup-Tree's Extended Binary Search Tree (E-BST)

    This class implements the Extended Binary Search Tree (E-BST)
    structure, using the variant employed by Osojnik et al. [1]_ in the
    iSoup-Tree algorithm. This structure is employed to observe the target
    space distribution.

    In this variant, only the left branch statistics are stored: each node
    accumulates the count, target sum, and squared-target sum of every
    observation falling in its own key or its left subtree.

    References:
        .. [1] Osojnik, Aljaž. 2017. Structured output prediction on Data
           Streams (Doctoral Dissertation). Retrieved from:
           http://kt.ijs.si/theses/phd_aljaz_osojnik.pdf
    """

    class Node:
        """One E-BST node holding left-branch-inclusive target statistics."""

        def __init__(self, att_val, target):
            self.att_val = att_val          # attribute value keyed by this node
            self.k = 1                      # observation count (self + left subtree)
            self.sum_target = target        # running sum of targets
            self.sum_sq_target = target * target  # running sum of squared targets
            self._left = None
            self._right = None

        # Incremental implementation of the insert method. Avoiding unnecessary
        # stack tracing must decrease memory costs
        def insert_value(self, att_val, target):
            """Insert (att_val, target), updating left-branch stats on the way down."""
            current = self
            antecedent = None
            while current is not None:
                antecedent = current
                if current.att_val == att_val:
                    # Exact key match: fold the observation into this node.
                    current.k += 1
                    current.sum_target += target
                    current.sum_sq_target += target * target
                    return
                elif att_val < current.att_val:
                    # Descending left: this node's stats include its left
                    # subtree, so update before moving down.
                    current.k += 1
                    current.sum_target += target
                    current.sum_sq_target += target * target
                    current = current._left
                    is_right = False
                else:
                    # Descending right: right-branch stats are not stored.
                    current = current._right
                    is_right = True
            # Value was not yet added to the tree
            if is_right:
                antecedent._right = \
                    NumericAttributeRegressionObserverMultiTarget.\
                    Node(att_val, target)
            else:
                antecedent._left = \
                    NumericAttributeRegressionObserverMultiTarget.\
                    Node(att_val, target)

    def __init__(self):
        super().__init__()
        self._root = None  # E-BST root; built lazily on first observation

    def observe_attribute_class(self, att_val, class_val, weight):
        """Record one (attribute value, target) observation in the E-BST.

        NOTE(review): `weight` is accepted but ignored by this observer.
        """
        if att_val is None:
            return
        else:
            if self._root is None:
                self._root = NumericAttributeRegressionObserverMultiTarget.\
                    Node(att_val, class_val)
            else:
                self._root.insert_value(att_val, class_val)

    def probability_of_attribute_value_given_class(self, att_val, class_val):
        # Not meaningful for regression targets; kept for interface parity.
        return 0.0

    def get_best_evaluated_split_suggestion(self, criterion, pre_split_dist,
                                            att_idx, binary_only=True):
        """Scan all stored split points and return the best suggestion.

        Stashes the criterion/distribution on `self` and resets the aux
        accumulators used by the in-order traversal in _find_best_split.
        """
        self._criterion = criterion
        # self._pre_split_dist = list(pre_split_dist.values())
        self._pre_split_dist = pre_split_dist
        self._att_idx = att_idx
        # Accumulated left-branch mass of every ancestor we descended right from.
        self._aux_k = 0
        self._aux_sum = 0.0
        self._aux_sq_sum = 0.0
        candidate = AttributeSplitSuggestion(None, [{}], -float('inf'))
        best_split = self._find_best_split(self._root, candidate)
        return best_split

    def _find_best_split(self, node, candidate):
        """In-order traversal evaluating a binary split at every node key.

        The aux accumulators carry the stats of everything to the left of the
        current subtree, so `node` stats + aux = full left-side distribution.
        """
        if node._left is not None:
            candidate = self._find_best_split(node._left, candidate)
        # Left post split distribution
        left_dist = {}
        left_dist[0] = node.k + self._aux_k
        left_dist[1] = node.sum_target + self._aux_sum
        left_dist[2] = node.sum_sq_target + self._aux_sq_sum
        # The right split distribution is calculated as the difference
        # between the total distribution (pre split distribution) and
        # the left distribution
        right_dist = {}
        right_dist[0] = self._pre_split_dist[0] - left_dist[0]
        right_dist[1] = self._pre_split_dist[1] - left_dist[1]
        right_dist[2] = self._pre_split_dist[2] - left_dist[2]
        post_split_dists = [left_dist, right_dist]
        merit = self._criterion.get_merit_of_split(self._pre_split_dist,
                                                   post_split_dists)
        if merit > candidate.merit:
            num_att_binary_test = NumericAttributeBinaryTest(self._att_idx,
                                                             node.att_val,
                                                             True)
            candidate = AttributeSplitSuggestion(num_att_binary_test,
                                                 post_split_dists, merit)
        if node._right is not None:
            # Entering the right subtree: this node's left-inclusive stats
            # become part of the traversal's accumulated left mass...
            self._aux_k += node.k
            self._aux_sum += node.sum_target
            self._aux_sq_sum += node.sum_sq_target
            right_candidate = self._find_best_split(node._right, candidate)
            if right_candidate.merit > candidate.merit:
                candidate = right_candidate
            # ...and must be backed out when unwinding.
            self._aux_k -= node.k
            self._aux_sum -= node.sum_target
            self._aux_sq_sum -= node.sum_sq_target
        return candidate
| 37.061224 | 78 | 0.590675 |
82203e170e590bca946f9ba5f2105a59e4dd1259 | 12,223 | py | Python | homeassistant/components/ezviz/config_flow.py | jonasjeeliasson/core | 0301706fc631ad1f2cd2532667ba9dfe2f856198 | [
"Apache-2.0"
] | 1 | 2020-12-18T12:23:04.000Z | 2020-12-18T12:23:04.000Z | homeassistant/components/ezviz/config_flow.py | jonasjeeliasson/core | 0301706fc631ad1f2cd2532667ba9dfe2f856198 | [
"Apache-2.0"
] | 51 | 2020-07-06T14:35:48.000Z | 2022-03-31T06:01:47.000Z | homeassistant/components/ezviz/config_flow.py | jonasjeeliasson/core | 0301706fc631ad1f2cd2532667ba9dfe2f856198 | [
"Apache-2.0"
] | 2 | 2020-06-03T20:24:39.000Z | 2020-06-06T19:52:09.000Z | """Config flow for ezviz."""
import logging
from pyezviz.client import EzvizClient, HTTPError, InvalidURL, PyEzvizError
from pyezviz.test_cam_rtsp import AuthTestResultFailed, InvalidHost, TestRTSPAuth
import voluptuous as vol
from homeassistant.config_entries import CONN_CLASS_CLOUD_POLL, ConfigFlow, OptionsFlow
from homeassistant.const import (
CONF_CUSTOMIZE,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_TYPE,
CONF_URL,
CONF_USERNAME,
)
from homeassistant.core import callback
from .const import (
ATTR_SERIAL,
ATTR_TYPE_CAMERA,
ATTR_TYPE_CLOUD,
CONF_FFMPEG_ARGUMENTS,
DEFAULT_CAMERA_USERNAME,
DEFAULT_FFMPEG_ARGUMENTS,
DEFAULT_TIMEOUT,
DOMAIN,
EU_URL,
RUSSIA_URL,
)
_LOGGER = logging.getLogger(__name__)
def _get_ezviz_client_instance(data):
    """Build and log in an EzvizClient from a credentials mapping.

    Missing URL/timeout entries fall back to the EU region and the
    integration default timeout.
    """
    client = EzvizClient(
        data[CONF_USERNAME],
        data[CONF_PASSWORD],
        data.get(CONF_URL, EU_URL),
        data.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
    )
    client.login()
    return client
def _test_camera_rtsp_creds(data):
    """Run an authenticated RTSP DESCRIBE probe against the camera."""
    tester = TestRTSPAuth(
        data[CONF_IP_ADDRESS],
        data[CONF_USERNAME],
        data[CONF_PASSWORD],
    )
    tester.main()
class EzvizConfigFlow(ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Ezviz.

    Two entry kinds are managed: a single cloud-account entry
    (ATTR_TYPE_CLOUD) and one RTSP-credentials entry per camera
    (ATTR_TYPE_CAMERA).
    """

    VERSION = 1
    CONNECTION_CLASS = CONN_CLASS_CLOUD_POLL

    async def _validate_and_create_auth(self, data):
        """Try to login to ezviz cloud account and create entry if successful."""
        # The account username doubles as the entry's unique id.
        await self.async_set_unique_id(data[CONF_USERNAME])
        self._abort_if_unique_id_configured()
        # Verify cloud credentials by attempting a login request.
        # NOTE(review): each handler re-raises the exception *class*, not the
        # caught instance, so the original error message is discarded —
        # confirm this is intentional.
        try:
            await self.hass.async_add_executor_job(_get_ezviz_client_instance, data)
        except InvalidURL as err:
            raise InvalidURL from err
        except HTTPError as err:
            raise InvalidHost from err
        except PyEzvizError as err:
            raise PyEzvizError from err
        auth_data = {
            CONF_USERNAME: data[CONF_USERNAME],
            CONF_PASSWORD: data[CONF_PASSWORD],
            CONF_URL: data.get(CONF_URL, EU_URL),
            CONF_TYPE: ATTR_TYPE_CLOUD,
        }
        return self.async_create_entry(title=data[CONF_USERNAME], data=auth_data)

    async def _validate_and_create_camera_rtsp(self, data):
        """Try DESCRIBE on RTSP camera with credentials."""
        # Get Ezviz cloud credentials from config entry
        ezviz_client_creds = {
            CONF_USERNAME: None,
            CONF_PASSWORD: None,
            CONF_URL: None,
        }
        for item in self._async_current_entries():
            if item.data.get(CONF_TYPE) == ATTR_TYPE_CLOUD:
                ezviz_client_creds = {
                    CONF_USERNAME: item.data.get(CONF_USERNAME),
                    CONF_PASSWORD: item.data.get(CONF_PASSWORD),
                    CONF_URL: item.data.get(CONF_URL),
                }
        # Abort flow if user removed cloud account before adding camera.
        if ezviz_client_creds[CONF_USERNAME] is None:
            return self.async_abort(reason="ezviz_cloud_account_missing")
        # We need to wake hibernating cameras.
        # First create EZVIZ API instance.
        try:
            ezviz_client = await self.hass.async_add_executor_job(
                _get_ezviz_client_instance, ezviz_client_creds
            )
        except InvalidURL as err:
            raise InvalidURL from err
        except HTTPError as err:
            raise InvalidHost from err
        except PyEzvizError as err:
            raise PyEzvizError from err
        # Secondly try to wake hibernating camera.
        try:
            await self.hass.async_add_executor_job(
                ezviz_client.get_detection_sensibility, data[ATTR_SERIAL]
            )
        except HTTPError as err:
            raise InvalidHost from err
        # Thirdly attempts an authenticated RTSP DESCRIBE request.
        try:
            await self.hass.async_add_executor_job(_test_camera_rtsp_creds, data)
        except InvalidHost as err:
            raise InvalidHost from err
        except AuthTestResultFailed as err:
            raise AuthTestResultFailed from err
        # The camera serial becomes the entry title; only its RTSP
        # credentials are stored on the per-camera entry.
        return self.async_create_entry(
            title=data[ATTR_SERIAL],
            data={
                CONF_USERNAME: data[CONF_USERNAME],
                CONF_PASSWORD: data[CONF_PASSWORD],
                CONF_TYPE: ATTR_TYPE_CAMERA,
            },
        )

    @staticmethod
    @callback
    def async_get_options_flow(config_entry):
        """Get the options flow for this handler."""
        return EzvizOptionsFlowHandler(config_entry)

    async def async_step_user(self, user_input=None):
        """Handle a flow initiated by the user."""
        # Check if ezviz cloud account is present in entry config,
        # abort if already configured.
        for item in self._async_current_entries():
            if item.data.get(CONF_TYPE) == ATTR_TYPE_CLOUD:
                return self.async_abort(reason="already_configured_account")
        errors = {}
        if user_input is not None:
            # "Customize" region: stash the credentials and ask for a URL.
            if user_input[CONF_URL] == CONF_CUSTOMIZE:
                self.context["data"] = {
                    CONF_USERNAME: user_input[CONF_USERNAME],
                    CONF_PASSWORD: user_input[CONF_PASSWORD],
                }
                return await self.async_step_user_custom_url()
            if CONF_TIMEOUT not in user_input:
                user_input[CONF_TIMEOUT] = DEFAULT_TIMEOUT
            try:
                return await self._validate_and_create_auth(user_input)
            except InvalidURL:
                errors["base"] = "invalid_host"
            except InvalidHost:
                errors["base"] = "cannot_connect"
            except PyEzvizError:
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                return self.async_abort(reason="unknown")
        data_schema = vol.Schema(
            {
                vol.Required(CONF_USERNAME): str,
                vol.Required(CONF_PASSWORD): str,
                vol.Required(CONF_URL, default=EU_URL): vol.In(
                    [EU_URL, RUSSIA_URL, CONF_CUSTOMIZE]
                ),
            }
        )
        return self.async_show_form(
            step_id="user", data_schema=data_schema, errors=errors
        )

    async def async_step_user_custom_url(self, user_input=None):
        """Handle a flow initiated by the user for custom region url."""
        errors = {}
        if user_input is not None:
            # Credentials were stashed by async_step_user; merge them back in.
            user_input[CONF_USERNAME] = self.context["data"][CONF_USERNAME]
            user_input[CONF_PASSWORD] = self.context["data"][CONF_PASSWORD]
            if CONF_TIMEOUT not in user_input:
                user_input[CONF_TIMEOUT] = DEFAULT_TIMEOUT
            try:
                return await self._validate_and_create_auth(user_input)
            except InvalidURL:
                errors["base"] = "invalid_host"
            except InvalidHost:
                errors["base"] = "cannot_connect"
            except PyEzvizError:
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                return self.async_abort(reason="unknown")
        data_schema_custom_url = vol.Schema(
            {
                vol.Required(CONF_URL, default=EU_URL): str,
            }
        )
        return self.async_show_form(
            step_id="user_custom_url", data_schema=data_schema_custom_url, errors=errors
        )

    async def async_step_discovery(self, discovery_info):
        """Handle a flow for discovered camera without rtsp config entry."""
        await self.async_set_unique_id(discovery_info[ATTR_SERIAL])
        self._abort_if_unique_id_configured()
        # Carry the serial/IP into the confirm step via the flow context.
        self.context["title_placeholders"] = {"serial": self.unique_id}
        self.context["data"] = {CONF_IP_ADDRESS: discovery_info[CONF_IP_ADDRESS]}
        return await self.async_step_confirm()

    async def async_step_confirm(self, user_input=None):
        """Confirm and create entry from discovery step."""
        errors = {}
        if user_input is not None:
            user_input[ATTR_SERIAL] = self.unique_id
            user_input[CONF_IP_ADDRESS] = self.context["data"][CONF_IP_ADDRESS]
            try:
                return await self._validate_and_create_camera_rtsp(user_input)
            except (InvalidHost, InvalidURL):
                errors["base"] = "invalid_host"
            except (PyEzvizError, AuthTestResultFailed):
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception("Unexpected exception")
                return self.async_abort(reason="unknown")
        discovered_camera_schema = vol.Schema(
            {
                vol.Required(CONF_USERNAME, default=DEFAULT_CAMERA_USERNAME): str,
                vol.Required(CONF_PASSWORD): str,
            }
        )
        return self.async_show_form(
            step_id="confirm",
            data_schema=discovered_camera_schema,
            errors=errors,
            description_placeholders={
                "serial": self.unique_id,
                CONF_IP_ADDRESS: self.context["data"][CONF_IP_ADDRESS],
            },
        )

    async def async_step_import(self, import_config):
        """Handle config import from yaml."""
        _LOGGER.debug("import config: %s", import_config)
        # Check importing camera.
        if ATTR_SERIAL in import_config:
            return await self.async_step_import_camera(import_config)
        # Validate and setup of main ezviz cloud account.
        try:
            return await self._validate_and_create_auth(import_config)
        except InvalidURL:
            _LOGGER.error("Error importing Ezviz platform config: invalid host")
            return self.async_abort(reason="invalid_host")
        except InvalidHost:
            _LOGGER.error("Error importing Ezviz platform config: cannot connect")
            return self.async_abort(reason="cannot_connect")
        except (AuthTestResultFailed, PyEzvizError):
            _LOGGER.error("Error importing Ezviz platform config: invalid auth")
            return self.async_abort(reason="invalid_auth")
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception(
                "Error importing ezviz platform config: unexpected exception"
            )
            return self.async_abort(reason="unknown")

    async def async_step_import_camera(self, data):
        """Create RTSP auth entry per camera in config."""
        await self.async_set_unique_id(data[ATTR_SERIAL])
        self._abort_if_unique_id_configured()
        _LOGGER.debug("Create camera with: %s", data)
        # The serial is popped out of the stored data and used as the title.
        cam_serial = data.pop(ATTR_SERIAL)
        data[CONF_TYPE] = ATTR_TYPE_CAMERA
        return self.async_create_entry(title=cam_serial, data=data)
class EzvizOptionsFlowHandler(OptionsFlow):
    """Options flow exposing the tunable EZVIZ client settings."""

    def __init__(self, config_entry):
        """Remember the config entry whose options are being edited."""
        self.config_entry = config_entry

    async def async_step_init(self, user_input=None):
        """Show the options form, or persist the submitted values."""
        if user_input is not None:
            return self.async_create_entry(title="", data=user_input)
        current = self.config_entry.options
        timeout_default = current.get(CONF_TIMEOUT, DEFAULT_TIMEOUT)
        ffmpeg_default = current.get(CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS)
        schema = vol.Schema(
            {
                vol.Optional(CONF_TIMEOUT, default=timeout_default): int,
                vol.Optional(CONF_FFMPEG_ARGUMENTS, default=ffmpeg_default): str,
            }
        )
        return self.async_show_form(step_id="init", data_schema=schema)
| 32.594667 | 88 | 0.624315 |
a1fcaaf45af33a39e9b376617294eb3a3d009ad4 | 8,464 | py | Python | cinder/tests/unit/attachments/test_attachments_api.py | mail2nsrajesh/cinder | a688b872bec6d1abd4dcd852bdb8e8a921369d2e | [
"Apache-2.0"
] | null | null | null | cinder/tests/unit/attachments/test_attachments_api.py | mail2nsrajesh/cinder | a688b872bec6d1abd4dcd852bdb8e8a921369d2e | [
"Apache-2.0"
] | 2 | 2018-10-25T13:04:01.000Z | 2019-08-17T13:15:24.000Z | cinder/tests/unit/attachments/test_attachments_api.py | mail2nsrajesh/cinder | a688b872bec6d1abd4dcd852bdb8e8a921369d2e | [
"Apache-2.0"
] | 2 | 2018-10-17T13:32:50.000Z | 2018-11-08T08:39:39.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from cinder import context
from cinder import db
from cinder import exception
from cinder import objects
from cinder import test
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit import utils as tests_utils
from cinder.volume import api as volume_api
from cinder.volume import configuration as conf
CONF = cfg.CONF
class AttachmentManagerTestCase(test.TestCase):
    """Attachment related test for volume/api.py.

    NOTE: in every test, mock arguments arrive bottom-up — the innermost
    @mock.patch decorator supplies the first mock parameter.
    """

    def setUp(self):
        """Setup test class."""
        super(AttachmentManagerTestCase, self).setUp()
        self.configuration = mock.Mock(conf.Configuration)
        self.context = context.get_admin_context()
        self.context.user_id = fake.USER_ID
        self.project_id = fake.PROJECT3_ID
        self.context.project_id = self.project_id
        self.volume_api = volume_api.API()

    @mock.patch('cinder.volume.api.check_policy')
    def test_attachment_create_no_connector(self, mock_policy):
        """Test attachment_create no connector."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        # Without a connector the attachment stays in the 'reserved' state
        # with no connection info populated.
        self.assertEqual(fake.UUID2, aref.instance_uuid)
        self.assertIsNone(aref.attach_time)
        self.assertEqual('reserved', aref.attach_status)
        self.assertIsNone(aref.attach_mode)
        self.assertEqual(vref.id, aref.volume_id)
        self.assertEqual({}, aref.connection_info)

    @mock.patch('cinder.volume.api.check_policy')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
    def test_attachment_create_with_connector(self,
                                              mock_rpc_attachment_update,
                                              mock_policy):
        """Test attachment_create with connector."""
        volume_params = {'status': 'available'}
        connection_info = {'fake_key': 'fake_value',
                           'fake_key2': ['fake_value1', 'fake_value2']}
        mock_rpc_attachment_update.return_value = connection_info
        vref = tests_utils.create_volume(self.context, **volume_params)
        connector = {'fake': 'connector'}
        attachment = self.volume_api.attachment_create(self.context,
                                                       vref,
                                                       fake.UUID2,
                                                       connector)
        # Supplying a connector triggers the RPC update call and stores
        # the connection info it returns.
        mock_rpc_attachment_update.assert_called_once_with(self.context,
                                                           mock.ANY,
                                                           connector,
                                                           mock.ANY)
        new_attachment = objects.VolumeAttachment.get_by_id(self.context,
                                                            attachment.id)
        self.assertEqual(connection_info, new_attachment.connection_info)

    @mock.patch('cinder.volume.api.check_policy')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
    def test_attachment_delete_reserved(self,
                                        mock_rpc_attachment_delete,
                                        mock_policy):
        """Test attachment_delete with reserved."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        aobj = objects.VolumeAttachment.get_by_id(self.context,
                                                  aref.id)
        self.assertEqual('reserved', aref.attach_status)
        self.assertEqual(vref.id, aref.volume_id)
        self.volume_api.attachment_delete(self.context,
                                          aobj)
        # Since it's just reserved and never finalized, we should never make an
        # rpc call
        mock_rpc_attachment_delete.assert_not_called()

    @mock.patch('cinder.volume.api.check_policy')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_delete')
    @mock.patch('cinder.volume.rpcapi.VolumeAPI.attachment_update')
    def test_attachment_create_update_and_delete(
            self,
            mock_rpc_attachment_update,
            mock_rpc_attachment_delete,
            mock_policy):
        """Test attachment_delete."""
        volume_params = {'status': 'available'}
        connection_info = {'fake_key': 'fake_value',
                           'fake_key2': ['fake_value1', 'fake_value2']}
        mock_rpc_attachment_update.return_value = connection_info
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        aref = objects.VolumeAttachment.get_by_id(self.context,
                                                  aref.id)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        connector = {'fake': 'connector'}
        self.volume_api.attachment_update(self.context,
                                          aref,
                                          connector)
        aref = objects.VolumeAttachment.get_by_id(self.context,
                                                  aref.id)
        self.assertEqual(connection_info, aref.connection_info)
        # We mock the actual call that updates the status
        # so force it here
        values = {'volume_id': vref.id,
                  'volume_host': vref.host,
                  'attach_status': 'attached',
                  'instance_uuid': fake.UUID2}
        aref = db.volume_attach(self.context, values)
        aref = objects.VolumeAttachment.get_by_id(self.context,
                                                  aref.id)
        self.assertEqual(vref.id, aref.volume_id)
        self.volume_api.attachment_delete(self.context,
                                          aref)
        # An attached attachment must be torn down through the RPC layer.
        mock_rpc_attachment_delete.assert_called_once_with(self.context,
                                                           aref.id,
                                                           mock.ANY)

    @mock.patch('cinder.volume.api.check_policy')
    def test_additional_attachment_create_no_connector(self, mock_policy):
        """Test attachment_create no connector."""
        volume_params = {'status': 'available'}
        vref = tests_utils.create_volume(self.context, **volume_params)
        aref = self.volume_api.attachment_create(self.context,
                                                 vref,
                                                 fake.UUID2)
        self.assertEqual(fake.UUID2, aref.instance_uuid)
        self.assertIsNone(aref.attach_time)
        self.assertEqual('reserved', aref.attach_status)
        self.assertIsNone(aref.attach_mode)
        self.assertEqual(vref.id, aref.volume_id)
        self.assertEqual({}, aref.connection_info)
        # A second reservation for a *different* instance is rejected...
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.attachment_create,
                          self.context,
                          vref,
                          fake.UUID1)
        # ...but the same instance may reserve the volume again.
        self.volume_api.attachment_create(self.context,
                                          vref,
                                          fake.UUID2)
        vref = objects.Volume.get_by_id(self.context,
                                        vref.id)
        self.assertEqual(2, len(vref.volume_attachment))
| 46.762431 | 79 | 0.562736 |
09f9d4af5df074b3a38475aef8a5465d3054bcbf | 18,941 | py | Python | kicost/distributors/api_partinfo_kitspace.py | Miceuz/KiCost | e442524ca152be1cc406395c771d7047bfc990bf | [
"MIT"
] | 1 | 2020-03-06T08:08:24.000Z | 2020-03-06T08:08:24.000Z | kicost/distributors/api_partinfo_kitspace.py | Miceuz/KiCost | e442524ca152be1cc406395c771d7047bfc990bf | [
"MIT"
] | null | null | null | kicost/distributors/api_partinfo_kitspace.py | Miceuz/KiCost | e442524ca152be1cc406395c771d7047bfc990bf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# MIT license
#
# Copyright (C) 2018 by XESS Corporation / Max Maisel / Hildo Guillardi Júnior
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Author information.
__author__ = 'Hildo Guillardi Júnior'
__webpage__ = 'https://github.com/hildogjr/'
__company__ = 'University of Campinas - Brazil'
# Libraries.
import json, requests
import logging, tqdm
import copy, re
from collections import Counter
#from urllib.parse import quote_plus as urlquote
# KiCost definitions.
from .global_vars import * # Debug information, `distributor_dict` and `SEPRTR`.
# Distributors definitions.
from .distributor import distributor_class
MAX_PARTS_PER_QUERY = 20 # Maximum number of parts in a single query.
# Information to return from PartInfo KitSpace server.
QUERY_AVAIABLE_CURRENCIES = {'GBP', 'EUR', 'USD'}
#DEFAULT_CURRENCY
QUERY_ANSWER = '''
mpn{manufacturer, part},
datasheet,
description,
specs{key, value},
offers{
product_url,
sku {vendor, part},
description,
moq,
in_stock_quantity,
prices{''' + ','.join(QUERY_AVAIABLE_CURRENCIES) + '''}
}
'''
#Informations not used: type,specs{key, name, value},image {url, credit_string, credit_url},stock_location
QUERY_ANSWER = re.sub('[\s\n]', '', QUERY_ANSWER)
QUERY_PART = 'query ($input: MpnInput!) { part(mpn: $input) {' + QUERY_ANSWER + '} }'
QUERY_MATCH = 'query ($input: [MpnOrSku]!){ match(parts: $input) {' + QUERY_ANSWER + '} }'
QUERY_SEARCH = 'query ($input: String!){ search(term: $input) {' + QUERY_ANSWER + '} }'
QUERY_URL = 'https://dev-partinfo.kitspace.org/graphql'
__all__ = ['api_partinfo_kitspace']
class api_partinfo_kitspace(distributor_class):
@staticmethod
def init_dist_dict():
    """Register the KitSpace PartInfo module and its web distributors.

    On first load, creates the module's entry in the global
    `distributors_modules_dict` (marked enabled by default). Then, whenever
    the module is enabled, merges the distributors it serves into the
    global `distributor_dict`.
    """
    # Distributors reachable through the KitSpace PartInfo API, with their
    # order-form formatting rules and spreadsheet label styling.
    dists = {
        'digikey': {
            'module': 'digikey', 'type': 'web',
            'order': {
                'cols': ['purch', 'part_num', 'refs'],
                'delimiter': ',', 'not_allowed_char': ',', 'replace_by_char': ';',
            },
            'label': {
                'name': 'Digi-Key',
                'format': {'font_color': 'white', 'bg_color': '#CC0000'},  # Digi-Key red.
                'url': 'https://www.digikey.com/',
            },
        },
        'farnell': {
            'module': 'farnell', 'type': 'web',
            'order': {
                'cols': ['part_num', 'purch', 'refs'],
                'delimiter': ' ', 'not_allowed_char': ' ', 'replace_by_char': ';',
            },
            'label': {
                'name': 'Farnell',
                'format': {'font_color': 'white', 'bg_color': '#FF6600'},  # Farnell/E14 orange.
                # NOTE(review): this points at newark.com, not farnell.com —
                # looks like a copy/paste slip; confirm the intended URL.
                'url': 'https://www.newark.com/',
            },
        },
        'mouser': {
            'module': 'mouser', 'type': 'web',
            'order': {
                'cols': ['part_num', 'purch', 'refs'],
                'delimiter': '|', 'not_allowed_char': '| ', 'replace_by_char': ';_',
            },
            'label': {
                'name': 'Mouser',
                'format': {'font_color': 'white', 'bg_color': '#004A85'},  # Mouser blue.
                'url': 'https://www.mouser.com',
            },
        },
        'newark': {
            'module': 'newark', 'type': 'web',
            'order': {
                'cols': ['part_num', 'purch', 'refs'],
                'delimiter': ',', 'not_allowed_char': ',', 'replace_by_char': ';',
            },
            'label': {
                'name': 'Newark',
                'format': {'font_color': 'white', 'bg_color': '#A2AE06'},  # Newark/E14 olive green.
                'url': 'https://www.newark.com/',
            },
        },
        'rs': {
            'module': 'rs', 'type': 'web',
            'order': {
                'cols': ['part_num', 'purch', 'refs'],
                'delimiter': ' ', 'not_allowed_char': ' ', 'replace_by_char': ';',
            },
            'label': {
                'name': 'RS Components',
                'format': {'font_color': 'white', 'bg_color': '#FF0000'},  # RS Components red.
                'url': 'https://uk.rs-online.com/',
            },
        },
    }
    # First module load. Use `.get(..., {})` so a completely missing module
    # entry does not raise KeyError, and the idiomatic `not in` test (the
    # original `if not 'enabled' in ...` form tripped E713 and crashed when
    # the key was absent).
    if 'enabled' not in distributors_modules_dict.get('api_partinfo_kitspace', {}):
        distributors_modules_dict['api_partinfo_kitspace'] = {
            'type': 'api', 'url': 'https://kitspace.org/',  # Web site API information.
            'distributors': dists.keys(),  # Available web distributors in this API.
            'enabled': True,  # Default status of the module (loaded, but may not be called).
            'param': None,  # Configuration parameters.
            'dist_translation': {  # Distributor name translation.
                'Digikey': 'digikey',
                'Farnell': 'farnell',
                'Mouser': 'mouser',
                'Newark': 'newark',
                'RS': 'rs'
            }
        }
    # Update the `distributor_dict` with the distributors available in this
    # module when it is enabled (the GUI's saved configuration may have
    # disabled it).
    if distributors_modules_dict['api_partinfo_kitspace']['enabled']:
        distributor_dict.update(dists)
@staticmethod
def query(query_parts, query_type=QUERY_MATCH):
    '''Send a GraphQL query to the KitSpace PartInfo server.

    `query_parts` becomes the GraphQL `$input` variable (for QUERY_MATCH, a
    list of MPN/SKU dicts). Returns the parsed JSON response; raises an
    Exception describing the failure for any non-200 HTTP status.
    '''
    #r = requests.post(QUERY_URL, {"query": QUERY_SEARCH, "variables": variables}) #TODO future use for ISSUE #17
    # Serialize the variables with `json.dumps` instead of the previous
    # str() + regex munging: this handles quoting, whitespace, unicode and
    # Python 2 `u''` prefixes correctly by construction.
    variables = json.dumps({'input': query_parts})
    response = requests.post(QUERY_URL, {'query': query_type, 'variables': variables})
    if response.status_code == requests.codes['ok']:  # 200
        return json.loads(response.text)
    elif response.status_code == requests.codes['not_found']:  # 404
        raise Exception('Kitspace server not found check your internet connection.')
    elif response.status_code == requests.codes['request_timeout']:  # 408
        raise Exception('KitSpace is not responding.')
    elif response.status_code == requests.codes['bad_request']:  # 400
        raise Exception('Bad request to Kitspace server probably due to an incorrect string format check your `manf#` codes and contact the support team.')
    elif response.status_code == requests.codes['gateway_timeout']:  # 504
        raise Exception('One of the internal Kitspace services may experiencing problems. Contact the Kitspace support.')
    else:
        raise Exception('Kitspace error: ' + str(response.status_code))
@staticmethod
def get_value(data, item, default=None):
'''Get the value of `value` field of a dictionary if the `name`field identifier.
Used to get information from the JSON response.'''
try:
for d in data:
try:
if d['key'] == item:
return d['value']
except:
continue
return default
except:
return default
    @staticmethod
    def query_part_info(parts, distributors, currency=DEFAULT_CURRENCY):
        '''Fill-in the parts with price/qty/etc info from KitSpace.

        Args:
            parts: Part objects, mutated in place (price_tiers, qty_avail, moq,
                url, part_num, currency, qty_increment, info_dist per distributor).
            distributors: Accepted internal distributor names; offers from other
                vendors are ignored.
            currency: Preferred currency code for the price tiers.
        '''
        logger.log(DEBUG_OVERVIEW, '# Getting part data from KitSpace...')
        # Change the logging print channel to `tqdm` to keep the process bar to the end of terminal.
        class TqdmLoggingHandler(logging.Handler):
            '''Overload the class to write the logging through the `tqdm`.'''
            def __init__(self, level=logging.NOTSET):
                super(self.__class__, self).__init__(level)
            def emit(self, record):
                try:
                    msg = self.format(record)
                    tqdm.tqdm.write(msg)
                    self.flush()
                except (KeyboardInterrupt, SystemExit):
                    raise
                # Bare except here mirrors logging.Handler.emit's convention:
                # a logging failure must never crash the application.
                except:
                    self.handleError(record)
                    pass
        # Get handles to default sys.stdout logging handler and the
        # new "tqdm" logging handler.
        logDefaultHandler = logger.handlers[0]
        logTqdmHandler = TqdmLoggingHandler()
        # Replace default handler with "tqdm" handler.
        logger.addHandler(logTqdmHandler)
        logger.removeHandler(logDefaultHandler)
        # Translate from PartInfo distributor names to the names used internally by kicost.
        dist_xlate = distributors_modules_dict['api_partinfo_kitspace']['dist_translation']
        def get_part_info(query, parts):
            '''Query PartInfo for quantity/price info and place it into the parts list'''
            results = api_partinfo_kitspace.query(query)
            # Loop through the response to the query and enter info into the parts list.
            # NOTE(review): this assumes the server returns exactly one entry in
            # `results['data']['match']` per query, in request order -- confirm.
            for part_query, part, result in zip(query, parts, results['data']['match']):
                if not result:
                    logger.warning('No information found for part {}'.format(str(part_query)))
                else:
                    # Get the information of the part.
                    part.datasheet = result.get('datasheet')
                    # 'active' is used as the fallback lifecycle status when the
                    # spec is absent from the response.
                    part.lifecycle = api_partinfo_kitspace.get_value(result['specs'], 'lifecycle_status', 'active')
                    # Loop through the offers from various dists for this particular part.
                    for offer in result['offers']:
                        # Get the distributor who made the offer and add their
                        # price/qty info to the parts list if its one of the accepted distributors.
                        dist = dist_xlate.get(offer['sku']['vendor'], '')
                        if dist in distributors:
                            # Get pricing information from this distributor.
                            try:
                                price_tiers = {} # Empty dict in case of exception.
                                dist_currency = list(offer['prices'].keys())
                                # Get the price tiers prioritizing:
                                # 1) The asked currency by KiCOst user;
                                # 2) The default currency given by `DEFAULT_CURRENCY` in root `global_vars.py`;
                                # 3) The first not null tiers.
                                prices = None
                                if currency in dist_currency and offer['prices'][currency]:
                                    prices = offer['prices'][currency]
                                    part.currency[dist] = currency
                                elif DEFAULT_CURRENCY in dist_currency and offer['prices'][DEFAULT_CURRENCY]:# and DEFAULT_CURRENCY!=currency:
                                    prices = offer['prices'][DEFAULT_CURRENCY]
                                    part.currency[dist] = DEFAULT_CURRENCY
                                else:
                                    for dist_c in dist_currency:
                                        if offer['prices'][dist_c]:
                                            prices = offer['prices'][dist_c]
                                            part.currency[dist] = dist_c
                                            break
                                # Some times the API returns minimum purchase 0 and a not valid `price_tiers`.
                                if prices:
                                    price_tiers = {qty: float(price)
                                                   for qty, price in list(prices)
                                                   }
                                    # Combine price lists for multiple offers from the same distributor
                                    # to build a complete list of cut-tape and reeled components.
                                    if not dist in part.price_tiers:
                                        part.price_tiers[dist] = {}
                                    part.price_tiers[dist].update(price_tiers)
                            except TypeError:
                                pass # Price list is probably missing so leave empty default dict in place.
                            # Compute the quantity increment between the lowest two prices.
                            # This will be used to distinguish the cut-tape from the reeled components.
                            try:
                                part_break_qtys = sorted(price_tiers.keys())
                                part_qty_increment = part_break_qtys[1] - part_break_qtys[0]
                            except IndexError:
                                # This will happen if there are not enough entries in the price/qty list.
                                # As a stop-gap measure, just assign infinity to the part increment.
                                # A better alternative may be to examine the packaging field of the offer.
                                part_qty_increment = float("inf")
                            # Use the qty increment to select the part SKU, web page, and available quantity.
                            # Do this if this is the first part offer from this dist. Each distributor can have
                            # different stock codes for the same part in different quantities / delivery package
                            # style: cut-tape, reel, ...
                            if not part.qty_avail[dist] or (offer.get('in_stock_quantity') and part.qty_avail[dist]<offer.get('in_stock_quantity')):
                                # Keeps the information of more availability.
                                part.qty_avail[dist] = offer.get('in_stock_quantity') # In stock.
                            if not part.moq[dist] or (offer.get('moq') and part.moq[dist]>offer.get('moq')):
                                # Save the link, stock code, ... of the page for minimum purchase.
                                part.moq[dist] = offer.get('moq') # Minimum order qty.
                                part.url[dist] = offer.get('product_url', '') # Page to purchase the minimum quantity.
                                part.part_num[dist] = offer.get('sku', '').get('part', '')
                            part.qty_increment[dist] = part_qty_increment
                            # Don't bother with any extra info from the distributor.
                            part.info_dist[dist] = {}
        # Get the valid distributor names used by them part catalog
        # that may be index by PartInfo. This is used to remove the
        # local distributors and future not implemented in the PartInfo
        # definition.
        distributors_name_api = distributors_modules_dict['api_partinfo_kitspace']['dist_translation'].values()
        # Create queries to get part price/quantities from PartInfo.
        queries = []
        query_parts = []
        for part in parts:
            # Create a PartInfo query using the manufacturer's part number or
            # the distributor's SKU.
            query = None
            part_code = part.fields.get('manf#')
            if part_code:
                query = {'mpn': {'manufacturer': '', 'part': part_code}}
            else:
                # No MPN, so use the first distributor SKU that's found.
                for dist_name in distributors_name_api:
                    part_code = part.fields.get(dist_name + '#')
                    if part_code:
                        query = {'sku': {'vendor': dist_name, 'part': part_code}}
                        break
            if query:
                # Add query for this part to the list of part queries.
                # part_query = {code_type: {'manufacturer': '', 'part': urlquote(part_code)}} # TODO
                queries.append(query)
                query_parts.append(part)
        # Setup progress bar to track progress of server queries.
        progress = tqdm.tqdm(desc='Progress', total=len(query_parts), unit='part', miniters=1)
        # Slice the queries into batches of the largest allowed size and gather
        # the part data for each batch.
        for i in range(0, len(queries), MAX_PARTS_PER_QUERY):
            slc = slice(i, i+MAX_PARTS_PER_QUERY)
            query_batch = queries[slc]
            part_batch = query_parts[slc]
            get_part_info(query_batch, part_batch)
            progress.update(len(query_batch))
        # Restore the logging print channel now that the progress bar is no longer needed.
        logger.addHandler(logDefaultHandler)
        logger.removeHandler(logTqdmHandler)
        # Done with the scraping progress bar so delete it or else we get an
        # error when the program terminates.
        del progress
| 51.191892 | 158 | 0.52352 |
34328efc26d6ae2c3126d8e770807525e689f942 | 3,641 | py | Python | homeassistant/components/sharkiq/config_flow.py | paoloantinori/core | 4bbc737954325e84e42572e8ce7e40116d1a271e | [
"Apache-2.0"
] | null | null | null | homeassistant/components/sharkiq/config_flow.py | paoloantinori/core | 4bbc737954325e84e42572e8ce7e40116d1a271e | [
"Apache-2.0"
] | 45 | 2020-07-23T07:13:34.000Z | 2022-03-31T06:01:55.000Z | homeassistant/components/sharkiq/config_flow.py | ajschmidt8/home-assistant | 75153dd4a3061f27674f4adbd9283e6c46534e66 | [
"Apache-2.0"
] | null | null | null | """Config flow for Shark IQ integration."""
import asyncio
from typing import Dict, Optional
import aiohttp
import async_timeout
from sharkiqpy import SharkIqAuthError, get_ayla_api
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import DOMAIN, LOGGER # pylint:disable=unused-import
# Voluptuous schema for the username/password form, shared by the initial
# `user` step and the `reauth` step of the config flow.
SHARKIQ_SCHEMA = vol.Schema(
    {vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
async def validate_input(hass: core.HomeAssistant, data):
    """Validate the user input allows us to connect.

    Attempts a sign-in against the Ayla networks API with the supplied
    credentials. Raises CannotConnect on timeout/HTTP transport failures
    and InvalidAuth on rejected credentials.
    """
    websession = hass.helpers.aiohttp_client.async_get_clientsession(hass)
    api = get_ayla_api(
        username=data[CONF_USERNAME],
        password=data[CONF_PASSWORD],
        websession=websession,
    )
    try:
        with async_timeout.timeout(10):
            LOGGER.debug("Initialize connection to Ayla networks API")
            await api.async_sign_in()
    except (asyncio.TimeoutError, aiohttp.ClientError) as conn_err:
        raise CannotConnect from conn_err
    except SharkIqAuthError as auth_err:
        raise InvalidAuth from auth_err
    # Return info that you want to store in the config entry.
    return {"title": data[CONF_USERNAME]}
class SharkIqConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
    """Handle a config flow for Shark IQ."""
    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
    async def _async_validate_input(self, user_input):
        """Validate form input.

        Returns an (info, errors) tuple: `info` is the dict produced by
        `validate_input` on success (otherwise None); `errors` maps "base"
        to a form error key when validation failed.
        """
        errors = {}
        info = None
        if user_input is not None:
            # noinspection PyBroadException
            try:
                info = await validate_input(self.hass, user_input)
            except CannotConnect:
                errors["base"] = "cannot_connect"
            except InvalidAuth:
                errors["base"] = "invalid_auth"
            except Exception:  # pylint: disable=broad-except
                LOGGER.exception("Unexpected exception")
                errors["base"] = "unknown"
        return info, errors
    async def async_step_user(self, user_input: Optional[Dict] = None):
        """Handle the initial step."""
        errors = {}
        if user_input is not None:
            info, errors = await self._async_validate_input(user_input)
            if info:
                return self.async_create_entry(title=info["title"], data=user_input)
        return self.async_show_form(
            step_id="user", data_schema=SHARKIQ_SCHEMA, errors=errors
        )
    async def async_step_reauth(self, user_input: Optional[dict] = None):
        """Handle re-auth if login is invalid."""
        errors = {}
        if user_input is not None:
            _, errors = await self._async_validate_input(user_input)
            if not errors:
                for entry in self._async_current_entries():
                    if entry.unique_id == self.unique_id:
                        self.hass.config_entries.async_update_entry(
                            entry, data=user_input
                        )
                        return self.async_abort(reason="reauth_successful")
            # BUG FIX: guard against `errors` being empty. Previously, when
            # validation succeeded but no config entry matched `unique_id`,
            # `errors["base"]` raised a KeyError; now we fall through and
            # re-show the form instead.
            if errors and errors["base"] != "invalid_auth":
                return self.async_abort(reason=errors["base"])
        return self.async_show_form(
            step_id="reauth",
            data_schema=SHARKIQ_SCHEMA,
            errors=errors,
        )
# Raised by `validate_input` on asyncio timeouts and aiohttp client errors.
class CannotConnect(exceptions.HomeAssistantError):
    """Error to indicate we cannot connect."""
# Raised by `validate_input` when the Ayla API rejects the credentials.
class InvalidAuth(exceptions.HomeAssistantError):
    """Error to indicate there is invalid auth."""
| 33.1 | 84 | 0.642406 |
8f30f7440b3e96009d37d162643c3d876dfe0fdc | 4,231 | py | Python | DQM/Integration/python/clients/dt4ml_dqm_sourceclient-live_cfg.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-30T16:24:46.000Z | 2021-11-30T16:24:46.000Z | DQM/Integration/python/clients/dt4ml_dqm_sourceclient-live_cfg.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 4 | 2021-11-29T13:57:56.000Z | 2022-03-29T06:28:36.000Z | DQM/Integration/python/clients/dt4ml_dqm_sourceclient-live_cfg.py | PKUfudawei/cmssw | 8fbb5ce74398269c8a32956d7c7943766770c093 | [
"Apache-2.0"
] | 1 | 2021-11-23T09:25:45.000Z | 2021-11-23T09:25:45.000Z | from __future__ import print_function
import FWCore.ParameterSet.Config as cms
import sys
import os
from Configuration.Eras.Era_Run3_cff import Run3
process = cms.Process("DTDQM", Run3)
unitTest = False
if 'unitTest=True' in sys.argv:
unitTest=True
#----------------------------
#### Event Source
#----------------------------
if unitTest:
process.load("DQM.Integration.config.unittestinputsource_cfi")
from DQM.Integration.config.unittestinputsource_cfi import options
else:
# for live online DQM in P5
process.load("DQM.Integration.config.inputsource_cfi")
from DQM.Integration.config.inputsource_cfi import options
# for testing in lxplus
#process.load("DQM.Integration.config.fileinputsource_cfi")
#from DQM.Integration.config.fileinputsource_cfi import options
#----------------------------
#### DQM Environment
#----------------------------
process.load("DQM.Integration.config.environment_cfi")
#----------------------------
#### DQM Live Environment
#----------------------------
process.dqmEnv.subSystemFolder = 'DT'
process.dqmSaver.tag = "DT"
process.dqmSaver.runNumber = options.runNumber
process.dqmSaverPB.tag = "DT"
process.dqmSaverPB.runNumber = options.runNumber
#-----------------------------
### CUSTOMIZE FOR ML
# prepare the output directory
filePath = "/globalscratch/dqm4ml_" + process.dqmRunConfig.type.value()
if unitTest:
filePath = "./dqm4ml_" + process.dqmRunConfig.type.value()
try:
os.makedirs(filePath)
except:
pass
process.dqmSaver.backupLumiCount = 10
process.dqmSaver.keepBackupLumi = True
process.dqmSaver.path = filePath
process.dqmSaverPB.path = filePath + "/pb"
# disable DQM gui
print("old:",process.DQM.collectorHost)
process.DQM.collectorHost = 'dqm-blackhole.cms'
print("new:",process.DQM.collectorHost)
### END OF CUSTOMIZE FOR ML
# DT reco and DQM sequences
process.load("Configuration.StandardSequences.GeometryRecoDB_cff")
process.load("Configuration/StandardSequences/MagneticField_cff")
process.load("DQM.DTMonitorModule.dt_dqm_sourceclient_common_cff")
#---- for P5 (online) DB access
process.load("DQM.Integration.config.FrontierCondition_GT_cfi")
#---- for offline DB: change and possibly customise the GT
#from Configuration.AlCa.GlobalTag import GlobalTag as gtCustomise
#process.GlobalTag = gtCustomise(process.GlobalTag, 'auto:run2_data', '')
# message logger
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('cout'),
cout = cms.untracked.PSet(threshold = cms.untracked.string('WARNING'))
)
process.dqmmodules = cms.Sequence(process.dqmEnv + process.dqmSaver + process.dqmSaverPB)
process.dtDQMPathPhys = cms.Path(process.unpackers + process.dqmmodules + process.physicsEventsFilter * process.dtDQMPhysSequence)
#process.dtDQMPathCalib = cms.Path(process.unpackers + process.dqmmodules + process.calibrationEventsFilter * process.dtDQMCalib)
process.twinMuxStage2Digis.DTTM7_FED_Source = "rawDataCollector"
process.dtunpacker.inputLabel = "rawDataCollector"
process.gtDigis.DaqGtInputTag = "rawDataCollector"
process.scalersRawToDigi.scalersInputTag = "rawDataCollector"
print("Running with run type = ", process.runType.getRunType())
#----------------------------
#### pp run settings
#----------------------------
if (process.runType.getRunType() == process.runType.pp_run):
pass
#----------------------------
#### cosmic run settings
#----------------------------
if (process.runType.getRunType() == process.runType.cosmic_run):
pass
#----------------------------
#### HI run settings
#----------------------------
if (process.runType.getRunType() == process.runType.hi_run):
process.twinMuxStage2Digis.DTTM7_FED_Source = "rawDataRepacker"
process.dtunpacker.inputLabel = "rawDataRepacker"
process.gtDigis.DaqGtInputTag = "rawDataRepacker"
process.scalersRawToDigi.scalersInputTag = "rawDataRepacker"
process.dtDigiMonitor.ResetCycle = 9999
### process customizations included here
from DQM.Integration.config.online_customizations_cfi import *
print("Final Source settings:", process.source)
process = customise(process)
| 32.79845 | 131 | 0.691562 |
2286a682b9fd6ae067ff4eeabea3ac54f348f8bc | 318 | py | Python | LGWebOSRemote/apps/openDisney+.py | ibrad3/small-projects | 786a8a215fa63df63901d2eeb70653bfd2ceb1f7 | [
"MIT"
] | null | null | null | LGWebOSRemote/apps/openDisney+.py | ibrad3/small-projects | 786a8a215fa63df63901d2eeb70653bfd2ceb1f7 | [
"MIT"
] | null | null | null | LGWebOSRemote/apps/openDisney+.py | ibrad3/small-projects | 786a8a215fa63df63901d2eeb70653bfd2ceb1f7 | [
"MIT"
] | null | null | null | from pylgtv import WebOsClient
import sys
import logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# Connect to the TV and launch the Disney+ app if it is installed.
try:
    tv = WebOsClient('192.168.0.54')
    for x in tv.get_apps():
        if x['title'] == 'Disney+':
            tv.launch_app(x['id'])
except Exception:
    # BUG FIX: was a bare `except:`, which also swallowed
    # KeyboardInterrupt/SystemExit (making Ctrl-C print this message
    # instead of exiting). Catch Exception only.
    print("Error connecting to TV")
fdb47aaa102522cd0864c26f2e558e8f1425db65 | 15,163 | py | Python | app/project/settings.py | giordan83/dockdj | 8d57057b89cf2414e18e6cda733af944e32dbbe7 | [
"MIT"
] | 64 | 2015-10-04T02:54:06.000Z | 2021-03-30T04:02:47.000Z | app/project/settings.py | kodani/elasticdock | a413be1075a0b3c5ac77bb2686b97ac39ef5cb1d | [
"MIT"
] | 7 | 2015-10-05T15:11:34.000Z | 2017-10-30T03:44:03.000Z | app/project/settings.py | kodani/elasticdock | a413be1075a0b3c5ac77bb2686b97ac39ef5cb1d | [
"MIT"
] | 15 | 2015-11-13T21:02:48.000Z | 2019-10-01T08:06:01.000Z | # -*- coding: utf-8 -*-
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# We'll need to interact with the OS/system to check paths,
# load environment variables, and so on.
import os
#############################################################################
#
# PATHS
#
#############################################################################
# What is the base directory of the project?
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
#############################################################################
#
# ENVIRONMENT VARIABLES
#
#############################################################################
# There should be a file called 'env.vars.manifest' in the same directory
# as this settings file. That manifest should have a number of KEY=VALUE
# declarations (one per line). That's the list of all the environment
# variables that need to be defined in order for this application to run.
# We'll parse that list and check that all the variables are set.
# If we detect that an environment variable is missing, we'll raise an error
# saying so. We'll use Django's ImproperlyConfigured for that.
from django.core.exceptions import ImproperlyConfigured
# Where is the manifest located?
SETTINGS_CWD = os.path.dirname(os.path.abspath(__file__))
# NOTE(review): the path is joined with a literal '/' -- os.path.join would
# be portable to non-POSIX systems.
ENV_VARS_MANIFEST = SETTINGS_CWD + '/env.vars.manifest'
# We'll build our dictionary of environment variables here:
ENV_VARS = {}
def split_up_key_and_value(key_value_string):
    """Split up 'key=value' into 'key' and 'value'.

    Only the FIRST '=' is treated as the separator, so values that
    themselves contain '=' (e.g. base64-encoded secret keys) are kept
    intact. Returns (key, None) when the string has no '='.
    """
    # BUG FIX: the original used split('=') with no limit, which truncated
    # any value containing another '=' (only parts[1] was kept).
    parts = key_value_string.split('=', 1)
    key = parts[0] if parts else None
    value = parts[1] if len(parts) > 1 else None
    return (key, value)
def booleanize_if_possible(sample):
    """Boolean-ize truthy/falsey strings.

    Returns True/False for recognized truthy/falsey spellings (case
    insensitive); any other value is returned unchanged.
    """
    lowered = sample.lower()
    if lowered in ('true', 'yes', 'on', 1):
        return True
    if lowered in ('false', 'no', 'off', 0):
        return False
    return sample
def get_env_variable(key, default=None):
    """Get an environment variable from the system.

    Falls back to `default` when the variable is unset; raises
    ImproperlyConfigured when it is unset and no default was given.
    The value is passed through `booleanize_if_possible`.
    """
    result = None
    try:
        result = os.environ[key]
    except KeyError:
        if default is not None:
            result = default
        else:
            # BUG FIX: the original applied '%'-interpolation to a '{}'-style
            # template ("Set the {} ..." % (key)), which raises
            # "TypeError: not all arguments converted" instead of producing
            # the intended message.
            msg = "Set the {} environment variable".format(key)
            raise ImproperlyConfigured(msg)
    return booleanize_if_possible(result)
# Read each line in the manifest.
# Each non-comment line is 'KEY=VALUE'; the manifest VALUE serves as the
# default when the real environment does not define KEY.
with open(ENV_VARS_MANIFEST) as f:
    lines = f.read().splitlines()
    for line in lines:
        # Ignoring empty lines and comments,
        # add the key/value pair to our dictionary.
        if line and line[0:1] != '#':
            key, value = split_up_key_and_value(line)
            ENV_VARS[key] = get_env_variable(key, value)
#############################################################################
#
# DEBUG SETTINGS
#
#############################################################################
# By default, DEBUG should be turned off (False). Only in a dev
# dev environment should it be turned on. While it is on (True),
# Django spits out detailed tracebacks when it sees errors, and
# we don't want to reveal that to the world. Note also that while
# this is True, Django remembers every SQL query it runs.
# In production, that would consume a lot of memory.
# https://docs.djangoproject.com/en/1.8/ref/settings/#debug
DEBUG = ENV_VARS['ENV_DEBUG']
# IPs for which debug tooling (e.g. the debug toolbar) is enabled.
# The 192.168.59.* entries are presumably docker-machine/boot2docker host
# addresses -- TODO confirm.
INTERNAL_IPS = (
    '127.0.0.1',
    '192.168.59.106',
    '192.168.59.3',
)
#############################################################################
#
# CRYPTOGRAPHIC SIGNING
#
#############################################################################
# This is used to sign sensitive data (like cookies and sessions).
# This MUST be kept secret. If it is compromised, change it immediately.
# Users will need to logout before it takes affect, but it's worth it.
# Placeholder fallback; overridden below whenever ENV_SECRET_KEY is set.
# NOTE(review): the fallback means a misconfigured deployment silently runs
# with a known key -- consider failing hard instead.
SECRET_KEY = 'bad_c0de'
if ENV_VARS['ENV_SECRET_KEY']:
    SECRET_KEY = ENV_VARS['ENV_SECRET_KEY']
#############################################################################
#
# HOSTS
#
#############################################################################
# If DEBUG=False, Django will only respond to requests that set
# their HTTP `Host` header to one of the following values (so if
# the `Host` header in the request is not set to one of these values,
# Django will respond with a `400 Bad Request`. Note, however, that
# if the USE_X_FORWARDED_HOST setting is enabled, then Django will look
# at that value first.
# See https://docs.djangoproject.com/en/1.8/ref/settings/#allowed-hosts
# NOTE(review): '*' disables Django's Host-header validation entirely,
# contradicting the protection described above. Presumably the upstream
# proxy is expected to filter the Host header -- confirm.
ALLOWED_HOSTS = [
    '*',
]
# If your Django site lives behind another server/proxy (like nginx
# or a load balancer), then that proxy may send the `Host` header
# it initially received as the `X-Forwarded-Host` header. If you want
# your Django site to use that header in preference to the `Host` header,
# then set this value to True. It defaults to False.
# WARNING: Do not set this unless you know for sure that your server/proxy
# is setting the `X-Forwarded-Host` header correctly.
# See https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-USE_X_FORWARDED_HOST
# USE_X_FORWARDED_HOST = False
#############################################################################
#
# CLICKJACKING, XSS, Etc.
#
#############################################################################
# Sets the HTTP `X-Frame-Options` header to prevent clickjacking.
# Note that django.middleware.clickjacking.XFrameOptionsMiddleware
# must be added to the MIDDLEWARE_CLASSES list.
# See https://docs.djangoproject.com/en/1.8/ref/clickjacking/.
X_FRAME_OPTIONS = 'DENY'
# Sets the HTTP `X-XSS-Protection: 1; mode=block' header so modern
# browsers who are able to try and catch malicious XSS attacks can do so.
# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-browser-xss-filter
SECURE_BROWSER_XSS_FILTER = True
# Sets the HTTP `X-Content-Type-Options: nosniff` header so that
# browsers don't try to guess the Content-Type of files. If the browser
# did guess, it might guess wrong, and open a malicious file.
# https://docs.djangoproject.com/en/1.8/ref/settings/#secure-content-type-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = True
#############################################################################
#
# SSL
#
#############################################################################
# Should we permanently redirect all HTTP requests to HTTPS?
# It is much more efficient to let nginx do this, rather than Django.
# https://docs.djangoproject.com/en/1.8/ref/middleware/#ssl-redirect
# SECURE_SSL_REDIRECT = True
#############################################################################
#
# Strict Transport Security
#
#############################################################################
# Set the HTTP `Strict-Transport-Security` header for a year.
# Note that django.middleware.security.SecurityMiddleware
# must be added to the MIDDLEWARE_CLASSES list.
# See https://docs.djangoproject.com/en/1.8/ref/middleware/#module-django.middleware.security
SECURE_HSTS_SECONDS = 60 # Inch this up to 31536000 (1 year) when we know it works.
# Also pin subdomains to HTTPS once HSTS is in effect.
SECURE_HSTS_INCLUDE_SUBDOMAINS = True
#############################################################################
#
# CSRF
#
#############################################################################
# When an incoming request is rejected by the CSRF protection,
# which view should display the 403 page?
# See https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-CSRF_FAILURE_VIEW
# CSRF_FAILURE_VIEW = 'apps.pages.errors.csrf_failure'
# Restrict CSRF cookies to be HTTP only, and only send them over HTTPS.
# See https://docs.djangoproject.com/en/1.8/ref/csrf/#settings
CSRF_COOKIE_HTTPONLY = True
CSRF_COOKIE_SECURE = True
# Allow non-HTTPS cookies in dev environments (ENV_SECURE_COOKIES=False).
if ENV_VARS['ENV_SECURE_COOKIES'] is False:
    CSRF_COOKIE_SECURE = False
# TO DO: Set this to something.
CSRF_COOKIE_DOMAIN = None
#############################################################################
#
# SESSION
#
#############################################################################
# Restrict session cookies to be read by HTTP only (not javascript),
# and only send them over HTTPS.
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-SESSION_COOKIE_HTTPONLY
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-SESSION_COOKIE_SECURE
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = True
# Allow non-HTTPS session cookies in dev environments (mirrors the CSRF toggle).
if ENV_VARS['ENV_SECURE_COOKIES'] is False:
    SESSION_COOKIE_SECURE = False
#############################################################################
#
# WSGI
#
#############################################################################
# WSGI stands for `Web Server Gateway Interface`. It specifies how a web
# server (like Apache, Gunicorn, etc) should interact with a Python
# application. In effect, this is how the web server talks to the Django
# project. So, which module should the webserver talk to?
# See https://docs.djangoproject.com/en/1.8/ref/settings/#wsgi-application
WSGI_APPLICATION = 'project.wsgi.application'
#############################################################################
#
# URLS
#
#############################################################################
# Where are all the URLs/routes?
# See https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-ROOT_URLCONF
ROOT_URLCONF = 'project.urls'
#############################################################################
#
# APPS
#
#############################################################################
# Which apps should this project use?
# See https://docs.djangoproject.com/en/1.8/ref/settings/#installed-apps
# NOTE(review): the middleware and template context processors below reference
# django.contrib.sessions/auth/messages, but those apps are not listed here --
# confirm this is intentional.
INSTALLED_APPS = (
    # Default django apps.
    'django.contrib.staticfiles',
    # Adds lots of commands to manage.py:
    'django_extensions',
    'apps.health',
)
#############################################################################
#
# MIDDLEWARE
#
#############################################################################
# Order matters: security/session middleware run first on the request,
# last on the response.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'django.middleware.security.SecurityMiddleware',
)
#############################################################################
#
# TEMPLATES
#
#############################################################################
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            'apps/base/templates/base',
            'apps',
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            # NOTE(review): a non-empty string_if_invalid renders this marker
            # into production pages wherever a template variable is undefined.
            'string_if_invalid': 'INVALID EXPRESSION: %s',
        },
    },
]
#############################################################################
#
# INTERNATIONALIZATION
#
#############################################################################
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
# Enable translation machinery, localized formatting, and tz-aware datetimes.
USE_I18N = True
USE_L10N = True
USE_TZ = True
# SQLite file stored alongside the project; suitable for dev, not production.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
#############################################################################
#
# ASSETS/STATIC FILES
#
#############################################################################
# For more information about Django's static files management, see
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# What URL should we prepend to assets? If you have a CDN,
# you should set that here.
# https://docs.djangoproject.com/en/1.8/ref/settings/#static-url
# Prefer the CDN base URL from the environment; fall back to local serving.
if ENV_VARS['ENV_CDN']:
    STATIC_URL = ENV_VARS['ENV_CDN']
else:
    STATIC_URL = '/static/'
# Where do we want to put static files? When we run the command
# `manage.py collectstatic`, Django will collect all the static files
# it can find and put them here. They can then be served from this
# location, or uploaded to a CDN, or what have you.
# https://docs.djangoproject.com/en/1.8/ref/settings/#std:setting-STATIC_ROOT
# NOTE(review): STATIC_ROOT is relative to the working directory, while
# STATICFILES_DIRS is anchored at BASE_DIR -- confirm the inconsistency
# is intentional.
STATIC_ROOT = 'app/dist'
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, "apps/webroot/static"),
)
# How should the `collectstatic` command gather the files together?
# To just copy them over to the STATIC_ROOT, use StaticFilesStorage.
# https://docs.djangoproject.com/en/1.8/ref/contrib/staticfiles/#staticfilesstorage
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.StaticFilesStorage'
#############################################################################
#
# LOGGING
#
#############################################################################
# 12factor methodology is just to stream logs to STDOUT, and let deployment
# runtime environments collect the stream for post-processing.
# That way the application itself doesn't know or care about where the logs
# go/end up. The deployment architecture handles that instead.
# Default log level; overridable per environment via ENV_LOG_LEVEL.
LOG_LEVEL = 'INFO'
if ENV_VARS['ENV_LOG_LEVEL']:
    LOG_LEVEL = ENV_VARS['ENV_LOG_LEVEL']
# Single console handler (12factor: stream everything to stdout); every
# logger below shares it at LOG_LEVEL.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    # What kinds of formatters should we use to format each log line?
    'formatters': {
        # Verbose log formatting:
        # "[03/Nov/2015 18:23:03] INFO [simple:simple.py:73] Its healthy!"
        'verbose': {
            'format': "[%(asctime)s] %(levelname)s [%(module)s:%(filename)s:%(lineno)s] %(message)s",
            'datefmt': "%d/%b/%Y %H:%M:%S"
        },
        # simple and terse verbosity:
        # "INFO Its healthy!"
        'simple': {
            'format': '%(levelname)s %(message)s',
        }
    },
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
            'formatter': 'verbose',
        },
    },
    'loggers': {
        'nara': {
            'handlers': ['console'],
            'level': LOG_LEVEL,
        },
        'django': {
            'handlers': ['console'],
            'level': LOG_LEVEL,
        },
        'django.request': {
            'handlers': ['console'],
            'level': LOG_LEVEL,
        },
        'django.db.backends': {
            'handlers': ['console'],
            'level': LOG_LEVEL,
        },
        'django.security': {
            'handlers': ['console'],
            'level': LOG_LEVEL,
        },
    },
}
| 33.179431 | 101 | 0.563411 |
547fecc33b6b1b6d3f216782b15983150561ce11 | 3,897 | py | Python | src/api/datalab/api/queryengine.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 84 | 2021-06-30T06:20:23.000Z | 2022-03-22T03:05:49.000Z | src/api/datalab/api/queryengine.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 7 | 2021-06-30T06:21:16.000Z | 2022-03-29T07:36:13.000Z | src/api/datalab/api/queryengine.py | Chromico/bk-base | be822d9bbee544a958bed4831348185a75604791 | [
"MIT"
] | 40 | 2021-06-30T06:21:26.000Z | 2022-03-29T12:42:26.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.api.base import DataAPI, DataDRFAPISet
from common.api.modules.utils import add_app_info_before_request
from datalab.pizza_settings import QUERYENGINE_API_ROOT
class _QueryEngineApi(object):
    """Registry of queryengine HTTP endpoints, wrapped as DataAPI objects.

    A single shared instance is created at module import time and exported
    as ``QueryEngineApi``; callers invoke e.g. ``QueryEngineApi.query_sync``.
    """

    def __init__(self):
        # Submit SQL asynchronously; the response carries a query_id handle.
        self.query_async = DataAPI(
            method="POST",
            url=QUERYENGINE_API_ROOT + "/query_async/",
            module="queryengine",
            description="获取query_id",
            before_request=add_app_info_before_request,
        )
        # Submit SQL and block until the result rows come back.
        self.query_sync = DataAPI(
            method="POST",
            url=QUERYENGINE_API_ROOT + "/query_sync/",
            module="queryengine",
            description="查询数据",
            before_request=add_app_info_before_request,
        )
        # Fetch the result set of a previously submitted async query.
        self.get_result = DataDRFAPISet(
            primary_key="query_id",
            url=QUERYENGINE_API_ROOT + "/query_async/result/",
            module="queryengine",
            description="获取结果集",
            custom_headers={"Content-Type": "application/json"},
        )
        # Fetch the execution-stage trace of an async SQL job.
        self.get_stage = DataDRFAPISet(
            primary_key="query_id",
            url=QUERYENGINE_API_ROOT + "/query_async/stage/",
            module="queryengine",
            description="获取SQL作业轨迹",
            custom_headers={"Content-Type": "application/json"},
        )
        # List metadata records for async queries.
        self.get_info_list = DataAPI(
            method="POST",
            url=QUERYENGINE_API_ROOT + "/query_async/info_list/",
            module="queryengine",
            description="获取info列表",
            custom_headers={"Content-Type": "application/json"},
        )
        # Retrieve the schema (column structure) of a result dataset.
        self.get_schema = DataDRFAPISet(
            primary_key="query_id",
            url=QUERYENGINE_API_ROOT + "/dataset/",
            module="queryengine",
            description="获取表结构信息",
            custom_headers={"Content-Type": "application/json"},
        )
        # Parse a SQL statement to obtain its type and referenced result tables.
        self.get_sqltype_and_result_tables = DataAPI(
            method="POST",
            url=QUERYENGINE_API_ROOT + "/sqlparse/sqltype_and_result_tables/",
            module="queryengine",
            description="获取结果表名",
            before_request=add_app_info_before_request,
        )
        # Delete the batch job configuration bound to a result table.
        self.delete_job_conf = DataAPI(
            method="DELETE",
            url=QUERYENGINE_API_ROOT + "/batch/custom_jobs/{result_table_id}",
            url_keys=["result_table_id"],
            module="queryengine",
            description="清除job配置",
            custom_headers={"Content-Type": "application/json"},
        )


# Module-level singleton used by the rest of the datalab package.
QueryEngineApi = _QueryEngineApi()
| 38.97 | 111 | 0.649731 |
299d32633c4cbe8a9b5e95661fb5f19e488951fa | 25 | py | Python | tests/sample_ext/default_settings.py | pyapp-org/pyapp | 46c9e8e3dbf1a872628fad8224b99b458972c92c | [
"BSD-3-Clause"
] | 5 | 2020-01-09T14:46:33.000Z | 2021-04-23T15:25:11.000Z | tests/sample_ext/default_settings.py | pyapp-org/pyapp | 46c9e8e3dbf1a872628fad8224b99b458972c92c | [
"BSD-3-Clause"
] | 111 | 2019-06-30T05:45:57.000Z | 2022-03-28T11:15:29.000Z | tests/sample_ext/default_settings.py | pyapp-org/pyapp | 46c9e8e3dbf1a872628fad8224b99b458972c92c | [
"BSD-3-Clause"
] | 2 | 2019-05-29T09:01:10.000Z | 2021-04-23T15:25:32.000Z | SAMPLE_EXT_VALUE = "123"
| 12.5 | 24 | 0.76 |
f7b197a518643842d062e79abf6a9ff8c1d2a9ed | 44,391 | py | Python | pandas/tests/frame/test_dtypes.py | AakankshaAshok/pandas | 6498bc1e8a12003640139db4794bd5cd2462c116 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/frame/test_dtypes.py | AakankshaAshok/pandas | 6498bc1e8a12003640139db4794bd5cd2462c116 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/frame/test_dtypes.py | AakankshaAshok/pandas | 6498bc1e8a12003640139db4794bd5cd2462c116 | [
"BSD-3-Clause"
] | null | null | null | from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_ftypes(self, mixed_float_frame):
frame = mixed_float_frame
expected = Series(
dict(
A="float32:dense",
B="float32:dense",
C="float16:dense",
D="float64:dense",
)
).sort_values()
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
result = frame.ftypes.sort_values()
tm.assert_series_equal(result, expected)
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
casted = float_frame.astype(np.int32)
expected = DataFrame(
float_frame.values.astype(np.int32),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
float_frame["foo"] = "5"
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
def test_astype_mixed_float(self, mixed_float_frame):
# mixed casting
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float32")
_check_cast(casted, "float32")
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float16")
_check_cast(casted, "float16")
def test_astype_mixed_type(self, mixed_type_frame):
# mixed casting
mn = mixed_type_frame._get_numeric_data().copy()
mn["little_float"] = np.array(12345.0, dtype="float16")
mn["big_float"] = np.array(123456789101112.0, dtype="float64")
casted = mn.astype("float64")
_check_cast(casted, "float64")
casted = mn.astype("int64")
_check_cast(casted, "int64")
casted = mn.reindex(columns=["little_float"]).astype("float16")
_check_cast(casted, "float16")
casted = mn.astype("float32")
_check_cast(casted, "float32")
casted = mn.astype("int32")
_check_cast(casted, "int32")
# to object
casted = mn.astype("O")
_check_cast(casted, "object")
def test_astype_with_exclude_string(self, float_frame):
df = float_frame.copy()
expected = float_frame.astype(int)
df["string"] = "foo"
casted = df.astype(int, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
df = float_frame.copy()
expected = float_frame.astype(np.int32)
df["string"] = "foo"
casted = df.astype(np.int32, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
def test_astype_with_view_float(self, float_frame):
# this is the only real reason to do it this way
tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
def test_astype_with_view_mixed_float(self, mixed_float_frame):
tf = mixed_float_frame.reindex(columns=["A", "B", "C"])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with pytest.raises(ValueError, match=msg):
df.astype(dtype)
def test_astype_str(self):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
result = df.astype(str)
expected = DataFrame(
{
"a": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(str, map(Timestamp, b._values))),
"c": list(
map(
str,
map(lambda x: Timedelta(x)._repr_base(format="all"), c._values),
)
),
"d": list(map(str, d._values)),
"e": list(map(str, e._values)),
}
)
tm.assert_frame_equal(result, expected)
def test_astype_str_float(self):
# see gh-11302
result = DataFrame([np.NaN]).astype(str)
expected = DataFrame(["nan"])
tm.assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(str)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = "1.12345678901" if _np_version_under1p14 else "1.1234567890123457"
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# GH7271 & GH16717
a = Series(date_range("2010-01-04", periods=5))
b = Series(range(5))
c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
d = Series(["1.0", "2", "3.14", "4", "5.4"])
df = DataFrame({"a": a, "b": b, "c": c, "d": d})
original = df.copy(deep=True)
# change type of a subset of columns
dt1 = dtype_class({"b": "str", "d": "float32"})
result = df.astype(dt1)
expected = DataFrame(
{
"a": a,
"b": Series(["0", "1", "2", "3", "4"]),
"c": c,
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float32"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
dt2 = dtype_class({"b": np.float32, "c": "float32", "d": np.float64})
result = df.astype(dt2)
expected = DataFrame(
{
"a": a,
"b": Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32"),
"c": Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype="float32"),
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float64"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
# change all columns
dt3 = dtype_class({"a": str, "b": str, "c": str, "d": str})
tm.assert_frame_equal(df.astype(dt3), df.astype(str))
tm.assert_frame_equal(df, original)
# error should be raised when using something other than column labels
# in the keys of the dtype dict
dt4 = dtype_class({"b": str, 2: str})
dt5 = dtype_class({"e": str})
msg = "Only a column name can be used for the key in a dtype mappings argument"
with pytest.raises(KeyError, match=msg):
df.astype(dt4)
with pytest.raises(KeyError, match=msg):
df.astype(dt5)
tm.assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
# resulting DataFrame should be the same as the original DataFrame
dt6 = dtype_class({col: df[col].dtype for col in df.columns})
equiv = df.astype(dt6)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
# GH 16717
# if dtypes provided is empty, the resulting DataFrame
# should be the same as the original DataFrame
dt7 = dtype_class({})
result = df.astype(dt7)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
a1 = Series([1, 2, 3, 4, 5], name="a")
b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name="b")
a2 = Series([0, 1, 2, 3, 4], name="a")
df = concat([a1, b, a2], axis=1)
result = df.astype(str)
a1_str = Series(["1", "2", "3", "4", "5"], dtype="str", name="a")
b_str = Series(["0.1", "0.2", "0.4", "0.6", "0.8"], dtype=str, name="b")
a2_str = Series(["0", "1", "2", "3", "4"], dtype="str", name="a")
expected = concat([a1_str, b_str, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
result = df.astype({"a": "str"})
expected = concat([a1_str, b, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
CategoricalDtype(ordered=True),
CategoricalDtype(ordered=False),
CategoricalDtype(categories=list("abcdef")),
CategoricalDtype(categories=list("edba"), ordered=False),
CategoricalDtype(categories=list("edcb"), ordered=True),
],
ids=repr,
)
def test_astype_categorical(self, dtype):
# GH 18099
d = {"A": list("abbc"), "B": list("bccd"), "C": list("cdde")}
df = DataFrame(d)
result = df.astype(dtype)
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cls",
[
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
pd.api.types.IntervalDtype,
],
)
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ["a", "a", "b", "c"]})
xpr = "Expected an instance of {}".format(cls.__name__)
with pytest.raises(TypeError, match=xpr):
df.astype({"A": cls})
with pytest.raises(TypeError, match=xpr):
df["A"].astype(cls)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes(self, dtype):
# GH 22578
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
expected1 = pd.DataFrame(
{
"a": integer_array([1, 3, 5], dtype=dtype),
"b": integer_array([2, 4, 6], dtype=dtype),
}
)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
tm.assert_frame_equal(df.astype(dtype).astype("float64"), df)
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
df["b"] = df["b"].astype(dtype)
expected2 = pd.DataFrame(
{"a": [1.0, 3.0, 5.0], "b": integer_array([2, 4, 6], dtype=dtype)}
)
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes_1d(self, dtype):
# GH 22578
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
expected1 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
df["a"] = df["a"].astype(dtype)
expected2 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["category", "Int64"])
def test_astype_extension_dtypes_duplicate_col(self, dtype):
# GH 24704
a1 = Series([0, np.nan, 4], name="a")
a2 = Series([np.nan, 3, 5], name="a")
df = concat([a1, a2], axis=1)
result = df.astype(dtype)
expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("kwargs", [dict(), dict(other=None)])
def test_df_where_with_category(self, kwargs):
# GH 16979
df = DataFrame(np.arange(2 * 3).reshape(2, 3), columns=list("ABC"))
mask = np.array([[True, False, True], [False, True, True]])
# change type to category
df.A = df.A.astype("category")
df.B = df.B.astype("category")
df.C = df.C.astype("category")
result = df.A.where(mask[:, 0], **kwargs)
expected = Series(pd.Categorical([0, np.nan], categories=[0, 3]), name="A")
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [{100: "float64", 200: "uint64"}, "category", "float64"]
)
def test_astype_column_metadata(self, dtype):
# GH 19920
columns = pd.UInt64Index([100, 200, 300], name="foo")
df = DataFrame(np.arange(15).reshape(5, 3), columns=columns)
df = df.astype(dtype)
tm.assert_index_equal(df.columns, columns)
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_from_datetimelike_to_objectt(self, dtype, unit):
# tests astype to object dtype
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(object)
assert (result.dtypes == object).all()
if dtype.startswith("M8"):
assert result.iloc[0, 0] == pd.to_datetime(1, unit=unit)
else:
assert result.iloc[0, 0] == pd.to_timedelta(1, unit=unit)
@pytest.mark.parametrize("arr_dtype", [np.int64, np.float64])
@pytest.mark.parametrize("dtype", ["M8", "m8"])
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetimelike_unit(self, arr_dtype, dtype, unit):
# tests all units from numeric origination
# gh-19223 / gh-12425
dtype = "{}[{}]".format(dtype, unit)
arr = np.array([[1, 2, 3]], dtype=arr_dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_datetime_unit(self, unit):
# tests all units from datetime origination
# gh-19223
dtype = "M8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns"])
def test_astype_to_timedelta_unit_ns(self, unit):
# preserver the timedelta conversion
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(arr.astype(dtype))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["us", "ms", "s", "h", "m", "D"])
def test_astype_to_timedelta_unit(self, unit):
# coerce to float
# gh-19223
dtype = "m8[{}]".format(unit)
arr = np.array([[1, 2, 3]], dtype=dtype)
df = DataFrame(arr)
result = df.astype(dtype)
expected = DataFrame(df.values.astype(dtype).astype(float))
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s", "h", "m", "D"])
def test_astype_to_incorrect_datetimelike(self, unit):
# trying to astype a m to a M, or vice-versa
# gh-19224
dtype = "M8[{}]".format(unit)
other = "m8[{}]".format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=dtype))
msg = (
r"cannot astype a datetimelike from \[datetime64\[ns\]\] to"
r" \[timedelta64\[{}\]\]"
).format(unit)
with pytest.raises(TypeError, match=msg):
df.astype(other)
msg = (
r"cannot astype a timedelta from \[timedelta64\[ns\]\] to"
r" \[datetime64\[{}\]\]"
).format(unit)
df = DataFrame(np.array([[1, 2, 3]], dtype=other))
with pytest.raises(TypeError, match=msg):
df.astype(dtype)
def test_timedeltas(self):
df = DataFrame(
dict(
A=Series(date_range("2012-1-1", periods=3, freq="D")),
B=Series([timedelta(days=i) for i in range(3)]),
)
)
result = df.dtypes
expected = Series(
[np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")], index=list("AB")
)
tm.assert_series_equal(result, expected)
df["C"] = df["A"] + df["B"]
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
],
index=list("ABC"),
)
tm.assert_series_equal(result, expected)
# mixed int types
df["D"] = 1
result = df.dtypes
expected = Series(
[
np.dtype("datetime64[ns]"),
np.dtype("timedelta64[ns]"),
np.dtype("datetime64[ns]"),
np.dtype("int64"),
],
index=list("ABCD"),
)
tm.assert_series_equal(result, expected)
    def test_arg_for_errors_in_astype(self):
        """The ``errors`` argument of ``astype`` only accepts the strings
        "raise" and "ignore"; any other value raises ValueError."""
        # issue #14878
        df = DataFrame([1, 2, 3])

        # a non-string value (True) for ``errors`` is rejected up front
        with pytest.raises(ValueError):
            df.astype(np.float64, errors=True)

        # "ignore" is a valid value; the cast itself succeeds here so the
        # call simply returns without raising
        df.astype(np.int8, errors="ignore")
def test_arg_for_errors_in_astype_dictlist(self):
# GH-25905
df = pd.DataFrame(
[
{"a": "1", "b": "16.5%", "c": "test"},
{"a": "2.2", "b": "15.3", "c": "another_test"},
]
)
expected = pd.DataFrame(
[
{"a": 1.0, "b": "16.5%", "c": "test"},
{"a": 2.2, "b": "15.3", "c": "another_test"},
]
)
type_dict = {"a": "float64", "b": "float64", "c": "object"}
result = df.astype(dtype=type_dict, errors="ignore")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"input_vals",
[
([1, 2]),
(["1", "2"]),
(list(pd.date_range("1/1/2011", periods=2, freq="H"))),
(list(pd.date_range("1/1/2011", periods=2, freq="H", tz="US/Eastern"))),
([pd.Interval(left=0, right=5)]),
],
)
def test_constructor_list_str(self, input_vals, string_dtype):
# GH 16605
# Ensure that data elements are converted to strings when
# dtype is str, 'str', or 'U'
result = DataFrame({"A": input_vals}, dtype=string_dtype)
expected = DataFrame({"A": input_vals}).astype({"A": string_dtype})
tm.assert_frame_equal(result, expected)
def test_constructor_list_str_na(self, string_dtype):
result = DataFrame({"A": [1.0, 2.0, None]}, dtype=string_dtype)
expected = DataFrame({"A": ["1.0", "2.0", None]}, dtype=object)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "data, expected",
    [
        # empty frame counts as homogeneous
        (DataFrame(), True),
        # two identical numpy dtypes
        (DataFrame({"A": [1, 2], "B": [1, 2]}), True),
        # object columns are homogeneous regardless of contents
        (
            DataFrame(
                {
                    "A": np.array([1, 2], dtype=object),
                    "B": np.array(["a", "b"], dtype=object),
                }
            ),
            True,
        ),
        # two extension dtypes with equal categories
        (
            DataFrame(
                {"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["a", "b"])}
            ),
            True,
        ),
        # int vs float -> heterogeneous
        (DataFrame({"A": [1, 2], "B": [1.0, 2.0]}), False),
        # same kind, different itemsize -> heterogeneous
        (
            DataFrame(
                {
                    "A": np.array([1, 2], dtype=np.int32),
                    "B": np.array([1, 2], dtype=np.int64),
                }
            ),
            False,
        ),
        # categoricals with differing categories -> heterogeneous
        (
            DataFrame(
                {"A": pd.Categorical(["a", "b"]), "B": pd.Categorical(["b", "c"])}
            ),
            False,
        ),
    ],
)
def test_is_homogeneous_type(self, data, expected):
    """``_is_homogeneous_type`` reports whether all columns share one dtype."""
    observed = data._is_homogeneous_type
    # identity check: the property must return a real bool, not a truthy proxy
    assert observed is expected
def test_asarray_homogenous(self):
df = pd.DataFrame({"A": pd.Categorical([1, 2]), "B": pd.Categorical([1, 2])})
result = np.asarray(df)
# may change from object in the future
expected = np.array([[1, 1], [2, 2]], dtype="object")
tm.assert_numpy_array_equal(result, expected)
def test_str_to_small_float_conversion_type(self):
    """Tiny-float strings stay object on construction; assigning floats via
    ``.loc`` converts the column to float64 (GH 20388)."""
    np.random.seed(13)
    tiny_float_strings = [str(np.random.random() * 1e-12) for _ in range(5)]
    frame = pd.DataFrame(tiny_float_strings, columns=["A"])
    # strings are NOT silently parsed into floats at construction time
    tm.assert_frame_equal(
        frame, pd.DataFrame(tiny_float_strings, columns=["A"], dtype=object)
    )
    # change the dtype of the elements from object to float one by one
    frame.loc[frame.index, "A"] = [float(v) for v in tiny_float_strings]
    tm.assert_frame_equal(
        frame, pd.DataFrame(tiny_float_strings, columns=["A"], dtype=float)
    )
class TestDataFrameDatetimeWithTZ:
    """Tests for frames mixing tz-naive and tz-aware datetime columns.

    All three tests consume the ``timezone_frame`` fixture (defined
    elsewhere).  Per the expected values below, column "A" is tz-naive,
    "B" is US/Eastern and "C" is CET, with row 1 of "B" and "C" set to NaT.
    """

    def test_interleave(self, timezone_frame):
        """``.values`` on mixed-tz frames yields an object array of Timestamps."""
        # interleave with object: adding a string column forces everything
        # into one object ndarray
        result = timezone_frame.assign(D="foo").values
        # each inner list below is one *column*; the trailing .T flips the
        # array into row-major frame orientation
        expected = np.array(
            [
                [
                    Timestamp("2013-01-01 00:00:00"),
                    Timestamp("2013-01-02 00:00:00"),
                    Timestamp("2013-01-03 00:00:00"),
                ],
                [
                    Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
                    pd.NaT,
                    Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
                ],
                [
                    Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
                    pd.NaT,
                    Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
                ],
                ["foo", "foo", "foo"],
            ],
            dtype=object,
        ).T
        tm.assert_numpy_array_equal(result, expected)
        # interleave with only datetime64[ns]: even without the object
        # column, mixed tz-awareness still produces an object array
        result = timezone_frame.values
        expected = np.array(
            [
                [
                    Timestamp("2013-01-01 00:00:00"),
                    Timestamp("2013-01-02 00:00:00"),
                    Timestamp("2013-01-03 00:00:00"),
                ],
                [
                    Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
                    pd.NaT,
                    Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
                ],
                [
                    Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
                    pd.NaT,
                    Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
                ],
            ],
            dtype=object,
        ).T
        tm.assert_numpy_array_equal(result, expected)

    def test_astype(self, timezone_frame):
        """astype(object) keeps tz-aware Timestamps; astype datetime64 drops tz."""
        # astype(object): per-cell Timestamps, timezones preserved
        expected = np.array(
            [
                [
                    Timestamp("2013-01-01 00:00:00"),
                    Timestamp("2013-01-02 00:00:00"),
                    Timestamp("2013-01-03 00:00:00"),
                ],
                [
                    Timestamp("2013-01-01 00:00:00-0500", tz="US/Eastern"),
                    pd.NaT,
                    Timestamp("2013-01-03 00:00:00-0500", tz="US/Eastern"),
                ],
                [
                    Timestamp("2013-01-01 00:00:00+0100", tz="CET"),
                    pd.NaT,
                    Timestamp("2013-01-03 00:00:00+0100", tz="CET"),
                ],
            ],
            dtype=object,
        ).T
        expected = DataFrame(
            expected,
            index=timezone_frame.index,
            columns=timezone_frame.columns,
            dtype=object,
        )
        result = timezone_frame.astype(object)
        tm.assert_frame_equal(result, expected)
        # astype("datetime64[ns]"): tz-aware columns are converted to UTC
        # then made naive; NaT positions survive the round trip
        result = timezone_frame.astype("datetime64[ns]")
        expected = DataFrame(
            {
                "A": date_range("20130101", periods=3),
                "B": (
                    date_range("20130101", periods=3, tz="US/Eastern")
                    .tz_convert("UTC")
                    .tz_localize(None)
                ),
                "C": (
                    date_range("20130101", periods=3, tz="CET")
                    .tz_convert("UTC")
                    .tz_localize(None)
                ),
            }
        )
        expected.iloc[1, 1] = pd.NaT
        expected.iloc[1, 2] = pd.NaT
        tm.assert_frame_equal(result, expected)

    def test_astype_str(self, timezone_frame):
        """str conversion keeps the UTC offset in the formatted cell text."""
        # str formatting: astype(str) renders each cell, NaT included
        result = timezone_frame.astype(str)
        expected = DataFrame(
            [
                [
                    "2013-01-01",
                    "2013-01-01 00:00:00-05:00",
                    "2013-01-01 00:00:00+01:00",
                ],
                ["2013-01-02", "NaT", "NaT"],
                [
                    "2013-01-03",
                    "2013-01-03 00:00:00-05:00",
                    "2013-01-03 00:00:00+01:00",
                ],
            ],
            columns=timezone_frame.columns,
        )
        tm.assert_frame_equal(result, expected)
        # repr of the whole frame must show all columns (widen the display
        # limit so nothing is elided)
        with option_context("display.max_columns", 20):
            result = str(timezone_frame)
            assert (
                "0 2013-01-01 2013-01-01 00:00:00-05:00 2013-01-01 00:00:00+01:00"
            ) in result
            assert (
                "1 2013-01-02 NaT NaT"
            ) in result
            assert (
                "2 2013-01-03 2013-01-03 00:00:00-05:00 2013-01-03 00:00:00+01:00"
            ) in result
| 36.090244 | 88 | 0.516816 |
c259000b3f89c20345cd97d13c6a3eb2bd222fa3 | 4,723 | py | Python | absql/__init__.py | chriscardillo/ABSQL | d911a15daee46a2f4e867f09e85c582fa987e5ed | [
"MIT"
] | null | null | null | absql/__init__.py | chriscardillo/ABSQL | d911a15daee46a2f4e867f09e85c582fa987e5ed | [
"MIT"
] | null | null | null | absql/__init__.py | chriscardillo/ABSQL | d911a15daee46a2f4e867f09e85c582fa987e5ed | [
"MIT"
] | null | null | null | from inspect import cleandoc
from absql.files import parse, accepted_file_types
from absql.files.loader import generate_loader
from jinja2 import Template, DebugUndefined
from absql.functions import default_functions
from absql.text import (
clean_spacing,
create_replacements,
flatten_inputs,
pretty_encode_sql,
)
from absql.utils import nested_apply, get_function_arg_names, partialize_engine_func
class Runner:
    """Reusable renderer for ABSQL text and template files.

    Holds a rendering configuration (extra context, a YAML loader built from
    ``extra_constructors``, a replace-only flag and an optional frontmatter
    key) so that repeated calls to :meth:`render` share the same setup.
    """

    def __init__(
        self,
        extra_constructors=[],
        replace_only=False,
        file_context_from=None,
        **extra_context,
    ):
        """
        :param extra_constructors: passed to ``generate_loader`` to extend the
            file loader (mutable default is only read here, never mutated --
            but TODO confirm ``generate_loader`` does not mutate it either)
        :param replace_only: if True, render via plain string replacement
            instead of Jinja templating
        :param file_context_from: frontmatter key whose mapping is promoted
            into the top-level file context in :meth:`render_file`
        :param extra_context: default context applied to every render
        """
        # copy so later set_context() calls cannot mutate the caller's dict
        self.extra_context = dict(extra_context)
        self.loader = generate_loader(extra_constructors)
        self.replace_only = replace_only
        self.file_context_from = file_context_from

    @staticmethod
    def render_text(text, replace_only=False, pretty_encode=False, **vars):
        """
        Given some text, render the template with the vars.
        If a templated variable is unknown, leave it alone.

        :param text: raw SQL/template text
        :param replace_only: plain string replacement instead of Jinja
        :param pretty_encode: post-process the result with pretty_encode_sql
        :param vars: template context; may include an ``engine`` entry
        """
        # Allows an instantiated SQLAlchemy engine to be utilized
        # in any function with a engine argument, without the user needing
        # to specify the engine in the function call.
        engine = vars.get("engine", None)
        for k, v in vars.items():
            # duck-typed check: only plain functions (not bound methods or
            # partials, presumably) are eligible for engine injection
            if v.__class__.__name__ == "function":
                if "engine" in get_function_arg_names(v):
                    # rebinding an existing key while iterating is safe:
                    # the dict's size never changes
                    vars[k] = partialize_engine_func(v, engine=engine)
        if replace_only:
            # literal substitution path: no Jinja semantics at all
            text = clean_spacing(text)
            flat_vars = flatten_inputs(**vars)
            replacements = create_replacements(**flat_vars)
            for k, v in replacements.items():
                text = text.replace(k, str(v))
            text = cleandoc(text)
        else:
            # DebugUndefined leaves unknown {{ placeholders }} intact
            template = Template(text, undefined=DebugUndefined)
            text = cleandoc(template.render(**vars))
        if pretty_encode:
            return pretty_encode_sql(text)
        else:
            return text

    @staticmethod
    def render_context(extra_context=None, file_contents=None):
        """
        Render context dictionaries passed through a function call or
        file frontmatter (file_contents), with file_contents taking
        precedence over other all other provided context.

        :return: dict starting from the default functions, overlaid with
            ``extra_context`` then ``file_contents``, with every string
            value itself rendered against the combined context
        """
        rendered_context = default_functions.copy()
        if extra_context:
            rendered_context.update(**extra_context)
        if file_contents:
            rendered_context.update(**file_contents)
        # values may reference each other; nested_apply renders each leaf
        # string against the full context
        rendered_context = nested_apply(
            rendered_context,
            lambda x: Runner.render_text(x, **rendered_context),
        )
        return rendered_context

    @staticmethod
    def render_file(
        file_path,
        loader=None,
        replace_only=False,
        extra_constructors=[],
        file_context_from=None,
        pretty_encode=False,
        **extra_context,
    ):
        """
        Given a file path, render SQL with a combination of
        the vars in the file and any extras passed to extra_context.

        :param file_path: path to a template file parseable by ``parse``
        :param loader: pre-built loader; built from ``extra_constructors``
            when omitted
        :param file_context_from: frontmatter key whose mapping is promoted
            to the top level of the file context (then removed)
        """
        if loader is None:
            loader = generate_loader(extra_constructors)
        file_contents = parse(file_path, loader=loader)
        # the "sql" key is the template body; everything else is context
        sql = file_contents["sql"]
        file_contents.pop("sql")
        if file_context_from:
            file_contents.update(file_contents.get(file_context_from, {}))
            file_contents.pop(file_context_from, {})
        rendered_context = Runner.render_context(extra_context, file_contents)
        rendered = Runner.render_text(
            text=sql,
            replace_only=replace_only,
            pretty_encode=pretty_encode,
            **rendered_context,
        )
        return rendered

    def render(self, text, pretty_encode=False):
        """
        Given text or a file path, render SQL with the a combination of
        the vars in the file and any extras passed to extra_context during
        the instantiation of the runner.

        :param text: either inline template text or a path ending in one of
            ``accepted_file_types`` (dispatched on the suffix)
        """
        if text.endswith(accepted_file_types):
            rendered = self.render_file(
                file_path=text,
                loader=self.loader,
                replace_only=self.replace_only,
                file_context_from=self.file_context_from,
                pretty_encode=pretty_encode,
                **self.extra_context,
            )
        else:
            rendered = self.render_text(
                text=text,
                replace_only=self.replace_only,
                pretty_encode=pretty_encode,
                **self.render_context(self.extra_context),
            )
        return rendered

    def set_context(self, **context):
        """Merge additional default context into this runner (in place)."""
        self.extra_context.update(context)
| 33.735714 | 84 | 0.623333 |
d38413e2e1059b9b04eb4d96927356d3f0e4f82d | 6,408 | py | Python | pirates/effects/HellFire.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 3 | 2021-02-25T06:38:13.000Z | 2022-03-22T07:00:15.000Z | pirates/effects/HellFire.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | null | null | null | pirates/effects/HellFire.py | itsyaboyrocket/pirates | 6ca1e7d571c670b0d976f65e608235707b5737e3 | [
"BSD-3-Clause"
] | 1 | 2021-02-25T06:38:17.000Z | 2021-02-25T06:38:17.000Z | # uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.effects.HellFire
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.particles import ParticleEffect
from direct.particles import Particles
from direct.particles import ForceGroup
from PooledEffect import PooledEffect
from EffectController import EffectController
import random
class HellFire(PooledEffect, EffectController):
    """Pooled two-layer particle effect built from 'effectSoftGlow' and
    'particleFire' sprite cards.

    p0 emits glow sprites from a ring emitter; p1 emits spinning fire
    sprites from a sphere-surface emitter.  Instances are recycled through
    the PooledEffect pool via checkInEffect/cleanUpEffect.
    """
    __module__ = __name__
    # multiplier applied to all sprite X/Y scales below
    cardScale = 128.0

    def __init__(self):
        PooledEffect.__init__(self)
        EffectController.__init__(self)
        # sprite source textures ("loader" is the Panda3D global loader)
        model = loader.loadModel('models/effects/particleMaps')
        self.card = model.find('**/effectSoftGlow')
        self.card2 = model.find('**/particleFire')
        # NOTE(review): self.speed is set but not read anywhere in this
        # class -- possibly consumed by callers; confirm before removing.
        self.speed = 20.0
        self.setDepthWrite(0)
        self.setLightOff()
        # particle effect with two systems: p0 (glow ring) and p1 (fire)
        self.f = ParticleEffect.ParticleEffect('HellFire')
        self.f.reparentTo(self)
        self.p0 = Particles.Particles('particles-1')
        self.p0.setFactory('PointParticleFactory')
        self.p0.setRenderer('SpriteParticleRenderer')
        self.p0.setEmitter('RingEmitter')
        self.p1 = Particles.Particles('particles-2')
        self.p1.setFactory('ZSpinParticleFactory')
        self.p1.setRenderer('SpriteParticleRenderer')
        self.p1.setEmitter('SphereSurfaceEmitter')
        self.f.addParticles(self.p0)
        self.f.addParticles(self.p1)
        # ---- p0: soft-glow sprites radiating from a ring ----
        self.p0.setPoolSize(128)
        # birth rate is the interval between litters, in seconds
        self.p0.setBirthRate(0.03)
        self.p0.setLitterSize(16)
        self.p0.setLitterSpread(0)
        self.p0.setSystemLifespan(0.0)
        self.p0.setLocalVelocityFlag(1)
        self.p0.setSystemGrowsOlderFlag(0)
        self.p0.factory.setLifespanBase(1.5)
        self.p0.factory.setLifespanSpread(0.5)
        self.p0.factory.setMassBase(1.0)
        self.p0.factory.setMassSpread(0.0)
        self.p0.factory.setTerminalVelocityBase(400.0)
        self.p0.factory.setTerminalVelocitySpread(0.0)
        # fade in then out over the particle's life
        self.p0.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAINOUT)
        self.p0.renderer.setUserAlpha(1.0)
        self.p0.renderer.setFromNode(self.card)
        self.p0.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
        self.p0.renderer.setXScaleFlag(1)
        self.p0.renderer.setYScaleFlag(1)
        self.p0.renderer.setAnimAngleFlag(0)
        # sprites grow from 0.015 to 0.03 of cardScale as they age
        self.p0.renderer.setInitialXScale(0.015 * self.cardScale)
        self.p0.renderer.setFinalXScale(0.03 * self.cardScale)
        self.p0.renderer.setInitialYScale(0.015 * self.cardScale)
        self.p0.renderer.setFinalYScale(0.03 * self.cardScale)
        self.p0.renderer.setNonanimatedTheta(0.0)
        self.p0.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPBLENDLINEAR)
        self.p0.renderer.setAlphaDisable(0)
        # additive-style blend for the glow
        self.p0.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OIncomingColor, ColorBlendAttrib.OOneMinusIncomingAlpha)
        # color ramps from orange to transparent black over the lifespan
        self.p0.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, Vec4(1.0, 0.35, 0.0, 1.0), Vec4(0.0, 0.0, 0.0, 0.0), 1)
        self.p0.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        self.p0.emitter.setAmplitude(10.0)
        self.p0.emitter.setAmplitudeSpread(0.0)
        # slight constant downward force on the glow particles
        self.p0.emitter.setOffsetForce(Vec3(0.0, 0.0, -4.0))
        self.p0.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
        self.p0.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
        self.p0.emitter.setRadius(0.5)
        # ---- p1: spinning fire sprites radiating from a sphere surface ----
        self.p1.setPoolSize(128)
        self.p1.setBirthRate(0.2)
        self.p1.setLitterSize(1)
        self.p1.setLitterSpread(0)
        self.p1.setSystemLifespan(0.0)
        self.p1.setLocalVelocityFlag(1)
        self.p1.setSystemGrowsOlderFlag(0)
        self.p1.factory.setLifespanBase(1.2)
        self.p1.factory.setLifespanSpread(0.5)
        self.p1.factory.setMassBase(1.0)
        self.p1.factory.setMassSpread(0.2)
        self.p1.factory.setTerminalVelocityBase(400.0)
        self.p1.factory.setTerminalVelocitySpread(0.0)
        # ZSpin factory: randomized starting angle, no angular velocity
        self.p1.factory.setInitialAngle(0.0)
        self.p1.factory.setInitialAngleSpread(20.0)
        self.p1.factory.enableAngularVelocity(1)
        self.p1.factory.setAngularVelocity(0.0)
        self.p1.factory.setAngularVelocitySpread(0.0)
        self.p1.renderer.setAlphaMode(BaseParticleRenderer.PRALPHAOUT)
        self.p1.renderer.setUserAlpha(1.0)
        self.p1.renderer.setFromNode(self.card2)
        self.p1.renderer.setColor(Vec4(1.0, 1.0, 1.0, 1.0))
        self.p1.renderer.setXScaleFlag(1)
        self.p1.renderer.setYScaleFlag(1)
        self.p1.renderer.setAnimAngleFlag(1)
        # fire sprites shrink from 0.02 to 0.001 of cardScale as they age
        self.p1.renderer.setInitialXScale(0.02 * self.cardScale)
        self.p1.renderer.setInitialYScale(0.02 * self.cardScale)
        self.p1.renderer.setFinalXScale(0.001 * self.cardScale)
        self.p1.renderer.setFinalYScale(0.001 * self.cardScale)
        self.p1.renderer.setNonanimatedTheta(0.0)
        self.p1.renderer.setAlphaBlendMethod(BaseParticleRenderer.PPNOBLEND)
        self.p1.renderer.setAlphaDisable(0)
        self.p1.renderer.setColorBlendMode(ColorBlendAttrib.MAdd, ColorBlendAttrib.OOneMinusFbufferAlpha, ColorBlendAttrib.OOneMinusIncomingAlpha)
        # white-to-black ramp over the lifespan
        self.p1.renderer.getColorInterpolationManager().addLinear(0.0, 1.0, Vec4(1.0, 1.0, 1.0, 1.0), Vec4(0, 0, 0, 1.0), 1)
        self.p1.emitter.setEmissionType(BaseParticleEmitter.ETRADIATE)
        self.p1.emitter.setAmplitude(1.5)
        self.p1.emitter.setAmplitudeSpread(0.5)
        # upward force carries the fire sprites
        self.p1.emitter.setOffsetForce(Vec3(0.0, 0.0, 10.0))
        self.p1.emitter.setExplicitLaunchVector(Vec3(1.0, 0.0, 0.0))
        self.p1.emitter.setRadiateOrigin(Point3(0.0, 0.0, 0.0))
        self.p1.emitter.setRadius(0.5)

    def createTrack(self):
        """Build the start/stop interval: burst for 0.5s, then wind down.

        Raising the birth rate to 100s effectively halts new particle
        emission; the 5-second Wait lets live particles expire before the
        effect is returned to the pool.
        """
        self.startEffect = Sequence(Func(self.p0.setBirthRate, 0.01), Func(self.p0.clearToInitial), Func(self.p1.setBirthRate, 0.01), Func(self.p1.clearToInitial), Func(self.f.start, self, self))
        self.endEffect = Sequence(Func(self.p0.setBirthRate, 100), Func(self.p1.setBirthRate, 100), Wait(5.0), Func(self.cleanUpEffect))
        self.track = Sequence(self.startEffect, Wait(0.5), self.endEffect)

    def cleanUpEffect(self):
        """Stop the effect and return this instance to the effect pool."""
        EffectController.cleanUpEffect(self)
        self.checkInEffect(self)

    def destroy(self):
        """Permanently tear down (as opposed to pooled cleanup)."""
        EffectController.destroy(self)
        PooledEffect.destroy(self)
7c5ba6d4dbeddae2a70b083315ef63989c99b818 | 2,121 | py | Python | logprog_ctl.py | gbhering/log-prog-2019 | f9605aa44b7a46789aac0fac7a94f4fb52d36bf8 | [
"Unlicense"
] | null | null | null | logprog_ctl.py | gbhering/log-prog-2019 | f9605aa44b7a46789aac0fac7a94f4fb52d36bf8 | [
"Unlicense"
] | 3 | 2019-03-19T17:16:43.000Z | 2019-03-21T20:48:32.000Z | logprog_ctl.py | gbhering/log-prog-2019 | f9605aa44b7a46789aac0fac7a94f4fb52d36bf8 | [
"Unlicense"
] | null | null | null | from collections import defaultdict, deque
from logprog_scc import find_sccs
class Kripke:
    """CTL model checker over a Kripke structure K = <S(tates), R(elations), L(abels)>.

    ``S`` is the set of states, ``R`` the transition relation as a set of
    ``(from, to)`` pairs, and ``L`` maps each state to the set of formula
    strings known to hold there.  ``check`` labels states bottom-up for the
    operators NOT, EG, IMP, EX and EU.  Formulas are pre-tokenized lists
    such as ``['p', 'EU', 'q']``; labels are stored as joined strings
    (e.g. ``'p EU q'``).

    Fixes over the previous revision: the ``check*`` helpers were missing
    their ``self`` parameter (so ``k.check(...)`` raised TypeError),
    ``checkNOT``/``checkIMP`` referenced a bare, undefined ``L``, and
    ``checkEG`` called the module-level ``find_sccs`` as if it were a method.
    """

    def __init__(self, S=(), R=(), L=None):
        # Copy all inputs so the structure owns its mutable data (and to
        # avoid the shared-mutable-default pitfall of ``S=set()``).
        self.S = set(S)
        self.R = set(R)
        self.L = defaultdict(set, L or {})

    def checkNOT(self, phi):
        """Label 'NOT phi' on every state whose labels do not contain phi."""
        phi = ' '.join(phi)
        for s in self.S:
            if phi not in self.L[s]:
                self.L[s].add('NOT ' + phi)

    def checkIMP(self, phi1, phi2):
        """Label 'phi1 IMP phi2' wherever phi1 fails to hold or phi2 holds."""
        phi1, phi2 = ' '.join(phi1), ' '.join(phi2)
        for s in self.S:
            if phi1 not in self.L[s] or phi2 in self.L[s]:
                self.L[s].add(phi1 + ' IMP ' + phi2)

    def checkEX(self, phi):
        """Label 'EX phi' on every predecessor of a state satisfying phi."""
        phi = ' '.join(phi)
        for s in [s for s in self.S if phi in self.L[s]]:
            for t in [t for t in self.S if (t, s) in self.R]:
                self.L[t].add('EX ' + phi)

    def checkEU(self, phi1, phi2):
        """Label 'phi1 EU phi2' by backward propagation from phi2-states."""
        phi1, phi2 = ' '.join(phi1), ' '.join(phi2)
        label = phi1 + ' EU ' + phi2
        # seed: every state already satisfying phi2 satisfies phi1 EU phi2
        T = [s for s in self.S if phi2 in self.L[s]]
        for s in T:
            self.L[s].add(label)
        # propagate backwards along R through states satisfying phi1
        while T:
            s = T.pop()
            for t in [t for t in self.S if (t, s) in self.R]:
                if label not in self.L[t] and phi1 in self.L[t]:
                    self.L[t].add(label)
                    T.append(t)

    def checkEG(self, phi):
        """Label 'EG phi' via nontrivial SCCs touching phi-states, then
        propagate backwards through phi-states."""
        phi = ' '.join(phi)
        Sphi = {s for s in self.S if phi in self.L[s]}
        # module-level import (previously mis-called as self.find_sccs)
        SCC = [C for C in find_sccs(self.S, self.R)
               if len(C) > 1 and not Sphi.isdisjoint(C)]
        T = [s for component in SCC for s in component]
        for s in T:
            self.L[s].add('EG ' + phi)
        while T:
            s = T.pop()
            for t in [t for t in Sphi if (t, s) in self.R]:
                if 'EG ' + phi not in self.L[t]:
                    self.L[t].add('EG ' + phi)
                    T.append(t)

    def check(self, phi):
        """Recursively label the structure for *phi* (a list of tokens).

        Single-token formulas are atomic propositions and are assumed to be
        pre-labelled in ``L``.  The dispatch order (NOT, EG, IMP, EX, EU)
        is preserved from the original and doubles as a crude operator
        precedence for the flat token list.
        """
        if len(phi) == 1:
            return
        if 'NOT' == phi[0]:
            self.check(phi[1:])
            self.checkNOT(phi[1:])
        elif 'EG' == phi[0]:
            self.check(phi[1:])
            self.checkEG(phi[1:])
        elif 'IMP' in phi:
            i = phi.index('IMP')
            self.check(phi[:i])
            self.check(phi[i + 1:])
            self.checkIMP(phi[:i], phi[i + 1:])
        elif 'EX' == phi[0]:
            self.check(phi[1:])
            self.checkEX(phi[1:])
        elif 'EU' in phi:
            i = phi.index('EU')
            self.check(phi[:i])
            self.check(phi[i + 1:])
            self.checkEU(phi[:i], phi[i + 1:])
a35a551d751a52052da347d168aa6cdf0256e8d3 | 4,889 | py | Python | step3/src/server/shakesapp_pb2.py | rick-c-goog/opentelemetry-trace-codelab-python | 0ef58f2dd35363bc27c604b8ca861cd15ca108a9 | [
"Apache-2.0"
] | 3 | 2021-06-24T23:05:19.000Z | 2022-02-24T16:02:29.000Z | step3/src/server/shakesapp_pb2.py | rick-c-goog/opentelemetry-trace-codelab-python | 0ef58f2dd35363bc27c604b8ca861cd15ca108a9 | [
"Apache-2.0"
] | null | null | null | step3/src/server/shakesapp_pb2.py | rick-c-goog/opentelemetry-trace-codelab-python | 0ef58f2dd35363bc27c604b8ca861cd15ca108a9 | [
"Apache-2.0"
] | 4 | 2021-07-21T15:03:52.000Z | 2021-12-25T17:32:05.000Z | # -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: shakesapp.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
# ---------------------------------------------------------------------------
# Machine-generated protobuf descriptors (see the "DO NOT EDIT" header above).
# Any change here will be lost on regeneration -- edit shakesapp.proto and
# rerun protoc instead.  ``serialized_pb`` is the compiled
# FileDescriptorProto for the whole .proto file.
# ---------------------------------------------------------------------------
DESCRIPTOR = _descriptor.FileDescriptor(
  name='shakesapp.proto',
  package='shakesapp',
  syntax='proto3',
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_pb=b'\n\x0fshakesapp.proto\x12\tshakesapp\"*\n\x13ShakespeareResponse\x12\x13\n\x0bmatch_count\x18\x01 \x01(\x03\"#\n\x12ShakespeareRequest\x12\r\n\x05query\x18\x01 \x01(\t2f\n\x12ShakespeareService\x12P\n\rGetMatchCount\x12\x1d.shakesapp.ShakespeareRequest\x1a\x1e.shakesapp.ShakespeareResponse\"\x00\x62\x06proto3'
)


# Descriptor for message ShakespeareResponse (single int64 field match_count).
_SHAKESPEARERESPONSE = _descriptor.Descriptor(
  name='ShakespeareResponse',
  full_name='shakesapp.ShakespeareResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='match_count', full_name='shakesapp.ShakespeareResponse.match_count', index=0,
      number=1, type=3, cpp_type=2, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=30,
  serialized_end=72,
)


# Descriptor for message ShakespeareRequest (single string field query).
_SHAKESPEAREREQUEST = _descriptor.Descriptor(
  name='ShakespeareRequest',
  full_name='shakesapp.ShakespeareRequest',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  create_key=_descriptor._internal_create_key,
  fields=[
    _descriptor.FieldDescriptor(
      name='query', full_name='shakesapp.ShakespeareRequest.query', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=b"".decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  serialized_options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=74,
  serialized_end=109,
)

# Register the message descriptors with the file descriptor / symbol database.
DESCRIPTOR.message_types_by_name['ShakespeareResponse'] = _SHAKESPEARERESPONSE
DESCRIPTOR.message_types_by_name['ShakespeareRequest'] = _SHAKESPEAREREQUEST
_sym_db.RegisterFileDescriptor(DESCRIPTOR)

# Concrete message classes built from the descriptors above.
ShakespeareResponse = _reflection.GeneratedProtocolMessageType('ShakespeareResponse', (_message.Message,), {
  'DESCRIPTOR' : _SHAKESPEARERESPONSE,
  '__module__' : 'shakesapp_pb2'
  # @@protoc_insertion_point(class_scope:shakesapp.ShakespeareResponse)
  })
_sym_db.RegisterMessage(ShakespeareResponse)

ShakespeareRequest = _reflection.GeneratedProtocolMessageType('ShakespeareRequest', (_message.Message,), {
  'DESCRIPTOR' : _SHAKESPEAREREQUEST,
  '__module__' : 'shakesapp_pb2'
  # @@protoc_insertion_point(class_scope:shakesapp.ShakespeareRequest)
  })
_sym_db.RegisterMessage(ShakespeareRequest)


# Service descriptor: ShakespeareService with a single unary RPC
# GetMatchCount(ShakespeareRequest) -> ShakespeareResponse.
_SHAKESPEARESERVICE = _descriptor.ServiceDescriptor(
  name='ShakespeareService',
  full_name='shakesapp.ShakespeareService',
  file=DESCRIPTOR,
  index=0,
  serialized_options=None,
  create_key=_descriptor._internal_create_key,
  serialized_start=111,
  serialized_end=213,
  methods=[
  _descriptor.MethodDescriptor(
    name='GetMatchCount',
    full_name='shakesapp.ShakespeareService.GetMatchCount',
    index=0,
    containing_service=None,
    input_type=_SHAKESPEAREREQUEST,
    output_type=_SHAKESPEARERESPONSE,
    serialized_options=None,
    create_key=_descriptor._internal_create_key,
  ),
])
_sym_db.RegisterServiceDescriptor(_SHAKESPEARESERVICE)

DESCRIPTOR.services_by_name['ShakespeareService'] = _SHAKESPEARESERVICE

# @@protoc_insertion_point(module_scope)
| 32.164474 | 329 | 0.776028 |
7a7585083380526fe4616adc5fd4c9fafb14b529 | 1,237 | py | Python | r3/app/utils.py | timgates42/r3 | cc6b4eb55c7ae30a8f75af2be165504565dbeb79 | [
"Unlicense",
"MIT"
] | 49 | 2015-01-06T19:10:41.000Z | 2021-08-01T03:39:39.000Z | r3/app/utils.py | timgates42/r3 | cc6b4eb55c7ae30a8f75af2be165504565dbeb79 | [
"Unlicense",
"MIT"
] | 3 | 2015-03-30T12:37:00.000Z | 2021-06-09T20:31:54.000Z | r3/app/utils.py | timgates42/r3 | cc6b4eb55c7ae30a8f75af2be165504565dbeb79 | [
"Unlicense",
"MIT"
] | 12 | 2015-02-27T13:51:09.000Z | 2021-06-09T20:30:59.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
import logging
from datetime import datetime
# Timestamp format used for mapper ping values stored in redis.
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
# Seconds without a ping before a mapper is considered dead.
TIMEOUT = 15


def real_import(name):
    """Import a (possibly dotted) module path and return the leaf module.

    ``__import__('a.b')`` returns the top package ``a``; the reduce walks
    the remaining attributes down to ``a.b``.
    """
    from functools import reduce  # builtin on Py2, functools on Py3; 2.6+ compatible
    if '.' in name:
        return reduce(getattr, name.split('.')[1:], __import__(name))
    return __import__(name)


logger = logging.getLogger('R3ServiceApp')


def flush_dead_mappers(redis, mappers_key, ping_key):
    """Remove mappers that have not pinged within TIMEOUT seconds.

    :param redis: redis-like client (needs smembers/get/srem/delete)
    :param mappers_key: key of the set holding registered mapper names
    :param ping_key: format string (e.g. 'ping:%s') for each mapper's
        last-ping timestamp, stored as DATETIME_FORMAT text
    """
    now = datetime.now()  # hoisted: one clock read for the whole sweep
    for mapper in redis.smembers(mappers_key):
        last_ping = redis.get(ping_key % mapper)
        if not last_ping:
            # no ping recorded (or key expired): leave the mapper alone
            continue
        last_ping = datetime.strptime(last_ping, DATETIME_FORMAT)
        # total_seconds(): the old ``.seconds`` attribute wraps at 24h, so a
        # mapper dead for a bit over a whole day could look freshly pinged
        if (now - last_ping).total_seconds() > TIMEOUT:
            # use the module logger (was the root logger) with lazy %-args
            logger.warning(
                'MAPPER %s found to be inactive after %d seconds of not pinging back',
                mapper, TIMEOUT)
            redis.srem(mappers_key, mapper)
            redis.delete(ping_key % mapper)


def kls_import(fullname):
    """Import 'pkg.module.Class' and return the class (attribute) object.

    A bare name with no dots is treated as a plain module import.
    """
    from functools import reduce  # 2.6+/3.x compatible
    if '.' not in fullname:
        return __import__(fullname)
    name_parts = fullname.split('.')
    klass_name = name_parts[-1]
    module_parts = name_parts[:-1]
    # walk from the top package down to the containing module
    module = reduce(getattr, module_parts[1:], __import__('.'.join(module_parts)))
    return getattr(module, klass_name)
| 29.452381 | 122 | 0.643492 |
ed13148af7d21352997b0a2c8b8dedbd5edd4fb6 | 10,742 | py | Python | svg_ultralight/query.py | ShayHill/svg_ultralight | 27e42350bd5304dabb0dd65e991e9cd3f8f1a3ae | [
"MIT"
] | 1 | 2019-10-26T01:49:58.000Z | 2019-10-26T01:49:58.000Z | svg_ultralight/query.py | ShayHill/svg_writer | 27e42350bd5304dabb0dd65e991e9cd3f8f1a3ae | [
"MIT"
] | 1 | 2020-01-29T13:15:58.000Z | 2020-02-03T14:30:13.000Z | svg_ultralight/query.py | ShayHill/svg_ultralight | 27e42350bd5304dabb0dd65e991e9cd3f8f1a3ae | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# _*_ coding: utf-8 _*_
""" Query an SVG file for bounding boxes
:author: Shay Hill
:created: 7/25/2020
Bounding boxes are generated with a command-line call to Inkscape, so an Inkscape
installation is required for this to work. The bounding boxes are returned as
BoundingBox instances, which are a big help with aligning objects (e.g., text on a
business card). Getting bounding boxes from Inkscape is not exceptionally fast.
"""
from __future__ import annotations
import os
import re
from dataclasses import dataclass
from subprocess import PIPE, Popen
from tempfile import NamedTemporaryFile
from typing import Dict
from lxml import etree # type: ignore
from svg_ultralight import write_svg
from .constructors import deepcopy_element
from .strings import format_number
from .svg_ultralight import new_svg_root
class BoundingBox:
    """
    Mutable bounding box object for svg_ultralight.

    Functions that return a bounding box return a BoundingBox instance,
    which can be transformed (uniform scale and translate only).  The
    transformations are accumulated and exposed as an svg ``transform``
    value through ``transform_string``, so the workflow is:

    1. get the bounding box of an svg element;
    2. update x, y, width, height (or x2 / y2) to move / resize it;
    3. apply ``update_element(elem, transform=bbox.transform_string)``.

    The transformed element will then lie inside the transformed box, which
    makes stacking and aligning elements straightforward (set one box's
    ``y2`` to another's ``y``, match widths, etc.).
    """

    # NOTE: deliberately a plain class.  The previous ``@dataclass``
    # decorator declared no fields, so the generated ``__eq__`` compared
    # empty field tuples and reported *every* pair of BoundingBox instances
    # as equal (and ``__repr__`` showed no state).  Default identity
    # comparison plus the explicit ``__repr__`` below is the sane behavior.

    def __init__(self, x: float, y: float, width: float, height: float) -> None:
        """
        Store the untransformed geometry and start from an identity transform.

        :param x: left edge
        :param y: top edge
        :param width: box width
        :param height: box height
        """
        # untransformed position and size
        self._x = x
        self._y = y
        self._width = width
        self._height = height
        # accumulated transformation (uniform scale plus translation)
        self._scale: float = 1
        self._translation_x: float = 0
        self._translation_y: float = 0

    def __repr__(self) -> str:
        """Debug representation showing the *transformed* geometry."""
        return (
            f"{type(self).__name__}(x={self.x}, y={self.y}, "
            f"width={self.width}, height={self.height})"
        )

    @property
    def scale(self) -> float:
        """
        Read-only scale.

        Publicly visible because it is convenient to fit one (usually text)
        element somewhere then scale other elements to the same size, e.g.
        ``elem_b.width = elem_b.width * elem_a.scale / elem_b.scale``.
        Read-only because writing it would raise too many questions of
        intuition (would the scaled element stay anchored at x and y?).
        """
        return self._scale

    @property
    def x(self) -> float:
        """x (left) value of the transformed bounding box."""
        return (self._translation_x + self._x) * self._scale

    @x.setter
    def x(self, x) -> None:
        """Translate so the left edge lands at *x* (``_x`` is untouched)."""
        self._add_transform(1, x - self.x, 0)

    @property
    def y(self) -> float:
        """y (top) value of the transformed bounding box."""
        return (self._translation_y + self._y) * self._scale

    @y.setter
    def y(self, y) -> None:
        """Translate so the top edge lands at *y* (``_y`` is untouched)."""
        self._add_transform(1, 0, y - self.y)

    @property
    def x2(self) -> float:
        """x (right) value of the transformed bounding box."""
        return self.x + self.width

    @x2.setter
    def x2(self, x2) -> None:
        """Translate so the right edge lands at *x2*."""
        self.x = x2 - self.width

    @property
    def y2(self) -> float:
        """y (bottom) value of the transformed bounding box."""
        return self.y + self.height

    @y2.setter
    def y2(self, y2) -> None:
        """Translate so the bottom edge lands at *y2*."""
        self.y = y2 - self.height

    @property
    def width(self) -> float:
        """Width of the transformed bounding box."""
        return self._width * self._scale

    @width.setter
    def width(self, width: float) -> None:
        """
        Scale the box to the requested width, keeping it anchored at the
        current (transformed) x and y. ``_width`` itself is untouched.
        """
        current_x = self.x
        current_y = self.y
        self._scale *= width / self.width
        # re-anchor: scaling moved the box, so translate it back
        self.x = current_x
        self.y = current_y

    @property
    def height(self) -> float:
        """Height of the transformed bounding box."""
        return self._height * self._scale

    @height.setter
    def height(self, height: float) -> None:
        """
        Scale the box to the requested height (uniformly, via the width
        setter) keeping it anchored at the current x and y.
        """
        self.width = height * self.width / self.height

    def _asdict(self):
        """
        Transformed geometry as a dict, for passing into a rect element or
        another bbox.  Named with an underscore to mirror namedtuple's
        ``_asdict`` convention.
        """
        return {x: getattr(self, x) for x in ("x", "y", "width", "height")}

    def _add_transform(self, scale: float, translation_x: float, translation_y: float):
        """Fold another scale/translate step into the accumulated transform.

        Translations are divided by the current scale because the stored
        translation is applied *before* the scale (see the x/y getters).
        """
        self._translation_x += translation_x / self._scale
        self._translation_y += translation_y / self._scale
        self._scale *= scale

    @property
    def transform_string(self) -> str:
        """
        Transformation value for an svg ``transform`` attribute.

        Use with ``update_element(elem, transform=bbox.transform_string)``.
        """
        transformation_values = (
            format_number(x)
            for x in (self._scale, self._translation_x, self._translation_y)
        )
        return "scale({}) translate({} {})".format(*transformation_values)

    def merge(self, *others: "BoundingBox") -> "BoundingBox":
        """
        Create a bounding box around self and all other bounding boxes.

        :param others: one or more bounding boxes to merge with self
        :return: a new BoundingBox covering every input box
        """
        bboxes = (self,) + others
        min_x = min(x.x for x in bboxes)
        max_x = max(x.x + x.width for x in bboxes)
        min_y = min(x.y for x in bboxes)
        max_y = max(x.y + x.height for x in bboxes)
        return BoundingBox(min_x, min_y, max_x - min_x, max_y - min_y)
def map_ids_to_bounding_boxes(
    inkscape: str,
    xml: etree.Element,
) -> Dict[str, BoundingBox]:
    # noinspection SpellCheckingInspection
    """
    Query an svg file for bounding-box dimensions

    :param inkscape: path to an inkscape executable on your local file system
        IMPORTANT: path cannot end with ``.exe``.
        Use something like ``"C:\\Program Files\\Inkscape\\inkscape"``
    :param xml: xml element (written to a temporary file then queried)
    :return: svg element ids (and a bounding box for the entire svg file as
        ``svg``) mapped to (x, y, width, height)

    Bounding boxes are relative to svg viewbox. If viewbox x == -10,
    all bounding-box x values will be offset -10.

    The ``inkscape --query-all svg`` call will return a tuple:

    (b'svg1,x,y,width,height\\r\\elem1,x,y,width,height\\r\\n', None)

    where x, y, width, and height are strings of numbers.
    This calls the command and formats the output into a dictionary.

    dpu_ arguments to new_svg_root transform the bounding boxes in non-useful
    ways. This copies all elements except the root element in to a (0, 0, 1, 1)
    root. This will put the boxes where you'd expect them to be, no matter what
    root you use.
    """
    # re-root the elements under a neutral (0, 0, 1, 1) viewbox so the query
    # results are not distorted by the original root's dpu_ settings
    xml_prime = new_svg_root(0, 0, 1, 1)
    xml_prime.extend((deepcopy_element(x) for x in xml))
    # delete=False because inkscape reads the file after the handle is closed;
    # the file is removed manually below with os.unlink
    with NamedTemporaryFile(mode="wb", delete=False, suffix=".svg") as svg_file:
        svg = write_svg(svg_file, xml_prime)
    # pass the command as an argument list (not a pre-quoted shell string):
    # this is portable beyond Windows and copes with spaces in the inkscape
    # path without manual quoting
    bb_process = Popen([str(inkscape), "--query-all", str(svg)], stdout=PIPE)
    # str() of the raw bytes keeps the repr escapes (b'...'); [2:-1] strips
    # the b' prefix and trailing quote, so the split pattern below matches the
    # *literal* two-character sequences \r and \n left in the text
    bb_data = str(bb_process.communicate()[0])[2:-1]
    bb_strings = re.split(r"[\\r]*\\n", bb_data)[:-1]
    os.unlink(svg_file.name)
    id2bbox = {}
    for id_, *bounds in (x.split(",") for x in bb_strings):
        id2bbox[id_] = BoundingBox(*(float(x) for x in bounds))
    return id2bbox
def get_bounding_box(inkscape: str, elem: etree.Element) -> BoundingBox:
    """
    Get bounding box around a single element.

    :param inkscape: path to an inkscape executable on your local file system
        IMPORTANT: path cannot end with ``.exe``.
        Use something like ``"C:\\Program Files\\Inkscape\\inkscape"``
    :param elem: xml element
    :return: a BoundingBox instance around elem.

    This will work most of the time, but if you're missing an nsmap, you'll
    need to create an entire xml file with a custom nsmap (using
    `svg_ultralight.new_svg_root`) then call `map_ids_to_bounding_boxes`
    directly.
    """
    scratch_root = new_svg_root(0, 0, 1, 1)
    scratch_root.append(deepcopy_element(elem))
    id2bbox = map_ids_to_bounding_boxes(inkscape, xml=scratch_root)
    # entry 0 is the bounding box of the whole svg document; entry 1 is elem's
    return list(id2bbox.values())[1]
| 33.154321 | 87 | 0.643456 |
c9fa3468016a0958a637b28de68a93eb5f827c96 | 15,552 | py | Python | src/aoiktracecall/plugin/printing_plugin.py | AoiKuiyuyou/AoikTraceCall | 8c09b69743fa4a29f83d9ed83290c33c928f5411 | [
"MIT"
] | 8 | 2016-09-25T08:28:23.000Z | 2018-11-30T10:07:09.000Z | src/aoiktracecall/plugin/printing_plugin.py | AoiKuiyuyou/AoikTraceCall | 8c09b69743fa4a29f83d9ed83290c33c928f5411 | [
"MIT"
] | null | null | null | src/aoiktracecall/plugin/printing_plugin.py | AoiKuiyuyou/AoikTraceCall | 8c09b69743fa4a29f83d9ed83290c33c928f5411 | [
"MIT"
] | null | null | null | # coding: utf-8
from __future__ import absolute_import
# Standard imports
from inspect import isclass
from pprint import pformat
from traceback import format_exc
# Internal imports
from aoiktracecall.config import get_config
from aoiktracecall.logging import print_error
from aoiktracecall.logging import print_info
from aoiktracecall.spec import find_matched_spec_info
from aoiktracecall.state import count_get
from aoiktracecall.state import get_simple_thread_id
from aoiktracecall.util import format_func_args
from aoiktracecall.util import format_func_name
from aoiktracecall.util import indent_by_level
from aoiktracecall.util import to_uri
from aoiktracecall.wrap import get_wrapped_obj
from aoiktracecall.wrap import STATICMETHOD_TYPE
# Local imports
from ..aoikinspectargs import format_inspect_info
from ..aoikinspectargs import inspect_arguments
def _repr_safe(obj, default='<?>'):
try:
#
if hasattr(obj, '__repr__'):
text = repr(obj)
else:
text = str(obj)
#
return text
except Exception as e:
#
error_msg = (
"# Warning: Failed getting argument's repr text:\n"
'---\n{}---\n'
).format(format_exc())
#
print_error(error_msg)
return default
# Key under which the highlight configuration is stored in a trace info dict.
INFO_K_HIGHLIGHT = 'highlight'
def printing_filter(info, parsed_specs):
    """Attach highlight configuration to *info* when a spec matches it.

    Returns *info* unchanged when no spec matches; otherwise stores the
    highlight dict (or None) under the INFO_K_HIGHLIGHT key and returns it.
    """
    spec_info = find_matched_spec_info(info=info, parsed_specs=parsed_specs)
    if spec_info is None:
        return info
    spec_arg = spec_info['spec_arg']
    if isinstance(spec_arg, list):
        # a list enables highlighting only when it names the highlight flag
        highlight_info = {'enabled': True} if INFO_K_HIGHLIGHT in spec_arg else None
    elif isinstance(spec_arg, dict):
        # a dict is taken as the highlight configuration itself
        highlight_info = spec_arg.copy()
    else:
        raise ValueError(spec_arg)
    info[INFO_K_HIGHLIGHT] = highlight_info
    return info
#
def printing_handler(info, filter_func=None):
    """Trace hook that formats and prints one pre-call or post-call event.

    :param info: dict describing the traced call: hook type ('pre_call' or
        'post_call'), module/class/function, arguments, nesting level, call
        count, uri, etc.
    :param filter_func: optional callable that receives the info dict (with
        the arguments inspect info attached under 'arguments_inspect_info');
        it may modify the dict or return None to suppress printing.
    :return: the (possibly filter-modified) info dict, or None when printing
        was suppressed by the filter.
    """
    trace_hook_type = info['trace_hook_type']
    info_type = info['info_type']
    level = info['level']
    count = info['count']
    module = info['module']
    cls = info['class']
    func = info['func']
    args = info['args']
    kwargs = info['kwargs']
    onwrap_uri = info['onwrap_uri']
    attr_name = info['attr_name']
    # `self` argument value, filled in below when inspection finds one
    self_arg_value = None
    # Inspect function arguments; failure is tolerated — the fallback
    # formatting branch below handles `inspect_info is None`.
    try:
        inspect_info = inspect_arguments(
            func=func,
            args=args,
            kwargs=kwargs,
        )
    except Exception:
        inspect_info = None
    args_inspect_info_debug_msg = None
    # If need debug arguments inspect info
    if get_config('PRINTING_HANDLER_DEBUG_ARGS_INSPECT_INFO'):
        # If hook type is `pre_call`
        if trace_hook_type == 'pre_call':
            # Get message
            args_inspect_info_debug_msg = \
                '# PRINTING_HANDLER_DEBUG_ARGS_INSPECT_INFO:\n{}'.format(
                    pformat(inspect_info, indent=4, width=1)
                )
    # Caller may supply a custom repr function in the info dict
    repr_func = info.pop('repr_func', _repr_safe)
    if inspect_info is None:
        # Fallback path: argument inspection failed.
        # If have filter function
        if filter_func is not None:
            # Add arguments inspect info to info dict
            info['arguments_inspect_info'] = None
            # Call filter function
            info = filter_func(info)
            # If returned info is not None
            if info is not None:
                # Remove arguments inspect info from info dict
                info.pop('arguments_inspect_info', None)
        # If returned info is None
        if info is None:
            # Ignore
            return
        # Get positional arguments to be printed
        args_printed = info.pop('args_printed', args)
        # Get keyword arguments to be printed
        kwargs_printed = info.pop('kwargs_printed', kwargs)
        # Format arguments to text
        args_text = format_func_args(
            args=args_printed,
            kwargs=kwargs_printed,
            repr_func=repr_func,
        )
    else:
        # First argument name
        first_arg_name = None
        # Get fixed argument infos dict
        fixed_arg_infos = inspect_info['fixed_arg_infos']
        # If fixed argument infos dict is not empty
        if fixed_arg_infos:
            # Get the first fixed argument name
            first_arg_name = next(iter(fixed_arg_infos))
            # If the first fixed argument name is `self`
            if first_arg_name == 'self':
                # Get `self` argument info
                arg_info = fixed_arg_infos['self']
                # Get `self` argument value
                self_arg_value = arg_info.value
        # If have filter function
        if filter_func is not None:
            # Add arguments inspect info to info dict
            info['arguments_inspect_info'] = inspect_info
            # Call filter function
            info = filter_func(info)
            # If returned info is not None
            if info is not None:
                # Remove arguments inspect info from info dict
                info.pop('arguments_inspect_info', None)
        # If returned info is None
        if info is None:
            # Ignore
            return
        # If the first fixed argument name is `self`
        if first_arg_name == 'self':
            # Remove `self` argument info so it is not shown in the call text
            fixed_arg_infos.pop('self', None)
        # Format function arguments
        args_text = format_inspect_info(
            inspect_info,
            repr_func=repr,
        )
    simple_thread_id = get_simple_thread_id()
    self_arg_cls = None
    self_attr_uri = None
    # Work out the uri to display: prefer the `self` argument's concrete
    # subclass over the class the attribute was wrapped on.
    if cls is None:
        self_attr_uri = onwrap_uri
    else:
        if self_arg_value is not None:
            self_arg_cls = self_arg_value.__class__
        else:
            if attr_name == '__new__':
                # for __new__ the class is the first positional argument
                if args:
                    self_arg_cls = args[0]
        if self_arg_cls is not cls:
            if isclass(self_arg_cls) and issubclass(self_arg_cls, cls):
                self_attr_uri = to_uri(
                    module=module, cls=self_arg_cls, attr_name=attr_name
                )
        if self_attr_uri is None:
            self_attr_uri = onwrap_uri
    origin_attr_uri = info.get('origin_attr_uri', None)
    # When the attribute was inherited, show `self class -> origin attribute`
    if origin_attr_uri and origin_attr_uri != self_attr_uri:
        self_cls_uri, _, _ = self_attr_uri.rpartition('.')
        func_name_text = '{} -> {}'.format(self_cls_uri, origin_attr_uri)
    else:
        func_name_text = self_attr_uri
    indent_unit = get_config('INDENT_UNIT_TEXT') or ''
    indent_text = indent_unit * level
    if simple_thread_id != 0 or get_config('SHOW_MAIN_THREAD_ID'):
        thread_text = ' T{}:'.format(simple_thread_id)
    else:
        thread_text = ''
    count_text = ' {}: '.format(count)
    # Build the one-line call message for this hook type
    if trace_hook_type == 'pre_call':
        call_msg = (
            '{indent}+{thread}{count}----- {func_name} ----- => {args_text}'
        ).format(
            indent=indent_text,
            thread=thread_text,
            count=count_text,
            func_name=func_name_text,
            args_text='( {} )'.format(args_text) if args_text else ''
        )
    elif trace_hook_type == 'post_call':
        call_result = info['call_result']
        call_result_text = repr_func(call_result)
        # keep multi-line results aligned with the current indent
        call_result_text = call_result_text.replace('\n', '\n' + indent_text)
        post_call_count = count_get()
        if post_call_count == count:
            next_count_text = ''
        else:
            next_count_text = '\n{indent}Next: {next_count}'.format(
                indent=indent_text,
                next_count=post_call_count + 1
            )
        # 5JKGC
        call_msg = (
            '{indent}-{thread}{count}===== {func_name} ===== <= {result}'
            '{next_count}\n'
        ).format(
            indent=indent_text,
            thread=thread_text,
            count=count_text,
            func_name=func_name_text,
            result=call_result_text,
            next_count=next_count_text,
        )
    else:
        raise ValueError(trace_hook_type)
    # Choose which class to show in the highlighted (figlet) title
    if self_arg_cls is not None:
        highlighted_cls = self_arg_cls
    else:
        highlighted_cls = cls
    # Get origin attribute class
    origin_attr_class = info.get('origin_attr_class', None)
    onself_func = None
    # If have origin attribute class
    if origin_attr_class is not None:
        # If origin attribute class is not highlighted class
        if origin_attr_class is not highlighted_cls:
            # If the function is constructor
            if attr_name == '__init__':
                # Use origin attribute class as highlighted class
                highlighted_cls = origin_attr_class
            # If the config says not use `self` class
            elif not get_config('HIGHLIGHT_TITLE_SHOW_SELF_CLASS'):
                # Use origin attribute class as highlighted class
                highlighted_cls = origin_attr_class
            # If info type is `class_attr`
            elif info_type == 'class_attr':
                # If have `self` class
                if self_arg_cls is not None:
                    # If `self` class is not origin attribute class
                    if self_arg_cls is not origin_attr_class:
                        # Get function on `self` class
                        onself_func = vars(self_arg_cls).get(
                            attr_name, None
                        )
                        # If have function on `self` class
                        if onself_func is not None:
                            # Get wrapped object if it is a wrapper
                            onself_func = get_wrapped_obj(
                                onself_func, onself_func
                            )
                            # If the `self` class defines a same-name attribute
                            # yet the origin class' attribute is the one being
                            # called, this is a super-method call — show the
                            # origin class in the title.
                            if onself_func is not func:
                                highlighted_cls = origin_attr_class
    highlighted_title = to_uri(
        module_name='',
        cls=highlighted_cls,
        attr_name=attr_name,
    )
    pre_figlet_title = None
    post_figlet_title = None
    highlight_info = info.get(INFO_K_HIGHLIGHT, None)
    # Highlighting is on unless the highlight dict disables it explicitly
    if highlight_info is not None and highlight_info.get('enabled', True):
        title = highlight_info.get('title', None)
        if not title:
            title = highlighted_title
        if trace_hook_type == 'pre_call':
            pre_figlet_title = title
        else:
            post_figlet_title = title
    # Message list
    msg_s = []
    if pre_figlet_title is not None:
        # Get message
        msg = format_func_name(
            '+ {}'.format(pre_figlet_title), count=count, figlet=True
        )
        # Add message to list
        msg_s.append(msg)
    # Add message to list
    msg_s.append(call_msg)
    # Decide whether to append the function's file path and line number
    need_print_lineno = False
    if trace_hook_type == 'pre_call':
        if get_config('SHOW_FUNC_FILE_PATH_LINENO_PRE_CALL'):
            need_print_lineno = True
    if trace_hook_type == 'post_call':
        if get_config('SHOW_FUNC_FILE_PATH_LINENO_POST_CALL'):
            need_print_lineno = True
    if need_print_lineno:
        file_path_lineno = ''
        func_to_show_lineno = func
        # Loop at most 5 times to avoid circle
        for _ in range(5):
            # unwrap staticmethod objects to reach the underlying function
            if isinstance(func_to_show_lineno, STATICMETHOD_TYPE):
                if hasattr(func_to_show_lineno, '__func__'):
                    func_to_show_lineno = func_to_show_lineno.__func__
            if hasattr(func_to_show_lineno, '__code__'):
                func_code_obj = func_to_show_lineno.__code__
                if func_code_obj:
                    file_path_lineno += 'File: {} Line: {}'.format(
                        func_code_obj.co_filename,
                        func_code_obj.co_firstlineno,
                    )
            # keep following the __wrapped__ chain left by decorators
            if hasattr(func_to_show_lineno, '__wrapped__'):
                func_to_show_lineno = func_to_show_lineno.__wrapped__
                continue
            break
        if file_path_lineno:
            # Add message to list
            msg_s.append(indent_by_level(file_path_lineno))
    # If hook type is `pre_call`
    if trace_hook_type == 'pre_call':
        need_debug = get_config('PRINTING_HANDLER_DEBUG_INFO_DICT')
        need_debug_safe = get_config('PRINTING_HANDLER_DEBUG_INFO_DICT_SAFE')
        # If need print debug info
        if need_debug or need_debug_safe:
            # Get info dict copy
            debug_info = info.copy()
            # the "safe" variant omits argument values
            if need_debug_safe:
                debug_info.pop('args', None)
                debug_info.pop('kwargs', None)
            # Set internal variables
            debug_info['_INTERNAL_VARIABLES_'] = {
                'self_arg_cls': self_arg_cls,
                'self_attr_uri': self_attr_uri,
                'highlighted_cls': highlighted_cls,
                'highlighted_title': highlighted_title,
            }
            if onself_func is not None:
                debug_info['_INTERNAL_VARIABLES_']['onself_func'] = onself_func
            if not need_debug_safe:
                debug_info['_INTERNAL_VARIABLES_']['self_arg_value'] = \
                    self_arg_value
            # Get message
            msg = '# {}:\n{}'.format(
                'PRINTING_HANDLER_DEBUG_INFO_DICT_SAFE' if
                need_debug_safe else 'PRINTING_HANDLER_DEBUG_INFO_DICT',
                pformat(debug_info, indent=4),
            )
            # Add message to list
            msg_s.append(indent_by_level(msg))
    if args_inspect_info_debug_msg:
        # Add message to list
        msg_s.append(indent_by_level(args_inspect_info_debug_msg))
    if post_figlet_title is not None:
        # Get message
        msg = format_func_name(
            '- {}'.format(post_figlet_title), count=count, figlet=True
        )
        # Add message to list
        msg_s.append(msg)
    # If have messages
    if msg_s:
        # Add a space to get one more newline when joined
        msg_s.append('')
        # Join messages
        msg = '\n'.join(msg_s)
        # Print message
        print_info(msg, indent=False)
    # Return info dict
    return info
| 26.539249 | 79 | 0.550604 |
0fad287e23bee913fbf895ffd7b8d186a05cbe56 | 1,698 | py | Python | federatedscope/nlp/loss/character_loss.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | 9 | 2022-03-24T07:59:37.000Z | 2022-03-31T06:47:52.000Z | federatedscope/nlp/loss/character_loss.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | 1 | 2022-03-28T13:52:17.000Z | 2022-03-28T13:52:17.000Z | federatedscope/nlp/loss/character_loss.py | alibaba/FederatedScope | fcf6d237624769ea094cfd68803901622f14fc23 | [
"Apache-2.0"
] | null | null | null | import torch
from federatedscope.register import register_criterion
"""
Norm for Letters freq from FedEM:
https://github.com/omarfoq/FedEM/blob/13f366c41c14b234147c2662c258b8a9db2f38cc/utils/constants.py
"""
CHARACTERS_WEIGHTS = {
    '\n': 0.43795308843799086,
    ' ': 0.042500849608091536,
    ',': 0.6559597911540539,
    '.': 0.6987226398690805,
    'I': 0.9777491725556848,
    'a': 0.2226022051965085,
    'c': 0.813311655455682,
    'd': 0.4071860494572223,
    'e': 0.13455606165058104,
    'f': 0.7908671114133974,
    'g': 0.9532922255751889,
    'h': 0.2496906467588955,
    'i': 0.27444893060347214,
    'l': 0.37296488139109546,
    'm': 0.569937324017103,
    'n': 0.2520734570378263,
    'o': 0.1934141300462555,
    'r': 0.26035705948768273,
    's': 0.2534775933879391,
    't': 0.1876471355731429,
    'u': 0.47430062920373184,
    'w': 0.7470615815733715,
    'y': 0.6388302610200002
}
ALL_LETTERS = "\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"
def create_character_loss(type, device):
    """
    Character_loss from FedEM:
    https://github.com/omarfoq/FedEM/blob/13f366c41c14b234147c2662c258b8a9db2f38cc/utils/utils.py
    """
    if type != 'character_loss':
        # not our loss type; the registry will try other creators
        return None
    # every character starts at weight 1; characters listed in
    # CHARACTERS_WEIGHTS get their per-character frequency weight instead
    weights = torch.ones(len(ALL_LETTERS), device=device)
    for character, weight in CHARACTERS_WEIGHTS.items():
        weights[ALL_LETTERS.index(character)] = weight
    weights = weights * 8
    return torch.nn.CrossEntropyLoss(weight=weights).to(device)
register_criterion('character_loss', create_character_loss) | 30.872727 | 98 | 0.690224 |
43e8244daa4a15b85c340442e7d0ff5605958cc2 | 2,270 | py | Python | models/test_model.py | xinetzone/CSA-inpaintin | 00aeb87526323e3b9952ce5e68eaef1f471c437b | [
"CC-BY-4.0"
] | 1 | 2021-02-27T11:14:15.000Z | 2021-02-27T11:14:15.000Z | models/test_model.py | xinetzone/CSA-inpainting | 00aeb87526323e3b9952ce5e68eaef1f471c437b | [
"CC-BY-4.0"
] | null | null | null | models/test_model.py | xinetzone/CSA-inpainting | 00aeb87526323e3b9952ce5e68eaef1f471c437b | [
"CC-BY-4.0"
] | null | null | null | from torch import ByteTensor
from torch.autograd import Variable
from collections import OrderedDict
import util.util as util
from .base_model import BaseModel
from . import networks
class TestModel(BaseModel):
    """Inference-only model wrapper: builds a generator network, loads its
    weights, and runs forward passes (no training machinery)."""
    def __init__(self, name, checkpoints_dir, gpu_ids, which_model_netG, which_epoch, ngf, batch_size,
                 fine_size, init_gain, input_nc, input_nc_g, output_nc, norm, use_dropout, init_type):
        """Build the model. The literal ``False`` passed to BaseModel is a
        fixed flag (presumably isTrain — TODO confirm against BaseModel)."""
        super().__init__(name, False, checkpoints_dir, gpu_ids)
        self._initialize(which_model_netG, which_epoch, ngf, batch_size,
                         fine_size, init_gain, input_nc, input_nc_g, output_nc, norm, use_dropout, init_type)
    def _initialize(self, which_model_netG, which_epoch, ngf, batch_size,
                    fine_size, init_gain, input_nc, input_nc_g, output_nc, norm, use_dropout, init_type):
        """Allocate input buffers, construct netG, and load its weights."""
        # mask_global is deliberately created with batch size 1
        self.mask_global = ByteTensor(1, 1, fine_size, fine_size)
        self.input_A = self.Tensor(batch_size, input_nc,
                                   fine_size, fine_size)
        self.netG = networks.define_G(input_nc_g, output_nc, ngf,
                                      which_model_netG, self.mask_global,
                                      norm, use_dropout, init_type, self.gpu_ids, init_gain)
        self.load_network(self.netG, 'G', which_epoch)
        print('---------- Networks initialized -------------')
        networks.print_network(self.netG)
        print('-----------------------------------------------')
    def set_input(self, input):
        """Copy the incoming batch into the pre-allocated input buffer."""
        # single_dataset mode: only the 'A' side of the data is used
        incoming = input['A']
        self.input_A.resize_(incoming.size()).copy_(incoming)
        self.image_paths = input['A_paths']
    def test(self):
        """Run the generator once on the stored input."""
        self.real_A = Variable(self.input_A)
        self.fake_B = self.netG(self.real_A)
    def get_image_paths(self):
        """Paths of the images in the current batch."""
        return self.image_paths
    def get_current_visuals(self):
        """Current input/output pair converted to displayable images."""
        visuals = OrderedDict()
        visuals['real_A'] = util.tensor2im(self.real_A.data)
        visuals['fake_B'] = util.tensor2im(self.fake_B.data)
        return visuals
1471dd5ef0e6b51d81e40d2e670c8405cc61b006 | 141 | py | Python | ot2protocol/__init__.py | pozzo-group-robots/OT2Protocols2 | 2386dae2b7f18a8a42fb8b4a0b8d2c6b2f3ff440 | [
"MIT"
] | 2 | 2020-06-01T16:32:43.000Z | 2021-12-01T16:57:36.000Z | ot2protocol/__init__.py | pozzo-group-robots/OT2Protocols2 | 2386dae2b7f18a8a42fb8b4a0b8d2c6b2f3ff440 | [
"MIT"
] | 3 | 2020-01-22T02:06:31.000Z | 2020-07-19T18:58:51.000Z | ot2protocol/__init__.py | pozzo-group-robots/OT2Protocols2 | 2386dae2b7f18a8a42fb8b4a0b8d2c6b2f3ff440 | [
"MIT"
] | null | null | null | from __future__ import absolute_import, division, print_function
from .version import __version__ # noqa
from .ot2protocol import * # noqa
| 35.25 | 64 | 0.808511 |
bbb6eb991430a4866bd0c4b90a332b2ae24c6645 | 16,459 | py | Python | pybatch/info_mapBatchCommands.py | Nizarazo/instl | d04e9aede292caa1174447189a423726fa3bb97f | [
"BSD-3-Clause"
] | null | null | null | pybatch/info_mapBatchCommands.py | Nizarazo/instl | d04e9aede292caa1174447189a423726fa3bb97f | [
"BSD-3-Clause"
] | null | null | null | pybatch/info_mapBatchCommands.py | Nizarazo/instl | d04e9aede292caa1174447189a423726fa3bb97f | [
"BSD-3-Clause"
] | null | null | null | from typing import List, Any
import os
import stat
import zlib
from pathlib import Path
import logging
log = logging.getLogger(__name__)
from configVar import config_vars
import aYaml
import utils
from .baseClasses import PythonBatchCommandBase
from .fileSystemBatchCommands import MakeDirs
from .fileSystemBatchCommands import Chmod
from .wtarBatchCommands import Wzip
from .copyBatchCommands import CopyFileToFile
from db import DBManager
"""
batch commands that need access to the db and the info_map table
"""
class CheckDownloadFolderChecksum(DBManager, PythonBatchCommandBase):
    """Verify that every file expected in the download folder exists and
    matches the checksum recorded in the info map.
    """
    def __init__(self, print_report=False, raise_on_bad_checksum=False, **kwargs) -> None:
        super().__init__(**kwargs)
        self.print_report = print_report
        self.raise_on_bad_checksum = raise_on_bad_checksum
        if not self.raise_on_bad_checksum:
            # when not raising, let the framework swallow our ValueError
            self.exceptions_to_ignore.append(ValueError)
        self.bad_checksum_list = []
        self.missing_files_list = []
        self.bad_checksum_list_exception_message = ""
        self.missing_files_exception_message = ""
    def repr_own_args(self, all_args: List[str]) -> None:
        all_args.append(self.optional_named__init__param("print_report", self.print_report, False))
        all_args.append(self.optional_named__init__param("raise_on_bad_checksum", self.raise_on_bad_checksum, False))
    def progress_msg_self(self) -> str:
        return 'Check download folder checksum'
    def __call__(self, *args, **kwargs) -> None:
        # base __call__ reads the info map file (TO_SYNC_INFO_MAP_PATH) if provided
        super().__call__(*args, **kwargs)
        for file_item in self.info_map_table.get_download_items(what="file"):
            if not os.path.isfile(file_item.download_path):
                self.missing_files_list.append(" ".join((file_item.download_path, "was not found")))
                continue
            actual_checksum = utils.get_file_checksum(file_item.download_path)
            if not utils.compare_checksums(actual_checksum, file_item.checksum):
                details = ("Bad checksum:", file_item.download_path, "expected", file_item.checksum, "found", actual_checksum)
                self.bad_checksum_list.append(" ".join(details))
        if not self.is_checksum_ok():
            report_lines = self.report()
            if self.print_report:
                print("\n".join(report_lines))
            if self.raise_on_bad_checksum:
                raise ValueError("\n".join((self.bad_checksum_list_exception_message, self.missing_files_exception_message)))
    def is_checksum_ok(self) -> bool:
        """True when no bad checksums and no missing files were recorded."""
        return not (self.bad_checksum_list or self.missing_files_list)
    def report(self):
        """Build the report lines; also caches the summary messages used when
        raising."""
        report_lines = []
        if self.bad_checksum_list:
            report_lines.extend(self.bad_checksum_list)
            self.bad_checksum_list_exception_message = f"Bad checksum for {len(self.bad_checksum_list)} files"
            report_lines.append(self.bad_checksum_list_exception_message)
        if self.missing_files_list:
            report_lines.extend(self.missing_files_list)
            self.missing_files_exception_message = f"Missing {len(self.missing_files_list)} files"
            report_lines.append(self.missing_files_exception_message)
        return report_lines
class SetExecPermissionsInSyncFolder(DBManager, PythonBatchCommandBase):
    """Grant execute permission to the files the info map marks as
    executable, inside the download folder."""
    def __init__(self, info_map_file=None, **kwargs) -> None:
        # info_map_file is accepted for interface compatibility but not used here
        super().__init__(**kwargs)
    def repr_own_args(self, all_args: List[str]) -> None:
        pass
    def progress_msg_self(self) -> str:
        return 'Set exec permissions in download folder'
    def __call__(self, *args, **kwargs) -> None:
        # base __call__ reads the info map file (REQUIRED_INFO_MAP_PATH) if provided
        super().__call__(*args, **kwargs)
        for exec_path in self.info_map_table.get_exec_file_paths():
            if os.path.isfile(exec_path):
                Chmod(exec_path, "a+x", own_progress_count=0)()
class CreateSyncFolders(DBManager, PythonBatchCommandBase):
    """Create the download folder hierarchy described by the info map."""
    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
    def repr_own_args(self, all_args: List[str]) -> None:
        pass
    def progress_msg_self(self) -> str:
        return 'Create download directories'
    def __call__(self, *args, **kwargs) -> None:
        super().__call__(*args, **kwargs)
        for dl_dir in self.info_map_table.get_download_items(what="dir"):
            self.doing = f"creating sync folder '{dl_dir}'"
            # direct_sync items carry an absolute path in .download_path;
            # cache items carry a relative path in .path
            folder_to_make = dl_dir.download_path or dl_dir.path
            MakeDirs(folder_to_make)()
class SetBaseRevision(DBManager, PythonBatchCommandBase):
    """Record *base_rev* as the base repository revision in the info map
    table."""
    def __init__(self, base_rev, **kwargs):
        super().__init__(**kwargs)
        self.base_rev = base_rev
    def repr_own_args(self, all_args: List[str]):
        all_args.append(self.unnamed__init__param(self.base_rev))
    def progress_msg_self(self):
        return f"Set base-repo-rev to repo-rev#{self.base_rev}"
    def __call__(self, *args, **kwargs) -> None:
        super().__call__(*args, **kwargs)
        self.info_map_table.set_base_revision(self.base_rev)
class InfoMapFullWriter(DBManager, PythonBatchCommandBase):
    """ write all info map table lines to a single file """
    # columns of the info map table that get written to the file
    fields_relevant_to_info_map = ('path', 'flags', 'revision', 'checksum', 'size')
    def __init__(self, out_file, in_format='text', **kwargs):
        """
        :param out_file: path of the info map file to write
        :param in_format: output format name (defaults to 'text')
        """
        super().__init__(**kwargs)
        self.out_file = Path(out_file)
        # bug fix: was `self.format = format`, which stored the *builtin*
        # format function instead of the in_format argument
        self.format = in_format
    def repr_own_args(self, all_args: List[str]) -> None:
        all_args.append(self.unnamed__init__param(self.out_file))
        # bug fix: the keyword must match the __init__ parameter name
        # (`in_format`); the previous "format" keyword would make the
        # regenerated call raise TypeError
        all_args.append(self.optional_named__init__param("in_format", self.format, 'text'))
    def progress_msg_self(self) -> str:
        return f'''Create full info_map file'''
    def __call__(self, *args, **kwargs) -> None:
        self.info_map_table.write_to_file(self.out_file, field_to_write=InfoMapFullWriter.fields_relevant_to_info_map)
class InfoMapSplitWriter(DBManager, PythonBatchCommandBase):
    """ write all info map table to files according to info_map: field in index.yaml """
    # columns of the info map table that get written to each file
    fields_relevant_to_info_map = ('path', 'flags', 'revision', 'checksum', 'size')
    def __init__(self, work_folder, in_format='text', **kwargs):
        """
        :param work_folder: folder in which the split info map files are created
        :param in_format: output format name (defaults to 'text')
        """
        super().__init__(**kwargs)
        self.work_folder = Path(work_folder)
        # bug fix: was `self.format = format`, which stored the *builtin*
        # format function instead of the in_format argument
        self.format = in_format
    def repr_own_args(self, all_args: List[str]) -> None:
        all_args.append(self.unnamed__init__param(self.work_folder))
        # bug fix: the keyword must match the __init__ parameter name
        # (`in_format`); the previous "format" keyword would make the
        # regenerated call raise TypeError
        all_args.append(self.optional_named__init__param("in_format", self.format, 'text'))
    def progress_msg_self(self) -> str:
        return f'''Create split info_map files'''
    def __call__(self, *args, **kwargs) -> None:
        # fill the iid_to_svn_item_t table
        self.info_map_table.populate_IIDToSVNItem()
        # get the list of info map file names
        info_map_to_item = dict()
        all_info_map_names = self.items_table.get_unique_detail_values('info_map')
        for infomap_file_name in all_info_map_names:
            info_map_file_path = self.work_folder.joinpath(infomap_file_name)
            if info_map_file_path.is_file():
                log.info(f"{infomap_file_name} was found so no need to create it")
                # file already exists, probably copied from the "Common" repository
                # just checking that the file is also zipped
                zip_infomap_file_name = config_vars.resolve_str(infomap_file_name+"$(WZLIB_EXTENSION)")
                zip_info_map_file_path = self.work_folder.joinpath(zip_infomap_file_name)
                if not zip_info_map_file_path.is_file():
                    raise FileNotFoundError(f"found {info_map_file_path} but not {zip_info_map_file_path}")
            else:
                self.info_map_table.mark_items_required_by_infomap(infomap_file_name)
                info_map_items = self.info_map_table.get_required_items()
                info_map_to_item[infomap_file_name] = info_map_items
        files_to_add_to_default_info_map = list()  # the named info_map files and their wzip version should be added to the default info_map
        # write each info map to file
        for infomap_file_name, info_map_items in info_map_to_item.items():
            if info_map_items:  # could be that no items are linked to the info map file
                info_map_file_path = self.work_folder.joinpath(infomap_file_name)
                self.info_map_table.write_to_file(in_file=info_map_file_path, items_list=info_map_items, field_to_write=self.fields_relevant_to_info_map)
                files_to_add_to_default_info_map.append(info_map_file_path)
                zip_infomap_file_name = config_vars.resolve_str(infomap_file_name+"$(WZLIB_EXTENSION)")
                zip_info_map_file_path = self.work_folder.joinpath(zip_infomap_file_name)
                with Wzip(info_map_file_path, self.work_folder, own_progress_count=0) as wzipper:
                    wzipper()
                files_to_add_to_default_info_map.append(zip_info_map_file_path)
        # add the default info map
        default_info_map_file_name = str(config_vars["MAIN_INFO_MAP_FILE_NAME"])
        default_info_map_file_path = self.work_folder.joinpath(default_info_map_file_name)
        info_map_items = self.info_map_table.get_items_for_default_infomap()
        self.info_map_table.write_to_file(in_file=default_info_map_file_path, items_list=info_map_items, field_to_write=self.fields_relevant_to_info_map)
        with Wzip(default_info_map_file_path, self.work_folder, own_progress_count=0) as wzipper:
            wzipper()
        # add a line to default info map for each non default info_map created above
        with open(default_info_map_file_path, "a") as wfd:
            for file_to_add in files_to_add_to_default_info_map:
                file_checksum = utils.get_file_checksum(file_to_add)
                file_size = file_to_add.stat().st_size
                # todo: make path relative
                line_for_main_info_map = f"instl/{file_to_add.name}, f, {config_vars['TARGET_REPO_REV'].str()}, {file_checksum}, {file_size}\n"
                wfd.write(line_for_main_info_map)
class IndexYamlReader(DBManager, PythonBatchCommandBase):
    """Read an index.yaml file into the database via IndexYamlReaderBase."""
    def __init__(self, index_yaml_path, **kwargs):
        super().__init__(**kwargs)
        self.index_yaml_path = Path(index_yaml_path)
    def repr_own_args(self, all_args: List[str]) -> None:
        all_args.append(self.unnamed__init__param(self.index_yaml_path))
    def progress_msg_self(self) -> str:
        return f'''read index.yaml from {self.index_yaml_path}'''
    def __call__(self, *args, **kwargs) -> None:
        # imported locally to avoid a module-level dependency on pyinstl
        from pyinstl import IndexYamlReaderBase
        yaml_reader = IndexYamlReaderBase(config_vars)
        yaml_reader.read_yaml_file(self.index_yaml_path)
class CopySpecificRepoRev(DBManager, PythonBatchCommandBase):
    """Copy the files that belong to a specific repo-rev from a checkout
    folder into a repo-rev folder."""
    def __init__(self, checkout_folder, repo_rev_folder, repo_rev, **kwargs):
        super().__init__(**kwargs)
        self.checkout_folder = Path(checkout_folder)
        self.repo_rev_folder = Path(repo_rev_folder)
        self.repo_rev = repo_rev
    def repr_own_args(self, all_args: List[str]) -> None:
        all_args.append(self.unnamed__init__param(self.checkout_folder))
        all_args.append(self.unnamed__init__param(self.repo_rev_folder))
        all_args.append(self.unnamed__init__param(self.repo_rev))
    def progress_msg_self(self) -> str:
        return f'''Copy files of repo-rev#{self.repo_rev} from {self.checkout_folder} to {self.repo_rev_folder}'''
    def __call__(self, *args, **kwargs) -> None:
        # mark everything that belongs to the revision, plus the instl folder
        self.info_map_table.mark_required_for_revision(self.repo_rev)
        self.info_map_table.mark_required_for_dir("instl")
        for required_file in self.info_map_table.get_required_items(what="file"):
            source = Path(self.checkout_folder, required_file)
            target = Path(self.repo_rev_folder, required_file)
            print(f"copy {source} to {target}")
            with CopyFileToFile(source, target, own_progress_count=0) as copier:
                copier()
# CreateRepoRevFile does not use the info map, but this file is the best place for it
class CreateRepoRevFile(PythonBatchCommandBase):
    """ create a repo-rev file inside the instl folder

    The repo-rev file is a yaml "!define" document containing the
    configVars listed in REPO_REV_FILE_VARS, describing the uploaded
    revision (index/info-map URLs and checksums, folder hierarchy, ...).
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def repr_own_args(self, all_args: List[str]) -> None:
        # no arguments beyond the base class's
        pass

    def progress_msg_self(self) -> str:
        return f'''create file for repo-rev#{config_vars["TARGET_REPO_REV"].str()}'''

    def __call__(self, *args, **kwargs) -> None:
        if "REPO_REV_FILE_VARS" not in config_vars:
            # must have a list of variable names to write to the repo-rev file
            raise ValueError("REPO_REV_FILE_VARS must be defined")
        repo_rev_vars = list(config_vars["REPO_REV_FILE_VARS"])  # list of configVars to write to the repo-rev file
        # check that the variable names from REPO_REV_FILE_VARS do not contain
        # names that must not be made public
        dangerous_intersection = set(repo_rev_vars).intersection(
            {"AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "PRIVATE_KEY", "PRIVATE_KEY_FILE"})
        if dangerous_intersection:
            # pass one formatted message: extra positional args to a logger
            # are %-format arguments, and the original print-style call
            # garbled the log record
            log.warning(f"found {dangerous_intersection} in REPO_REV_FILE_VARS, aborting")
            raise ValueError(f"REPO_REV_FILE_VARS contains {dangerous_intersection} and so is forbidden to upload")

        use_zlib = bool(config_vars.get("USE_ZLIB", "False"))  # should we consider zipped files or not
        # NOTE(review): truthiness of config_vars.get(...) is assumed to
        # reflect the configured boolean — confirm "False" is not truthy here
        zip_extension = ""
        if use_zlib:
            zip_extension = config_vars.get("WZLIB_EXTENSION", ".wzip").str()

        revision_instl_folder_path = Path(config_vars["UPLOAD_REVISION_INSTL_FOLDER"])

        # create checksum for the main info_map file, either wzipped or not
        main_info_map_file_name = "info_map.txt"+zip_extension
        main_info_map_file = revision_instl_folder_path.joinpath(main_info_map_file_name)
        main_info_map_checksum = utils.get_file_checksum(main_info_map_file)

        config_vars["INFO_MAP_FILE_URL"] = "$(BASE_LINKS_URL)/$(REPO_NAME)/$(__CURR_REPO_FOLDER_HIERARCHY__)/instl/"+main_info_map_file_name
        config_vars["INFO_MAP_CHECKSUM"] = main_info_map_checksum

        # create checksum for the main index.yaml file, either wzipped or not
        index_file_name = "index.yaml"+zip_extension
        index_file_path = revision_instl_folder_path.joinpath(index_file_name)

        config_vars["INDEX_CHECKSUM"] = utils.get_file_checksum(index_file_path)
        config_vars["INDEX_URL"] = "$(BASE_LINKS_URL)/$(REPO_NAME)/$(__CURR_REPO_FOLDER_HIERARCHY__)/instl/"+index_file_name

        config_vars["INSTL_FOLDER_BASE_URL"] = "$(BASE_LINKS_URL)/$(REPO_NAME)/$(__CURR_REPO_FOLDER_HIERARCHY__)/instl"
        config_vars["REPO_REV_FOLDER_HIERARCHY"] = "$(__CURR_REPO_FOLDER_HIERARCHY__)"

        # check that all variables are present
        # <class 'list'>: ['INSTL_FOLDER_BASE_URL', 'REPO_REV_FOLDER_HIERARCHY', 'SYNC_BASE_URL']
        missing_vars = [var for var in repo_rev_vars if var not in config_vars]
        if missing_vars:
            raise ValueError(f"{missing_vars} are missing, cannot write repo rev file")

        # create yaml out of the variables
        variables_as_yaml = config_vars.repr_for_yaml(repo_rev_vars)
        repo_rev_yaml_doc = aYaml.YamlDumpDocWrap(variables_as_yaml, '!define', "",
                                                  explicit_start=True, sort_mappings=True)
        repo_rev_file_path = config_vars["UPLOAD_REVISION_REPO_REV_FILE"]
        with utils.utf8_open_for_write(repo_rev_file_path, "w") as wfd:
            aYaml.writeAsYaml(repo_rev_yaml_doc, out_stream=wfd, indentor=None, sort=True)
            log.info(f"""create {repo_rev_file_path}""")
| 47.84593 | 159 | 0.697065 |
971d2b8e065d25b0ae1245c0d443dcfca1ff267f | 7,081 | py | Python | test/test_projection.py | chrisantonellis/baemo | a38ea4f32d09a0813e2637ff1e2aa963f432b326 | [
"MIT"
] | 2 | 2016-12-08T00:11:58.000Z | 2017-04-19T16:29:51.000Z | test/test_projection.py | chrisantonellis/pymongo_basemodel | a38ea4f32d09a0813e2637ff1e2aa963f432b326 | [
"MIT"
] | null | null | null | test/test_projection.py | chrisantonellis/pymongo_basemodel | a38ea4f32d09a0813e2637ff1e2aa963f432b326 | [
"MIT"
] | null | null | null |
import sys; sys.path.append("../")
import unittest
from baemo.projection import Projection
from baemo.exceptions import ProjectionMalformed
from baemo.exceptions import ProjectionTypeMismatch
class TestProjection(unittest.TestCase):
    """Unit tests for baemo's Projection.

    Covers construction, key setting (including dot-delimited keys),
    merging of projections, flattening, and validation of inclusive
    (all 1s) vs exclusive (all 0s) projections; value 2 denotes a
    "projection-neutral" entry and -1 deletes a key on merge.
    """

    # __init__

    def test_init__no_params(self):
        p = Projection()
        self.assertEqual(p.__dict__, {})
        self.assertEqual(type(p), Projection)

    def test_init__dict_param(self):
        p = Projection({"k": 0})
        self.assertEqual(p.__dict__, {"k": 0})

    # __call__

    def test_call__dict_param(self):
        p = Projection()
        p({"k": 0})
        self.assertEqual(p.__dict__, {"k": 0})

    # set

    def test_set__non_delimited_key_param(self):
        p = Projection()
        p.set("k1", 1)
        self.assertEqual(p.__dict__, {"k1": 1})
        p.set("k2", 1)
        self.assertEqual(p.__dict__, {"k1": 1, "k2": 1})

    def test_set__delimited_key_param(self):
        # dot-delimited keys expand into nested dicts
        p = Projection()
        p.set("k1.k2.k3", 1)
        self.assertEqual(p.__dict__, {
            "k1": {
                "k2": {
                    "k3": 1
                }
            }
        })

    # _merge

    def test__merge__simple_projection_params(self):
        p1 = Projection({"k1": 1})
        p2 = Projection({"k2": 1})
        self.assertEqual(Projection._merge(p1, p2), Projection({
            "k1": 1,
            "k2": 1
        }))

    def test__merge__advanced_projection_params(self):
        p1 = Projection({"k1": 1, "k2": 2, "k3": {"k4": 1}})
        p2 = Projection({"k5": 1, "k6": 2, "k3": {"k8": 1}})
        self.assertEqual(Projection._merge(p1, p2), Projection({
            "k1": 1,
            "k2": 2,
            "k3": {
                "k4": 1,
                "k8": 1
            },
            "k5": 1,
            "k6": 2
        }))

    def test__merge__projection_param__delete_key(self):
        # a value of -1 removes the key from the merge result
        p1 = Projection({"k2": -1})
        p2 = Projection({"k1": 1, "k2": 2, "k3": 1})
        self.assertEqual(Projection._merge(p1, p2), Projection({
            "k1": 1,
            "k3": 1
        }))

    # merge

    def test_merge__dict_param(self):
        p = Projection({"k1": 1})
        d = {"k2": 1}
        self.assertEqual(p.merge(d), Projection({
            "k1": 1,
            "k2": 1
        }))

    def test_merge__projection_param(self):
        p1 = Projection({"k1": 1})
        p2 = Projection({"k2": 1})
        self.assertEqual(p1.merge(p2), Projection({
            "k1": 1,
            "k2": 1
        }))

    def test_merge__projection_param__raises_ProjectionTypeMismatch(self):
        # cannot merge an inclusive projection with an exclusive one
        p1 = Projection({"k1": 1})
        p2 = Projection({"k2": 0})
        with self.assertRaises(ProjectionTypeMismatch):
            p1.merge(p2)

    # update

    def test_update(self):
        p1 = Projection({"k1": 1})
        p2 = Projection({"k2": 1})
        p1.update(p2)
        self.assertEqual(p1.__dict__, {
            "k1": 1,
            "k2": 1
        })

    # _flatten

    def test__flatten__projection_param__returns_dict(self):
        p = Projection({"k1": 1, "k2": 2, "k3": {"k4": 1, "k5": 2}})
        flattened = Projection._flatten(p)
        self.assertEqual(flattened, {"k1": 1, "k2": 1, "k3": 1})
        self.assertEqual(type(flattened), dict)

    # flatten

    def test_flatten__inclusive_projection(self):
        p = Projection({
            "k1": 1,
            "k2": 2,
            "k3": {
                "k4": 1,
                "k5": 2
            }
        })
        self.assertEqual(p.flatten(), {"k1": 1, "k2": 1, "k3": 1})

    def test_flatten__exclusive_projection(self):
        p = Projection({
            "k1": 0,
            "k2": 2,
            "k3": {
                "k4": 0,
                "k5": 2
            }
        })
        self.assertEqual(p.flatten(), {"k1": 0})

    def test_flatten__none_projection(self):
        # a projection of only neutral (2) values flattens to nothing
        p = Projection({
            "k1": 2,
            "k2": {
                "k3": 2
            }
        })
        self.assertEqual(p.flatten(), {})

    # _validate

    def test__validate__projection_param__return_value_discarded(self):
        try:
            Projection._validate(Projection({"k": 1}))
        except (ProjectionMalformed, ProjectionTypeMismatch):
            self.fail("exception raised")

    def test__validate__basic_inclusive_projection_param__returns_type(self):
        d = {"k": 1}
        self.assertEqual(Projection._validate(Projection(d)), "inclusive")

    def test__validate__advanced_inclusive_projection_param__returns_type(self):
        d = {
            "k1": 1,
            "k2": -1,
            "k3": 2,
            "k4": {
                "k5": 1,
                "k6": 2
            }
        }
        self.assertEqual(Projection._validate(Projection(d)), "inclusive")

    def test__validate__basic_exclusive_projection_param__returns_type(self):
        d = {"k": 0}
        self.assertEqual(Projection._validate(Projection(d)), "exclusive")

    def test__validate__advanced_exclusive_projection_param__returns_type(self):
        d = {
            "k1": 0,
            "k2": -1,
            "k3": 2,
            "k4": {
                "k5": 0,
                "k6": 2
            }
        }
        self.assertEqual(Projection._validate(Projection(d)), "exclusive")

    def test__validate__basic_none_projection_param__returns_type(self):
        d = {"k": 2}
        self.assertEqual(Projection._validate(Projection(d)), None)

    def test__validate__advanced_none_projection_param__returns_type(self):
        d = {
            "k1": 2,
            "k2": -1,
            "k3": 2,
            "k4": {
                "k5": -1,
                "k6": 2
            }
        }
        self.assertEqual(Projection._validate(Projection(d)), None)

    def test__validate__simple_projection_param__raises_ProjectionMalformed(self):
        with self.assertRaises(ProjectionMalformed):
            Projection({"k": "foo"})

    def test__validate__advanced_projection_param__raises_ProjectionMalformed(self):
        with self.assertRaises(ProjectionMalformed):
            Projection({
                "k1": 1,
                "k2": 2,
                "k3": {
                    "k4": "foo"
                }
            })

    def test__validate__simple_projection_param__raises_ProjectionTypeMismatch(self):
        with self.assertRaises(ProjectionTypeMismatch):
            Projection({
                "k": 0,
                "k2": 1
            })

    def test__validate__advanced_projection_param__raises_ProjectionTypeMismatch(self):
        with self.assertRaises(ProjectionTypeMismatch):
            Projection({
                "k1": 1,
                "k2": 2,
                "k3": {
                    "k4": 0
                }
            })

    # validate

    def test_validate__discard_return_value(self):
        # NOTE(review): this only constructs a Projection — presumably
        # __init__ runs validation; confirm `validate` itself is exercised
        try:
            Projection({"k": 0})
        except (ProjectionMalformed, ProjectionTypeMismatch):
            self.fail("exception raised")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| 26.82197 | 87 | 0.51391 |
d8c2e616013af2d419f971ce448f25a810bba144 | 66,877 | py | Python | python/cudf/cudf/core/frame.py | philtrade/cudf | a4d5c281c9ede5cd31aeaa6c0d131d932a951554 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/core/frame.py | philtrade/cudf | a4d5c281c9ede5cd31aeaa6c0d131d932a951554 | [
"Apache-2.0"
] | null | null | null | python/cudf/cudf/core/frame.py | philtrade/cudf | a4d5c281c9ede5cd31aeaa6c0d131d932a951554 | [
"Apache-2.0"
] | null | null | null | import functools
import itertools
import warnings
from collections import OrderedDict
import cupy
import numpy as np
import pandas as pd
from pandas.api.types import is_dtype_equal
import cudf
import cudf._lib as libcudf
from cudf._lib.nvtx import annotate
from cudf._lib.scalar import as_scalar
from cudf.core import column
from cudf.core.column import as_column, build_categorical_column
from cudf.utils.dtypes import (
is_categorical_dtype,
is_datetime_dtype,
is_numerical_dtype,
is_scalar,
is_string_dtype,
min_scalar_type,
)
class Frame(libcudf.table.Table):
"""
Frame: A collection of Column objects with an optional index.
Parameters
----------
data : OrderedColumnDict
An OrderedColumnDict mapping column names to Columns
index : Table
A Frame representing the (optional) index columns.
"""
@classmethod
def _from_table(cls, table):
    """Build an instance of ``cls`` that shares ``table``'s columns and index."""
    data = table._data
    index = table._index
    return cls(data, index=index)
@classmethod
@annotate("CONCAT", color="orange", domain="cudf_python")
def _concat(cls, objs, axis=0, ignore_index=False):
    """Concatenate a list of Frames row-wise into a single Frame.

    Aligns columns by name across all inputs, finds a common dtype per
    column (combining categories for categorical columns), back-fills
    missing columns with all-null columns, then concatenates via libcudf.
    When ``ignore_index`` is True the input indices are dropped.
    """
    # shallow-copy the input DFs in case the same DF instance
    # is concatenated with itself
    objs = [f.copy(deep=False) for f in objs]

    from cudf.core.index import as_index
    from cudf.core.column.column import column_empty
    from cudf.core.column.column import build_categorical_column

    # Create a dictionary of the common, non-null columns
    def get_non_null_cols_and_dtypes(col_idxs, list_of_columns):
        # A mapping of {idx: np.dtype}
        dtypes = dict()
        # A mapping of {idx: [...columns]}, where `[...columns]`
        # is a list of columns with at least one valid value for each
        # column name across all input dataframes
        non_null_columns = dict()
        for idx in col_idxs:
            for cols in list_of_columns:
                # Skip columns not in this frame
                if idx >= len(cols) or cols[idx] is None:
                    continue
                # Store the first dtype we find for a column, even if it's
                # all-null. This ensures we always have at least one dtype
                # for each name. This dtype will be overwritten later if a
                # non-null Column with the same name is found.
                if idx not in dtypes:
                    dtypes[idx] = cols[idx].dtype
                if cols[idx].valid_count > 0:
                    if idx not in non_null_columns:
                        non_null_columns[idx] = [cols[idx]]
                    else:
                        non_null_columns[idx].append(cols[idx])
        return non_null_columns, dtypes

    def find_common_dtypes_and_categories(non_null_columns, dtypes):
        # A mapping of {idx: categories}, where `categories` is a
        # column of all the unique categorical values from each
        # categorical column across all input dataframes
        categories = dict()
        for idx, cols in non_null_columns.items():
            # default to the first non-null dtype
            dtypes[idx] = cols[0].dtype
            # If all the non-null dtypes are int/float, find a common dtype
            if all(is_numerical_dtype(col.dtype) for col in cols):
                dtypes[idx] = np.find_common_type(
                    [col.dtype for col in cols], []
                )
            # If all categorical dtypes, combine the categories
            elif all(is_categorical_dtype(col.dtype) for col in cols):
                # Combine and de-dupe the categories
                categories[idx] = (
                    cudf.concat([col.cat().categories for col in cols])
                    .to_series()
                    .drop_duplicates(ignore_index=True)
                    ._column
                )
                # Set the column dtype to the codes' dtype. The categories
                # will be re-assigned at the end
                dtypes[idx] = min_scalar_type(len(categories[idx]))
            # Otherwise raise an error if columns have different dtypes
            elif not all(
                is_dtype_equal(c.dtype, dtypes[idx]) for c in cols
            ):
                raise ValueError("All columns must be the same type")
        return categories

    def cast_cols_to_common_dtypes(
        col_idxs, list_of_columns, dtypes, categories
    ):
        # Cast all columns to a common dtype, assign combined categories,
        # and back-fill missing columns with all-null columns
        for idx in col_idxs:
            dtype = dtypes[idx]
            for cols in list_of_columns:
                # If column not in this df, fill with an all-null column
                if idx >= len(cols) or cols[idx] is None:
                    n = len(next(filter(lambda x: x is not None, cols)))
                    cols[idx] = column_empty(n, dtype, masked=True)
                else:
                    # If column is categorical, rebase the codes with the
                    # combined categories, and cast the new codes to the
                    # min-scalar-sized dtype
                    if idx in categories:
                        cols[idx] = (
                            cols[idx]
                            .cat()
                            ._set_categories(
                                categories[idx], is_unique=True
                            )
                            .codes
                        )
                    cols[idx] = cols[idx].astype(dtype)

    def reassign_categories(categories, cols, col_idxs):
        # Rebuild categorical columns from the raw codes produced by concat
        for name, idx in zip(cols, col_idxs):
            if idx in categories:
                cols[name] = build_categorical_column(
                    categories=categories[idx],
                    codes=as_column(
                        cols[name].base_data, dtype=cols[name].dtype
                    ),
                    mask=cols[name].base_mask,
                    offset=cols[name].offset,
                    size=cols[name].size,
                )

    # Get a list of the unique table column names
    names = [name for f in objs for name in f._column_names]
    names = list(OrderedDict.fromkeys(names).keys())

    # Combine the index and table columns for each Frame into a
    # list of [...index_cols, ...table_cols]. If a table is
    # missing a column, that list will have None in the slot instead
    columns = [
        ([] if ignore_index else list(f._index._data.columns))
        + [f._data[name] if name in f._data else None for name in names]
        for i, f in enumerate(objs)
    ]

    # Get a list of the combined index and table column indices
    indices = list(range(functools.reduce(max, map(len, columns))))
    # The position of the first table colum in each
    # combined index + table columns list
    first_data_column_position = len(indices) - len(names)

    # Get the non-null columns and their dtypes
    non_null_cols, dtypes = get_non_null_cols_and_dtypes(indices, columns)

    # Infer common dtypes between numeric columns
    # and combine CategoricalColumn categories
    categories = find_common_dtypes_and_categories(non_null_cols, dtypes)

    # Cast all columns to a common dtype, assign combined categories,
    # and back-fill missing columns with all-null columns
    cast_cols_to_common_dtypes(indices, columns, dtypes, categories)

    # Construct input tables with the index and data columns in the same
    # order. This strips the given index/column names and replaces the
    # names with their integer positions in the `cols` list
    tables = []
    for cols in columns:
        table_cols = cols[first_data_column_position:]
        table_names = indices[first_data_column_position:]
        table = cls(data=dict(zip(table_names, table_cols)))
        if 1 == first_data_column_position:
            table._index = as_index(cols[0])
        elif first_data_column_position > 1:
            index_cols = cols[:first_data_column_position]
            index_names = indices[:first_data_column_position]
            table._index = cls(data=dict(zip(index_names, index_cols)))
        tables.append(table)

    # Concatenate the Tables
    out = cls._from_table(
        libcudf.concat.concat_tables(tables, ignore_index=ignore_index)
    )

    # Reassign the categories for any categorical table cols
    reassign_categories(
        categories, out._data, indices[first_data_column_position:]
    )

    # Reassign the categories for any categorical index cols
    reassign_categories(
        categories, out._index._data, indices[:first_data_column_position]
    )

    # Reassign index and column names from the first input
    if isinstance(objs[0].columns, pd.MultiIndex):
        out.columns = objs[0].columns
    else:
        out.columns = names

    out._index.name = objs[0]._index.name
    out._index.names = objs[0]._index.names

    return out
def _get_columns_by_label(self, labels, downcast=False):
    """
    Returns columns of the Frame specified by `labels`

    If downcast is True, try and downcast from a DataFrame to a Series
    """
    new_data = self._data.get_by_label(labels)
    if downcast:
        # NOTE(review): `nlevels` is only bound when `labels` is a scalar
        # or a tuple; a list-like `labels` with downcast=True would raise
        # NameError below — presumably callers never pass that. Confirm.
        if is_scalar(labels):
            nlevels = 1
        elif isinstance(labels, tuple):
            nlevels = len(labels)
        # downcast to a Series only when the label fully resolves the
        # (multi-)index of column names
        if self._data.multiindex is False or nlevels == self._data.nlevels:
            return self._constructor_sliced(
                new_data, name=labels, index=self.index
            )
    return self._constructor(
        new_data, columns=new_data.to_pandas_index(), index=self.index
    )
def _get_columns_by_index(self, indices):
    """Return a new Frame holding the columns selected by position."""
    selected = self._data.get_by_index(indices)
    return self._constructor(
        selected,
        columns=selected.to_pandas_index(),
        index=self.index,
    )
def _gather(self, gather_map, keep_index=True):
    """Take rows by positional map, preserving categorical metadata."""
    if not pd.api.types.is_integer_dtype(gather_map.dtype):
        # libcudf gather requires an integer map
        gather_map = gather_map.astype("int32")
    gathered = libcudf.copying.gather(
        self, as_column(gather_map), keep_index=keep_index
    )
    out = self.__class__._from_table(gathered)
    out._copy_categories(self)
    return out
def _hash(self, initial_hash_values=None):
    """Compute a row-wise hash of the table via libcudf."""
    hashed = libcudf.hash.hash(self, initial_hash_values)
    return hashed
def _hash_partition(
    self, columns_to_hash, num_partitions, keep_index=True
):
    """Partition rows by hashing the given columns.

    Returns a (frame, offsets) pair where offsets mark partition starts.
    """
    partitioned, offsets = libcudf.hash.hash_partition(
        self, columns_to_hash, num_partitions, keep_index
    )
    result = self.__class__._from_table(partitioned)
    result._copy_categories(self, include_index=keep_index)
    return result, offsets
def _as_column(self):
    """Convert a single-column, index-less Frame into its Column."""
    is_single_unnamed_column = (
        self._num_columns == 1
        and self._index is None
        and self._column_names[0] is None
    )
    assert is_single_unnamed_column, """There should be only one data column,
    no index and None as the name to use this method"""

    return self._data[None].copy(deep=False)
def _scatter(self, key, value):
    """Scatter ``value`` rows into ``self`` at positions ``key``."""
    scattered = libcudf.copying.scatter(value, key, self)
    out = self._from_table(scattered)
    out._copy_categories(self)
    return out
def _empty_like(self, keep_index=True):
    """Return an empty Frame with the same schema (optionally keeping the index)."""
    empty = libcudf.copying.table_empty_like(self, keep_index)
    out = self._from_table(empty)
    out._copy_categories(self, include_index=keep_index)
    return out
def _slice(self, arg):
    """
    _slice : slice the frame as per the arg

    Parameters
    ----------
    arg : should always be of type slice and doesn't handle step
    """
    from cudf.core.index import RangeIndex

    num_rows = len(self)
    if num_rows == 0:
        return self
    # slice.indices() clamps/normalizes start, stop, step for num_rows
    start, stop, stride = arg.indices(num_rows)

    # This is just to handle RangeIndex type, stop
    # it from materializing unnecessarily
    keep_index = True
    if self.index is not None and isinstance(self.index, RangeIndex):
        keep_index = False

    if start < 0:
        start = start + num_rows
    if stop < 0:
        stop = stop + num_rows

    # empty result: inverted bounds with unit stride, or no data columns
    # while the index is not being kept
    if (start > stop and (stride is None or stride == 1)) or (
        len(self._data) == 0 and keep_index is False
    ):
        return self._empty_like(keep_index)
    else:
        start = len(self) if start > num_rows else start
        stop = len(self) if stop > num_rows else stop

        if stride is not None and stride != 1:
            # strided slice is implemented as a gather of the positions
            return self._gather(
                cupy.arange(start, stop=stop, step=stride, dtype=np.int32)
            )
        else:
            result = self._from_table(
                libcudf.copying.table_slice(
                    self, [start, stop], keep_index
                )[0]
            )

            result._copy_categories(self, include_index=keep_index)
            # Adding index of type RangeIndex back to
            # result
            if keep_index is False and self.index is not None:
                result.index = self.index[start:stop]
            result.columns = self.columns
            return result
def _normalize_scalars(self, other):
    """
    Try to normalizes scalar values as per self dtype
    """
    # NOTE(review): both guards fire only for non-NaN *floats*, so integer
    # (and other) scalars skip the cast-safety check and are returned
    # unchanged — confirm this narrowing is intentional.
    if (
        other is not None
        and (isinstance(other, float) and not np.isnan(other))
    ) and (self.dtype.type(other) != other):
        # casting `other` to self's dtype would change its value
        raise TypeError(
            "Cannot safely cast non-equivalent {} to {}".format(
                type(other).__name__, self.dtype.name
            )
        )

    return (
        self.dtype.type(other)
        if (
            other is not None
            and (isinstance(other, float) and not np.isnan(other))
        )
        else other
    )
def _normalize_columns_and_scalars_type(self, other):
    """
    Try to normalize the other's dtypes as per self.

    Parameters
    ----------

    self : Can be a DataFrame or Series or Index
    other : Can be a DataFrame, Series, Index, Array
        like object or a scalar value

        if self is DataFrame, other can be only a
        scalar or array like with size of number of columns
        in DataFrame or a DataFrame with same dimension

        if self is Series, other can be only a scalar or
        a series like with same length as self

    Returns:
    --------
    A dataframe/series/list/scalar form of normalized other
    """
    # DataFrame vs DataFrame: cast each of other's columns to self's dtypes
    if isinstance(self, cudf.DataFrame) and isinstance(
        other, cudf.DataFrame
    ):
        return [
            other[self_col].astype(self._data[self_col].dtype)._column
            for self_col in self._data.names
        ]

    # Series/Index vs array-like: cast the whole column to self's dtype
    elif isinstance(self, (cudf.Series, cudf.Index)) and not is_scalar(
        other
    ):
        other = as_column(other)
        return other.astype(self.dtype)

    else:
        # Handles scalar or list/array like scalars
        if isinstance(self, (cudf.Series, cudf.Index)) and is_scalar(
            other
        ):
            return self._normalize_scalars(other)

        elif isinstance(self, cudf.DataFrame):
            out = []
            # broadcast a single scalar across all columns
            if is_scalar(other):
                other = [other for i in range(len(self._data.names))]
            out = [
                self[in_col_name]._normalize_scalars(sclr)
                for in_col_name, sclr in zip(self._data.names, other)
            ]
            return out
        else:
            raise ValueError(
                "Inappropriate input {} and other {} combination".format(
                    type(self), type(other)
                )
            )
def where(self, cond, other=None, inplace=False):
    """
    Replace values where the condition is False.

    Parameters
    ----------
    cond : bool Series/DataFrame, array-like
        Where cond is True, keep the original value.
        Where False, replace with corresponding value from other.
        Callables are not supported.
    other: scalar, list of scalars, Series/DataFrame
        Entries where cond is False are replaced with
        corresponding value from other. Callables are not
        supported. Default is None.

        DataFrame expects only Scalar or array like with scalars or
        dataframe with same dimension as self.

        Series expects only scalar or series like with same length
    inplace : bool, default False
        Whether to perform the operation in place on the data.

    Returns
    -------
    Same type as caller

    Examples:
    ---------
    >>> import cudf
    >>> df = cudf.DataFrame({"A":[1, 4, 5], "B":[3, 5, 8]})
    >>> df.where(df % 2 == 0, [-1, -1])
       A  B
    0 -1 -1
    1  4 -1
    2 -1  8

    >>> ser = cudf.Series([4, 3, 2, 1, 0])
    >>> ser.where(ser > 2, 10)
    0     4
    1     3
    2    10
    3    10
    4    10
    dtype: int64

    >>> ser.where(ser > 2)
    0       4
    1       3
    2    null
    3    null
    4    null
    dtype: int64
    """
    if isinstance(self, cudf.DataFrame):
        # normalize `cond` into a DataFrame
        if hasattr(cond, "__cuda_array_interface__"):
            cond = self.from_gpu_matrix(
                cond, columns=self._data.names, index=self.index
            )
        elif not isinstance(cond, cudf.DataFrame):
            cond = self.from_pandas(pd.DataFrame(cond))

        common_cols = set(self._data.names).intersection(
            set(cond._data.names)
        )
        if len(common_cols) > 0:
            # If `self` and `cond` are having unequal index,
            # then re-index `cond`.
            if len(self.index) != len(cond.index) or any(
                self.index != cond.index
            ):
                cond = cond.reindex(self.index)
        else:
            if cond.shape != self.shape:
                raise ValueError(
                    """Array conditional must be same shape as self"""
                )
            # Setting `self` column names to `cond`
            # as `cond` has no column names.
            cond.columns = self.columns

        other = self._normalize_columns_and_scalars_type(other)
        out_df = cudf.DataFrame(index=self.index)
        if len(self._columns) != len(other):
            raise ValueError(
                """Replacement list length or number of dataframe columns
                should be equal to Number of columns of dataframe"""
            )

        for column_name, other_column in zip(self._data.names, other):
            input_col = self._data[column_name]
            if column_name in cond._data:
                # categorical columns are compared/replaced on their codes
                if is_categorical_dtype(input_col.dtype):
                    if np.isscalar(other_column):
                        try:
                            other_column = input_col._encode(other_column)
                        except ValueError:
                            # When other is not present in categories,
                            # fill with Null.
                            other_column = None
                    elif hasattr(other_column, "codes"):
                        other_column = other_column.codes
                    input_col = input_col.codes

                result = libcudf.copying.copy_if_else(
                    input_col, other_column, cond._data[column_name]
                )

                if is_categorical_dtype(self._data[column_name].dtype):
                    # rebuild the categorical column from the result codes
                    result = build_categorical_column(
                        categories=self._data[column_name].categories,
                        codes=as_column(
                            result.base_data, dtype=result.dtype
                        ),
                        mask=result.base_mask,
                        size=result.size,
                        offset=result.offset,
                        ordered=self._data[column_name].ordered,
                    )
            else:
                # column has no condition: mask it out entirely
                from cudf._lib.null_mask import MaskState, create_null_mask

                out_mask = create_null_mask(
                    len(input_col), state=MaskState.ALL_NULL
                )
                result = input_col.set_mask(out_mask)
            out_df[column_name] = self[column_name].__class__(result)

        return self._mimic_inplace(out_df, inplace=inplace)

    else:
        if isinstance(other, cudf.DataFrame):
            raise NotImplementedError(
                "cannot align with a higher dimensional Frame"
            )

        other = self._normalize_columns_and_scalars_type(other)

        cond = as_column(cond)
        if len(cond) != len(self):
            raise ValueError(
                """Array conditional must be same shape as self"""
            )
        input_col = self._data[self.name]
        # categorical columns are compared/replaced on their codes
        if is_categorical_dtype(input_col.dtype):
            if np.isscalar(other):
                try:
                    other = input_col._encode(other)
                except ValueError:
                    # When other is not present in categories,
                    # fill with Null.
                    other = None
            elif hasattr(other, "codes"):
                other = other.codes
            input_col = input_col.codes

        result = libcudf.copying.copy_if_else(input_col, other, cond)

        if is_categorical_dtype(self.dtype):
            result = build_categorical_column(
                categories=self._data[self.name].categories,
                codes=as_column(result.base_data, dtype=result.dtype),
                mask=result.base_mask,
                size=result.size,
                offset=result.offset,
                ordered=self._data[self.name].ordered,
            )

        if isinstance(self, cudf.Index):
            from cudf.core.index import as_index

            result = as_index(result, name=self.name)
        else:
            result = self._copy_construct(data=result)

        return self._mimic_inplace(result, inplace=inplace)
def mask(self, cond, other=None, inplace=False):
    """
    Replace values where the condition is True.

    This is the inverse of :meth:`where`: entries where ``cond`` is True
    are replaced with the corresponding value from ``other``.

    Parameters
    ----------
    cond : bool Series/DataFrame, array-like
        Where cond is False, keep the original value.
        Where True, replace with corresponding value from other.
        Callables are not supported.
    other: scalar, list of scalars, Series/DataFrame
        Entries where cond is True are replaced with
        corresponding value from other. Callables are not
        supported. Default is None.

        DataFrame expects only Scalar or array like with scalars or
        dataframe with same dimension as self.

        Series expects only scalar or series like with same length
    inplace : bool, default False
        Whether to perform the operation in place on the data.

    Returns
    -------
    Same type as caller

    Examples:
    ---------
    >>> import cudf
    >>> df = cudf.DataFrame({"A":[1, 4, 5], "B":[3, 5, 8]})
    >>> df.mask(df % 2 == 0, [-1, -1])
       A  B
    0  1  3
    1 -1  5
    2  5 -1

    >>> ser = cudf.Series([4, 3, 2, 1, 0])
    >>> ser.mask(ser > 2, 10)
    0    10
    1    10
    2     2
    3     1
    4     0
    dtype: int64

    >>> ser.mask(ser > 2)
    0    null
    1    null
    2       2
    3       1
    4       0
    dtype: int64
    """
    supports_inversion = hasattr(cond, "__invert__")
    if not supports_inversion:
        # `where` needs `~cond`; coerce plain array-likes so that the
        # inversion operator is available
        cond = cupy.asarray(cond)
    inverted_cond = ~cond
    return self.where(cond=inverted_cond, other=other, inplace=inplace)
def _partition(self, scatter_map, npartitions, keep_index=True):
    """Split ``self`` into a list of Frames according to ``scatter_map``."""
    table, offsets = libcudf.partitioning.partition(
        self, scatter_map, npartitions, keep_index
    )

    # due to the split limitation mentioned
    # here: https://github.com/rapidsai/cudf/issues/4607
    # we need to remove first & last elements in offsets.
    # TODO: Remove this after the above issue is fixed.
    trimmed_offsets = offsets[1:-1]

    pieces = libcudf.copying.table_split(
        table, trimmed_offsets, keep_index=keep_index
    )

    frames = []
    for piece in pieces:
        frame = self.__class__._from_table(piece)
        frame._copy_categories(self, include_index=keep_index)
        frames.append(frame)

    # pad with empty frames so exactly `npartitions` are returned
    if npartitions:
        missing = npartitions - len(frames)
        for _ in range(missing):
            frames.append(self._empty_like(keep_index))

    return frames
@annotate("SCATTER_BY_MAP", color="green", domain="cudf_python")
def scatter_by_map(
    self, map_index, map_size=None, keep_index=True, **kwargs
):
    """Scatter to a list of dataframes.

    Uses map_index to determine the destination
    of each row of the original DataFrame.

    Parameters
    ----------
    map_index : Series, str or list-like
        Scatter assignment for each row
    map_size : int
        Length of output list. Must be >= uniques in map_index
    keep_index : bool
        Conserve original index values for each row

    Returns
    -------
    A list of cudf.DataFrame objects.
    """

    # map_index might be a column name or array,
    # make it a Column
    if isinstance(map_index, str):
        map_index = self._data[map_index]
    elif isinstance(map_index, cudf.Series):
        map_index = map_index._column
    else:
        map_index = as_column(map_index)

    # Convert float to integer.
    # Check the dtype kind rather than comparing against `np.float`:
    # that alias only matched float64 (float32 maps were missed) and is
    # removed in newer NumPy versions.
    if map_index.dtype.kind == "f":
        map_index = map_index.astype(np.int32)

    # Convert string or categorical to integer
    if isinstance(map_index, cudf.core.column.StringColumn):
        map_index = map_index.as_categorical_column(
            "category"
        ).as_numerical
        warnings.warn(
            "Using StringColumn for map_index in scatter_by_map. "
            "Use an integer array/column for better performance."
        )
    elif isinstance(map_index, cudf.core.column.CategoricalColumn):
        map_index = map_index.as_numerical
        warnings.warn(
            "Using CategoricalColumn for map_index in scatter_by_map. "
            "Use an integer array/column for better performance."
        )

    # optional sanity check, enabled with debug=1
    if kwargs.get("debug", False) == 1 and map_size is not None:
        unique_count = map_index.unique_count()
        if map_size < unique_count:
            raise ValueError(
                "ERROR: map_size must be >= %d (got %d)."
                % (unique_count, map_size)
            )

    tables = self._partition(map_index, map_size, keep_index)

    return tables
def dropna(self, axis=0, how="any", subset=None, thresh=None):
    """
    Drop rows (or columns) containing nulls from a Column.

    Parameters
    ----------
    axis : {0, 1}, optional
        Whether to drop rows (axis=0, default) or columns (axis=1)
        containing nulls.
    how : {"any", "all"}, optional
        "any" (default) drops rows (or columns) containing at least one
        null value; "all" drops only rows (or columns) whose values are
        *all* null.
    subset : list, optional
        Columns to consider when dropping rows (all columns by
        default). Alternatively, when dropping columns, subset is a
        list of rows to consider.
    thresh : int, optional
        If specified, drop every row (or column) containing fewer than
        `thresh` non-null values.

    Returns
    -------
    Copy of the DataFrame with rows/columns containing nulls dropped.
    """
    dropper = self._drop_na_rows if axis == 0 else self._drop_na_columns
    return dropper(how=how, subset=subset, thresh=thresh)
def _drop_na_rows(self, how="any", subset=None, thresh=None):
    """
    Drops null rows from `self`.

    how : {"any", "all"}, optional
        Specifies how to decide whether to drop a row.
        any (default) drops rows containing at least
        one null value. all drops only rows containing
        *all* null values.
    subset : list, optional
        List of columns to consider when dropping rows.
    thresh: int, optional
        If specified, then drops every row containing
        less than `thresh` non-null values.
    """
    if subset is None:
        subset = self._column_names
    elif (
        not np.iterable(subset)
        or isinstance(subset, str)
        or isinstance(subset, tuple)
        and subset in self._data.names
    ):
        # NOTE: `and` binds tighter than `or` here — a tuple is wrapped
        # as a single key only when it names an existing column
        subset = (subset,)
    diff = set(subset) - set(self._data)
    if len(diff) != 0:
        raise KeyError("columns {!r} do not exist".format(diff))
    subset_cols = [
        name for name, col in self._data.items() if name in subset
    ]
    if len(subset_cols) == 0:
        # nothing to check against: return an unchanged copy
        return self.copy(deep=True)
    result = self.__class__._from_table(
        libcudf.stream_compaction.drop_nulls(
            self, how=how, keys=subset, thresh=thresh
        )
    )
    result._copy_categories(self)
    return result
def _drop_na_columns(self, how="any", subset=None, thresh=None):
    """
    Drop columns containing nulls
    """
    # `subset` restricts which rows are counted
    df = self if subset is None else self.take(subset)

    if thresh is None:
        # "all" keeps a column with even a single non-null value;
        # otherwise every value must be non-null
        thresh = 1 if how == "all" else len(df)

    kept_columns = [
        name
        for name in self._data.names
        if (len(df[name]) - df[name].null_count) >= thresh
    ]
    return self[kept_columns]
def _apply_boolean_mask(self, boolean_mask):
    """
    Applies boolean mask to each row of `self`,
    rows corresponding to `False` is dropped.

    Raises
    ------
    ValueError
        If the mask contains null values.
    """
    boolean_mask = as_column(boolean_mask)
    if boolean_mask.has_nulls:
        raise ValueError(
            "cannot mask with boolean_mask containing null values"
        )
    result = self.__class__._from_table(
        libcudf.stream_compaction.apply_boolean_mask(
            # the mask is already a Column — no need to convert it again
            self, boolean_mask
        )
    )
    result._copy_categories(self)
    return result
    def _quantiles(
        self,
        q,
        interpolation="LINEAR",
        is_sorted=False,
        column_order=(),
        null_precedence=(),
    ):
        """
        Compute row quantiles via libcudf.

        Parameters
        ----------
        q : quantile value(s) in [0, 1].
        interpolation : name of a ``libcudf.types.Interpolation`` member
            (e.g. "LINEAR").
        is_sorted : bool
            Whether the data is already sorted (skips sorting in libcudf).
        column_order : sequence of ``libcudf.types.Order`` member names,
            one per column, used when ``is_sorted`` is True.
        null_precedence : sequence of ``libcudf.types.NullOrder`` member
            names, one per column.
        """
        # Translate string arguments into libcudf enum values.
        interpolation = libcudf.types.Interpolation[interpolation]
        is_sorted = libcudf.types.Sorted["YES" if is_sorted else "NO"]
        column_order = [libcudf.types.Order[key] for key in column_order]
        null_precedence = [
            libcudf.types.NullOrder[key] for key in null_precedence
        ]
        result = self.__class__._from_table(
            libcudf.quantiles.quantiles(
                self,
                q,
                interpolation,
                is_sorted,
                column_order,
                null_precedence,
            )
        )
        # Restore categorical dtype info lost through the libcudf round trip.
        result._copy_categories(self)
        return result
    def rank(
        self,
        axis=0,
        method="average",
        numeric_only=None,
        na_option="keep",
        ascending=True,
        pct=False,
    ):
        """
        Compute numerical data ranks (1 through n) along axis.

        By default, equal values are assigned a rank that is the average of
        the ranks of those values.

        Parameters
        ----------
        axis : {0 or 'index', 1 or 'columns'}, default 0
            Index to direct ranking.
        method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
            How to rank the group of records that have the same value
            (i.e. ties):
            * average: average rank of the group
            * min: lowest rank in the group
            * max: highest rank in the group
            * first: ranks assigned in order they appear in the array
            * dense: like 'min', but rank always increases by 1 between groups.
        numeric_only : bool, optional
            For DataFrame objects, rank only numeric columns if set to True.
            NOTE: not implemented yet; a warning is emitted when True.
        na_option : {'keep', 'top', 'bottom'}, default 'keep'
            How to rank NaN values:
            * keep: assign NaN rank to NaN values
            * top: assign smallest rank to NaN values if ascending
            * bottom: assign highest rank to NaN values if ascending.
        ascending : bool, default True
            Whether or not the elements should be ranked in ascending order.
        pct : bool, default False
            Whether or not to display the returned rankings in percentile
            form.

        Returns
        -------
        same type as caller
            Return a Series or DataFrame with data ranks as values.

        Raises
        ------
        KeyError
            If `method` or `na_option` is not one of the accepted values.
        """
        # Validate arguments before dispatching to libcudf.
        if method not in {"average", "min", "max", "first", "dense"}:
            raise KeyError(method)
        method_enum = libcudf.sort.RankMethod[method.upper()]
        if na_option not in {"keep", "top", "bottom"}:
            raise KeyError(na_option)

        # TODO code for selecting numeric columns
        source = self
        if numeric_only:
            warnings.warn("numeric_only=True is not implemented yet")

        out_rank_table = libcudf.sort.rank_columns(
            source, method_enum, na_option, ascending, pct
        )

        # Ranks are returned as float64, matching pandas semantics.
        return self._from_table(out_rank_table).astype(np.float64)
def repeat(self, repeats, axis=None):
"""Repeats elements consecutively
Parameters
----------
repeats : int, array, numpy array, or Column
the number of times to repeat each element
Example
-------
>>> import cudf as cudf
>>> s = cudf.Series([0, 2]) # or DataFrame
>>> s
0 0
1 2
dtype: int64
>>> s.repeat([3, 4])
0 0
0 0
0 0
1 2
1 2
1 2
1 2
dtype: int64
>>> s.repeat(2)
0 0
0 0
1 2
1 2
dtype: int64
>>>
"""
if axis is not None:
raise NotImplementedError(
"Only axis=`None` supported at this time."
)
return self._repeat(repeats)
def _repeat(self, count):
if is_scalar(count):
count = as_scalar(count)
else:
count = as_column(count)
result = self.__class__._from_table(
libcudf.filling.repeat(self, count)
)
result._copy_categories(self)
return result
    def _fill(self, fill_values, begin, end, inplace):
        """
        Fill rows [begin, end) of each column with the corresponding value
        from `fill_values` (one fill value per column, positionally paired).

        When `inplace` is False a new frame is returned; otherwise columns
        are mutated and `self` is returned.
        """
        col_and_fill = zip(self._columns, fill_values)

        if not inplace:
            data_columns = (c._fill(v, begin, end) for (c, v) in col_and_fill)
            data = zip(self._column_names, data_columns)
            return self.__class__._from_table(Frame(data, self._index))

        # NOTE(review): the out-of-place path calls `Column._fill` while the
        # in-place path calls `Column.fill` — presumably both exist on the
        # column type with matching semantics; confirm against the Column API.
        for (c, v) in col_and_fill:
            c.fill(v, begin, end, inplace=True)

        return self
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
"""Shift values by `periods` positions.
"""
assert axis in (None, 0) and freq is None
return self._shift(periods)
def _shift(self, offset, fill_value=None):
data_columns = (col.shift(offset, fill_value) for col in self._columns)
data = zip(self._column_names, data_columns)
return self.__class__._from_table(Frame(data, self._index))
def __array__(self, dtype=None):
raise TypeError(
"Implicit conversion to a host NumPy array via __array__ is not allowed, \
To explicitly construct a GPU array, consider using \
cupy.asarray(...)\nTo explicitly construct a \
host array, consider using .to_array()"
)
    def drop_duplicates(
        self,
        subset=None,
        keep="first",
        nulls_are_equal=True,
        ignore_index=False,
    ):
        """
        Drop rows that are duplicates with respect to the `subset` columns.

        Parameters
        ----------
        subset : list, optional
            List of columns to consider when dropping rows. Defaults to all
            columns. A single string or tuple column label is also accepted.
        keep : {"first", "last", False}
            "first" keeps the first of each duplicate group, "last" keeps
            the last, and False drops all duplicates.
        nulls_are_equal : bool
            Whether null elements compare equal to other null elements.
        ignore_index : bool, default False
            If True, the resulting axis will be labeled 0, 1, ..., n - 1.
        """
        if subset is None:
            subset = self._column_names
        elif (
            not np.iterable(subset)
            or isinstance(subset, str)
            or isinstance(subset, tuple)
            and subset in self._data.names
        ):
            # Same label-normalization pattern as _drop_na_rows: a tuple is
            # wrapped only when it is itself a column name.
            subset = (subset,)
        diff = set(subset) - set(self._data)
        if len(diff) != 0:
            raise KeyError("columns {!r} do not exist".format(diff))
        subset_cols = [name for name in self._column_names if name in subset]
        if len(subset_cols) == 0:
            # Nothing to deduplicate on: return an unfiltered copy.
            return self.copy(deep=True)
        result = self._from_table(
            libcudf.stream_compaction.drop_duplicates(
                self,
                keys=subset,
                keep=keep,
                nulls_are_equal=nulls_are_equal,
                ignore_index=ignore_index,
            )
        )
        # Restore categorical dtype info lost through the libcudf round trip.
        result._copy_categories(self)
        return result
    def replace(self, to_replace, replacement):
        """
        Replace occurrences of `to_replace` with `replacement`, column by
        column, returning a new frame.

        Each column independently normalizes the arguments via
        ``_get_replacement_values``; columns for which the arguments do not
        apply (KeyError) are passed through unchanged.
        """
        copy_data = self._data.copy()

        for name, col in copy_data.items():
            # Skip the no-op case where both arguments are None.
            if not (to_replace is None and replacement is None):
                try:
                    (
                        col_all_nan,
                        col_replacement,
                        col_to_replace,
                    ) = _get_replacement_values(
                        to_replace=to_replace,
                        replacement=replacement,
                        col_name=name,
                        column=col,
                    )

                    copy_data[name] = col.find_and_replace(
                        col_to_replace, col_replacement, col_all_nan
                    )
                except KeyError:
                    # Do not change the copy_data[name]
                    pass

        result = self._from_table(Frame(copy_data, self.index))
        return result
    def _copy_categories(self, other, include_index=True):
        """
        Utility that copies category information from `other` to `self`.

        After a libcudf operation, categorical columns come back as plain
        code columns; this rebuilds them as categorical columns using the
        dtype metadata from the corresponding column of `other`. Columns are
        matched positionally, so `self` and `other` must have the same
        column layout.
        """
        for name, col, other_col in zip(
            self._data.keys(), self._data.values(), other._data.values()
        ):
            if isinstance(
                other_col, cudf.core.column.CategoricalColumn
            ) and not isinstance(col, cudf.core.column.CategoricalColumn):
                # `col` holds raw codes; wrap it back into a categorical
                # column with the categories/ordering from `other`.
                self._data[name] = build_categorical_column(
                    categories=other_col.categories,
                    codes=as_column(col.base_data, dtype=col.dtype),
                    mask=col.base_mask,
                    ordered=other_col.ordered,
                    size=col.size,
                    offset=col.offset,
                )
        if include_index:
            # include_index will still behave as False
            # incase of self._index being a RangeIndex
            if (
                self._index is not None
                and not isinstance(self._index, cudf.core.index.RangeIndex)
                and isinstance(
                    other._index,
                    (cudf.core.index.CategoricalIndex, cudf.MultiIndex),
                )
            ):
                self._index._copy_categories(other._index, include_index=False)
                # When other._index is a CategoricalIndex, there is
                # possibility that corresposing self._index be GenericIndex
                # with codes. So to update even the class signature, we
                # have to call as_index.
                if isinstance(
                    other._index, cudf.core.index.CategoricalIndex
                ) and not isinstance(
                    self._index, cudf.core.index.CategoricalIndex
                ):
                    self._index = cudf.core.index.as_index(self._index)
        return self
def _unaryop(self, op):
data_columns = (col.unary_operator(op) for col in self._columns)
data = zip(self._column_names, data_columns)
return self.__class__._from_table(Frame(data, self._index))
def isnull(self):
"""Identify missing values.
"""
data_columns = (col.isnull() for col in self._columns)
data = zip(self._column_names, data_columns)
return self.__class__._from_table(Frame(data, self._index))
def isna(self):
"""Identify missing values. Alias for `isnull`
"""
return self.isnull()
def notnull(self):
"""Identify non-missing values.
"""
data_columns = (col.notnull() for col in self._columns)
data = zip(self._column_names, data_columns)
return self.__class__._from_table(Frame(data, self._index))
def notna(self):
"""Identify non-missing values. Alias for `notnull`.
"""
return self.notnull()
    def interleave_columns(self):
        """
        Interleave Series columns of a table into a single column.

        Converts the column-major table into a row-major column: the output
        alternates one value from each column per row.

        Example
        -------
        >>> df = DataFrame([['A1', 'A2', 'A3'], ['B1', 'B2', 'B3']])
        >>> df
        0    [A1, A2, A3]
        1    [B1, B2, B3]
        >>> df.interleave_columns()
        0    A1
        1    B1
        2    A2
        3    B2
        4    A3
        5    B3

        Returns
        -------
        The interleaved columns as a single column

        Raises
        ------
        ValueError
            If any column has a 'category' dtype (unsupported by libcudf
            interleave).
        """
        if ("category" == self.dtypes).any():
            raise ValueError(
                "interleave_columns does not support 'category' dtype."
            )

        result = self._constructor_sliced(
            libcudf.reshape.interleave_columns(self)
        )

        return result
    def tile(self, count):
        """
        Repeat the rows of `self` `count` times to form a new frame.

        Parameters
        ----------
        count : int
            Number of times to tile the rows. Must be non-negative.

        Example
        -------
        >>> df = DataFrame([[8, 4, 7], [5, 2, 3]])
        >>> df.tile(2)
           0  1  2
        0  8  4  7
        1  5  2  3
        0  8  4  7
        1  5  2  3

        Returns
        -------
        The table containing the tiled "rows".
        """
        result = self.__class__._from_table(libcudf.reshape.tile(self, count))
        # Restore categorical dtype info lost through the libcudf round trip.
        result._copy_categories(self)
        return result
    def searchsorted(
        self, values, side="left", ascending=True, na_position="last"
    ):
        """Find indices where elements should be inserted to maintain order.

        Parameters
        ----------
        values : Frame or scalar/array-like
            Values to be hypothetically inserted into self. Shape must be
            consistent with self; scalars and array-likes are converted to
            a single-column frame.
        side : str {'left', 'right'} optional, default 'left'
            If 'left', the index of the first suitable location found is
            given. If 'right', return the last such index.
        ascending : bool optional, default True
            Sorted Frame is in ascending order (otherwise descending).
        na_position : str {'last', 'first'} optional, default 'last'
            Position of null values in sorted order.

        Returns
        -------
        1-D cupy array of insertion points, or a scalar when `values` is
        a scalar.
        """
        # Call libcudf++ search_sorted primitive
        from cudf.utils.dtypes import is_scalar

        scalar_flag = None
        if is_scalar(values):
            scalar_flag = True

        if not isinstance(values, Frame):
            # Normalize scalars/array-likes to a single-column frame, casting
            # self to the values' dtype for a consistent comparison.
            values = as_column(values)
            if values.dtype != self.dtype:
                self = self.astype(values.dtype)
            values = values.as_frame()
        outcol = libcudf.search.search_sorted(
            self, values, side, ascending=ascending, na_position=na_position
        )

        # Return result as cupy array if the values is non-scalar
        # If values is scalar, result is expected to be scalar.
        result = cupy.asarray(outcol.data_array_view)
        if scalar_flag:
            return result[0].item()
        else:
            return result
    def _get_sorted_inds(self, ascending=True, na_position="last"):
        """
        Compute the row indices that would sort `self` by its values.

        Parameters
        ----------
        ascending : bool or list of bool, default True
            If True, sort values in ascending order, otherwise descending.
            A list gives per-column order.
        na_position : {'first' or 'last'}, default 'last'
            Argument 'first' puts NaNs at the beginning, 'last' puts NaNs
            at the end.

        Returns
        -------
        out_column_inds : cuDF Column of indices sorted based on input

        Difference from pandas:
          * Support axis='index' only.
          * Not supporting: inplace, kind
          * Ascending can be a list of bools to control per column

        Notes
        -----
        `na_position` is re-encoded to the integer flag libcudf expects
        (0 = nulls sort last for ascending order, 1 = nulls sort first);
        the encoding is inverted for descending sorts.
        """
        # This needs to be updated to handle list of bools for ascending
        if ascending is True:
            if na_position == "last":
                na_position = 0
            elif na_position == "first":
                na_position = 1
        elif ascending is False:
            if na_position == "last":
                na_position = 1
            elif na_position == "first":
                na_position = 0
        else:
            # `ascending` is a sequence: per-column null ordering is not
            # supported, so fall back to the default flag with a warning.
            warnings.warn(
                "When using a sequence of booleans for `ascending`, "
                "`na_position` flag is not yet supported and defaults to "
                "treating nulls as greater than all numbers"
            )
            na_position = 0

        # If given a scalar need to construct a sequence of length # of columns
        if np.isscalar(ascending):
            ascending = [ascending] * self._num_columns
        return libcudf.sort.order_by(self, ascending, na_position)
    def sin(self):
        """Elementwise sine."""
        return self._unaryop("sin")

    def cos(self):
        """Elementwise cosine."""
        return self._unaryop("cos")

    def tan(self):
        """Elementwise tangent."""
        return self._unaryop("tan")

    def asin(self):
        """Elementwise inverse sine."""
        return self._unaryop("asin")

    def acos(self):
        """Elementwise inverse cosine."""
        return self._unaryop("acos")

    def atan(self):
        """Elementwise inverse tangent."""
        return self._unaryop("atan")

    def exp(self):
        """Elementwise exponential."""
        return self._unaryop("exp")

    def log(self):
        """Elementwise natural logarithm."""
        return self._unaryop("log")

    def sqrt(self):
        """Elementwise square root."""
        return self._unaryop("sqrt")
@staticmethod
def _validate_merge_cfg(
lhs,
rhs,
left_on,
right_on,
on,
how,
left_index=False,
right_index=False,
lsuffix=None,
rsuffix=None,
):
"""
Error for various combinations of merge input parameters
"""
len_left_on = len(left_on) if left_on is not None else 0
len_right_on = len(right_on) if right_on is not None else 0
# must actually support the requested merge type
if how not in ["left", "inner", "outer", "leftanti", "leftsemi"]:
raise NotImplementedError(
"{!r} merge not supported yet".format(how)
)
# Passing 'on' with 'left_on' or 'right_on' is potentially ambiguous
if on:
if left_on or right_on:
raise ValueError(
'Can only pass argument "on" OR "left_on" '
'and "right_on", not a combination of both.'
)
# Require same total number of columns to join on in both operands
if not (len_left_on + left_index * len(lhs.index.names)) == (
len_right_on + right_index * len(rhs.index.names)
):
raise ValueError(
"Merge operands must have same number of join key columns"
)
# If nothing specified, must have common cols to use implicitly
same_named_columns = set(lhs._data.keys()) & set(rhs._data.keys())
if not (left_index or right_index):
if not (left_on or right_on):
if len(same_named_columns) == 0:
raise ValueError("No common columns to perform merge on")
for name in same_named_columns:
if not (
name in left_on
and name in right_on
and (left_on.index(name) == right_on.index(name))
):
if not (lsuffix or rsuffix):
raise ValueError(
"there are overlapping columns but "
"lsuffix and rsuffix are not defined"
)
if on:
on_keys = [on] if not isinstance(on, list) else on
for key in on_keys:
if not (key in lhs._data.keys() and key in rhs._data.keys()):
raise KeyError("Key {} not in both operands".format(on))
else:
for key in left_on:
if key not in lhs._data.keys():
raise KeyError('Key "{}" not in left operand'.format(key))
for key in right_on:
if key not in rhs._data.keys():
raise KeyError('Key "{}" not in right operand'.format(key))
    def _merge(
        self,
        right,
        on,
        left_on,
        right_on,
        left_index,
        right_index,
        lsuffix,
        rsuffix,
        how,
        method,
        sort=False,
    ):
        """
        Core join implementation shared by the public merge/join APIs.

        Normalizes key arguments, applies suffixes to overlapping column
        names, typecasts key columns to a common dtype, performs the
        libcudf join, then rebuilds the result columns (restoring
        categorical dtypes) and optionally sorts on the key columns.

        NOTE(review): overlapping columns are renamed on `lhs`/`rhs`,
        which alias `self`/`right`, via `rename(..., inplace=True)` —
        this mutates the operands; confirm callers pass copies.
        """
        lhs = self
        rhs = right

        if left_on is None:
            left_on = []
        if right_on is None:
            right_on = []

        # Making sure that the "on" arguments are list of column names
        if on:
            on = [on] if isinstance(on, str) else list(on)
        if left_on:
            left_on = [left_on] if isinstance(left_on, str) else list(left_on)
        if right_on:
            right_on = (
                [right_on] if isinstance(right_on, str) else list(right_on)
            )

        self._validate_merge_cfg(
            self,
            right,
            left_on,
            right_on,
            on,
            how,
            left_index=left_index,
            right_index=right_index,
            lsuffix=lsuffix,
            rsuffix=rsuffix,
        )

        if on:
            left_on = right_on = on

        same_named_columns = set(lhs._data.keys()) & set(rhs._data.keys())
        if not (left_on or right_on) and not (left_index and right_index):
            # Implicit join on all commonly-named columns.
            left_on = right_on = list(same_named_columns)

        # Key columns that appear at the same position on both sides keep
        # their original names; all other overlaps get suffixed.
        no_suffix_cols = []
        for name in same_named_columns:
            if left_on is not None and right_on is not None:
                if name in left_on and name in right_on:
                    if left_on.index(name) == right_on.index(name):
                        no_suffix_cols.append(name)

        for name in same_named_columns:
            if name not in no_suffix_cols:
                lhs.rename({name: "%s%s" % (name, lsuffix)}, inplace=True)
                rhs.rename({name: "%s%s" % (name, rsuffix)}, inplace=True)
                if name in left_on:
                    left_on[left_on.index(name)] = "%s%s" % (name, lsuffix)
                if name in right_on:
                    right_on[right_on.index(name)] = "%s%s" % (name, rsuffix)

        # Record categorical dtypes so they can be restored after the join.
        categorical_dtypes = {}
        for name, col in itertools.chain(lhs._data.items(), rhs._data.items()):
            if is_categorical_dtype(col):
                categorical_dtypes[name] = col.dtype

        # Save the order of the original column names for preservation later
        org_names = list(itertools.chain(lhs._data.keys(), rhs._data.keys()))

        # If neither left_index or right_index specified, that data won't
        # be carried through the join. We'll get a new RangeIndex afterwards
        lhs_full_view = False
        rhs_full_view = False
        if left_index:
            lhs_full_view = True
        if right_index:
            rhs_full_view = True

        # potentially do an implicit typecast
        (lhs, rhs, to_categorical) = self._typecast_before_merge(
            lhs, rhs, left_on, right_on, left_index, right_index, how
        )

        gdf_result = libcudf.join.join(
            lhs,
            rhs,
            left_on,
            right_on,
            how,
            method,
            left_index=lhs_full_view,
            right_index=rhs_full_view,
        )

        # Reorder joined columns back into the original order and pull out
        # the materialized "<name>_codes" columns for categorical keys.
        gdf_data = list(gdf_result._data.items())

        result = []
        cat_codes = []
        for org_name in org_names:
            for i in range(len(gdf_data)):
                if gdf_data[i][0] == org_name:
                    result.append(gdf_data.pop(i))
                    break
        for cat_name in to_categorical:
            for i in range(len(gdf_data)):
                if gdf_data[i][0] == cat_name + "_codes":
                    cat_codes.append(gdf_data.pop(i))
        # Every joined column must have been claimed above.
        assert len(gdf_data) == 0

        cat_codes = dict(cat_codes)

        # Build a new data frame based on the merged columns from GDF
        to_frame_data = cudf.core.column_accessor.ColumnAccessor()
        for name, col in result:
            if is_string_dtype(col):
                to_frame_data[name] = col
            elif is_categorical_dtype(categorical_dtypes.get(name, col.dtype)):
                dtype = categorical_dtypes.get(name, col.dtype)
                to_frame_data[name] = column.build_categorical_column(
                    categories=dtype.categories,
                    codes=cat_codes.get(str(name) + "_codes", col),
                    mask=col.base_mask,
                    size=col.size,
                    offset=col.offset,
                    ordered=dtype.ordered,
                )
            else:
                to_frame_data[name] = column.build_column(
                    col.base_data,
                    dtype=categorical_dtypes.get(name, col.dtype),
                    mask=col.base_mask,
                    offset=col.offset,
                    size=col.size,
                )
        gdf_result._data = to_frame_data

        to_return = self.__class__._from_table(gdf_result)

        # If sort=True, Pandas would sort on the key columns in the
        # same order as given in 'on'. If the indices are used as
        # keys, the index will be sorted. If one index is specified,
        # the key column on the other side will be used to sort.
        # If no index is specified, return a new RangeIndex
        if sort:
            to_sort = self.__class__()
            if left_index and right_index:
                by = list(to_return._index._data.columns)
                if left_on and right_on:
                    by += list(to_return[left_on]._data.columns)
            elif left_index:
                by = list(to_return[right_on]._data.columns)
            elif right_index:
                by = list(to_return[left_on]._data.columns)
            else:
                # left_on == right_on, or different names but same columns
                # in both cases we can sort by either
                by = list(to_return[left_on]._data.columns)
            for i, col in enumerate(by):
                to_sort[i] = col
            inds = to_sort.argsort()
            to_return = to_return.take(
                inds, keep_index=(left_index or right_index)
            )
            return to_return
        else:
            return to_return
def _typecast_before_merge(
self, lhs, rhs, left_on, right_on, left_index, right_index, how
):
def casting_rules(lhs, rhs, dtype_l, dtype_r, how):
cast_warn = "can't safely cast column {} from {} with type \
{} to {}, upcasting to {}"
ctgry_err = "can't implicitly cast column {0} to categories \
from {1} during {1} join"
rtn = None
if pd.api.types.is_dtype_equal(dtype_l, dtype_r):
rtn = dtype_l
elif is_categorical_dtype(dtype_l) and is_categorical_dtype(
dtype_r
):
raise TypeError("Left and right categories must be the same.")
elif how == "left":
check_col = rhs._data[rcol].fillna(0)
if not check_col.can_cast_safely(dtype_l):
rtn = casting_rules(lhs, rhs, dtype_l, dtype_r, "inner")
warnings.warn(
cast_warn.format(rcol, "right", dtype_r, dtype_l, rtn)
)
else:
rtn = dtype_l
elif how == "right":
check_col = lhs._data[lcol].fillna(0)
if not check_col.can_cast_safely(dtype_r):
rtn = casting_rules(lhs, rhs, dtype_l, dtype_r, "inner")
warnings.warn(
cast_warn.format(lcol, "left", dtype_l, dtype_r, rtn)
)
else:
rtn = dtype_r
elif is_categorical_dtype(dtype_l):
if how == "right":
raise ValueError(ctgry_err.format(rcol, "right"))
rtn = lhs[lcol].cat.categories.dtype
to_categorical.append(lcol)
lhs[lcol + "_codes"] = lhs[lcol].cat.codes
elif is_categorical_dtype(dtype_r):
if how == "left":
raise ValueError(ctgry_err.format(lcol, "left"))
rtn = rhs[rcol].cat.categories.dtype
to_categorical.append(rcol)
rhs[rcol + "_codes"] = rhs[rcol].cat.codes
elif how in ["inner", "outer"]:
if (np.issubdtype(dtype_l, np.number)) and (
np.issubdtype(dtype_r, np.number)
):
if dtype_l.kind == dtype_r.kind:
# both ints or both floats
rtn = max(dtype_l, dtype_r)
else:
rtn = np.find_common_type([], [dtype_l, dtype_r])
elif is_datetime_dtype(dtype_l) and is_datetime_dtype(dtype_r):
rtn = max(dtype_l, dtype_r)
return rtn
if left_index or right_index:
if isinstance(
lhs.index, cudf.core.multiindex.MultiIndex
) or isinstance(rhs.index, cudf.core.multiindex.MultiIndex):
if left_index and right_index:
compare_cols_l = lhs._index._data.columns
compare_cols_r = rhs._index._data.columns
elif left_index:
compare_cols_l = lhs._index._data.columns
compare_cols_r = rhs[right_on]._data.columns
elif right_index:
compare_cols_l = lhs[left_on]._data.columns
compare_cols_r = rhs._index._data.columns
for left, right in compare_cols_l, compare_cols_r:
if not pd.api.types.is_dtype_equal(
left.dtype, right.dtype
):
raise NotImplementedError(
"Typecasting not yet supported for MultiIndicies"
)
return lhs, rhs, []
if left_index and right_index:
to_dtype = casting_rules(
lhs.index, rhs.index, lhs.index.dtype, rhs.index.dtype, how
)
elif left_index:
to_dtype = lhs.index.dtype
elif right_index:
to_dtype = rhs.index.dtype
lhs.index = lhs.index.astype(to_dtype)
rhs.index = rhs.index.astype(to_dtype)
return lhs, rhs, []
left_on = sorted(left_on)
right_on = sorted(right_on)
to_categorical = []
for lcol, rcol in zip(left_on, right_on):
if (lcol not in lhs._data) or (rcol not in rhs._data):
# probably wrong columns specified, let libcudf error
continue
dtype_l = lhs._data[lcol].dtype
dtype_r = rhs._data[rcol].dtype
if pd.api.types.is_dtype_equal(dtype_l, dtype_r):
continue
to_dtype = casting_rules(lhs, rhs, dtype_l, dtype_r, how)
if to_dtype is not None:
lhs[lcol] = lhs[lcol].astype(to_dtype)
rhs[rcol] = rhs[rcol].astype(to_dtype)
return lhs, rhs, to_categorical
    def _is_sorted(self, ascending=None, null_position=None):
        """
        Returns a boolean indicating whether the data of the Frame are
        sorted based on the parameters given. Does not account for the
        index.

        Parameters
        ----------
        self : Frame
            Frame whose columns are to be checked for sort order
        ascending : None or list-like of booleans
            None or list-like of boolean values indicating expected sort
            order of each column. If list-like, size of list-like must be
            len(columns). If None, all columns expected sort order is set
            to ascending. False (0) - ascending, True (1) - descending.
        null_position : None or list-like of booleans
            None or list-like of boolean values indicating desired order of
            nulls compared to other elements. If list-like, size of
            list-like must be len(columns). If None, null order is set to
            before. False (0) - before, True (1) - after.

        Returns
        -------
        returns : boolean
            Returns True, if sorted as expected by ``ascending`` and
            ``null_position``, False otherwise.
        """
        # Thin wrapper: all checking is delegated to libcudf.
        return libcudf.sort.is_sorted(
            self, ascending=ascending, null_position=null_position
        )
def _get_replacement_values(to_replace, replacement, col_name, column):
    """
    Normalize `to_replace`/`replacement` into parallel lists suitable for
    ``Column.find_and_replace``.

    Returns
    -------
    (all_nan, replacement, to_replace)
        `all_nan` is True when every replacement value is None (replace
        with nulls); `replacement` and `to_replace` are equal-length lists.

    Raises
    ------
    ValueError
        If both arguments are list-like but of different lengths.
    TypeError
        If `to_replace` is a scalar but `replacement` is not.
    KeyError
        (Propagated) when dict-like arguments have no entry for
        `col_name`; callers treat this as "leave column unchanged".
    """
    from cudf.utils import utils
    from pandas.api.types import is_dict_like

    all_nan = False

    if is_dict_like(to_replace) and replacement is None:
        # Mapping form: {old: new, ...} with no separate replacement.
        replacement = list(to_replace.values())
        to_replace = list(to_replace.keys())
    elif not is_scalar(to_replace):
        if is_scalar(replacement):
            # One replacement value for a list of targets: broadcast it.
            all_nan = replacement is None
            if all_nan:
                replacement = [replacement] * len(to_replace)
            # Do not broadcast numeric dtypes
            elif pd.api.types.is_numeric_dtype(column.dtype):
                if len(to_replace) > 0:
                    replacement = [replacement]
                else:
                    # If to_replace is empty, replacement has to be empty.
                    replacement = []
            else:
                replacement = utils.scalar_broadcast_to(
                    replacement,
                    (len(to_replace),),
                    np.dtype(type(replacement)),
                )
        else:
            # If both are non-scalar
            if len(to_replace) != len(replacement):
                raise ValueError(
                    "Replacement lists must be "
                    "of same length."
                    "Expected {}, got {}.".format(
                        len(to_replace), len(replacement)
                    )
                )
    else:
        # Scalar `to_replace` requires a scalar replacement.
        if not is_scalar(replacement):
            raise TypeError(
                "Incompatible types '{}' and '{}' "
                "for *to_replace* and *replacement*.".format(
                    type(to_replace).__name__, type(replacement).__name__
                )
            )
        to_replace = [to_replace]
        replacement = [replacement]

    if is_dict_like(to_replace) and is_dict_like(replacement):
        # Per-column mappings: select this column's entries (may KeyError).
        replacement = replacement[col_name]
        to_replace = to_replace[col_name]

        if is_scalar(replacement):
            replacement = [replacement]

        if is_scalar(to_replace):
            to_replace = [to_replace]

    if isinstance(replacement, list):
        all_nan = replacement.count(None) == len(replacement)
    return all_nan, replacement, to_replace
| 36.071737 | 86 | 0.541756 |
f4625202e574999d45e00007c89f654cfefc40cb | 11,517 | py | Python | flare/ase/otf.py | aaronchen0316/flare | 47a2a89af635dfec6b41a873625ac2411da14ebb | [
"MIT"
] | null | null | null | flare/ase/otf.py | aaronchen0316/flare | 47a2a89af635dfec6b41a873625ac2411da14ebb | [
"MIT"
] | null | null | null | flare/ase/otf.py | aaronchen0316/flare | 47a2a89af635dfec6b41a873625ac2411da14ebb | [
"MIT"
] | null | null | null | """
:class:`ASE_OTF` is the on-the-fly training module for ASE, WITHOUT molecular dynamics engine.
It needs to be used in conjunction with an ASE MD engine.
"""
import os
import sys
import inspect
import pickle
from time import time
from copy import deepcopy
import logging
import numpy as np
from flare.ase.npt import NPT_mod
from ase.md.nvtberendsen import NVTBerendsen
from ase.md.nptberendsen import NPTBerendsen
from ase.md.verlet import VelocityVerlet
from ase.md.langevin import Langevin
from flare.ase.nosehoover import NoseHoover
from ase import units
from ase.io import read, write
import flare
from flare.otf import OTF
from flare.utils.learner import is_std_in_bound
from flare.ase.atoms import FLARE_Atoms
from flare.ase.calculator import FLARE_Calculator
import flare.ase.dft as dft_source
class ASE_OTF(OTF):
"""
On-the-fly training module using ASE MD engine, a subclass of OTF.
Args:
atoms (ASE Atoms): the ASE Atoms object for the on-the-fly MD run,
with calculator set as FLARE_Calculator.
timestep: the timestep in MD. Please use ASE units, e.g. if the
timestep is 1 fs, then set `timestep = 1 * units.fs`
number_of_steps (int): the total number of steps for MD.
dft_calc (ASE Calculator): any ASE calculator is supported,
e.g. Espresso, VASP etc.
md_engine (str): the name of MD thermostat, only `VelocityVerlet`,
`NVTBerendsen`, `NPTBerendsen`, `NPT` and `Langevin`, `NoseHoover`
are supported.
md_kwargs (dict): Specify the args for MD as a dictionary, the args are
as required by the ASE MD modules consistent with the `md_engine`.
trajectory (ASE Trajectory): default `None`, not recommended,
currently in experiment.
The following arguments are for on-the-fly training, the user can also
refer to :class:`flare.otf.OTF`
Args:
prev_pos_init ([type], optional): Previous positions. Defaults
to None.
rescale_steps (List[int], optional): List of frames for which the
velocities of the atoms are rescaled. Defaults to [].
rescale_temps (List[int], optional): List of rescaled temperatures.
Defaults to [].
calculate_energy (bool, optional): If True, the energy of each
frame is calculated with the GP. Defaults to False.
write_model (int, optional): If 0, write never. If 1, write at
end of run. If 2, write after each training and end of run.
If 3, write after each time atoms are added and end of run.
std_tolerance_factor (float, optional): Threshold that determines
when DFT is called. Specifies a multiple of the current noise
hyperparameter. If the epistemic uncertainty on a force
component exceeds this value, DFT is called. Defaults to 1.
skip (int, optional): Number of frames that are skipped when
dumping to the output file. Defaults to 0.
init_atoms (List[int], optional): List of atoms from the input
structure whose local environments and force components are
used to train the initial GP model. If None is specified, all
atoms are used to train the initial GP. Defaults to None.
output_name (str, optional): Name of the output file. Defaults to
'otf_run'.
max_atoms_added (int, optional): Number of atoms added each time
DFT is called. Defaults to 1.
freeze_hyps (int, optional): Specifies the number of times the
hyperparameters of the GP are optimized. After this many
updates to the GP, the hyperparameters are frozen.
Defaults to 10.
n_cpus (int, optional): Number of cpus used during training.
Defaults to 1.
"""
def __init__(
self,
atoms,
timestep,
number_of_steps,
dft_calc,
md_engine,
md_kwargs,
trajectory=None,
**otf_kwargs
):
self.atoms = FLARE_Atoms.from_ase_atoms(atoms)
self.timestep = timestep
self.md_engine = md_engine
self.md_kwargs = md_kwargs
if md_engine == "VelocityVerlet":
MD = VelocityVerlet
elif md_engine == "NVTBerendsen":
MD = NVTBerendsen
elif md_engine == "NPTBerendsen":
MD = NPTBerendsen
elif md_engine == "NPT":
MD = NPT_mod
elif md_engine == "Langevin":
MD = Langevin
elif md_engine == "NoseHoover":
MD = NoseHoover
else:
raise NotImplementedError(md_engine + " is not implemented in ASE")
self.md = MD(
atoms=self.atoms, timestep=timestep, trajectory=trajectory, **md_kwargs
)
force_source = dft_source
self.flare_calc = self.atoms.calc
# Convert ASE timestep to ps for the output file.
flare_dt = timestep / (units.fs * 1e3)
super().__init__(
dt=flare_dt,
number_of_steps=number_of_steps,
gp=self.flare_calc.gp_model,
force_source=force_source,
dft_loc=dft_calc,
dft_input=self.atoms,
**otf_kwargs
)
self.flare_name = self.output_name + "_flare.json"
self.dft_name = self.output_name + "_dft.pickle"
self.atoms_name = self.output_name + "_atoms.json"
def get_structure_from_input(self, prev_pos_init):
self.structure = self.atoms
if prev_pos_init is None:
self.atoms.prev_positions = np.copy(self.atoms.positions)
else:
assert len(self.atoms.positions) == len(
self.atoms.prev_positions
), "Previous positions and positions are not same length"
self.atoms.prev_positions = prev_pos_init
    def initialize_train(self):
        """Run base-class initialization, then make sure the FLARE
        calculator is attached and (for NPT) the integrator is ready."""
        super().initialize_train()

        # TODO: Turn this into a "reset" method.
        if not isinstance(self.atoms.calc, FLARE_Calculator):
            self.flare_calc.reset()
            self.atoms.calc = self.flare_calc

        if self.md_engine == "NPT":
            # ASE's NPT integrator requires explicit initialization and
            # refuses to continue if the atoms changed since the last step.
            if not self.md.initialized:
                self.md.initialize()
            else:
                if self.md.have_the_atoms_been_changed():
                    raise NotImplementedError(
                        "You have modified the atoms since the last timestep."
                    )
def compute_properties(self):
"""
Compute energies, forces, stresses, and their uncertainties with
the FLARE ASE calcuator, and write the results to the
OTF structure object.
"""
# Change to FLARE calculator if necessary.
if not isinstance(self.atoms.calc, FLARE_Calculator):
self.flare_calc.reset()
self.atoms.calc = self.flare_calc
if not self.flare_calc.results:
self.atoms.calc.calculate(self.atoms)
def md_step(self):
"""
Get new position in molecular dynamics based on the forces predicted by
FLARE_Calculator or DFT calculator
"""
# Update previous positions.
self.structure.prev_positions = np.copy(self.structure.positions)
# Reset FLARE calculator.
if self.dft_step:
self.flare_calc.reset()
self.atoms.calc = self.flare_calc
# Take MD step.
self.md.step()
def write_gp(self):
self.flare_calc.write_model(self.flare_name)
def write_gp(self):
self.flare_calc.write_model(self.flare_name)
def rescale_temperature(self, new_pos):
# call OTF method
super().rescale_temperature(new_pos)
# update ASE atoms
if self.curr_step in self.rescale_steps:
rescale_ind = self.rescale_steps.index(self.curr_step)
temp_fac = self.rescale_temps[rescale_ind] / self.temperature
vel_fac = np.sqrt(temp_fac)
curr_velocities = self.atoms.get_velocities()
self.atoms.set_velocities(curr_velocities * vel_fac)
    def update_temperature(self):
        """Record kinetic energy, temperature, and velocities from the ASE
        atoms for output."""
        self.KE = self.atoms.get_kinetic_energy()
        self.temperature = self.atoms.get_temperature()

        # Convert velocities to Angstrom / ps.
        self.velocities = self.atoms.get_velocities() * units.fs * 1e3
def update_gp(self, train_atoms, dft_frcs, dft_energy=None, dft_stress=None):
    """Add a DFT frame to the GP training set, retrain, and persist.

    :param train_atoms: indices of atoms added as training environments.
    :param dft_frcs: DFT forces for the current structure.
    :param dft_energy: optional DFT energy label (dropped if force_only).
    :param dft_stress: optional DFT stress in ASE Voigt order (dropped if
        force_only).
    """
    self.output.add_atom_info(train_atoms, self.structure.stds)

    # Convert ASE stress (xx, yy, zz, yz, xz, xy) to FLARE stress
    # (xx, xy, xz, yy, yz, zz).  Note the overall sign flip.
    flare_stress = None
    if dft_stress is not None:
        flare_stress = -np.array(
            [
                dft_stress[0],
                dft_stress[5],
                dft_stress[4],
                dft_stress[1],
                dft_stress[3],
                dft_stress[2],
            ]
        )

    if self.force_only:
        dft_energy = None
        flare_stress = None

    # update gp model
    self.gp.update_db(
        self.structure,
        dft_frcs,
        custom_range=train_atoms,
        energy=dft_energy,
        stress=flare_stress,
    )
    self.gp.set_L_alpha()

    # train model
    # Hyperparameter optimization stops after `freeze_hyps` DFT calls.
    if (self.dft_count - 1) < self.freeze_hyps:
        self.train_gp()

    # update mgp model
    if self.flare_calc.use_mapping:
        self.flare_calc.mgp_model.build_map(self.flare_calc.gp_model)

    # write model
    if (self.dft_count - 1) < self.freeze_hyps:
        if self.write_model == 2:
            self.write_gp()
        if self.write_model == 3:
            self.write_gp()
def as_dict(self):
    """Serialize this OTF run to a plain dict.

    Heavyweight members (atoms, FLARE calculator, DFT calculator) are
    written to separate files and replaced by their file paths.
    """
    # DFT module and Trajectory will cause issue in deepcopy
    self.dft_module = self.dft_module.__name__
    md = self.md
    self.md = None
    dct = deepcopy(dict(vars(self)))
    # NOTE(review): eval() restores the module object from its name. The
    # name comes from this object itself, but eval on anything attacker-
    # influenced would be unsafe -- worth auditing.
    self.dft_module = eval(self.dft_module)
    self.md = md

    # write atoms and flare calculator to separate files
    write(self.atoms_name, self.atoms)
    dct["atoms"] = self.atoms_name
    self.flare_calc.write_model(self.flare_name)
    dct["flare_calc"] = self.flare_name

    # dump dft calculator as pickle
    with open(self.dft_name, "wb") as f:
        pickle.dump(self.dft_loc, f)  # dft_loc is the dft calculator
    dct["dft_loc"] = self.dft_name

    dct["gp"] = self.gp_name
    # These members are rebuilt on load and are not serialized.
    for key in ["output", "pred_func", "structure", "dft_input", "md"]:
        dct.pop(key)
    return dct
@staticmethod
def from_dict(dct):
    """Rebuild an ASE_OTF instance from a dict produced by ``as_dict``."""
    flare_calc = FLARE_Calculator.from_file(dct["flare_calc"])
    # https://github.com/mir-group/flare/commit/88efe67e28f2b6a9cb5c6662675aad0209af134d
    flare_calc.reset()

    dct["atoms"] = read(dct["atoms"])
    dct["atoms"].calc = flare_calc
    dct.pop("gp")

    # NOTE(review): pickle.load of an on-disk calculator file; only restore
    # checkpoints from trusted sources.
    with open(dct["dft_loc"], "rb") as f:
        dct["dft_calc"] = pickle.load(f)
    # These keys are not constructor arguments.
    for key in ["dt", "dft_loc"]:
        dct.pop(key)

    new_otf = ASE_OTF(**dct)
    # Restore run counters that the constructor does not accept.
    new_otf.dft_count = dct["dft_count"]
    new_otf.curr_step = dct["curr_step"]
    new_otf.std_tolerance = dct["std_tolerance"]

    if new_otf.md_engine == "NPT":
        if not new_otf.md.initialized:
            new_otf.md.initialize()

    return new_otf
| 34.379104 | 95 | 0.611704 |
1419da2489479c359a497eed348b78f2dc48f53e | 2,392 | py | Python | ssseg/cfgs/gcnet/cfgs_cityscapes_resnet101os16.py | skydengyao/sssegmentation | 606b05983fa967bb3c98d1120f44dfc516532dad | [
"MIT"
] | 1 | 2021-05-28T06:42:37.000Z | 2021-05-28T06:42:37.000Z | ssseg/cfgs/gcnet/cfgs_cityscapes_resnet101os16.py | skydengyao/sssegmentation | 606b05983fa967bb3c98d1120f44dfc516532dad | [
"MIT"
] | null | null | null | ssseg/cfgs/gcnet/cfgs_cityscapes_resnet101os16.py | skydengyao/sssegmentation | 606b05983fa967bb3c98d1120f44dfc516532dad | [
"MIT"
] | null | null | null | '''define the config file for cityscapes and resnet101os16'''
from .base_cfg import *
# modify dataset config -- overrides on top of the base_cfg wildcard import
DATASET_CFG = DATASET_CFG.copy()
DATASET_CFG['train'].update(
    {
        'type': 'cityscapes',
        'rootdir': 'data/CityScapes',
        'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': (0.5, 2.0)}),
                     ('RandomCrop', {'crop_size': (512, 1024), 'one_category_max_ratio': 0.75}),
                     ('RandomFlip', {'flip_prob': 0.5}),
                     ('PhotoMetricDistortion', {}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),
                     ('Padding', {'output_size': (512, 1024), 'data_type': 'tensor'}),]
    }
)
DATASET_CFG['test'].update(
    {
        'type': 'cityscapes',
        'rootdir': 'data/CityScapes',
        'aug_opts': [('Resize', {'output_size': (2048, 1024), 'keep_ratio': True, 'scale_range': None}),
                     ('Normalize', {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375]}),
                     ('ToTensor', {}),],
    }
)
# modify dataloader config
DATALOADER_CFG = DATALOADER_CFG.copy()
DATALOADER_CFG['train'].update(
    {
        'batch_size': 8,
    }
)
# modify optimizer config
OPTIMIZER_CFG = OPTIMIZER_CFG.copy()
OPTIMIZER_CFG.update(
    {
        'max_epochs': 220
    }
)
# modify losses config (no overrides; keep base defaults)
LOSSES_CFG = LOSSES_CFG.copy()
# modify model config -- ResNet-101 backbone with output stride 16
MODEL_CFG = MODEL_CFG.copy()
MODEL_CFG.update(
    {
        'num_classes': 19,
        'backbone': {
            'type': 'resnet101',
            'series': 'resnet',
            'pretrained': True,
            'outstride': 16,
            'use_stem': True,
            'selected_indices': (2, 3),
        },
    }
)
# modify inference config (no overrides)
INFERENCE_CFG = INFERENCE_CFG.copy()
# modify common config -- output/log locations for this experiment
COMMON_CFG = COMMON_CFG.copy()
COMMON_CFG['train'].update(
    {
        'backupdir': 'gcnet_resnet101os16_cityscapes_train',
        'logfilepath': 'gcnet_resnet101os16_cityscapes_train/train.log',
    }
)
COMMON_CFG['test'].update(
    {
        'backupdir': 'gcnet_resnet101os16_cityscapes_test',
        'logfilepath': 'gcnet_resnet101os16_cityscapes_test/test.log',
        'resultsavepath': 'gcnet_resnet101os16_cityscapes_test/gcnet_resnet101os16_cityscapes_results.pkl'
    }
)
20515355e9215f1fa5a57d854f00a7e328dee7ce | 3,285 | py | Python | services/web/server/src/simcore_service_webserver/projects/module_setup.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 25 | 2018-04-13T12:44:12.000Z | 2022-03-12T15:01:17.000Z | services/web/server/src/simcore_service_webserver/projects/module_setup.py | colinRawlings/osparc-simcore | bf2f18d5bc1e574d5f4c238d08ad15156184c310 | [
"MIT"
] | 2,553 | 2018-01-18T17:11:55.000Z | 2022-03-31T16:26:40.000Z | services/web/server/src/simcore_service_webserver/projects/module_setup.py | odeimaiz/osparc-simcore | 71c2fc58dcfe067487dcd75cb70298a4d6237e97 | [
"MIT"
] | 20 | 2018-01-18T19:45:33.000Z | 2022-03-29T07:08:47.000Z | """ projects management subsystem
A project is a document defining a osparc study
It contains metadata about the study (e.g. name, description, owner, etc) and a workbench section that describes the study pipeline
"""
import json
import logging
from pprint import pformat
from aiohttp import web
from servicelib.aiohttp.application_keys import APP_JSONSCHEMA_SPECS_KEY
from servicelib.aiohttp.application_setup import ModuleCategory, app_module_setup
from servicelib.aiohttp.rest_routing import (
get_handlers_from_namespace,
iter_path_operations,
map_handlers_with_operations,
)
from ..constants import APP_OPENAPI_SPECS_KEY
from ..resources import resources
from . import projects_handlers, projects_nodes_handlers, projects_tags_handlers
from .config import CONFIG_SECTION_NAME, assert_valid_config
from .projects_access import setup_projects_access
from .projects_db import setup_projects_db
logger = logging.getLogger(__name__)
def _create_routes(tag, specs, *handlers_module, disable_login: bool = False):
    """Collect handlers from the given modules and map them onto the OpenAPI
    operations tagged with *tag* (operations containing "snapshot" excluded).

    :param disable_login: Disables login_required decorator for testing purposes defaults to False
    :type disable_login: bool, optional
    """
    # TODO: Remove 'disable_login' and use instead a mock.patch on the decorator!
    handlers = {}
    for mod in handlers_module:
        handlers.update(get_handlers_from_namespace(mod))

    if disable_login:
        # Unwrap the decorated handlers so tests can call them without a
        # session.
        handlers = {name: hnds.__wrapped__ for name, hnds in handlers.items()}

    routes = map_handlers_with_operations(
        handlers,
        filter(
            # o is an operation tuple; o[2] is the operation id, o[3] its tags.
            lambda o: tag in o[3] and "snapshot" not in o[2],
            iter_path_operations(specs),
        ),
        strict=True,
    )

    if disable_login:
        # Only dump the route table in the (test) disable_login mode.
        logger.debug("%s:\n%s", CONFIG_SECTION_NAME, pformat(routes))

    return routes
@app_module_setup(
    "simcore_service_webserver.projects",
    ModuleCategory.ADDON,
    depends=[f"simcore_service_webserver.{mod}" for mod in ("rest", "db")],
    logger=logger,
)
def setup_projects(app: web.Application) -> bool:
    """Register the projects subsystem: access control, DB layer, REST routes
    and the project JSON schema."""
    # ----------------------------------------------
    # TODO: temporary, just to check compatibility between
    # trafaret and pydantic schemas
    assert_valid_config(app)
    # ---------------------------------------------

    # API routes
    specs = app[APP_OPENAPI_SPECS_KEY]

    # security access : Inject permissions to rest API resources
    setup_projects_access(app)

    # database API
    setup_projects_db(app)

    app.router.add_routes(
        _create_routes(
            "project",
            specs,
            projects_handlers,
            projects_nodes_handlers,
            projects_tags_handlers,
        )
    )

    # FIXME: this uses some unimplemented handlers, do we really need to keep this in?
    # app.router.add_routes( _create_routes("node", specs, nodes_handlers) )

    # json-schemas for projects datasets
    # FIXME: schemas are hard-coded to api/V0!!!
    with resources.stream("api/v0/schemas/project-v0.0.1.json") as fh:
        project_schema = json.load(fh)

    if APP_JSONSCHEMA_SPECS_KEY in app:
        app[APP_JSONSCHEMA_SPECS_KEY][CONFIG_SECTION_NAME] = project_schema
    else:
        app[APP_JSONSCHEMA_SPECS_KEY] = {CONFIG_SECTION_NAME: project_schema}

    return True
| 31.586538 | 135 | 0.698021 |
d37a5637e29053c8550ae6915808c8dc7e9908b8 | 2,124 | py | Python | keystone/tests/unit/test_shadow_users.py | chetanzope/keystone | 13007a80d20521e75ad8803d6f1d11b2c50a1f69 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/test_shadow_users.py | chetanzope/keystone | 13007a80d20521e75ad8803d6f1d11b2c50a1f69 | [
"Apache-2.0"
] | null | null | null | keystone/tests/unit/test_shadow_users.py | chetanzope/keystone | 13007a80d20521e75ad8803d6f1d11b2c50a1f69 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
from keystone.common import provider_api
from keystone.tests import unit
from keystone.tests.unit.identity.shadow_users import test_backend
from keystone.tests.unit.identity.shadow_users import test_core
from keystone.tests.unit.ksfixtures import database
PROVIDERS = provider_api.ProviderAPIs
class ShadowUsersTests(unit.TestCase,
                       test_backend.ShadowUsersBackendTests,
                       test_core.ShadowUsersCoreTests):
    """Wire up the IdP / mapping / protocol fixtures used by the mixins."""

    def setUp(self):
        super(ShadowUsersTests, self).setUp()
        self.useFixture(database.Database())
        self.load_backends()

        idp_id = uuid.uuid4().hex
        mapping_id = uuid.uuid4().hex
        protocol_id = uuid.uuid4().hex

        self.idp = {
            'id': idp_id,
            'enabled': True,
            'description': uuid.uuid4().hex,
        }
        self.mapping = {'id': mapping_id}
        self.protocol = {
            'id': protocol_id,
            'idp_id': idp_id,
            'mapping_id': mapping_id,
        }
        self.federated_user = {
            'idp_id': idp_id,
            'protocol_id': protocol_id,
            'unique_id': uuid.uuid4().hex,
            'display_name': uuid.uuid4().hex,
        }

        # Register the federation chain: IdP -> mapping -> protocol.
        PROVIDERS.federation_api.create_idp(idp_id, self.idp)
        PROVIDERS.federation_api.create_mapping(mapping_id, self.mapping)
        PROVIDERS.federation_api.create_protocol(
            idp_id, protocol_id, self.protocol)
        # Creating the IdP auto-assigns it a domain; remember it.
        self.domain_id = (
            PROVIDERS.federation_api.get_idp(idp_id)['domain_id'])
| 36.62069 | 75 | 0.639831 |
12c8cc2da24058e7efcfc0043a1a6554162d7b54 | 859 | py | Python | contacts/urls.py | mkimartinez/Lyfey | 8a27ef7fea92146f043c68c7ef5ee3672014c3fc | [
"MIT"
] | null | null | null | contacts/urls.py | mkimartinez/Lyfey | 8a27ef7fea92146f043c68c7ef5ee3672014c3fc | [
"MIT"
] | null | null | null | contacts/urls.py | mkimartinez/Lyfey | 8a27ef7fea92146f043c68c7ef5ee3672014c3fc | [
"MIT"
] | null | null | null | from django.conf.urls import url
from django.contrib.staticfiles import views as static_views
from django.conf.urls.static import static
from django.conf import settings
import contacts.views as views
from django.views.generic import ListView,DetailView
from contacts.models import ContactForm
# urlpatterns = [
#     url(r'^$', views.indexJobs, name='indexJobs'),
# Namespace for URL reversing, e.g. reverse('contacts:sendMessage').
app_name = 'contacts'
# ]
urlpatterns = [
    url(r'^$', views.sendMessage, name='sendMessage'),
    # url(r'^create/$',views.create_job,name='jobsCreate'),
    # # url(r'^$', ListView.as_view(queryset= Job.objects.all().order_by("-date_posted")[:25],
    # # template_name = "jobs/jobsIndex.html")),
    # url(r'^(?P<pk>\d+)$',DetailView.as_view(model=Job,template_name="jobs/job_detail.html"))
]
dcc80f970f2adeaad9b1c67a7ad2bf849f1da64d | 3,811 | py | Python | benchmark/titanic/TT4.py | sumonbis/FairPreprocessing | c644dd38615f34dba39320397fb00d5509602864 | [
"MIT"
] | 3 | 2021-09-01T10:42:46.000Z | 2022-01-24T06:44:36.000Z | benchmark/titanic/TT4.py | sumonbis/FairPreprocessing | c644dd38615f34dba39320397fb00d5509602864 | [
"MIT"
] | null | null | null | benchmark/titanic/TT4.py | sumonbis/FairPreprocessing | c644dd38615f34dba39320397fb00d5509602864 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import sys
sys.path.append('../../')
from utils.packages import *
from utils.ml_fairness import *
from utils.standard_data import *
# In[2]:
# a map of more aggregated titles: raw honorific -> coarse title category
_TITLE_GROUPS = {
    "Officer": ("Capt", "Col", "Major", "Dr", "Rev"),
    "Royalty": ("Jonkheer", "Don", "Sir", "the Countess", "Dona", "Lady"),
    "Mrs": ("Mme", "Ms", "Mrs"),
    "Miss": ("Mlle", "Miss"),
    "Mr": ("Mr",),
    "Master": ("Master",),
}
Title_Dictionary = {
    title: group
    for group, titles in _TITLE_GROUPS.items()
    for title in titles
}
def cleanTicket(ticket):
    """Reduce a raw ticket string to its alphabetic prefix.

    Strips '.' and '/', splits on whitespace, and returns the first token
    that is not purely numeric; purely numeric (or empty) tickets map to
    the sentinel 'XXX'.

    Replaces a Py2-era map/filter/lambda chain: the strip() was redundant
    after split(), and the full filtered list was built when only the first
    element was ever used.
    """
    tokens = ticket.replace('.', '').replace('/', '').split()
    for token in tokens:
        if not token.isdigit():
            return token
    return 'XXX'
# In[3]:
# Load data
train = pd.read_csv('../../data/titanic/train.csv')
test = pd.read_csv('../../data/titanic/test.csv')
df = train
# In[4]:
## BASIC PREP
# Encode the protected attribute: female -> 0.0, male -> 1.0.
df['Sex'] = df['Sex'].replace({'female': 0.0, 'male': 1.0})
## Imputation
df[ 'Age' ] = df.Age.fillna( df.Age.mean() )
df[ 'Fare' ] = df.Fare.fillna( df.Fare.mean() )
## filna(-1)
## Custom(feature)
# Extract the honorific between ',' and '.' in the name, then coarsen it
# via Title_Dictionary (e.g. "Mme" -> "Mrs").
title = pd.DataFrame()
title[ 'Title' ] = df[ 'Name' ].map( lambda name: name.split( ',' )[1].split( '.' )[0].strip() )
title[ 'Title' ] = title.Title.map( Title_Dictionary )
df[ 'Title' ] = title[ 'Title' ]
df[ 'Ticket' ] = df[ 'Ticket' ].map( cleanTicket )
# Unknown cabins get the sentinel 'U'.
df[ 'Cabin' ] = df.Cabin.fillna( 'U' )
# Family-size features: the passenger plus parents/children and siblings/spouse.
df[ 'FamilySize' ] = df[ 'Parch' ] + df[ 'SibSp' ] + 1
df[ 'Family_Single' ] = df[ 'FamilySize' ].map( lambda s : 1 if s == 1 else 0 )
df[ 'Family_Small' ] = df[ 'FamilySize' ].map( lambda s : 1 if 2 <= s <= 4 else 0 )
df[ 'Family_Large' ] = df[ 'FamilySize' ].map( lambda s : 1 if 5 <= s else 0 )
# Basic
# One-hot encoder
cat_feat = ['Title', 'Ticket', 'Cabin'] # 'Ticket', 'Embarked'
df = pd.get_dummies(df, columns=cat_feat, prefix_sep='=')
drop_column = ['Embarked', 'PassengerId', 'Name']
df.drop(drop_column, axis=1, inplace = True)
# In[5]:
# NOTE(review): random seed per run makes the split non-reproducible.
seed = randrange(100)
y1_train, y1_test = train_test_split(df, test_size = 0.3, random_state = seed) #
pro_att_name = ['Sex']
priv_class = [1]
reamining_cat_feat = []
y1_data_orig_train, y1_X_train, y1_y_train = load_titanic_data(y1_train, pro_att_name, priv_class, reamining_cat_feat)
y1_data_orig_test, y1_X_test, y1_y_test = load_titanic_data(y1_test, pro_att_name, priv_class, reamining_cat_feat)
## FeatureSelection
# Recursive feature elimination with 2-fold CV around a logistic model.
from sklearn.feature_selection import RFECV
model = LogisticRegression()
rfecv = RFECV( estimator = model , step = 1 , cv = 2 , scoring = 'accuracy' )
trained_rfecv = rfecv.fit( y1_X_train , y1_y_train )
y1_X_train = trained_rfecv.transform(y1_X_train)
y1_X_test = trained_rfecv.transform(y1_X_test)
y1_data_orig_train.features = y1_X_train
y1_data_orig_test.features = y1_X_test
y1_model = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,
          intercept_scaling=1, max_iter=100, multi_class='ovr', n_jobs=1,
          penalty='l2', random_state=None, solver='liblinear', tol=0.0001,
          verbose=0, warm_start=False)
y1_mdl = y1_model.fit(y1_X_train, y1_y_train)
plot_model_performance(y1_mdl, y1_X_test, y1_y_test)
# In[ ]:
| 29.315385 | 118 | 0.567568 |
ecac6bf7d0a03e31a0a4aeef55402ff229bb9a2b | 969 | py | Python | mysite/medicare/urls.py | marufahmedtushar/Medi_Care | bc3dc9449924cfaf6fa1e0e8795af84169e45f69 | [
"Apache-2.0"
] | null | null | null | mysite/medicare/urls.py | marufahmedtushar/Medi_Care | bc3dc9449924cfaf6fa1e0e8795af84169e45f69 | [
"Apache-2.0"
] | null | null | null | mysite/medicare/urls.py | marufahmedtushar/Medi_Care | bc3dc9449924cfaf6fa1e0e8795af84169e45f69 | [
"Apache-2.0"
] | null | null | null | from django.urls import path
from . import views
# URL routes for the medicare app; the `name` values are used by reverse()
# and {% url %} lookups.
urlpatterns = [
    path("", views.index, name="index"),
    path('about/', views.about, name='about'),
    path('contact/', views.contact, name='contact'),
    # NOTE(review): 'error/' is wired to the doctor-search view -- confirm
    # this is intentional and not a leftover.
    path('error/', views.doctorsearch, name='error'),
    path('doctor/', views.doctor, name='doctor'),
    path('medicine/', views.medicine, name='medicine'),
    # Delete routes take the record's primary key as a string path param.
    path('delete/<str:pk>/', views.delete_pres, name='delete_pres'),
    path('deletedonner/<str:pk>/', views.delete_donner, name='delete_donner'),
    path('pres_show/', views.pres_show, name='pres_show'),
    path('p/', views.p, name='p'),
    path('loginuser/', views.loginuser, name='loginuser'),
    path('register/', views.register, name='register'),
    path('logoutuser/', views.logoutuser, name='logoutuser'),
    path('doctorsearch/', views.doctorsearch, name='doctorsearch'),
    path('donner/', views.donner, name='donner'),
    path('donnershow/', views.donnershow, name='donnershow'),
]
34930dff61ffaa800adf6910a2d992e13a38fdf7 | 2,818 | py | Python | e2e/e2e/cmd.py | zramsay/ibc-rs | b1b9dac6132a2fd2a86fa1c6f0179f47db3e3454 | [
"Apache-2.0"
] | null | null | null | e2e/e2e/cmd.py | zramsay/ibc-rs | b1b9dac6132a2fd2a86fa1c6f0179f47db3e3454 | [
"Apache-2.0"
] | null | null | null | e2e/e2e/cmd.py | zramsay/ibc-rs | b1b9dac6132a2fd2a86fa1c6f0179f47db3e3454 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import json
import logging as l
import subprocess
from dataclasses import dataclass, fields as datafields, is_dataclass
from pathlib import Path
from typing import Any, List, TypeVar, Generic, Type, Callable
@dataclass
class Config:
    """Runtime configuration for driving the relayer binary."""

    config_file: Path   # path passed to the relayer via the -c flag
    relayer_cmd: str    # command used to spawn the relayer binary
    log_level: str
    max_retries: int = 5  # how many times CmdResult.success() re-runs a failed command
T = TypeVar('T')
@dataclass
class CmdResult(Generic[T]):
    """Outcome of running a ``Cmd``, with retry-on-failure semantics."""

    cmd: 'Cmd'
    config: Config
    result: Any  # parsed JSON payload: {"status": ..., "result": ...}
    retries: int = 0

    def success(self) -> T:
        """Return the processed result, re-running the command on failure.

        Retries up to ``config.max_retries`` times, then raises
        ExpectedSuccess with the last status/result.
        """
        status = self.result.get('status') or 'unknown'
        result = self.result.get('result') or {}

        if status == "success":
            data = self.cmd.process(result)
            l.debug(str(data))
            return data
        elif self.retries < self.config.max_retries:
            left = self.config.max_retries - self.retries
            # Fix: logging's `warn` is a deprecated alias of `warning`.
            l.warning(f'Command failed: retrying (retries left: {left})')
            return self.cmd.retry(self.config, self.retries).success()
        else:
            raise ExpectedSuccess(self.cmd, status, result)
class Cmd(Generic[T]):
    """Base class for relayer CLI sub-commands.

    Subclasses get ``name`` stamped on via the @cmd decorator, supply their
    CLI arguments in ``args()`` and turn the JSON payload into a typed
    value in ``process()``.
    """

    name: str

    def process(self, result: Any) -> Any:
        raise NotImplementedError("Cmd::process")

    def args(self) -> List[str]:
        raise NotImplementedError("Cmd::args")

    def to_cmd(self) -> str:
        """Render the sub-command plus its arguments as a single string."""
        return f"{self.name} {' '.join(self.args())}"

    def run(self, config: Config, retries: int = 0) -> CmdResult[T]:
        """Invoke the relayer binary and parse its final stdout line as JSON."""
        full_cmd = f'{config.relayer_cmd} -c {config.config_file}'.split(' ')
        full_cmd.extend(self.name.split(' '))
        full_cmd.extend(self.args())
        l.debug(' '.join(full_cmd))

        res = subprocess.run(full_cmd, capture_output=True, text=True)
        lines = res.stdout.splitlines()
        # The relayer prints its JSON result on the last line of stdout;
        # ''.join(lines[-1:]) is '' when there was no output at all.
        last_line = ''.join(lines[-1:])
        l.debug(last_line)

        return CmdResult(cmd=self, config=config, retries=retries, result=json.loads(last_line))

    def retry(self, config: Config, retries: int) -> CmdResult[T]:
        """Re-run this command with an incremented retry counter."""
        return self.run(config, retries + 1)
C = TypeVar('C', bound=Cmd)


def cmd(name: str) -> Callable[[Type[C]], Type[C]]:
    """Class decorator stamping the CLI sub-command *name* onto a Cmd class."""
    def attach(klass: Type[C]) -> Type[C]:
        klass.name = name
        return klass
    return attach
def from_dict(klass, dikt) -> Any:
    """Recursively hydrate dataclass *klass* from a plain dict *dikt*.

    Non-dataclass targets are returned unchanged.  Keys missing from
    *dikt* are now left to the dataclass defaults (previously any missing
    key raised KeyError even when the field had a default).
    """
    if not is_dataclass(klass):
        return dikt
    kwargs = {
        f.name: from_dict(f.type, dikt[f.name])
        for f in datafields(klass)
        if f.name in dikt
    }
    return klass(**kwargs)
class ExpectedSuccess(Exception):
    """Raised when a relayer command reports a status other than 'success'."""

    cmd: Any
    status: str
    result: Any

    def __init__(self, cmd: Any, status: str, result: Any) -> None:
        message = f"Command '{cmd}' failed. Expected 'success', got '{status}'. Message: {result}"
        self.cmd, self.status, self.result = cmd, status, result
        super().__init__(message)
| 26.336449 | 96 | 0.611781 |
676ef27ca4d31b489f2b993ed2f4361227703d17 | 692 | py | Python | tests/timetest.py | Ibuprofen/zooplankton | 8c16613495c2619bbeca25fa7ce5e8673cca0fd1 | [
"MIT"
] | null | null | null | tests/timetest.py | Ibuprofen/zooplankton | 8c16613495c2619bbeca25fa7ce5e8673cca0fd1 | [
"MIT"
] | null | null | null | tests/timetest.py | Ibuprofen/zooplankton | 8c16613495c2619bbeca25fa7ce5e8673cca0fd1 | [
"MIT"
] | 1 | 2016-05-15T05:38:06.000Z | 2016-05-15T05:38:06.000Z | import time
import random
from itertools import count
# Fixed-rate loop benchmark: try to hold 24 iterations per second.
# NOTE: Python 2 print statements; this script does not run under Python 3.
fps = 24
loop_delta = 1./fps
current_time = target_time = time.time()
for i in count():
    #### loop frequency evaluation
    previous_time = current_time
    current_time = time.time()
    time_delta = current_time - previous_time
    print 'loop #%d frequency: %s' % (i, 1. / time_delta)
    #### processing
    # processing example that sleeps a random time between 0 and loop_delta/2.
    time.sleep(random.uniform(0, loop_delta / 2.))
    #### sleep management
    # Advance an absolute deadline rather than sleeping a fixed amount, so
    # timing drift does not accumulate across iterations.
    target_time += loop_delta
    sleep_time = target_time - time.time()
    if sleep_time > 0:
        time.sleep(sleep_time)
    else:
        print 'took too long'
| 25.62963 | 78 | 0.669075 |
2e27baef58d4b38f795f40e311658d377e2a5add | 1,198 | py | Python | test/test3_queue.py | anandpskerala/amanobot | 86b5c70157508248b0d8100074af466fcd547f4f | [
"MIT"
] | null | null | null | test/test3_queue.py | anandpskerala/amanobot | 86b5c70157508248b0d8100074af466fcd547f4f | [
"MIT"
] | null | null | null | test/test3_queue.py | anandpskerala/amanobot | 86b5c70157508248b0d8100074af466fcd547f4f | [
"MIT"
] | null | null | null | import time
import queue
import amanobot
from amanobot.loop import OrderedWebhook
def u(update_id):
    """Build a minimal fake update whose id and payload are both *update_id*."""
    return dict(update_id=update_id, message=update_id)
sequence = [
u(1), # initialize
u(2), # no buffering
u(4), # 1-gap
u(3), # clear 2
u(7), # 2-gap
u(5), # return, leave 1-gap
u(6), # clear 2
u(10), # 2-gap
u(9), # 1-gap
u(8), # clear 3
u(15),
u(12),
u(13),
u(11),
u(14),
u(17),
u(18),
u(21),
u(20),
u(19),
u(16),
u(22), # no buffering
u(24),
9, # skip id=23
u(23), # discard
u(26),
u(27),
9, # skip id=25
u(25), # discard
u(30),
u(29),
5,
u(32),
u(33),
2, # clear 29,30, skip 28
u(31), # clear 31,32,33
u(39),
u(36),
2,
u(37),
7, # clear 36,37,39
u(28), # discard
u(38), # discard
u(40), # return
]
def handle(msg):
    """Webhook callback: echo each delivered update to stdout."""
    print(msg)
# Drive the OrderedWebhook with the scripted sequence above.
bot = amanobot.Bot('abc')
webhook = OrderedWebhook(bot, handle)
# Hold out-of-order updates for at most 8 s while waiting for gaps to fill.
webhook.run_as_thread(maxhold=8)
for update in sequence:
    if type(update) is dict:
        # Feed one update, then pace the feed at roughly one per second.
        webhook.feed(update)
        time.sleep(1)
    else:
        # Plain ints in `sequence` are extra delays (seconds) that exercise
        # the webhook's hold/skip behaviour.
        time.sleep(update)
| 14.261905 | 59 | 0.491653 |
dd90ced3f89194af39f3401f6b8bbce3d42588cc | 651 | py | Python | src/main.py | Krzem5/Python-Automatic_Background_Remover | 9a8f900ae496e92e000fbddbda70c64648f39d29 | [
"BSD-3-Clause"
] | null | null | null | src/main.py | Krzem5/Python-Automatic_Background_Remover | 9a8f900ae496e92e000fbddbda70c64648f39d29 | [
"BSD-3-Clause"
] | null | null | null | src/main.py | Krzem5/Python-Automatic_Background_Remover | 9a8f900ae496e92e000fbddbda70c64648f39d29 | [
"BSD-3-Clause"
import cv2
import numpy as np
def process(frame, bg):
    """Suppress the static background in *frame*.

    Both arguments are camera frames of shape HxWx3 (uint8); *bg* is a
    reference shot of the empty scene -- TODO confirm channel order.
    A pixel survives when its weighted-luma difference from the background
    exceeds 80; the masked frame is then smoothed with a 3x3 box filter.

    Fixes: `np` was used here without numpy ever being imported (NameError
    at runtime; import added at the top of the file), and a local unpacking
    previously shadowed the `bg` parameter.
    """
    f0, f1, f2 = (frame[:, :, i].astype("uint8") for i in range(3))
    b0, b1, b2 = (bg[:, :, i].astype("uint8") for i in range(3))
    # Weighted luma difference against the background (weights sum to 1.0).
    frame_luma = f0 * 0.241 + f1 * 0.691 + f2 * 0.068
    bg_luma = b0 * 0.241 + b1 * 0.691 + b2 * 0.068
    mask = (abs(frame_luma - bg_luma) > 80).astype("uint8")
    mask = mask.reshape(frame.shape[:2] + (1,))
    out = frame * mask
    # 3x3 box blur softens the hard mask boundary.
    return cv2.filter2D(out, -1, np.ones((3, 3), np.float32) / 9)
cap = cv2.VideoCapture(0)
# The first captured frame is the reference background: the scene must be
# empty when the script starts.
bg = cap.read()[1]
while True:
    _, frame = cap.read()
    cv2.imshow("Cap", frame)
    p = process(frame, bg)
    cv2.imshow("Person", p)
    # ESC (key code 27) quits the loop.
    if (cv2.waitKey(1) & 0xff == 27):
        break
cap.release()
| 17.594595 | 116 | 0.614439 |
29dc5d852d2cf88b17ec8925e1a6e845717cb9d2 | 399 | py | Python | oleloweb_run.py | smehan/olelo | 993ecb4288ffe8c78fee4474333b31387ea3d403 | [
"BSD-3-Clause"
] | null | null | null | oleloweb_run.py | smehan/olelo | 993ecb4288ffe8c78fee4474333b31387ea3d403 | [
"BSD-3-Clause"
] | null | null | null | oleloweb_run.py | smehan/olelo | 993ecb4288ffe8c78fee4474333b31387ea3d403 | [
"BSD-3-Clause"
] | null | null | null | ###########################################################
# Copyright (C) 2018 Shawn Mehan <shawn dot mehan at shawnmehan dot com>
# Initialization module for olelo flask web app
###########################################################
#
# -*- coding: utf-8 -*-
# standard libs
# 3rd-party libs
# application lib
from oleloweb import app
if __name__ == '__main__':
    # Development entry point; debug=True enables Flask's reloader/debugger
    # and must not be used in production.
    app.run(debug=True)
| 23.470588 | 72 | 0.491228 |
7ed73127e4a6cced5bc66ae0179220b2f862ff1f | 4,005 | py | Python | apps/reader/tests.py | ghuntley/NewsBlur | ce52a329c2624a5b596a25416606356cfbfe4d9c | [
"MIT"
] | null | null | null | apps/reader/tests.py | ghuntley/NewsBlur | ce52a329c2624a5b596a25416606356cfbfe4d9c | [
"MIT"
] | null | null | null | apps/reader/tests.py | ghuntley/NewsBlur | ce52a329c2624a5b596a25416606356cfbfe4d9c | [
"MIT"
] | null | null | null | from utils import json_functions as json
from django.test.client import Client
from django.test import TestCase
from django.core.urlresolvers import reverse
class ReaderTest(TestCase):
    """Integration tests for the reader endpoints (feed listing/deletion)."""

    fixtures = ['subscriptions.json', 'stories.json', '../../rss_feeds/fixtures/gawker1.json']

    def setUp(self):
        self.client = Client()

    def test_api_feeds(self):
        # load-feeds should return the one subscribed feed and the folder tree.
        self.client.login(username='conesus', password='test')

        response = self.client.get(reverse('load-feeds'))
        content = json.decode(response.content)

        self.assertEquals(len(content['feeds']), 1)
        self.assertEquals(content['feeds']['1']['feed_title'], 'Gawker')
        self.assertEquals(content['folders'], [1, {'Tech': [4, 5, {'Deep Tech': [6, 7]}]}, 2, 3, 8, 9, {'Blogs': [8, 9]}])

    def test_delete_feed(self):
        # Each deletion removes the feed only from the named folder; the
        # folder tree is re-fetched and compared after every step.
        self.client.login(username='conesus', password='test')

        response = self.client.get(reverse('load-feeds'))
        feeds = json.decode(response.content)
        self.assertEquals(feeds['folders'], [1, {'Tech': [4, 5, {'Deep Tech': [6, 7]}]}, 2, 3, 8, 9, {'Blogs': [8, 9]}])

        # Delete feed
        response = self.client.post(reverse('delete-feed'), {'feed_id': 1, 'in_folder': ''})
        response = json.decode(response.content)
        self.assertEquals(response['code'], 1)
        response = self.client.get(reverse('load-feeds'))
        feeds = json.decode(response.content)
        self.assertEquals(feeds['folders'], [{'Tech': [4, 5, {'Deep Tech': [6, 7]}]}, 2, 3, 8, 9, {'Blogs': [8, 9]}])

        # Delete feed
        response = self.client.post(reverse('delete-feed'), {'feed_id': 9, 'in_folder': 'Blogs'})
        response = json.decode(response.content)
        self.assertEquals(response['code'], 1)
        response = self.client.get(reverse('load-feeds'))
        feeds = json.decode(response.content)
        self.assertEquals(feeds['folders'], [{'Tech': [4, 5, {'Deep Tech': [6, 7]}]}, 2, 3, 8, 9, {'Blogs': [8]}])

        # Delete feed
        response = self.client.post(reverse('delete-feed'), {'feed_id': 5, 'in_folder': 'Tech'})
        response = json.decode(response.content)
        self.assertEquals(response['code'], 1)
        response = self.client.get(reverse('load-feeds'))
        feeds = json.decode(response.content)
        self.assertEquals(feeds['folders'], [{'Tech': [4, {'Deep Tech': [6, 7]}]}, 2, 3, 8, 9, {'Blogs': [8]}])

        # Delete feed
        response = self.client.post(reverse('delete-feed'), {'feed_id': 4, 'in_folder': 'Tech'})
        response = json.decode(response.content)
        self.assertEquals(response['code'], 1)
        response = self.client.get(reverse('load-feeds'))
        feeds = json.decode(response.content)
        self.assertEquals(feeds['folders'], [{'Tech': [{'Deep Tech': [6, 7]}]}, 2, 3, 8, 9, {'Blogs': [8]}])

        # Delete feed
        response = self.client.post(reverse('delete-feed'), {'feed_id': 8, 'in_folder': ''})
        response = json.decode(response.content)
        self.assertEquals(response['code'], 1)
        response = self.client.get(reverse('load-feeds'))
        feeds = json.decode(response.content)
        self.assertEquals(feeds['folders'], [{'Tech': [{'Deep Tech': [6, 7]}]}, 2, 3, 9, {'Blogs': [8]}])

    def test_load_single_feed(self):
        # from django.conf import settings
        # from django.db import connection
        # settings.DEBUG = True
        # connection.queries = []

        self.client.login(username='conesus', password='test')
        url = reverse('load-single-feed', kwargs=dict(feed_id=1))
        response = self.client.get(url)
        feed = json.decode(response.content)
        self.assertEquals(len(feed['feed_tags']), 0)
        self.assertEquals(len(feed['classifiers']['tags']), 0)
        # self.assert_(connection.queries)
        # settings.DEBUG = False
68cf069f1ef6e6af42c5ef416280f43d1f6ec35c | 3,058 | py | Python | code/test/test_layer_replacer/test_layer_replacer_sparse_facto_activation.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | 1 | 2021-07-15T07:05:18.000Z | 2021-07-15T07:05:18.000Z | code/test/test_layer_replacer/test_layer_replacer_sparse_facto_activation.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | 2 | 2021-07-15T06:12:47.000Z | 2021-07-16T10:05:36.000Z | code/test/test_layer_replacer/test_layer_replacer_sparse_facto_activation.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | null | null | null | import logging
from copy import deepcopy
import tempfile
import pathlib
import unittest
from keras.layers import Dense, Conv2D
from keras.models import Sequential
from keras.optimizers import Adam
from palmnet.core.activation_palminizer import ActivationPalminizer
from palmnet.core.faustizer import Faustizer
from palmnet.core.layer_replacer_sparse_facto_activations import LayerReplacerSparseFactoActivations
from palmnet.core.layer_replacer_sparse_facto_tucker import LayerReplacerSparseFactoTucker
from palmnet.core.layer_replacer_sparse_facto_tucker_faust import LayerReplacerSparseFactoTuckerFaust
from palmnet.data import Cifar100, Mnist
class TestLayerReplacerSparseFactoActivation(unittest.TestCase):
    """Smoke test: fit_transform a small LeNet with activation-based PALM."""

    def setUp(self) -> None:
        # self.base_model = Cifar100.load_model("cifar100_vgg19_2048x2048")
        # (self.X_train, self.y_train), (self.X_test, self.y_test) = Cifar100.load_data()
        # The small MNIST LeNet keeps the test fast enough to run locally.
        self.base_model = Cifar100.load_model("mnist_lenet")
        (self.X_train, self.y_train), (self.X_test, self.y_test) = Mnist.load_data()

    def test_simple(self):
        # Small PALM configuration on data subsets, for speed.
        act_palminizer = ActivationPalminizer(
            sparsity_fac=2,
            nb_factor=2,
            nb_iter=100,
            batch_size=100,
            seed=1,
            nb_epochs=2,
            delta_threshold_palm=1e-6,
            hierarchical=False,
            queue_maxisize=2,
            train_data=self.X_train[:200],
            val_data=self.X_test[:1000]
        )
        # act_palminizer = ActivationPalminizer(sparsity_fac=2,
        #                                       nb_factor=None,
        #                                       nb_iter=2,
        #                                       delta_threshold_palm=1e-6,
        #                                       hierarchical=True,
        #                                       data=self.X_train[:100])
        with tempfile.TemporaryDirectory() as tmpdirname:
            # Checkpoint file lives in a throwaway directory.
            path_to_checkpoint = pathlib.Path(tmpdirname) / "checkpoint"
            # path_to_checkpoint.mkdir(parents=True)
            model_transformer = LayerReplacerSparseFactoActivations(only_mask=False,
                                                                    sparse_factorizer=act_palminizer,
                                                                    path_checkpoint_file=path_to_checkpoint)
            # Silence pyqalm's verbose progress logging during the test.
            pyqalm_logger = logging.getLogger("pyqalm")
            pyqalm_logger.setLevel(logging.WARNING)
            model_transformer.fit_transform(deepcopy(self.base_model))
            # del model_transformer
            #
            # model_transformer_bis = LayerReplacerSparseFactoTuckerFaust(sparse_factorizer=faustizer,
            #                                                             path_checkpoint_file=path_to_checkpoint)
            # model_transformer_bis.load_dct_name_compression()
            # new_model = model_transformer_bis.transform(deepcopy(self.base_model))
            # print(new_model)
            # new_model.compile(Adam(), loss="mse")
            # result = new_model.predict(self.X_train[:10])
if __name__ == '__main__':
unittest.main()
| 40.236842 | 105 | 0.635056 |
0781bfd325326a0b6142cfebd8e40ee8ded11063 | 3,708 | py | Python | pymatgen/phonon/tests/test_plotter.py | frssp/pymatgen | bdd977f065b66191557c7398b31a1571bc541fdb | [
"MIT"
] | 1 | 2018-10-10T02:37:10.000Z | 2018-10-10T02:37:10.000Z | pymatgen/phonon/tests/test_plotter.py | frssp/pymatgen | bdd977f065b66191557c7398b31a1571bc541fdb | [
"MIT"
] | null | null | null | pymatgen/phonon/tests/test_plotter.py | frssp/pymatgen | bdd977f065b66191557c7398b31a1571bc541fdb | [
"MIT"
] | 2 | 2020-04-30T14:19:12.000Z | 2021-07-30T08:24:48.000Z | from __future__ import division, unicode_literals
import unittest
import os
import json
import scipy
from io import open
from pymatgen.phonon.dos import CompletePhononDos
from pymatgen.phonon.plotter import PhononDosPlotter, PhononBSPlotter, ThermoPlotter
from pymatgen.phonon.bandstructure import PhononBandStructureSymmLine
# Shared pymatgen test-fixture directory, three levels above this module.
test_dir = os.path.join(
    os.path.dirname(__file__), "..", "..", "..", 'test_files')
class PhononDosPlotterTest(unittest.TestCase):
    """Tests for PhononDosPlotter, built from the NaCl complete phonon DOS."""

    def setUp(self):
        dos_path = os.path.join(test_dir, "NaCl_complete_ph_dos.json")
        with open(dos_path, "r") as f:
            self.dos = CompletePhononDos.from_dict(json.load(f))
        self.plotter = PhononDosPlotter(sigma=0.2, stack=True)
        self.plotter_nostack = PhononDosPlotter(sigma=0.2, stack=False)

    def test_add_dos_dict(self):
        # A fresh plotter holds no DOS; adding the element DOS gives one
        # entry per element (Na and Cl).
        self.assertEqual(len(self.plotter.get_dos_dict()), 0)
        self.plotter.add_dos_dict(self.dos.get_element_dos(),
                                  key_sort_func=lambda x: x.X)
        self.assertEqual(len(self.plotter.get_dos_dict()), 2)

    def test_get_dos_dict(self):
        self.plotter.add_dos_dict(self.dos.get_element_dos(),
                                  key_sort_func=lambda x: x.X)
        dos_dict = self.plotter.get_dos_dict()
        for element in ("Na", "Cl"):
            self.assertIn(element, dos_dict)

    def test_plot(self):
        # Disabling latex for testing.
        from matplotlib import rc
        rc('text', usetex=False)
        self.plotter.add_dos("Total", self.dos)
        self.plotter.get_plot(units="mev")
        self.plotter_nostack.add_dos("Total", self.dos)
        self.plotter_nostack.get_plot(units="mev")
class PhononBSPlotterTest(unittest.TestCase):
    """Tests for PhononBSPlotter, using the NaCl phonon band structure."""

    def setUp(self):
        bs_path = os.path.join(test_dir, "NaCl_phonon_bandstructure.json")
        with open(bs_path, "r") as f:
            d = json.loads(f.read())
        self.bs = PhononBandStructureSymmLine.from_dict(d)
        self.plotter = PhononBSPlotter(self.bs)

    def test_bs_plot_data(self):
        data = self.plotter.bs_plot_data()
        self.assertEqual(len(data['distances'][0]), 51,
                         "wrong number of distances in the first branch")
        self.assertEqual(len(data['distances']), 4,
                         "wrong number of branches")
        self.assertEqual(sum(len(e) for e in data['distances']),
                         204, "wrong number of distances")
        self.assertEqual(data['ticks']['label'][4], "Y",
                         "wrong tick label")
        self.assertEqual(len(data['ticks']['label']),
                         8, "wrong number of tick labels")

    def test_plot(self):
        # Disabling latex for testing.
        from matplotlib import rc
        rc('text', usetex=False)
        self.plotter.get_plot(units="mev")
class ThermoPlotterTest(unittest.TestCase):
    """Smoke tests for ThermoPlotter's thermodynamic-property plots."""

    def setUp(self):
        with open(os.path.join(test_dir, "NaCl_complete_ph_dos.json"), "r") as f:
            self.dos = CompletePhononDos.from_dict(json.load(f))
        self.plotter = ThermoPlotter(self.dos, self.dos.structure)

    def test_plot_functions(self):
        # Disabling latex for testing.
        from matplotlib import rc
        rc('text', usetex=False)
        # Exercise every plotting helper over 5-100 K in 5 K steps,
        # without displaying the figures.
        for plot in (self.plotter.plot_cv,
                     self.plotter.plot_entropy,
                     self.plotter.plot_internal_energy,
                     self.plotter.plot_helmholtz_free_energy,
                     self.plotter.plot_thermodynamic_properties):
            plot(5, 100, 5, show=False)
# Entry point: run this module's unittest suite when executed directly.
if __name__ == "__main__":
    unittest.main()
| 37.454545 | 86 | 0.635653 |
809e7001dd9ab9c76baec241a692c77e1374fccb | 2,981 | py | Python | ulfs/graphing_common.py | asappresearch/texrel | dff447a99d56f2f92284df866fa01e7762dc6eac | [
"MIT"
] | 2 | 2021-07-09T16:32:00.000Z | 2022-03-21T17:32:39.000Z | ulfs/graphing_common.py | asappresearch/texrel | dff447a99d56f2f92284df866fa01e7762dc6eac | [
"MIT"
] | null | null | null | ulfs/graphing_common.py | asappresearch/texrel | dff447a99d56f2f92284df866fa01e7762dc6eac | [
"MIT"
] | 1 | 2021-07-09T16:32:02.000Z | 2021-07-09T16:32:02.000Z | import subprocess
import json
from typing import Iterable, Tuple, List, Optional
def run(cmd_list, tail_lines=0):
return '\n'.join(subprocess.check_output(cmd_list).decode('utf-8').split('\n')[- tail_lines:]).strip()
def get_recent_logfiles(path, age_minutes):
files = subprocess.check_output(['find', path, '-cmin', '-%s' % age_minutes]).decode('utf-8').split('\n')
files = [f for f in files if f != '' and not f.endswith('logs')]
return files
def get_logfiles_by_pattern(path, pattern):
cmd_list = ['ls', path]
print(cmd_list)
files = subprocess.check_output(cmd_list).decode('utf-8').split('\n')
files = [f for f in files if f != '' and not f.endswith('logs') and pattern in f]
return files
def read_meta(filepath):
import json
head_line = head_line = head(filepath, 1)
meta = json.loads(head_line)
return meta
def head(file, lines):
return subprocess.check_output(['head', '-n', str(lines), file]).decode('utf-8')
def tail(file, lines):
return subprocess.check_output(['tail', '-n', str(lines), file]).decode('utf-8')
def get_filepath_of_longest_file(filepaths: Iterable[str]) -> Tuple[str, int]:
longest_filepath = ''
max_lines = -1
for filepath in filepaths:
print(filepath)
num_lines = get_num_lines(filepath)
if num_lines > max_lines:
max_lines = num_lines
longest_filepath = filepath
filepath = longest_filepath
return filepath, max_lines
def get_ref(filepath):
try:
with open(filepath, 'r') as f:
meta_line = f.readline()
print('meta_line [' + meta_line + ']')
if meta_line == '':
return None
# print('meta_line', meta_line)
meta = json.loads(meta_line.replace('meta: ', '').strip())
# print(meta.get('params', {}).keys())
ref = meta.get('params', {}).get('ref', '')
return ref
except Exception as e:
print('graphing_commmon.get_ref exception', e, filepath)
return ''
def get_meta_keys(filepath: str, keys: List[str]) -> List[Optional[str]]:
try:
with open(filepath, 'r') as f:
meta_line = f.readline()
if meta_line == '':
return [None] * len(keys)
meta = json.loads(meta_line.replace('meta: ', '').strip())
# params = meta.get('params', {})
values = []
for key in keys:
d = meta
key_parts = key.split('.')
for k in key_parts[:-1]:
# print('d.keys()', d.keys(), 'k', k)
d = d.get(k, {})
v = d.get(key_parts[-1], '')
values.append(v)
return values
except Exception as e:
print('graphing_common.get_meta_keys exception', e, filepath)
return [None] * len(keys)
def get_num_lines(filepath):
num_lines = int(subprocess.check_output(['wc', '-l', filepath]).decode('utf-8').split(' ')[0])
return num_lines
| 31.378947 | 109 | 0.591748 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.