hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e267bc4acfd6d4776608d17cfb3aaba69c932c1e | 660 | py | Python | backend/src/utils.py | jonas-scholz123/boomer-humour-exhumer | ca7110eed220b3805f29a1cef45c3ce04bfdbceb | [
"MIT"
] | null | null | null | backend/src/utils.py | jonas-scholz123/boomer-humour-exhumer | ca7110eed220b3805f29a1cef45c3ce04bfdbceb | [
"MIT"
] | null | null | null | backend/src/utils.py | jonas-scholz123/boomer-humour-exhumer | ca7110eed220b3805f29a1cef45c3ce04bfdbceb | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import config
def imshow(img):
    """Display a (C, H, W) image tensor with matplotlib.

    The input is assumed to be normalized to [-1, 1] (presumably a torch
    tensor, since ``.numpy()`` is called); it is shifted back to [0, 1]
    before being transposed to (H, W, C) and shown.
    """
    unnormalized = img / 2 + 0.5
    plt.imshow(np.transpose(unnormalized.numpy(), (1, 2, 0)))
    plt.show()
class ConceptNetDict:
    """Read-only, dict-like lookup of ConceptNet word embeddings.

    Backed by a pandas DataFrame loaded from an HDF5 file whose location is
    taken from ``config.paths["embeddings"]``.
    """

    def __init__(self):
        path = config.paths["embeddings"] + "en_mini_conceptnet.h5"
        self.df = pd.read_hdf(path, "data")

    def __getitem__(self, idx):
        # pandas raises KeyError for a missing index label, matching the
        # normal mapping protocol.
        return self.df.loc[idx].values

    def __contains__(self, idx):
        return self.get(idx) is not None

    def get(self, key):
        """Return the embedding vector for ``key``, or None if absent."""
        try:
            return self[key]
        except KeyError:
            return None
89ea3f60af5dbb14dba2b4c971f2474c30d72f44 | 14,363 | py | Python | michi/geocoder/street_stretch.py | fractalphile/michi | 449c784929e84b9d47728b8af4db8db2e292fb67 | [
"MIT"
] | null | null | null | michi/geocoder/street_stretch.py | fractalphile/michi | 449c784929e84b9d47728b8af4db8db2e292fb67 | [
"MIT"
] | 1 | 2019-07-12T11:58:59.000Z | 2019-07-12T11:58:59.000Z | michi/geocoder/street_stretch.py | fractalphile/michi | 449c784929e84b9d47728b8af4db8db2e292fb67 | [
"MIT"
] | 1 | 2019-07-10T20:58:31.000Z | 2019-07-10T20:58:31.000Z | from collections import OrderedDict
import itertools
import pandas as pd
from shapely.geometry import LineString
from shapely.ops import linemerge
from ..utils.utils import drop_consecutive_duplicates
class StreetStretch:
    """
    Represents a "stretch" - usually a list of segments along a single
    street.

    Built from a list of segments; provides the on/from/to streets, the
    geometry and the length of the stretch.
    """

    def __init__(self, geocoder, segments, side=None):
        """
        Parameters
        ----------
        geocoder : Geocoder
            A reference to a Geocoder, usually the one that created this
            stretch through get_street_stretch_by_geometry or
            get_street_stretch_by_code. A StreetStretch can also be created
            manually from a list of segments; those segments must exist in
            the given Geocoder.
        segments : list of str
            Segments in the normalized format (physical_id:<id>)
        side : str, optional
            A side of street to drive on - either 'L' or 'R' (Default None)
        """
        self.geocoder = geocoder
        self._segments = segments
        self.side = side
        # Lazily-populated cache for get_on_from_to().
        self._on_from_to = None
def get_segments(self, include_side_of_street=True):
"""
Return the list of segment IDs.
Parameters
----------
include_side_of_street : bool, optional
Whether to include the side of street character (L or R) with the
segment IDs. (Default True)
Returns
-------
list of str
"""
if include_side_of_street:
return [s.split(':')[1] for s in self._segments]
else:
return [s.split(':')[1][:-1] for s in self._segments]
@property
def segments(self):
return self.get_segments(False)
def __len__(self):
"""
Returns
-------
int
The number of segments on the stretch.
"""
return len(self._segments)
@property
def length(self):
"""
Returns
-------
The length of the stretch in feet.
"""
return sum([self.geocoder.segments[i]['len'] for i in self.segments])
def _get_on_streets(self, segments):
"""
Given a list of segments, return a list of sets of street codes that the
segments are on. Sets of street codes are returned because sometimes
multiple street codes refer to the same physical street.
If a street transitions into another street, consider it the same
street. For example, Hogan Place turns into Leonard Street over three
segments. The first segment is just Hogan Place, then one segment is
both Hogan and Leonard, and the final one is just Leonard. Since the
streets overlapped, it will be considered one street.
Parameters
----------
segments : list of str
Returns
-------
list of sets of str
"""
streets = []
for segment_id in segments:
# Get the set of street codes for each segment
street_codes = self.geocoder.segments[segment_id]['street_code']
# Check if this segment's street codes overlap with any of the
# already processed segments, if so, add this segment's street
# codes to the existing set.
match = False
for i in range(len(streets)):
if streets[i].intersection(street_codes):
streets[i] = streets[i].union(street_codes)
match = True
if not match:
streets.append(street_codes)
return streets
@property
def number_of_on_streets(self):
return len(self._get_on_streets(self.segments))
@property
def start_and_end_on_same_street(self):
"""
Returns
-------
bool
Whether or not the street that the stretch starts on is the same
as the one that it ends on.
"""
# Get the on streets using _get_on_streets to handle street codes
# that change even though the street physically stays the same.
segments = self.segments
on_streets = self._get_on_streets(segments)
# Get the on street codes specifically for the endpoints.
endpoints = self._get_on_streets([segments[0], segments[-1]])
# If there is only one street code set for the endpoints, then they
# must start and end on the same street.
if len(endpoints) == 1:
return True
# Otherwise, check if each of the endpoints intersects with any of the
# streets the strtech goes on. Since `_get_on_from_to` handles
# transitioning street codes, this ensures that even if the endpoints
# themselves have different street codes, if the street codes overlap
# during the stretch, then it will be counted as starting and stopping
# on the same street.
for street in on_streets:
if endpoints[0].intersection(street):
if endpoints[1].intersection(street):
return True
return False
@property
def number_of_turns(self):
"""
Return the number of "turns" on as stretch, which is the number of times
that a segment's street codes don't match the next segment's street
codes at all.
Returns
-------
int
"""
turns = 0
previous_street = None
for segment_id in self.segments:
street = self.geocoder.segments[segment_id]['street_code']
if previous_street and not previous_street.intersection(street):
turns += 1
previous_street = street
return turns
def get_geometry(self, merge=True):
"""
Return the geometry of the stretch, either as a list of geometries for
each segment, or as one single geometry.
Parameters
----------
merge : bool, optional
Whether to merge the segment geometries into a single geometry.
(Default True)
Returns
-------
shapely.LineString or list of shapely.LineString
"""
geometries = []
for segment in self.get_segments():
segment = self.geocoder.segment_column + ':' + segment
segment_id, side_of_street = self.geocoder.parse_geometry(segment)[2:]
geometry = self.geocoder.segments[segment_id]['geometry']
traffic_direction = self.geocoder.segments[segment_id]['traffic_direction']
# Flip the geometry if direction of travel is reverse of
# the drawn direction
if (
(traffic_direction == 'A') or
((traffic_direction == 'T') and (side_of_street == 'L'))
):
# Create a new LineString from the coordinates reversed
geometries.append(LineString(geometry.coords[::-1]))
else:
geometries.append(geometry)
if merge:
# Manually Merge the geometries by getting all of the coordinates
# from each segment in order
coords = [c for g in geometries for c in g.coords]
# Drop consecutive points - necessary?
coords = drop_consecutive_duplicates(coords)
return LineString(coords)
else:
return geometries
@property
def endpoint_nodes(self):
"""
Return a tuple of node IDs with the start and end nodes of the stretch.
Returns
-------
(str, str)
A tuple (start_node, end_node)
"""
return (
# Get the node that comes before the first segment
self.geocoder.parse_geometry(
list(self.geocoder.node_network.predecessors(
self._segments[0]
))[0]
)[2],
# And the node that comes after the last
self.geocoder.parse_geometry(
list(self.geocoder.node_network[self._segments[-1]])[0]
)[2]
)
@property
def on_from_to(self):
if not self._on_from_to:
self._on_from_to = self.get_on_from_to()
return self._on_from_to
@property
def on_streets(self):
if not self._on_from_to:
self._on_from_to = self.get_on_from_to()
return self._on_streets
@property
def from_streets(self):
if not self._on_from_to:
self._on_from_to = self.get_on_from_to()
return self._from_streets
@property
def to_streets(self):
if not self._on_from_to:
self._on_from_to = self.get_on_from_to()
return self._to_streets
def get_on_from_to(self):
"""
Return a list of dictionaries of On/From/To street options. Each
dictionary has `on_street`, `on_street_code`, `from_street`,
`from_street_code`, `to_street` and `to_street_code`.
If the on/from/to is unambiguous, then it will return a list of length
one. When ambiguous, return more than one option, with the "most likely"
option first. Likelihood is determined by the number of times that
street appears along the stretch, how often it appears globally in NYC,
and whether it is at the start or end of the stretch.
Returns
-------
list of dict
"""
def get_streets_from_segments(segments, sort_cols, segments_dict):
"""
Return a list of (street, street_code) tuples for the given segments
sorted in "likelihood" order.
Parameters
----------
segments: list
A list of segment IDs
sort_cols: list
A subset of ['count', 'start', 'end', 'global_count'] used to
sort the streets into likelihood order. For the on street, use
all. For from/to, use count and global_count.
"""
# Iterate through all the segments` street/street_code pairs and
# Add them to the streets dictionary.
streets = {}
for i, segment in enumerate(segments):
pairs = segments_dict[segment]['street_street_code_pair']
for street, street_code in pairs:
pair = (street, street_code)
if pair not in streets:
streets[pair] = {
'street': street,
'street_code': street_code,
# Keep track of occurances of this pair.
'global_count': len(self.geocoder.streets[street_code]['df']),
'count': 0, 'start': 0, 'end': 0,
}
# If the street appears at the start or end, favor it.
if i == 0:
streets[pair]['start'] += 1
if i == (len(segments) - 1):
streets[pair]['end'] += 1
# Count the number of occurances of that pair.
streets[pair]['count'] += 1
# Return (street, street_code) tuples sorted by likelihood
return [
(street['street'], street['street_code']) for street in
sorted(streets.values(), key=lambda street: tuple(
-street[col] for col in sort_cols
))
]
# Get the unique on segment IDs and use them to get on street options.
on_segments = self.segments
on_streets = get_streets_from_segments(
on_segments, ['count', 'start', 'end', 'global_count'],
self.geocoder.segments
)
def drop_overlapping_streets(a, b):
"""
Return the streets in a that are not in b unless a and b are the
same.
"""
a_codes = [s[1] for s in a]
b_codes = [s[1] for s in b]
if set(a_codes).difference(b_codes):
return [s for s in a if s[1] not in b_codes]
return a
def get_node_streets(node):
"""A function to get street options for the nodes."""
# If the node is a dead end, just return DEAD END.
if self.geocoder.nodes[node]['dead_end']:
return [(
'DEAD END', 'dead_end'
)]
# Get the segments at the node, not inculding the on segments.
segments = set([
s for s in self.geocoder.nodes[node]['segments']
#if self.geocoder.lion_segments[s]['physical_id'] not in on_segments
])
segments2 = set([
s for s in segments if
self.geocoder.lion_segments[s]['physical_id'] not in on_segments
])
if segments2:
segments = segments2
streets = get_streets_from_segments(
segments, ['count', 'global_count'], self.geocoder.lion_segments
)
return drop_overlapping_streets(streets, on_streets)
# Get from node, to node and the respective street options
from_node, to_node = self.endpoint_nodes
from_streets = get_node_streets(from_node)
to_streets = get_node_streets(to_node)
on_streets = drop_overlapping_streets(on_streets, from_streets + to_streets)
# Cache the results on the object for future lookup
self._on_streets = on_streets
self._from_streets = from_streets
self._to_streets = to_streets
# Return a list of dictionaries of the combinations of on/from/to
return [
{
'on_street': os, 'from_street': fs, 'to_street': ts,
'on_street_code': osc, 'from_street_code': fsc,
'to_street_code': tsc
}
for (os, osc), (fs, fsc), (ts, tsc)
in itertools.product(on_streets, from_streets, to_streets)
]
| 35.640199 | 90 | 0.571399 |
d338ee4b6afb2413043beb23273fdefd42953ab6 | 2,483 | py | Python | extract_moments_in_time_frames_hdf5.py | ombretta/3D-ResNets-PyTorch | a5b0f092c36c5256257ba854fbc50718c35244fb | [
"MIT"
] | null | null | null | extract_moments_in_time_frames_hdf5.py | ombretta/3D-ResNets-PyTorch | a5b0f092c36c5256257ba854fbc50718c35244fb | [
"MIT"
] | null | null | null | extract_moments_in_time_frames_hdf5.py | ombretta/3D-ResNets-PyTorch | a5b0f092c36c5256257ba854fbc50718c35244fb | [
"MIT"
] | 1 | 2020-12-22T11:24:38.000Z | 2020-12-22T11:24:38.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 28 11:55:57 2021
@author: ombretta
"""
import cv2
import os
import h5py
import numpy as np
def extract_frames(video_path, video_dest):
    """Decode every frame of a video into RGB and store the whole stack in
    an HDF5 file under the 'video' dataset.

    Parameters
    ----------
    video_path : str
        Path of the source video readable by OpenCV.
    video_dest : str
        Path of the HDF5 file to create (overwritten if it exists).

    Returns
    -------
    bool
        True when the number of frames stored matches the frame count
        reported by the container.
    """
    with h5py.File(video_dest, "w") as h5_file:
        cap = cv2.VideoCapture(video_path)
        frameCount, frameWidth, frameHeight, fps = get_video_properties(cap)
        print(video_path, frameCount, frameWidth, frameHeight, fps)
        # Buffer for all frames, RGB channel order.
        buf = np.empty((frameCount, frameHeight, frameWidth, 3),
                       np.dtype('float32'))
        fc = 0
        ret = True
        while fc < frameCount and ret:
            ret, frame_bgr = cap.read()
            # BUG FIX: guard against a failed read - indexing frame_bgr
            # when ret is False crashed (frame_bgr is None).
            if not ret:
                break
            buf[fc] = frame_bgr[:, :, [2, 1, 0]]  # swap BGR -> RGB
            fc += 1
        cap.release()
        # BUG FIX: store the full frame buffer; the original stored only
        # the last decoded frame (data=frame_rgb), losing the video.
        h5_file.create_dataset('video', data=buf[:fc])
        # BUG FIX: use the frame count captured before cap.release();
        # querying a released capture is unreliable.
        num_images = h5_file['video'][...].shape[0]
        print(frameCount, num_images)
        return frameCount == num_images
def get_video_properties(cap):
    """Return (frame_count, width, height, fps) for an opened
    cv2.VideoCapture, each coerced to int."""
    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    return frame_count, width, height, fps
def main():
    """Convert every Moments-in-Time video into an HDF5 frame archive,
    mirroring the training/validation directory layout under *_h5 folders
    and skipping videos that were already converted."""
    dataset_path = "/tudelft.net/staff-bulk/ewi/insy/CV-DataSets/Moments_in_Time_Raw/"
    for data_set in ["training", "validation"]:
        dest = dataset_path + data_set + "_h5/"
        if not os.path.exists(dest):
            os.mkdir(dest)
        for video_class in os.listdir(dataset_path + data_set):
            if not os.path.exists(dest + video_class):
                os.mkdir(dest + video_class)
            for v in os.listdir(dataset_path + data_set + "/" + video_class):
                video_path = dataset_path + data_set + "/" + video_class + "/" + v
                video_dest = dest + video_class + "/" + v.split(".mp4")[0] + ".hdf5"
                if os.path.exists(video_dest):
                    print(video_dest, "already exists")
                else:
                    print("Saving", video_dest)
                    print(extract_frames(video_path, video_dest))


if __name__ == "__main__":
    main()
| 31.0375 | 86 | 0.566653 |
a13c389f636fb1edeef9104cf719ae074bd69253 | 4,495 | py | Python | src/olympia/reviewers/urls.py | hiikezoe/addons-server | 1574d92fae097796b40b54579e3c7eeb876d6e32 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/reviewers/urls.py | hiikezoe/addons-server | 1574d92fae097796b40b54579e3c7eeb876d6e32 | [
"BSD-3-Clause"
] | null | null | null | src/olympia/reviewers/urls.py | hiikezoe/addons-server | 1574d92fae097796b40b54579e3c7eeb876d6e32 | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls import url
from django.shortcuts import redirect
from olympia.addons.urls import ADDON_ID
from olympia.reviewers import views, views_themes
# All URLs under /editors/
urlpatterns = (
    # Dashboard
    url(r'^$', views.dashboard, name='reviewers.dashboard'),
    url(r'^dashboard$',
        lambda request: redirect('reviewers.dashboard', permanent=True)),

    # Add-on review queues
    url(r'^queue$', views.queue, name='reviewers.queue'),
    url(r'^queue/new$', views.queue_nominated,
        name='reviewers.queue_nominated'),
    url(r'^queue/updates$', views.queue_pending,
        name='reviewers.queue_pending'),
    url(r'^queue/reviews$', views.queue_moderated,
        name='reviewers.queue_moderated'),
    url(r'^queue/application_versions\.json$', views.application_versions_json,
        name='reviewers.application_versions_json'),
    url(r'^queue/auto_approved', views.queue_auto_approved,
        name='reviewers.queue_auto_approved'),
    url(r'^queue/content_review', views.queue_content_review,
        name='reviewers.queue_content_review'),
    url(r'^queue/expired_info_requests', views.queue_expired_info_requests,
        name='reviewers.queue_expired_info_requests'),
    url(r'^unlisted_queue$', views.unlisted_queue,
        name='reviewers.unlisted_queue'),
    url(r'^unlisted_queue/all$', views.unlisted_list,
        name='reviewers.unlisted_queue_all'),

    # Logs
    url(r'^logs$', views.eventlog, name='reviewers.eventlog'),
    url(r'^log/(\d+)$', views.eventlog_detail,
        name='reviewers.eventlog.detail'),
    url(r'^reviewlog$', views.reviewlog, name='reviewers.reviewlog'),
    url(r'^beta_signed_log$', views.beta_signed_log,
        name='reviewers.beta_signed_log'),

    # Review pages and helpers
    url(r'^queue_version_notes/%s?$' % ADDON_ID, views.queue_version_notes,
        name='reviewers.queue_version_notes'),
    url(r'^queue_review_text/(\d+)?$', views.queue_review_text,
        name='reviewers.queue_review_text'),  # (?P<addon_id>[^/<>"']+)
    url(r'^queue_viewing$', views.queue_viewing,
        name='reviewers.queue_viewing'),
    url(r'^review_viewing$', views.review_viewing,
        name='reviewers.review_viewing'),
    # 'content' is not a channel, but is a special kind of review that we only
    # do for listed add-ons, so we abuse the channel parameter to handle that.
    url(r'^review(?:-(?P<channel>listed|unlisted|content))?/%s$' % ADDON_ID,
        views.review, name='reviewers.review'),
    url(r'^whiteboard/(?P<channel>listed|unlisted|content)/%s$' % ADDON_ID,
        views.whiteboard, name='reviewers.whiteboard'),
    url(r'^performance/(?P<user_id>\d+)?$', views.performance,
        name='reviewers.performance'),
    url(r'^motd$', views.motd, name='reviewers.motd'),
    url(r'^motd/save$', views.save_motd, name='reviewers.save_motd'),
    url(r'^abuse-reports/%s$' % ADDON_ID, views.abuse_reports,
        name='reviewers.abuse_reports'),
    url(r'^leaderboard/$', views.leaderboard, name='reviewers.leaderboard'),

    # Theme review
    url('^themes$',
        lambda request: redirect('reviewers.dashboard', permanent=True)),
    url('^themes/pending$', views_themes.themes_list,
        name='reviewers.themes.list'),
    url('^themes/flagged$', views_themes.themes_list,
        name='reviewers.themes.list_flagged',
        kwargs={'flagged': True}),
    url('^themes/updates$', views_themes.themes_list,
        name='reviewers.themes.list_rereview',
        kwargs={'rereview': True}),
    url('^themes/queue/$', views_themes.themes_queue,
        name='reviewers.themes.queue_themes'),
    url('^themes/queue/flagged$', views_themes.themes_queue_flagged,
        name='reviewers.themes.queue_flagged'),
    url('^themes/queue/updates$', views_themes.themes_queue_rereview,
        name='reviewers.themes.queue_rereview'),
    url('^themes/queue/commit$', views_themes.themes_commit,
        name='reviewers.themes.commit'),
    url('^themes/queue/single/(?P<slug>[^ /]+)$', views_themes.themes_single,
        name='reviewers.themes.single'),
    url('^themes/history/(?P<username>[^ /]+)?$',
        views_themes.themes_history, name='reviewers.themes.history'),
    url(r'^themes/logs$', views_themes.themes_logs,
        name='reviewers.themes.logs'),
    url('^themes/release$', views_themes.release_locks,
        name='reviewers.themes.release_locks'),
    url('^themes/logs/deleted/$', views_themes.deleted_themes,
        name='reviewers.themes.deleted'),
    url('^themes/search/$', views_themes.themes_search,
        name='reviewers.themes.search'),
)
| 49.395604 | 79 | 0.685428 |
2e63d6376cb08db256c4cd1c57318e51d7e46019 | 6,249 | py | Python | Bot/src/funhouse/reddit.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 181 | 2021-05-26T17:37:40.000Z | 2022-02-26T08:36:07.000Z | Bot/src/funhouse/reddit.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 24 | 2021-05-14T19:47:34.000Z | 2021-09-06T17:16:17.000Z | Bot/src/funhouse/reddit.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 16 | 2021-07-02T09:40:56.000Z | 2022-01-21T10:07:08.000Z | # !/usr/bin/python
"""
Copyright ©️: 2020 Seniatical / _-*™#7519
License: Apache 2.0
A permissive license whose main conditions require preservation of copyright and license notices.
Contributors provide an express grant of patent rights.
Licensed works, modifications, and larger works may be distributed under different terms and without source code.
FULL LICENSE CAN BE FOUND AT:
https://www.apache.org/licenses/LICENSE-2.0.html
Any violation to the license, will result in moderate action
You are legally required to mention (original author, license, source and any changes made)
"""
import random
import datetime
import discord
from discord.ext import commands
from discord.ext.commands import BucketType, cooldown
import apraw
import asyncio
from utility import emojis as emoji
from utility import abbrev_denary as convert_size
import aiohttp
from requests.utils import requote_uri
from core._.tasks.reddit import memes
class Reddit(commands.Cog):
    """Cog with commands that fetch submissions from Reddit via apraw."""

    def __init__(self, bot):
        self.bot = bot
        # Authenticated Reddit client; credentials come from the bot env.
        self.reddit = apraw.Reddit(
            username=bot.env('REDDIT_USERNAME'),
            password=bot.env('REDDIT_PASSWORD'),
            client_id=bot.env('REDDIT_CLIENT_ID'),
            client_secret=bot.env('REDDIT_CLIENT_SECRET'),
            user_agent='<Mecha Karen - Discord Bot [https://github.com/Seniatical/Mecha-Karen/main/Bot/src/funhouse/reddit.py]>'
        )
        # Pre-fetched meme embeds, kept filled by the background task.
        bot.cache.cache['memes'] = []
        print('Storing memes from `r/memes` in cache container - `self.cache.cache["memes"]`')
        memes.add_global(bot.cache.cache['memes'])
        self.bot.loop.create_task(memes.task(bot, self.reddit))
@commands.command(name='Meme', aliases=['memes'])
@commands.cooldown(1, 5, BucketType.user)
async def memes(self, ctx):
embed = random.choice(self.bot.cache.cache['memes'])
await ctx.send(embed=embed)
@commands.command(name='BB', aliases=['BreakingBad'])
@commands.cooldown(1, 5, BucketType.user)
async def bb(self, ctx):
subreddit = await self.reddit.subreddit('okbuddychicanery')
allsubs = []
async for i in subreddit.top(limit=50):
if i.is_video:
continue
allsubs.append(i)
random_sub = random.choice(allsubs)
name = random_sub.title
url = random_sub.url
comments = random_sub.num_comments
up = random_sub.score
author = await random_sub.author()
sub = await random_sub.subreddit()
embed = discord.Embed(
title=name,
color=discord.colour.Color.from_rgb(random.randint(1, 255), random.randint(1, 255), random.randint(1, 255))
)
embed.set_author(name=f'Posted by {author} from r/{sub}')
embed.set_image(url=url)
embed.set_footer(text='{} {} | {} {}'.format(emoji.UNICODE['up'], convert_size(up), emoji.UNICODE['text'], convert_size(comments)))
await ctx.send(embed=embed)
@commands.command(name='Reddit')
@commands.cooldown(1, 30, BucketType.user)
async def reddit(self, ctx, subreddit: str):
global msg
try:
try:
subreddit = await self.reddit.subreddit(subreddit)
except Exception:
ctx.command.reset_cooldown(ctx)
return await ctx.send('Subreddit doesnt exist!')
all_subs_ = []
async for submission in subreddit.hot(limit=10):
all_subs_.append(submission)
random_sub = random.choice(all_subs_)
if random_sub.over18:
msg = await ctx.send('Over 18 content detected.')
if ctx.channel.is_nsfw():
name = random_sub.title
url = random_sub.url
comments = random_sub.num_comments
up = random_sub.score
author = await random_sub.author()
sub = await random_sub.subreddit()
embed = discord.Embed(
title=name,
color=discord.colour.Color.from_rgb(random.randint(1, 255), random.randint(1, 255), random.randint(1, 255))
)
embed.set_author(name=f'Posted by {author} from r/{sub}')
embed.set_image(url=url)
embed.set_footer(text='{} {} | {} {}'.format(emoji.UNICODE['up'], convert_size(up), emoji.UNICODE['text'], convert_size(comments)))
await msg.edit(content=None, embed=embed)
else:
embed = discord.Embed(title='Error 404!', colour=discord.Color.red(),
description="You must use this command in a channel marked as **NSFW**.",
timestamp=ctx.message.created_at
).set_footer(text='Invoked by {}'.format(ctx.author), icon_url=ctx.author.avatar)
embed.set_image(url='https://i.imgur.com/cy9t3XN.gif')
await msg.edit(content=None, embed=embed)
ctx.command.reset_cooldown(ctx)
else:
name = random_sub.title
url = random_sub.url
comments = random_sub.num_comments
up = random_sub.score
author = await random_sub.author()
sub = await random_sub.subreddit()
embed = discord.Embed(
title=name,
color=discord.colour.Color.from_rgb(random.randint(1, 255), random.randint(1, 255), random.randint(1, 255))
)
embed.set_author(name=f'Posted by {author} from r/{sub}')
embed.set_image(url=url)
embed.set_footer(text='{} {} | {} {}'.format(emoji.UNICODE['up'], convert_size(up), emoji.UNICODE['text'], convert_size(comments)))
await ctx.send(embed=embed)
except discord.errors.HTTPException:
ctx.command.reset_cooldown(ctx)
return await ctx.send('Message Content was too large!')
def setup(bot):
    """Extension entry point used by discord.py's loader."""
    bot.add_cog(Reddit(bot))
| 42.80137 | 155 | 0.590174 |
de8e33f961552427074b3a95c39a853ee3a84b93 | 1,673 | py | Python | io_scene_xray/hotkeys.py | ed8rez/blender-xray | 543ae9c40dc706216552d92b7f7b7c9d8da9dbf3 | [
"BSD-2-Clause"
] | 1 | 2021-02-14T11:52:25.000Z | 2021-02-14T11:52:25.000Z | io_scene_xray/hotkeys.py | ed8rez/blender-xray | 543ae9c40dc706216552d92b7f7b7c9d8da9dbf3 | [
"BSD-2-Clause"
] | null | null | null | io_scene_xray/hotkeys.py | ed8rez/blender-xray | 543ae9c40dc706216552d92b7f7b7c9d8da9dbf3 | [
"BSD-2-Clause"
] | null | null | null | import bpy
from .obj.imp.ops import OpImportObject
from .obj.exp.ops import OpExportObjects
class KayMap():
    """A simple record describing one keyboard shortcut binding."""

    def __init__(self):
        # Key identifier (e.g. 'F8'), display text and target operator.
        self.key = None
        self.text = None
        self.operator_id = None
        # Modifier state for the binding.
        self.shift = False
        self.ctrl = False
        self.alt = False
        self.key_modifier = 'NONE'
# Registered keymap items, keyed by operator id, so they can be removed
# again on unregister.
io_scene_xray_keymaps = {}

# keymaps

# import object (F8)
obj_imp_keymap = KayMap()
obj_imp_keymap.key = 'F8'
obj_imp_keymap.operator_id = OpImportObject.bl_idname
obj_imp_keymap.text = OpImportObject.bl_label

# export object (Shift+F8)
obj_exp_keymap = KayMap()
obj_exp_keymap.key = 'F8'
obj_exp_keymap.operator_id = OpExportObjects.bl_idname
obj_exp_keymap.text = OpExportObjects.bl_label
obj_exp_keymap.shift = True

keymaps_list = [
    obj_imp_keymap,
    obj_exp_keymap
]
def create_keymap(keymaps, keymap):
    """Register one shortcut in the given Blender keymap collection and
    remember the created item for later removal."""
    item = keymaps.keymap_items.new(
        keymap.operator_id,
        type=keymap.key,
        value='PRESS',
        shift=keymap.shift,
        ctrl=keymap.ctrl,
        alt=keymap.alt,
        key_modifier=keymap.key_modifier
    )
    io_scene_xray_keymaps[keymap.operator_id] = (keymaps, item)
def register_hotkeys():
    """Create every addon shortcut in the 3D View keymap (no-op when the
    addon keyconfig is unavailable, e.g. in background mode)."""
    window_manager = bpy.context.window_manager
    addon_keyconfigs = window_manager.keyconfigs.addon
    if addon_keyconfigs:
        keymaps = addon_keyconfigs.keymaps.new(
            name='3D View', space_type='VIEW_3D'
        )
        for keymap in keymaps_list:
            create_keymap(keymaps, keymap)
def unregister_hotkeys():
    """Remove every keymap item created by register_hotkeys()."""
    for keymaps, keymap_item in io_scene_xray_keymaps.values():
        keymaps.keymap_items.remove(keymap_item)
    io_scene_xray_keymaps.clear()
| 24.246377 | 77 | 0.699342 |
fa72b0ae1a170b207348d4ae5df9e8481547e41c | 5,891 | py | Python | examples/3_Authentication.py | ktmeaton/basespace-python3-sdk | 7209517f213e626bf9b227a781de1bf8eee9f88e | [
"Apache-2.0"
] | null | null | null | examples/3_Authentication.py | ktmeaton/basespace-python3-sdk | 7209517f213e626bf9b227a781de1bf8eee9f88e | [
"Apache-2.0"
] | null | null | null | examples/3_Authentication.py | ktmeaton/basespace-python3-sdk | 7209517f213e626bf9b227a781de1bf8eee9f88e | [
"Apache-2.0"
] | null | null | null | """
Copyright 2012 Illumina
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import print_function
from BaseSpacePy.api.BaseSpaceAPI import BaseSpaceAPI
import time
import webbrowser
import os
from six.moves import cPickle as Pickle
"""
Demonstrates the basic BaseSpace authentication process The work-flow is as follows:
scope-request -> user grants access -> browsing data. The scenario is demonstrated both for device and web-based apps.
Further we demonstrate how a BaseSpaceAPI instance may be preserved across multiple http-request for the same app session using
python's pickle module.
"""
"""
NOTE: You will need to provide the credentials for your app (available in the developer portal).
You can do this with a master config file (preferred), or by filling in values below.
"""
# If you're not using a config file, fill in you app's credentials here:
clientKey = ""
clientSecret = ""
appSessionId = ""
apiServer = 'https://api.basespace.illumina.com/'  # or 'https://api.cloud-hoth.illumina.com/'
apiVersion = 'v1pre3'

# Initialize a BaseSpace API object from the credentials above, or fall back
# to the DEFAULT profile of a master config file when no key is provided.
if clientKey:
    myAPI = BaseSpaceAPI(clientKey, clientSecret, apiServer, apiVersion, appSessionId)
else:
    myAPI = BaseSpaceAPI(profile='DEFAULT')
# Request a verification code and uri for scope 'browse global'
deviceInfo = myAPI.getVerificationCode('browse global')
print("\n URL for user to visit and grant access: ")
print(deviceInfo['verification_with_code_uri'])

## PAUSE HERE
# The user must visit the verification uri and grant access
print("\nPlease visit the uri within 15 seconds and grant access")
print(deviceInfo['verification_with_code_uri'])
webbrowser.open_new(deviceInfo['verification_with_code_uri'])
time.sleep(15)
## PAUSE HERE

# Once access is granted, updatePrivileges exchanges the device code for a
# BaseSpace access token so browsing can start.
code = deviceInfo['device_code']
myAPI.updatePrivileges(code)

# For reference, the granted access token can be read back from the object
print("\nMy Access-token:")
print(myAPI.getAccessToken())

# Try the authorized API: list all available genomes
allGenomes = myAPI.getAvailableGenomes()
print("\nGenomes \n" + str(allGenomes))

# A later session that already holds a token can simply set it on a
# BaseSpaceAPI instance.
myToken = myAPI.getAccessToken()
myAPI.setAccessToken(myToken)
print("\nA BaseSpaceAPI instance was updated with an access-token: ")
print(myAPI)
#################### Web-based verification #################################
# The same scenario when authentication is done through a web-browser.
if clientKey:
    BSapiWeb = BaseSpaceAPI(clientKey, clientSecret, apiServer, apiVersion, appSessionId)
else:
    BSapiWeb = BaseSpaceAPI(profile='DEFAULT')

userUrl = BSapiWeb.getWebVerificationCode('browse global', 'http://localhost', state='myState')
print("\nHave the user visit:")
print(userUrl)
webbrowser.open_new(userUrl)

# After granting access the user is redirected to a url looking like:
# http://localhost/?code=<MY DEVICE CODE FROM REDICRECT>&state=myState&action=oauthv2authorization
# The `code` query parameter from that request is exchanged for the token.
myCode = '<MY DEVICE CODE FROM REDIRECT>'
#BSapiWeb.updatePrivileges(myCode)
#################### Storing BaseSpaceApi using python's pickle module #################################
"""
It may sometimes be useful to preserve certain api objects across a series of http requests from the same user-session.
Here we demonstrate how the Python pickle module may be used to achieve this end.
The example will be for an instance of BaseSpaceAPI, but the same technique may be used for BaseSpaceAuth.
In fact, a single instance of BaseSpaceAuth would be enough for a single App and could be shared by all http-requests, as the identity of
this object is only given by the client_key and client_secret.
(There is, of course, no problem in having multiple identical BaseSpaceAuth instances).
"""
# Get current user
user= myAPI.getUserById('current')
print(user)
print(myAPI)
#### Here some work goes on
# now we wish to store the API object for the next time we get a request in this session
# make a file to store the BaseSpaceAPi instance in, for easy identification we will name this by any id that may be used for identifying
# the session again.
mySessionId = myAPI.appSessionId + '.pickle'
f = open(mySessionId,'w')
Pickle.dump(myAPI, f)
f.close()
# Imagine the current request is done, we will simulate this by deleting the api instance
myAPI = None
print("\nTry printing the removed API, we get: " + str(myAPI))
# Next request in the session with id = id123 comes in
# We'll check if if there already is a BaseSpaceAPI stored for the session
if os.path.exists(mySessionId):
f = open(mySessionId)
myAPI = Pickle.load(f)
f.close()
print()
print("We got the API back!")
print(myAPI)
else:
print("Looks like we haven't stored anything for this session yet")
# create a BaseSpaceAPI for the first time
| 37.762821 | 137 | 0.748939 |
e5f206a9d16d493aabafba459722289b6aebf5d7 | 4,020 | py | Python | cwlkernel/AutoCompleteEngine.py | fabricebrito/CWLJNIKernel | c87d9d1ae326bb198c4b8e14836ce934b9841c0d | [
"Apache-2.0"
] | 4 | 2020-02-28T16:03:26.000Z | 2021-03-28T12:58:25.000Z | cwlkernel/AutoCompleteEngine.py | fabricebrito/CWLJNIKernel | c87d9d1ae326bb198c4b8e14836ce934b9841c0d | [
"Apache-2.0"
] | 1 | 2020-12-09T11:06:42.000Z | 2020-12-09T19:08:23.000Z | cwlkernel/AutoCompleteEngine.py | fabricebrito/CWLJNIKernel | c87d9d1ae326bb198c4b8e14836ce934b9841c0d | [
"Apache-2.0"
] | 3 | 2020-04-10T15:09:11.000Z | 2020-12-09T11:26:24.000Z | import re
from typing import Dict, Iterable, Optional, Callable, Tuple, List
from pygtrie import CharTrie
class AutoCompleteEngine:
    """
    AutoCompleteEngine generates suggestions given a user's input.

    Magic-command names are indexed (by suffix, upper-cased) in a character
    trie; per-command argument suggesters can be registered separately.
    """

    def __init__(self, magic_commands: Optional[Iterable[str]]):
        # Maps a magic command name to a callable producing argument suggestions.
        self._magics_args_suggesters: Dict[str, Callable] = {}
        # Trie of upper-cased command-name suffixes -> full command name.
        self._commands_trie = CharTrie()
        if magic_commands is not None:
            for magic in magic_commands:
                self.add_magic_command(magic)

    def suggest(self, code: str, cursor_pos: int) -> Dict:
        """
        Produce auto-complete suggestions for the token under the cursor.

        @param code: string contains the current state of the user's input. It could be a CWL file
        or magic commands.
        @param cursor_pos: current position of cursor
        @return: {'matches': ['MATCH1', 'MATCH1'],
        'cursor_end': ,
        'cursor_start': , }
        """
        matches = []
        cursor_end = cursor_pos
        cursor_start = cursor_pos
        # A magic line looks like "%  command arg1 arg2 ..." — the 'command'
        # group captures "%<spaces><word>", the 'args' group the remainder.
        line_classifier = re.compile(r'(?P<command>^%[ ]+[\w]*)(?P<args>( [\S]*)*)', re.MULTILINE)
        for match in line_classifier.finditer(code):  # type: re.Match
            if match.start('command') <= cursor_pos <= match.end('command'):
                # Cursor is on the command itself: complete the command name.
                # Positions are re-based to the start of the matched line and
                # shifted back to absolute coordinates afterwards.
                new_cursor_pos = cursor_pos - match.span()[0]
                code = match.group()
                matches, cursor_start, cursor_end = self._suggest_magic_command(code, new_cursor_pos)
                cursor_start += match.span()[0]
                cursor_end += match.span()[0]
            elif match.span()[0] <= cursor_pos <= match.span()[1]:
                # Cursor is in the argument list: delegate to the command's
                # registered argument suggester.
                new_cursor_pos = cursor_pos - match.start('args')
                code = match.group('args')
                command = match.group('command')[1:].strip()
                matches, cursor_start, cursor_end = self._suggest_magics_arguments(command, code, new_cursor_pos)
                cursor_start += match.start('args')
                cursor_end += match.start('args')
        return {
            'matches': matches,
            'cursor_end': cursor_end,
            'cursor_start': cursor_start
        }

    def _suggest_magic_command(self, code: str, cursor_pos: int) -> Tuple[List[str], int, int]:
        """Suggest full magic-command names matching the token at cursor_pos."""
        cursor_end, cursor_start, token = self._parse_tokens(code, cursor_pos)
        if token == '%':
            # A bare "%": no prefix typed yet, so match every known command.
            token = ''
        try:
            matches = [m for m in set(self._commands_trie.values(prefix=token))]
            matches.sort(key=len)
        except KeyError:
            # No trie entry under this prefix: report no matches at the cursor.
            matches = []
            cursor_end = cursor_pos
            cursor_start = cursor_pos
        return matches, cursor_start, cursor_end

    def _suggest_magics_arguments(self, command: str, code: str, cursor_pos: int) -> Tuple[List[str], int, int]:
        """Stateless command's arguments suggester"""
        cursor_end, cursor_start, query_token = self._parse_tokens(code, cursor_pos)
        # KeyError here means no suggester was registered for this command.
        options: List[str] = self._magics_args_suggesters[command](query_token)
        return options, cursor_start, cursor_end

    def add_magic_commands_suggester(self, magic_name: str, suggester: Callable) -> None:
        """Register a callable mapping a partial argument token to suggestions."""
        self._magics_args_suggesters[magic_name] = suggester

    @classmethod
    def _parse_tokens(cls, code, cursor_pos):
        """Locate the whitespace-delimited token containing cursor_pos.

        Returns (cursor_end, cursor_start, token); the token is stripped and
        upper-cased because trie keys are stored upper-cased.
        """
        code_length = len(code)
        token_ends_at = code.find(" ", cursor_pos)
        cursor_end = min(token_ends_at + 1, code_length - 1)
        if token_ends_at == -1:
            # No space after the cursor: token runs to the end of the code.
            token_ends_at = code_length - 1
            cursor_end = code_length
        token_starts_at = code.rfind(" ", 0, cursor_pos)
        cursor_start = token_starts_at + 1
        if token_starts_at == -1:
            # No space before the cursor: token starts at the beginning.
            token_starts_at = 0
            cursor_start = cursor_pos
        token = code[token_starts_at:token_ends_at + 1].strip().upper()
        return cursor_end, cursor_start, token

    def add_magic_command(self, magic_command_name: str):
        """Index a magic command under every suffix of its name (upper-cased)."""
        for i in range(1, len(magic_command_name) + 1):
            self._commands_trie[magic_command_name[-i:].upper()] = magic_command_name
| 43.225806 | 113 | 0.612438 |
ee9020ebeaa8c120bb6b7677807a8960a59ed303 | 21,682 | py | Python | landlab/components/space/space.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | null | null | null | landlab/components/space/space.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | 1 | 2016-03-16T02:34:08.000Z | 2016-04-20T19:31:30.000Z | landlab/components/space/space.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | null | null | null | import numpy as np
from landlab.components.erosion_deposition.generalized_erosion_deposition import (
DEFAULT_MINIMUM_TIME_STEP,
_GeneralizedErosionDeposition,
)
from landlab.utils.return_array import return_array_at_node
from .cfuncs import calculate_qs_in
ROOT2 = np.sqrt(2.0) # syntactic sugar for precalculated square root of 2
TIME_STEP_FACTOR = 0.5 # factor used in simple subdivision solver
class Space(_GeneralizedErosionDeposition):
"""Stream Power with Alluvium Conservation and Entrainment (SPACE)
See the publication:
Shobe, C. M., Tucker, G. E., and Barnhart, K. R.: The SPACE 1.0 model: a
Landlab component for 2-D calculation of sediment transport, bedrock
erosion, and landscape evolution, Geosci. Model Dev., 10, 4577-4604,
https://doi.org/10.5194/gmd-10-4577-2017, 2017.
Note: If timesteps are large enough that Es*dt (sediment erosion)
exceeds sediment thickness H, the 'adaptive' solver is necessary to
subdivide timesteps. Compare Es and H arrays to determine whether
timesteps are appropriate or too large for the 'basic' solver.
Parameters
----------
grid : ModelGrid
Landlab ModelGrid object
K_sed : float, field name, or array
Erodibility for sediment (units vary).
K_br : float, field name, or array
Erodibility for bedrock (units vary).
F_f : float
Fraction of permanently suspendable fines in bedrock [-].
phi : float
Sediment porosity [-].
H_star : float
Sediment thickness required for full entrainment [L].
v_s : float
Effective settling velocity for chosen grain size metric [L/T].
m_sp : float
Drainage area exponent (units vary)
n_sp : float
Slope exponent (units vary)
sp_crit_sed : float, field name, or array
Critical stream power to erode sediment [E/(TL^2)]
sp_crit_br : float, field name, or array
Critical stream power to erode rock [E/(TL^2)]
discharge_field : float, field name, or array
Discharge [L^2/T]. The default is to use the grid field
'surface_water__discharge', which is simply drainage area
multiplied by the default rainfall rate (1 m/yr). To use custom
spatially/temporally varying flow, use 'water__unit_flux_in'
as the discharge field.
solver : string
Solver to use. Options at present include:
(1) 'basic' (default): explicit forward-time extrapolation.
Simple but will become unstable if time step is too large.
(2) 'adaptive': subdivides global time step as needed to
prevent slopes from reversing and alluvium from going
negative.
Examples
---------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.components import (FlowAccumulator,
... DepressionFinderAndRouter,
... Space,
... FastscapeEroder)
>>> np.random.seed(seed = 5000)
Define grid and initial topography:
* 5x5 grid with baselevel in the lower left corner
* All other boundary nodes closed
* Initial topography is plane tilted up to the upper right with
noise
>>> mg = RasterModelGrid((5, 5), xy_spacing=10.0)
>>> _ = mg.add_zeros('topographic__elevation', at='node')
>>> mg.at_node['topographic__elevation'] += (mg.node_y / 10. +
... mg.node_x / 10. + np.random.rand(len(mg.node_y)) / 10.)
>>> mg.set_closed_boundaries_at_grid_edges(bottom_is_closed=True,
... left_is_closed=True,
... right_is_closed=True,
... top_is_closed=True)
>>> mg.set_watershed_boundary_condition_outlet_id(
... 0, mg.at_node['topographic__elevation'], -9999.)
>>> fsc_dt = 100.
>>> space_dt = 100.
Instantiate Fastscape eroder, flow router, and depression finder
>>> fsc = FastscapeEroder(mg, K_sp=.001, m_sp=.5, n_sp=1)
>>> fr = FlowAccumulator(mg, flow_director='D8')
>>> df = DepressionFinderAndRouter(mg)
Burn in an initial drainage network using the Fastscape eroder:
>>> for x in range(100):
... fr.run_one_step()
... df.map_depressions()
... flooded = np.where(df.flood_status == 3)[0]
... fsc.run_one_step(dt=fsc_dt, flooded_nodes=flooded)
... mg.at_node['topographic__elevation'][0] -= 0.001 # Uplift
Add some soil to the drainage network:
>>> _ = mg.add_zeros('soil__depth', at='node', dtype=float)
>>> mg.at_node['soil__depth'] += 0.5
>>> mg.at_node['topographic__elevation'] += mg.at_node['soil__depth']
Instantiate the Space component:
>>> ha = Space(mg, K_sed=0.00001, K_br=0.00000000001,
... F_f=0.5, phi=0.1, H_star=1., v_s=0.001,
... m_sp=0.5, n_sp = 1.0, sp_crit_sed=0,
... sp_crit_br=0)
Now run the Space component for 2000 short timesteps:
>>> for x in range(2000): #Space component loop
... fr.run_one_step()
... df.map_depressions()
... flooded = np.where(df.flood_status == 3)[0]
... ha.run_one_step(dt=space_dt, flooded_nodes=flooded)
... mg.at_node['bedrock__elevation'][0] -= 2e-6 * space_dt
Now we test to see if soil depth and topography are right:
>>> np.around(mg.at_node['soil__depth'], decimals=3) # doctest: +NORMALIZE_WHITESPACE
array([ 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.495, 0.493,
0.492, 0.5 , 0.5 , 0.493, 0.493, 0.491, 0.5 , 0.5 ,
0.492, 0.491, 0.486, 0.5 , 0.5 , 0.5 , 0.5 , 0.5 , 0.5 ])
>>> np.around(mg.at_node['topographic__elevation'], decimals=3) # doctest: +NORMALIZE_WHITESPACE
array([ 0.423, 1.536, 2.573, 3.511, 4.561, 1.582, 0.424, 0.429,
0.438, 5.51 , 2.54 , 0.429, 0.429, 0.439, 6.526, 3.559,
0.438, 0.439, 0.451, 7.553, 4.559, 5.541, 6.57 , 7.504,
8.51 ])
"""
_name = "Space"
_input_var_names = (
"flow__receiver_node",
"flow__upstream_node_order",
"topographic__steepest_slope",
"drainage_area",
"soil__depth",
)
_output_var_names = "topographic__elevation" "soil__depth"
_var_units = {
"flow__receiver_node": "-",
"flow__upstream_node_order": "-",
"topographic__steepest_slope": "-",
"drainage_area": "m**2",
"soil__depth": "m",
"topographic__elevation": "m",
}
_var_mapping = {
"flow__receiver_node": "node",
"flow__upstream_node_order": "node",
"topographic__steepest_slope": "node",
"drainage_area": "node",
"soil__depth": "node",
"topographic__elevation": "node",
}
_var_doc = {
"flow__receiver_node": "Node array of receivers (node that receives flow from current "
"node)",
"flow__upstream_node_order": "Node array containing downstream-to-upstream ordered list of "
"node IDs",
"topographic__steepest_slope": "Topographic slope at each node",
"drainage_area": "Upstream accumulated surface area contributing to the node's "
"discharge",
"soil__depth": "Depth of sediment above bedrock",
"topographic__elevation": "Land surface topographic elevation",
}
_cite_as = """@Article{gmd-10-4577-2017,
AUTHOR = {Shobe, C. M. and Tucker, G. E. and Barnhart, K. R.},
TITLE = {The SPACE~1.0 model: a~Landlab component for 2-D calculation of sediment transport, bedrock erosion, and landscape evolution},
JOURNAL = {Geoscientific Model Development},
VOLUME = {10},
YEAR = {2017},
NUMBER = {12},
PAGES = {4577--4604},
URL = {https://www.geosci-model-dev.net/10/4577/2017/},
DOI = {10.5194/gmd-10-4577-2017}
}"""
    def __init__(
        self,
        grid,
        K_sed=None,
        K_br=None,
        F_f=None,
        phi=None,
        H_star=None,
        v_s=None,
        m_sp=None,
        n_sp=None,
        sp_crit_sed=None,
        sp_crit_br=None,
        discharge_field="surface_water__discharge",
        solver="basic",
        dt_min=DEFAULT_MINIMUM_TIME_STEP,
        **kwds
    ):
        """Initialize the Space model.

        Parameters are documented in the class docstring. Raises
        NotImplementedError for route-to-multiple flow directors and
        ValueError for an unknown ``solver`` name.
        """
        # SPACE only supports route-to-one flow directors: a receiver array
        # larger than the node count indicates a route-to-multiple director.
        if grid.at_node["flow__receiver_node"].size != grid.size("node"):
            msg = (
                "A route-to-multiple flow director has been "
                "run on this grid. The landlab development team has not "
                "verified that SPACE is compatible with "
                "route-to-multiple methods. Please open a GitHub Issue "
                "to start this process."
            )
            raise NotImplementedError(msg)

        super(Space, self).__init__(
            grid,
            m_sp=m_sp,
            n_sp=n_sp,
            phi=phi,
            F_f=F_f,
            v_s=v_s,
            dt_min=dt_min,
            discharge_field=discharge_field,
        )

        self._grid = grid  # store grid

        # space specific inits
        self.H_star = H_star

        # Reuse existing soil/bedrock fields if present; otherwise create them.
        if "soil__depth" in grid.at_node:
            self.soil__depth = grid.at_node["soil__depth"]
        else:
            self.soil__depth = grid.add_zeros("soil__depth", at="node", dtype=float)

        if "bedrock__elevation" in grid.at_node:
            self.bedrock__elevation = grid.at_node["bedrock__elevation"]
        else:
            self.bedrock__elevation = grid.add_zeros(
                "bedrock__elevation", at="node", dtype=float
            )
            # Bedrock surface starts at topography minus the soil mantle.
            self.bedrock__elevation[:] = self.topographic__elevation - self.soil__depth

        # Per-node sediment-entrainment and bedrock-erosion rates.
        self.Es = np.zeros(grid.number_of_nodes)
        self.Er = np.zeros(grid.number_of_nodes)

        # K's and critical values can be floats, grid fields, or arrays
        self.K_sed = return_array_at_node(grid, K_sed)
        self.K_br = return_array_at_node(grid, K_br)

        self.sp_crit_sed = return_array_at_node(grid, sp_crit_sed)
        self.sp_crit_br = return_array_at_node(grid, sp_crit_br)

        # Handle option for solver
        if solver == "basic":
            self.run_one_step = self.run_one_step_basic
        elif solver == "adaptive":
            self.run_one_step = self.run_with_adaptive_time_step_solver
            self.time_to_flat = np.zeros(grid.number_of_nodes)
            # 1/(1-phi): converts a deposition flux into a soil-thickness rate.
            self.porosity_factor = 1.0 / (1.0 - self.phi)
        else:
            raise ValueError(
                "Parameter 'solver' must be one of: " + "'basic', 'adaptive'"
            )
def _calc_erosion_rates(self):
"""Calculate erosion rates."""
# if sp_crits are zero, then this colapses to correct all the time.
omega_sed = self.K_sed * self.Q_to_the_m * np.power(self.slope, self.n_sp)
omega_br = self.K_br * self.Q_to_the_m * np.power(self.slope, self.n_sp)
omega_sed_over_sp_crit = np.divide(
omega_sed,
self.sp_crit_sed,
out=np.zeros_like(omega_sed),
where=self.sp_crit_sed != 0,
)
omega_br_over_sp_crit = np.divide(
omega_br,
self.sp_crit_br,
out=np.zeros_like(omega_br),
where=self.sp_crit_br != 0,
)
self.sed_erosion_term = omega_sed - self.sp_crit_sed * (
1.0 - np.exp(-omega_sed_over_sp_crit)
)
self.br_erosion_term = omega_br - self.sp_crit_br * (
1.0 - np.exp(-omega_br_over_sp_crit)
)
self.Es = self.sed_erosion_term * (
1.0 - np.exp(-self.soil__depth / self.H_star)
)
self.Er = self.br_erosion_term * np.exp(-self.soil__depth / self.H_star)
def run_one_step_basic(self, dt=1.0, flooded_nodes=None, **kwds):
"""Calculate change in rock and alluvium thickness for
a time period 'dt'.
Parameters
----------
dt : float
Model timestep [T]
flooded_nodes : array
Indices of flooded nodes, passed from flow router
"""
# Choose a method for calculating erosion:
self._calc_hydrology()
self._calc_erosion_rates()
self.qs_in[:] = 0
# iterate top to bottom through the stack, calculate qs
# cythonized version of calculating qs_in
calculate_qs_in(
np.flipud(self.stack),
self.flow_receivers,
self.cell_area_at_node,
self.q,
self.qs,
self.qs_in,
self.Es,
self.Er,
self.v_s,
self.F_f,
self.phi,
)
self.depo_rate[self.q > 0] = self.qs[self.q > 0] * (
self.v_s / self.q[self.q > 0]
)
# now, the analytical solution to soil thickness in time:
# need to distinguish D=kqS from all other cases to save from blowup!
flooded = np.full(self._grid.number_of_nodes, False, dtype=bool)
flooded[flooded_nodes] = True
# distinguish cases:
blowup = self.depo_rate == self.K_sed * self.Q_to_the_m * self.slope
# first, potential blowup case:
# positive slopes, not flooded
pos_not_flood = (self.q > 0) & (blowup) & (self.slope > 0) & (~flooded)
self.soil__depth[pos_not_flood] = self.H_star * np.log(
(self.sed_erosion_term[pos_not_flood] / self.H_star) * dt
+ np.exp(self.soil__depth[pos_not_flood] / self.H_star)
)
# positive slopes, flooded
pos_flood = (self.q > 0) & (blowup) & (self.slope > 0) & (flooded)
self.soil__depth[pos_flood] = (self.depo_rate[pos_flood] / (1 - self.phi)) * dt
# non-positive slopes, not flooded
non_pos_not_flood = (self.q > 0) & (blowup) & (self.slope <= 0) & (~flooded)
self.soil__depth[non_pos_not_flood] += (
self.depo_rate[non_pos_not_flood] / (1 - self.phi) * dt
)
# more general case:
pos_not_flood = (self.q > 0) & (~blowup) & (self.slope > 0) & (~flooded)
self.soil__depth[pos_not_flood] = self.H_star * np.log(
(
1
/ (
(self.depo_rate[pos_not_flood] / (1 - self.phi))
/ (self.sed_erosion_term[pos_not_flood])
- 1
)
)
* (
np.exp(
(
self.depo_rate[pos_not_flood] / (1 - self.phi)
- (self.sed_erosion_term[pos_not_flood])
)
* (dt / self.H_star)
)
* (
(
(
self.depo_rate[pos_not_flood]
/ (1 - self.phi)
/ (self.sed_erosion_term[pos_not_flood])
)
- 1
)
* np.exp(self.soil__depth[pos_not_flood] / self.H_star)
+ 1
)
- 1
)
)
# places where slope <= 0 but not flooded:
neg_slope_not_flooded = (
(self.q > 0) & (~blowup) & (self.slope <= 0) & (~flooded)
)
self.soil__depth[neg_slope_not_flooded] += (
self.depo_rate[neg_slope_not_flooded] / (1 - self.phi) * dt
)
# flooded nodes:
flooded_nodes = (self.q > 0) & (~blowup) & (flooded)
self.soil__depth[flooded_nodes] += (
self.depo_rate[flooded_nodes] / (1 - self.phi) * dt
)
# where discharge exists
discharge_exists = self.q > 0
self.bedrock__elevation[discharge_exists] += dt * (
-self.br_erosion_term[discharge_exists]
* (np.exp(-self.soil__depth[discharge_exists] / self.H_star))
)
# finally, determine topography by summing bedrock and soil
cores = self._grid.core_nodes
self.topographic__elevation[cores] = (
self.bedrock__elevation[cores] + self.soil__depth[cores]
)
    def run_with_adaptive_time_step_solver(self, dt=1.0, flooded_nodes=[], **kwds):
        """Run step with CHILD-like solver that adjusts time steps to prevent
        slope flattening.

        Parameters
        ----------
        dt : float
            Global model timestep [T]; internally subdivided as needed.
        flooded_nodes : array-like
            Indices of flooded nodes, passed from the flow router.

        Examples
        --------
        >>> from landlab import RasterModelGrid
        >>> from landlab.components import FlowAccumulator
        >>> import numpy as np
        >>> rg = RasterModelGrid((3, 4))
        >>> z = rg.add_zeros('topographic__elevation', at='node')
        >>> z[:] = 0.1 * rg.x_of_node
        >>> H = rg.add_zeros('soil__depth', at='node')
        >>> H += 0.1
        >>> br = rg.add_zeros('bedrock__elevation', at='node')
        >>> br[:] = z - H
        >>> fa = FlowAccumulator(rg, flow_director='FlowDirectorSteepest')
        >>> fa.run_one_step()
        >>> sp = Space(rg, K_sed=1.0, K_br=0.1,
        ...            F_f=0.5, phi=0.0, H_star=1., v_s=1.0,
        ...            m_sp=0.5, n_sp = 1.0, sp_crit_sed=0,
        ...            sp_crit_br=0, solver='adaptive')
        >>> sp.run_one_step(dt=10.0)
        >>> np.round(sp.Es[5:7], 4)
        array([ 0.0029,  0.0074])
        >>> np.round(sp.Er[5:7], 4)
        array([ 0.0032,  0.0085])
        >>> np.round(H[5:7], 3)
        array([ 0.088,  0.078])
        """
        # NOTE(review): the mutable default flooded_nodes=[] is only ever read
        # and rebound (never mutated in place), so it is harmless here, though
        # None would be the conventional sentinel.

        # Initialize remaining_time, which records how much of the global time
        # step we have yet to use up.
        remaining_time = dt

        z = self._grid.at_node["topographic__elevation"]
        br = self._grid.at_node["bedrock__elevation"]
        H = self._grid.at_node["soil__depth"]
        r = self.flow_receivers
        time_to_flat = np.zeros(len(z))
        time_to_zero_alluv = np.zeros(len(z))
        dzdt = np.zeros(len(z))
        cores = self._grid.core_nodes

        first_iteration = True

        # Outer WHILE loop: keep going until time is used up
        while remaining_time > 0.0:

            # Update all the flow-link slopes.
            #
            # For the first iteration, we assume this has already been done
            # outside the component (e.g., by flow router), but we need to do
            # it ourselves on subsequent iterations.
            if not first_iteration:
                # update the link slopes
                self._update_flow_link_slopes()
                # update where nodes are flooded. This shouuldn't happen because
                # of the dynamic timestepper, but just in case, we update here.
                new_flooded_nodes = np.where(self.slope < 0)[0]
                flooded_nodes = np.asarray(
                    np.unique(np.concatenate((flooded_nodes, new_flooded_nodes))),
                    dtype=np.int64,
                )
            else:
                first_iteration = False

            # Calculate rates of entrainment
            self._calc_hydrology()
            self._calc_erosion_rates()

            # CORRECTION HERE?
            # Flooded nodes are treated as pure deposition sites: no erosion.
            self.Es[flooded_nodes] = 0.0
            self.Er[flooded_nodes] = 0.0

            # Zero out sediment influx for new iteration
            self.qs_in[:] = 0

            calculate_qs_in(
                np.flipud(self.stack),
                self.flow_receivers,
                self.cell_area_at_node,
                self.q,
                self.qs,
                self.qs_in,
                self.Es,
                self.Er,
                self.v_s,
                self.F_f,
                self.phi,
            )

            self.depo_rate[self.q > 0] = self.qs[self.q > 0] * (
                self.v_s / self.q[self.q > 0]
            )
            # TODO handle flooded nodes in the above fn

            # Now look at upstream-downstream node pairs, and recording the
            # time it would take for each pair to flatten. Take the minimum.
            dzdt[cores] = self.depo_rate[cores] - (self.Es[cores] + self.Er[cores])
            rocdif = dzdt - dzdt[r]
            zdif = z - z[r]
            time_to_flat[:] = remaining_time

            converging = np.where(rocdif < 0.0)[0]
            time_to_flat[converging] = -(
                TIME_STEP_FACTOR * zdif[converging] / rocdif[converging]
            )
            time_to_flat[np.where(zdif <= 0.0)[0]] = remaining_time

            # From this, find the maximum stable time step with regard to slope
            # evolution.
            dt_max1 = np.amin(time_to_flat)

            # Next we consider time to exhaust regolith
            time_to_zero_alluv[:] = remaining_time
            # NOTE(review): the porosity factor multiplies only the deposition
            # term here (not Es) — confirm against the dH/dt equation intended
            # for this solver.
            dHdt = self.porosity_factor * (self.depo_rate) - self.Es
            decreasing_H = np.where(dHdt < 0.0)[0]
            time_to_zero_alluv[decreasing_H] = -(
                TIME_STEP_FACTOR * H[decreasing_H] / dHdt[decreasing_H]
            )

            # Now find the smallest time that would lead to near-empty alluv
            dt_max2 = np.amin(time_to_zero_alluv)

            # Take the smaller of the limits
            dt_max = min(dt_max1, dt_max2)
            if dt_max < self.dt_min:
                dt_max = self.dt_min

            # Now a vector operation: apply dzdt and dhdt to all nodes
            br[cores] -= self.Er[cores] * dt_max
            H[cores] += dHdt[cores] * dt_max
            z[cores] = br[cores] + H[cores]

            # Update remaining time and continue
            remaining_time -= dt_max
| 37.254296 | 153 | 0.559127 |
c12927af3a029e0584489c9d1e0f71d750ae829f | 1,868 | py | Python | .github/workflows/postgresql_settings.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 1,544 | 2015-01-01T22:16:31.000Z | 2022-03-31T19:17:45.000Z | .github/workflows/postgresql_settings.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 1,686 | 2015-01-02T18:26:31.000Z | 2022-03-31T20:12:03.000Z | .github/workflows/postgresql_settings.py | Jaykingamez/evennia | cf7cab1fea99ede3efecb70a65c3eb0fba1d3745 | [
"BSD-3-Clause"
] | 867 | 2015-01-02T21:01:54.000Z | 2022-03-29T00:28:27.000Z | """
Evennia settings file.
The available options are found in the default settings file found
here:
evennia/settings_default.py (inside the installed Evennia library)
Remember:
Don't copy more from the default file than you actually intend to
change; this will make sure that you don't overload upstream updates
unnecessarily.
When changing a setting requiring a file system path (like
path/to/actual/file.py), use GAME_DIR and EVENNIA_DIR to reference
your game folder and the Evennia library folders respectively. Python
paths (path.to.module) should be given relative to the game's root
folder (typeclasses.foo) whereas paths within the Evennia library
needs to be given explicitly (evennia.foo).
If you want to share your game dir, including its settings, you can
put secret game- or server-specific settings in secret_settings.py.
"""
import os  # NOTE(review): appears unused in this chunk — possibly kept for path settings; confirm

# Use the defaults from Evennia unless explicitly overridden
from evennia.settings_default import *

######################################################################
# Evennia base server config
######################################################################

# This is the name of your game. Make it catchy!
SERVERNAME = "testing_mygame"

# Testing database types
# PostgreSQL connection used when running the test suite.
DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql",
        "NAME": "evennia",
        "USER": "evennia",
        "PASSWORD": "password",
        "HOST": "localhost",
        "PORT": "",  # use default
        "TEST": {"NAME": "default"},  # database name Django creates for tests
    }
}

######################################################################
# Settings given in secret_settings.py override those in this file.
######################################################################
try:
    from server.conf.secret_settings import *
except ImportError:
    print("secret_settings.py file not found or failed to import.")
dad379baaee79b811b22b22f501741544b483d6b | 1,835 | py | Python | aiokraken/websockets/schemas/unsubscribe.py | asmodehn/aiokraken | b260bd41d5aa091e6a4f1818328426fbe6f625c0 | [
"MIT"
] | null | null | null | aiokraken/websockets/schemas/unsubscribe.py | asmodehn/aiokraken | b260bd41d5aa091e6a4f1818328426fbe6f625c0 | [
"MIT"
] | 82 | 2019-08-30T09:37:49.000Z | 2022-03-29T14:53:22.000Z | aiokraken/websockets/schemas/unsubscribe.py | asmodehn/aiokraken | b260bd41d5aa091e6a4f1818328426fbe6f625c0 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
import typing
from marshmallow import fields, post_load, post_dump, pre_load, pre_dump
from aiokraken.rest.schemas.base import BaseSchema
from aiokraken.websockets.schemas.subscribe import Subscription, SubscriptionSchema
@dataclass(frozen=True)
class Unsubscribe:
    """Payload for a websocket ``unsubscribe`` request addressed by pair."""

    # The subscription (channel name / interval / depth) being cancelled.
    subscription: Subscription
    pair: typing.List[str]  # TODO : use pair with proper type
    # Optional client-chosen request identifier.
    reqid: typing.Optional[int] = field(default=None)
@dataclass(frozen=True)
class ChannelUnsubscribe:
    """Payload for a websocket ``unsubscribe`` request addressed by channel id."""

    # Numeric id of the channel to unsubscribe from.
    channel_id: int
    # Optional client-chosen request identifier.
    reqid: typing.Optional[int] = field(default=None)
class UnsubscribeSchema(BaseSchema):
    """
    Marshmallow schema (de)serializing pair-addressed ``unsubscribe`` messages.

    >>> s= UnsubscribeSchema()
    >>> s.load({'event': 'unsubscribe',
    ...  'pair': ['XBT/USD'],
    ...  'subscription': {'name': 'ticker'}
    ... })
    Unsubscribe(subscription=Subscription(name='ticker', interval=None, depth=None, token=''), pair=['XBT/USD'], reqid=None)
    """

    # Message discriminator: always serialized as the literal 'unsubscribe'.
    event = fields.Constant("unsubscribe")
    reqid = fields.Integer()
    pair = fields.List(fields.String())
    subscription = fields.Nested(SubscriptionSchema())

    @post_load
    def build_model(self, data, **kwargs):
        """Build an Unsubscribe instance from the validated payload."""
        data.pop('event')  # not needed any longer
        a = Unsubscribe(**data)
        return a
class ChannelUnsubscribeSchema(BaseSchema):
    """
    Marshmallow schema (de)serializing channel-id-addressed ``unsubscribe`` messages.

    >>> s= ChannelUnsubscribeSchema()
    >>> s.load({'event': 'unsubscribe',
    ...  'reqid': 39,
    ...  'channel_id': 142
    ... })
    ChannelUnsubscribe(channel_id=142, reqid=39)
    """

    # Message discriminator: always serialized as the literal 'unsubscribe'.
    event = fields.Constant("unsubscribe")
    reqid = fields.Integer()
    channel_id = fields.Integer()

    @post_load
    def build_model(self, data, **kwargs):
        """Build a ChannelUnsubscribe instance from the validated payload."""
        data.pop('event')  # not needed any longer
        a = ChannelUnsubscribe(**data)
        return a
# Run the embedded schema doctests when this module is executed directly.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
| 26.594203 | 124 | 0.66158 |
e4d10182ff87fb56988884cc61ae63704f69a03e | 994 | py | Python | scripts/disc_ssl_https.py | leshak/zabbix-ssl-nginx | efed212bbb17a32dddc9d30beb354c040ba63f65 | [
"MIT"
] | null | null | null | scripts/disc_ssl_https.py | leshak/zabbix-ssl-nginx | efed212bbb17a32dddc9d30beb354c040ba63f65 | [
"MIT"
] | null | null | null | scripts/disc_ssl_https.py | leshak/zabbix-ssl-nginx | efed212bbb17a32dddc9d30beb354c040ba63f65 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import crossplane
import json
NGINX_CONFIG_PATH = '/etc/nginx/nginx.conf'
HTTPS_PORT = 'ssl'
domainsList = []
nginxConfig = crossplane.parse(NGINX_CONFIG_PATH)
if nginxConfig['config']:
for cfile in nginxConfig['config']:
for parsed in cfile['parsed']:
if 'block' in parsed:
foundHttps = False
httpsDomain = None
for blk in parsed['block']:
if blk['directive'] == 'listen':
if HTTPS_PORT in blk['args']:
foundHttps = True
if foundHttps and blk['directive'] == 'server_name' and len(blk['args']) > 0:
httpsDomain = blk['args'][0]
if foundHttps and httpsDomain != None:
domainsList.append({
"{#DOMAIN_HTTPS}": httpsDomain
})
print(json.dumps({
'data': domainsList
}))
| 26.157895 | 97 | 0.512072 |
75c59fb9e7d8db6973c74d7c3e7544c27854509d | 63 | py | Python | utils/__init__.py | Misterion777/ConceptFlow | 50cedc4b1db1d0311ce98af6088609812bc71934 | [
"MIT"
] | 107 | 2020-05-02T07:41:13.000Z | 2022-02-23T07:36:20.000Z | utils/__init__.py | Misterion777/ConceptFlow | 50cedc4b1db1d0311ce98af6088609812bc71934 | [
"MIT"
] | 25 | 2020-05-02T08:13:41.000Z | 2022-03-12T00:27:57.000Z | utils/__init__.py | Misterion777/ConceptFlow | 50cedc4b1db1d0311ce98af6088609812bc71934 | [
"MIT"
] | 15 | 2020-05-18T12:30:51.000Z | 2022-02-15T01:16:51.000Z | from .utils import padding, padding_triple_id, build_kb_adj_mat | 63 | 63 | 0.873016 |
dc4aa435613b92ebc97ffc682ff5b7d035d072b5 | 10,035 | py | Python | anyex/bxinth.py | ttwishing/anyex | cfd1f2f04ab992b790add4843aafff91e5773cbf | [
"MIT"
] | null | null | null | anyex/bxinth.py | ttwishing/anyex | cfd1f2f04ab992b790add4843aafff91e5773cbf | [
"MIT"
] | null | null | null | anyex/bxinth.py | ttwishing/anyex | cfd1f2f04ab992b790add4843aafff91e5773cbf | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/anyex/anyex/blob/master/CONTRIBUTING.md#how-to-contribute-code
from anyex.base.exchange import Exchange
from anyex.base.errors import ExchangeError
class bxinth (Exchange):
    def describe(self):
        """Return the static exchange metadata merged over the base defaults.

        Declares identity, rate limit, REST endpoints, flat 0.25% trading
        fees, and exchange-specific currency-code remappings.
        """
        return self.deep_extend(super(bxinth, self).describe(), {
            'id': 'bxinth',
            'name': 'BX.in.th',
            'countries': 'TH',  # Thailand
            'rateLimit': 1500,
            'has': {
                'CORS': False,
                'fetchTickers': True,
                'fetchOpenOrders': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/27766412-567b1eb4-5ed7-11e7-94a8-ff6a3884f6c5.jpg',
                'api': 'https://bx.in.th/api',
                'www': 'https://bx.in.th',
                'doc': 'https://bx.in.th/info/api',
            },
            'api': {
                'public': {
                    'get': [
                        '',  # ticker
                        'options',
                        'optionbook',
                        'orderbook',
                        'pairing',
                        'trade',
                        'tradehistory',
                    ],
                },
                'private': {
                    'post': [
                        'balance',
                        'biller',
                        'billgroup',
                        'billpay',
                        'cancel',
                        'deposit',
                        'getorders',
                        'history',
                        'option-issue',
                        'option-bid',
                        'option-sell',
                        'option-myissue',
                        'option-mybid',
                        'option-myoptions',
                        'option-exercise',
                        'option-cancel',
                        'option-history',
                        'order',
                        'withdrawal',
                        'withdrawal-history',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'taker': 0.25 / 100,
                    'maker': 0.25 / 100,
                },
            },
            # Exchange-specific codes mapped to unified currency codes
            # (consumed via common_currency_code in fetch_markets/fetch_balance).
            'commonCurrencies': {
                'DAS': 'DASH',
                'DOG': 'DOGE',
            },
        })
def fetch_markets(self):
markets = self.publicGetPairing()
keys = list(markets.keys())
result = []
for p in range(0, len(keys)):
market = markets[keys[p]]
id = str(market['pairing_id'])
base = market['secondary_currency']
quote = market['primary_currency']
base = self.common_currency_code(base)
quote = self.common_currency_code(quote)
symbol = base + '/' + quote
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'info': market,
})
return result
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostBalance()
balance = response['balance']
result = {'info': balance}
currencies = list(balance.keys())
for c in range(0, len(currencies)):
currency = currencies[c]
code = self.common_currency_code(currency)
account = {
'free': float(balance[currency]['available']),
'used': 0.0,
'total': float(balance[currency]['total']),
}
account['used'] = account['total'] - account['free']
result[code] = account
return self.parse_balance(result)
def fetch_order_book(self, symbol, limit=None, params={}):
    """Fetch the order book for *symbol*.

    `limit` is accepted for interface compatibility but not supported
    by the exchange endpoint.
    """
    self.load_markets()
    orderbook = self.publicGetOrderbook(self.extend({
        'pairing': self.market_id(symbol),
    }, params))
    return self.parse_order_book(orderbook)
def parse_ticker(self, ticker, market=None):
    """Convert a raw exchange ticker into the unified ticker structure.

    The exchange does not provide a server timestamp, so the local
    clock (self.milliseconds()) is used.
    """
    timestamp = self.milliseconds()
    symbol = None
    if market:
        symbol = market['symbol']
    last = float(ticker['last_price'])
    return {
        'symbol': symbol,
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'high': None,
        'low': None,
        'bid': float(ticker['orderbook']['bids']['highbid']),
        'bidVolume': None,
        # NOTE(review): the ask side also reads the 'highbid' key — this
        # mirrors the exchange's payload naming; confirm against the
        # BX.in.th API docs before changing.
        'ask': float(ticker['orderbook']['asks']['highbid']),
        'askVolume': None,
        'vwap': None,
        'open': None,
        'close': last,
        'last': last,
        'previousClose': None,
        'change': float(ticker['change']),
        'percentage': None,
        'average': None,
        'baseVolume': float(ticker['volume_24hours']),
        'quoteVolume': None,
        'info': ticker,
    }
def fetch_tickers(self, symbols=None, params={}):
    """Fetch tickers for all markets.

    NOTE: `symbols` is accepted for interface compatibility but the
    endpoint always returns every market; no client-side filtering is
    applied here.
    """
    self.load_markets()
    tickers = self.publicGet(params)
    result = {}
    for id in list(tickers.keys()):
        ticker = tickers[id]
        market = self.markets_by_id[id]
        symbol = market['symbol']
        result[symbol] = self.parse_ticker(ticker, market)
    return result
def fetch_ticker(self, symbol, params={}):
    """Fetch the ticker for a single *symbol*.

    The endpoint returns a dict keyed by pairing id even when filtered
    to one pairing, so the entry is looked up by the market id.
    """
    self.load_markets()
    market = self.market(symbol)
    tickers = self.publicGet(self.extend({
        'pairing': market['id'],
    }, params))
    id = str(market['id'])
    ticker = tickers[id]
    return self.parse_ticker(ticker, market)
def parse_trade(self, trade, market):
    """Convert a raw public trade into the unified trade structure."""
    timestamp = self.parse8601(trade['trade_date'])
    return {
        'id': trade['trade_id'],
        'info': trade,
        'order': trade['order_id'],
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': market['symbol'],
        'type': None,  # order type is not reported by the endpoint
        'side': trade['trade_type'],
        'price': float(trade['rate']),
        'amount': float(trade['amount']),
    }
def fetch_trades(self, symbol, since=None, limit=None, params={}):
    """Fetch recent public trades for *symbol*.

    `since`/`limit` filtering is delegated to parse_trades.
    """
    self.load_markets()
    market = self.market(symbol)
    response = self.publicGetTrade(self.extend({
        'pairing': market['id'],
    }, params))
    return self.parse_trades(response['trades'], market, since, limit)
def create_order(self, symbol, type, side, amount, price=None, params={}):
    """Place an order; returns {'info': raw, 'id': order_id}.

    NOTE: the exchange's 'type' request field carries the *side*
    (buy/sell); the unified `type` argument (limit/market) is not sent.
    """
    self.load_markets()
    response = self.privatePostOrder(self.extend({
        'pairing': self.market_id(symbol),
        'type': side,
        'amount': amount,
        'rate': price,
    }, params))
    return {
        'info': response,
        'id': str(response['order_id']),
    }
def cancel_order(self, id, symbol=None, params={}):
    """Cancel an order by id.

    The endpoint also expects a 'pairing' id; it is currently sent as
    None (known upstream limitation).
    """
    self.load_markets()
    pairing = None  # TODO fixme
    return self.privatePostCancel({
        'order_id': id,
        'pairing': pairing,
    })
def parse_order(self, order, market=None):
    """Convert a raw private order into the unified order structure.

    If no market is supplied, it is resolved from the order's
    'pairing_id' when known.
    """
    side = self.safe_string(order, 'order_type')
    symbol = None
    if market is None:
        marketId = self.safe_string(order, 'pairing_id')
        if marketId is not None:
            if marketId in self.markets_by_id:
                market = self.markets_by_id[marketId]
    if market is not None:
        symbol = market['symbol']
    timestamp = self.parse8601(order['date'])
    price = self.safe_float(order, 'rate')
    amount = self.safe_float(order, 'amount')
    return {
        'info': order,
        'id': order['order_id'],
        'timestamp': timestamp,
        'datetime': self.iso8601(timestamp),
        'symbol': symbol,
        'type': 'limit',  # the exchange only supports limit orders here
        'side': side,
        'price': price,
        'amount': amount,
    }
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
    """Fetch open orders, optionally restricted to one *symbol*."""
    self.load_markets()
    request = {}
    market = None
    if symbol is not None:
        market = self.market(symbol)
        request['pairing'] = market['id']
    response = self.privatePostGetorders(self.extend(request, params))
    orders = self.parse_orders(response['orders'], market, since, limit)
    return self.filter_by_symbol(orders, symbol)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Build the signed request (url/method/body/headers).

    Private calls are authenticated with
    sha256(apiKey + nonce + secret) sent as form-encoded 'signature'.
    """
    url = self.urls['api'] + '/'
    if path:
        url += path + '/'
    if params:
        url += '?' + self.urlencode(params)
    if api == 'private':
        self.check_required_credentials()
        nonce = self.nonce()
        auth = self.apiKey + str(nonce) + self.secret
        signature = self.hash(self.encode(auth), 'sha256')
        body = self.urlencode(self.extend({
            'key': self.apiKey,
            'nonce': nonce,
            'signature': signature,
            # twofa: self.twofa,
        }, params))
        headers = {
            'Content-Type': 'application/x-www-form-urlencoded',
        }
    return {'url': url, 'method': method, 'body': body, 'headers': headers}
def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
    """Issue the request and raise ExchangeError on failed private calls.

    Public responses are returned as-is; private responses must carry a
    truthy 'success' flag, otherwise the raw payload is raised.
    """
    response = self.fetch2(path, api, method, params, headers, body)
    if api == 'public':
        return response
    if 'success' in response:
        if response['success']:
            return response
    raise ExchangeError(self.id + ' ' + self.json(response))
| 35.087413 | 126 | 0.47703 |
b11f82b119d199041baa20b4a479f12fb0759d21 | 1,355 | py | Python | tests/test_resource_loader.py | Mattlk13/python-percy-client | c6add77bb5a45d0d57ea579bea577086db114cab | [
"MIT"
] | 22 | 2016-06-06T18:52:47.000Z | 2019-08-05T20:05:50.000Z | tests/test_resource_loader.py | Mattlk13/python-percy-client | c6add77bb5a45d0d57ea579bea577086db114cab | [
"MIT"
] | 85 | 2016-06-08T17:16:12.000Z | 2020-07-08T05:00:10.000Z | tests/test_resource_loader.py | startupgrind/python-percy-client | 8c6ce83c74b9dbe51beadac5923f5d9f1d999f58 | [
"MIT"
] | 23 | 2016-07-01T17:30:29.000Z | 2021-01-13T16:33:55.000Z | import unittest
import os
from percy.resource_loader import ResourceLoader
TEST_FILES_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'testdata')
class FakeWebdriver(object):
    """Minimal webdriver stub with a relative current_url."""

    # class-level stand-ins for the selenium webdriver attributes
    page_source = 'foo'
    current_url = '/'
class FakeWebdriverAbsoluteUrl(object):
    """Minimal webdriver stub with an absolute current_url."""

    page_source = 'foo'
    current_url = 'http://testserver/'
class TestPercyResourceLoader(unittest.TestCase):
    """Tests for percy.resource_loader.ResourceLoader."""

    def test_blank_loader(self):
        # no root_dir -> no build resources
        resource_loader = ResourceLoader()
        assert resource_loader.build_resources == []
        # a webdriver supplies the snapshot resource at its current_url
        resource_loader = ResourceLoader(webdriver=FakeWebdriver())
        assert resource_loader.snapshot_resources[0].resource_url == '/'

    def test_build_resources(self):
        root_dir = os.path.join(TEST_FILES_DIR, 'static')
        resource_loader = ResourceLoader(root_dir=root_dir, base_url='/assets/')
        resources = resource_loader.build_resources
        resource_urls = sorted([r.resource_url for r in resources])
        assert resource_urls == [
            '/assets/app.js',
            '/assets/images/jellybeans.png',
            '/assets/images/logo.png',
            '/assets/styles.css',
        ]

    def test_absolute_snapshot_resources(self):
        resource_loader = ResourceLoader(webdriver=FakeWebdriverAbsoluteUrl())
        assert resource_loader.snapshot_resources[0].resource_url == '/'
| 33.875 | 85 | 0.695941 |
73c04e6552775f3e153965812e47e57a749742e1 | 2,129 | py | Python | setup.py | dduraipandian/scrapqd | 7515ebebd54506af765c65e5c9b14331208e6cf6 | [
"MIT"
] | null | null | null | setup.py | dduraipandian/scrapqd | 7515ebebd54506af765c65e5c9b14331208e6cf6 | [
"MIT"
] | 2 | 2022-03-14T00:42:59.000Z | 2022-03-15T15:50:14.000Z | setup.py | dduraipandian/scrapqd | 7515ebebd54506af765c65e5c9b14331208e6cf6 | [
"MIT"
] | null | null | null | import sys
from os import path
import setuptools
from setuptools.command.test import test as TestCommand # noqa
import scrapqd
DIR = path.abspath(path.dirname(__file__))
with open("README.rst") as f:
readme = f.read()
class Tox(TestCommand):
    """setuptools `test` command that delegates to tox."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import tox
        errcode = tox.cmdline(self.test_args)
        sys.exit(errcode)
setuptools.setup(
name="scrapqd",
packages=setuptools.find_packages(exclude=("tests",)),
version=scrapqd.__version__,
author=scrapqd.__author__,
author_email=scrapqd.__contact__,
description=scrapqd.__description__,
long_description=readme,
long_description_content_type="text/x-rst",
url="https://github.com/dduraipandian/scapqd",
package_dir={"scrapqd": "scrapqd"},
classifiers=[
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: Implementation :: CPython",
],
keywords=scrapqd.__keywords__,
python_requires=">=3.7",
tests_require=["tox"],
cmdclass={"test": Tox},
zip_safe=False,
include_package_data=True,
package_data={
"": [
"settings/user_agents.dat",
"gql/template.html",
"_static/sample.html"
]
},
install_requires=[
"lxml==4.8.0",
"flask==2.0.3",
"graphql-server==3.0.0b5",
"requests==2.27.1",
"graphql-core==3.2.0",
"selenium==4.1.3",
"immutable-config==1.0",
"webdriver-manager==3.5.3"
],
)
| 28.013158 | 70 | 0.612494 |
31445e474b76e20911fafe71d90958703aebb8ae | 5,833 | py | Python | gen_kinesis_data.new.py | alfredcs/marketTrend | 3a6afa04a3cb21f7d624a0169da6a733eaa7ab22 | [
"MIT"
] | null | null | null | gen_kinesis_data.new.py | alfredcs/marketTrend | 3a6afa04a3cb21f7d624a0169da6a733eaa7ab22 | [
"MIT"
] | null | null | null | gen_kinesis_data.new.py | alfredcs/marketTrend | 3a6afa04a3cb21f7d624a0169da6a733eaa7ab22 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
# vim: tabstop=2 shiftwidth=2 softtabstop=2 expandtab
import sys
import csv
import json
import argparse
from collections import OrderedDict
import base64
import traceback
import random
import time
from datetime import datetime
import pandas as pd
import boto3
import twint
import nest_asyncio
random.seed(47)
now = datetime.now()
local_filename="twitter_"+str(int(datetime.timestamp(now)))+".csv"
DELIMETER_BY_FORMAT = {
'csv': ',',
'tsv': '\t'
}
def gen_records(options, reader):
    """Yield batches of up to options.max_count serialized records from *reader*.

    Rows may be randomly skipped (options.random_select) and rows with a
    non-positive Quantity are dropped. The InvoiceDate is rewritten to
    today's date (keeping minutes/seconds) so the stream looks live.
    """
    def _adjust_date(dt):
        # replace the 'yyyy-mm-dd HH' prefix with the current date/hour
        n = len('yyyy-mm-dd_HH:')
        today = datetime.today()
        return '{}:{}'.format(today.strftime('%Y-%m-%d %H'), dt[n:])

    record_list = []
    for row in reader:
        is_skip = (random.randint(1, 47) % 19 < 5) if options.random_select else False
        if is_skip:
            continue
        if int(row['Quantity']) <= 0:
            continue
        row['InvoiceDate'] = _adjust_date(row['InvoiceDate'])
        if options.out_format in DELIMETER_BY_FORMAT:
            delimeter = DELIMETER_BY_FORMAT[options.out_format]
            data = delimeter.join([e for e in row.values()])
        else:
            try:
                # NOTE(review): SCHEMA_CONV_TOOL is not defined in this
                # module — the json branch relies on it existing at
                # runtime; confirm where it is supposed to come from.
                data = json.dumps(OrderedDict([(k, SCHEMA_CONV_TOOL[k](v)) for k, v in row.items()]), ensure_ascii=False)
            except Exception as ex:
                traceback.print_exc()
                continue
        if options.max_count == len(record_list):
            yield record_list
            record_list = []
        record_list.append(data)
    if record_list:
        yield record_list
def put_records_to_firehose(client, options, records):
    """Write each record to the Firehose delivery stream, one put_record per item.

    Each record is retried up to MAX_RETRY_COUNT times with a random
    backoff; exhausting the retries raises RuntimeError. In dry-run mode
    records are only printed.
    """
    MAX_RETRY_COUNT = 3
    for data in records:
        if options.dry_run:
            print(data)
            continue
        for _ in range(MAX_RETRY_COUNT):
            try:
                response = client.put_record(
                    DeliveryStreamName=options.stream_name,
                    Record={
                        'Data': '{}\n'.format(data)
                    }
                )
                break
            except Exception as ex:
                traceback.print_exc()
                time.sleep(random.randint(1, 10))
        else:
            # for/else: only reached when every retry failed
            raise RuntimeError('[ERROR] Failed to put_records into stream: {}'.format(options.stream_name))
def put_records_to_kinesis(client, options, records):
    """Batch *records* into one Kinesis put_records call with random partition keys.

    Retries the whole batch up to MAX_RETRY_COUNT times; exhausting the
    retries raises RuntimeError. In dry-run mode the payload is printed.
    """
    MAX_RETRY_COUNT = 3
    payload_list = []
    for data in records:
        partition_key = 'part-{:05}'.format(random.randint(1, 1024))
        payload_list.append({'Data': data, 'PartitionKey': partition_key})
    if options.dry_run:
        print(json.dumps(payload_list, ensure_ascii=False))
        return
    for _ in range(MAX_RETRY_COUNT):
        try:
            response = client.put_records(Records=payload_list, StreamName=options.stream_name)
            break
        except Exception as ex:
            traceback.print_exc()
            time.sleep(random.randint(1, 10))
    else:
        # for/else: only reached when every retry failed
        raise RuntimeError('[ERROR] Failed to put_records into stream: {}'.format(options.stream_name))
def get_tweets(key_words, filename, since_date):
    """Scrape tweets matching *key_words* since *since_date* into a CSV file.

    Uses twint's search; output is written to *filename* with full user
    info and console output suppressed.
    """
    c = twint.Config()
    c.Search = key_words
    c.Store_csv = True
    c.User_full = True
    c.Output = filename
    c.Since = since_date
    c.Hide_output = True
    twint.run.Search(c)
def main():
    """Scrape tweets and forward each line to Kinesis/Firehose (or stdout)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--region-name', action='store', default='us-west-2', help='aws region name (default: us-east-1)')
    parser.add_argument('--out-format', default='json', choices=['csv', 'tsv', 'json'])
    parser.add_argument('--twitter-keyword', default='cisco', help='cisco; cisco OR \\SD-WAN SASE\\')
    parser.add_argument('--twitter-sincedate', default='2021-05-12', help='Get all tweets since this date')
    parser.add_argument('--service-name', required=True, choices=['kinesis', 'firehose', 'console'])
    parser.add_argument('--stream-name', help='The name of the stream to put the data record into.')
    parser.add_argument('--max-count', default=10, type=int, help='The max number of records to put.')
    parser.add_argument('--random-select', action='store_true')
    parser.add_argument('--dry-run', action='store_true')
    options = parser.parse_args()

    # fewer progress steps in dry-run mode
    COUNT_STEP = 10 if options.dry_run else 1000
    get_tweets(options.twitter_keyword, local_filename, options.twitter_sincedate)
    with open(local_filename, newline='') as jsonfile:
        client = boto3.client(options.service_name, region_name=options.region_name) if options.service_name != 'console' else None
        counter = 0
        for records in jsonfile:
            if options.service_name == 'kinesis':
                put_records_to_kinesis(client, options, json.loads(records))
            elif options.service_name == 'firehose':
                put_records_to_firehose(client, options, records)
            else:
                print('\n'.join([e for e in records]))
            counter += 1
            if counter % COUNT_STEP == 0:
                print('[INFO] {} steps are processed'.format(counter), file=sys.stderr)
            if options.dry_run:
                # dry-run only processes the first line
                break
if __name__ == '__main__':
    main()
| 34.111111 | 189 | 0.681468 |
7b1b45949984f3dbe596d2c3aeb036f918184590 | 1,688 | py | Python | parser.py | NyoNyoHtwe/Python-exercises | ef060239dcf2d21ee5cde8a1326af6d130d662ed | [
"MIT"
] | null | null | null | parser.py | NyoNyoHtwe/Python-exercises | ef060239dcf2d21ee5cde8a1326af6d130d662ed | [
"MIT"
] | null | null | null | parser.py | NyoNyoHtwe/Python-exercises | ef060239dcf2d21ee5cde8a1326af6d130d662ed | [
"MIT"
] | null | null | null |
class ParserError(Exception):
    """Raised when the token stream does not match the expected grammar."""
    pass
class Sentence(object):
    """A parsed sentence holding subject/verb/object words."""

    def __init__(self, subject, verb, obj):
        # remember we take ('noun','princess') tuples and convert them
        self.subject = subject[1]
        self.verb = verb[1]
        self.object = obj[1]
def peek(word_list):
    """Return the token type of the first word, or None if the list is empty."""
    if word_list:
        word = word_list[0]
        return word[0]
    else:
        return None
def match(word_list, expecting):
    """Pop and return the first word if its type equals *expecting*, else None.

    Note the word is consumed even when the type does not match.
    """
    if word_list:
        word = word_list.pop(0)
        if word[0] == expecting:
            return word
        else:
            return None
    else:
        return None
def skip(word_list, word_type):
    """Consume leading words of type *word_type* from the list."""
    while peek(word_list) == word_type:
        match(word_list, word_type)
def parse_verb(word_list):
    """Consume leading stop words, then match a verb or raise ParserError."""
    skip(word_list, 'stop')
    if peek(word_list) == 'verb':
        return match(word_list, 'verb')
    else:
        raise ParserError("Expected a verb next.")
def parse_object(word_list):
    """Consume stop words, then match a noun or direction, else raise."""
    skip(word_list, 'stop')
    next_word = peek(word_list)
    if next_word == 'noun':
        return match(word_list, 'noun')
    elif next_word == 'direction':
        return match(word_list, 'direction')
    else:
        raise ParserError("Expected a noun or direction next.")
def parse_subject(word_list):
    """Match an explicit noun subject, or default to the implicit player.

    A leading verb means the subject was omitted ("eat honey"), so the
    implicit ('noun', 'player') is returned without consuming the verb.
    """
    skip(word_list, 'stop')
    next_word = peek(word_list)
    if next_word == 'noun':
        return match(word_list, 'noun')
    elif next_word == 'verb':
        return ('noun', 'player')
    else:
        raise ParserError("Expected a verb next.")
def parse_sentence(word_list):
    """Parse subject, verb and object in order and return a Sentence."""
    subj = parse_subject(word_list)
    verb = parse_verb(word_list)
    obj = parse_object(word_list)
    return Sentence(subj, verb, obj)
| 23.444444 | 72 | 0.613152 |
b3bd09c3ed4813548f547d03b1b4717965ec50e5 | 10,259 | py | Python | scalabel/label/utils.py | scalabel/scalabel | 857a32e1c08e5b9a7ab346468940621c2fe2226a | [
"Apache-2.0"
] | 279 | 2019-11-18T01:48:39.000Z | 2022-03-30T00:16:43.000Z | scalabel/label/utils.py | elrichgro/scalabel | 9be76e8976e936d357f3153ca195eac2d7d8f120 | [
"Apache-2.0"
] | 141 | 2019-11-20T02:36:11.000Z | 2022-03-29T15:17:46.000Z | scalabel/label/utils.py | elrichgro/scalabel | 9be76e8976e936d357f3153ca195eac2d7d8f120 | [
"Apache-2.0"
] | 85 | 2019-11-18T06:10:12.000Z | 2022-03-27T12:32:55.000Z | """Utility functions for label."""
import math
from typing import Dict, List, Optional, Tuple
import numpy as np
import pytest
from scipy.spatial.transform import Rotation
from ..common.typing import NDArrayF64
from .typing import Category, Extrinsics, Frame, FrameGroup, Intrinsics, Label
def get_intrinsics_from_matrix(matrix: NDArrayF64) -> Intrinsics:
    """Get intrinsics data structure from 3x3 matrix.

    Reads focal lengths from the diagonal, the principal point from the
    last column, and the skew from matrix[0, 1].
    """
    intrinsics = Intrinsics(
        focal=(matrix[0, 0], matrix[1, 1]),
        center=(matrix[0, 2], matrix[1, 2]),
        skew=matrix[0, 1],
    )
    return intrinsics
def get_matrix_from_intrinsics(intrinsics: Intrinsics) -> NDArrayF64:
    """Get the 3x3 camera intrinsic matrix from an Intrinsics object."""
    calibration = np.identity(3)
    calibration[0, 2] = intrinsics.center[0]
    calibration[1, 2] = intrinsics.center[1]
    calibration[0, 0] = intrinsics.focal[0]
    calibration[1, 1] = intrinsics.focal[1]
    calibration[0, 1] = intrinsics.skew
    return calibration
def get_extrinsics_from_matrix(matrix: NDArrayF64) -> Extrinsics:
    """Get extrinsics data structure from a 4x4 transformation matrix.

    The translation comes from the last column; the rotation is the
    xyz Euler decomposition of the upper-left 3x3 block.
    """
    extrinsics = Extrinsics(
        location=(matrix[0, -1], matrix[1, -1], matrix[2, -1]),
        rotation=tuple(
            Rotation.from_matrix(matrix[:3, :3]).as_euler("xyz").tolist()
        ),
    )
    return extrinsics
def get_matrix_from_extrinsics(extrinsics: Extrinsics) -> NDArrayF64:
    """Convert an Extrinsics object to a 4x4 homogeneous transform."""
    rot_mat = Rotation.from_euler("xyz", extrinsics.rotation).as_matrix()
    translation = np.array(extrinsics.location)
    extrinsics_mat = np.identity(4)
    extrinsics_mat[:3, :3] = rot_mat
    extrinsics_mat[:3, -1] = translation
    return extrinsics_mat
def get_leaf_categories(parent_categories: List[Category]) -> List[Category]:
    """Get the leaf categories in the category tree structure (depth-first)."""
    result = []
    for category in parent_categories:
        if category.subcategories is None:
            result.append(category)
        else:
            result.extend(get_leaf_categories(category.subcategories))
    return result
def get_parent_categories(
    parent_categories: List[Category],
) -> Dict[str, List[Category]]:
    """Get all parent categories and their associated leaf categories.

    Returns an empty dict as soon as a leaf (no subcategories) is seen
    at this level, i.e. only pure-parent levels produce entries.
    """
    result = {}
    for category in parent_categories:
        if category.subcategories is not None:
            result.update(get_parent_categories(category.subcategories))
            result[category.name] = get_leaf_categories([category])
        else:
            return {}
    return result
def check_crowd(label: Label) -> bool:
    """Check the 'crowd' attribute; missing attributes default to False."""
    if label.attributes is not None:
        crowd = bool(label.attributes.get("crowd", False))
    else:
        crowd = False
    return crowd
def check_ignored(label: Label) -> bool:
    """Check the 'ignored' attribute; missing attributes default to False."""
    if label.attributes is not None:
        ignored = bool(label.attributes.get("ignored", False))
    else:
        ignored = False
    return ignored
def check_occluded(label: Label) -> bool:
    """Check the 'occluded' attribute; missing attributes default to False."""
    if label.attributes is not None:
        occluded = bool(label.attributes.get("occluded", False))
    else:
        occluded = False
    return occluded
def check_truncated(label: Label) -> bool:
    """Check the 'truncated' attribute; missing attributes default to False."""
    if label.attributes is not None:
        truncated = bool(label.attributes.get("truncated", False))
    else:
        truncated = False
    return truncated
def cart2hom(pts_3d: NDArrayF64) -> NDArrayF64:
    """Nx3 points in Cartesian to Homogeneous by appending ones."""
    n = pts_3d.shape[0]
    pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1))))
    return pts_3d_hom
def project_points_to_image(
    points: NDArrayF64, intrinsics: NDArrayF64
) -> NDArrayF64:
    """Project Nx3 camera-frame points to Nx2 pixels with a 3x3 intrinsics matrix.

    Points are first normalized by depth (z), then multiplied by the
    intrinsics; only the x/y pixel columns are returned.
    """
    hom_cam_coords = points / points[:, 2:3]
    pts_2d = np.dot(hom_cam_coords, np.transpose(intrinsics))
    return pts_2d[:, :2]  # type: ignore
def rotation_y_to_alpha(
    rotation_y: float, center: Tuple[float, float, float]
) -> float:
    """Convert rotation around the y-axis to viewpoint angle (alpha).

    The result is wrapped into (-pi, pi].
    """
    alpha = rotation_y - math.atan2(center[0], center[2])
    if alpha > math.pi:
        alpha -= 2 * math.pi
    if alpha <= -math.pi:
        alpha += 2 * math.pi
    return alpha
def get_box_transformation_matrix(
    obj_loc: Tuple[float, float, float],
    obj_size: Tuple[float, float, float],
    ry: float,
) -> NDArrayF64:
    """Create a 4x4 transformation matrix for a given label box pose.

    Scales a unit box by (l, w, h), rotates by *ry* around the z-axis
    and translates to *obj_loc*. Note obj_size is unpacked as (l, h, w).
    """
    x, y, z = obj_loc
    cos = math.cos(ry)
    sin = math.sin(ry)
    l, h, w = obj_size
    return np.array(
        [
            [l * cos, -w * sin, 0, x],
            [l * sin, w * cos, 0, y],
            [0, 0, h, z],
            [0, 0, 0, 1],
        ]
    )
def compare_results(result: List[Frame], result_compare: List[Frame]) -> None:
    """Compare two lists of frames, asserting field-by-field equality.

    Float fields are compared with pytest.approx; optional fields
    (intrinsics/extrinsics/labels, box2d/box3d) must be None in both or
    compared element-wise.
    """
    for frame, frame_ref in zip(result, result_compare):
        assert frame.name == frame_ref.name
        assert frame.videoName == frame_ref.videoName
        assert frame.frameIndex == frame_ref.frameIndex
        if frame.intrinsics is not None:
            assert frame_ref.intrinsics is not None
            assert frame.intrinsics.focal == pytest.approx(
                frame_ref.intrinsics.focal
            )
            assert frame.intrinsics.center == pytest.approx(
                frame_ref.intrinsics.center
            )
            assert frame.intrinsics.skew == pytest.approx(
                frame_ref.intrinsics.skew
            )
        else:
            assert frame.intrinsics == frame_ref.intrinsics
        if frame.extrinsics is not None:
            assert frame_ref.extrinsics is not None
            assert frame.extrinsics.location == pytest.approx(
                frame_ref.extrinsics.location
            )
            assert frame.extrinsics.rotation == pytest.approx(
                frame_ref.extrinsics.rotation
            )
        else:
            assert frame.extrinsics == frame_ref.extrinsics
        if frame.labels is not None:
            if frame_ref.labels is None:
                # treat a missing reference label list as empty
                frame_ref.labels = []
                assert len(frame.labels) == 0
            for label, label_ref in zip(frame.labels, frame_ref.labels):
                assert label.id == label_ref.id
                assert label.category == label_ref.category
                if label.box2d is not None:
                    assert label_ref.box2d is not None
                    assert label.box2d.x1 == pytest.approx(label_ref.box2d.x1)
                    assert label.box2d.y1 == pytest.approx(label_ref.box2d.y1)
                    assert label.box2d.x2 == pytest.approx(label_ref.box2d.x2)
                    assert label.box2d.y2 == pytest.approx(label_ref.box2d.y2)
                else:
                    assert label.box2d == label_ref.box2d
                if label.box3d is not None:
                    assert label_ref.box3d is not None
                    assert label.box3d.location == pytest.approx(
                        label_ref.box3d.location
                    )
                    assert label.box3d.dimension == pytest.approx(
                        label_ref.box3d.dimension
                    )
                    assert label.box3d.orientation == pytest.approx(
                        label_ref.box3d.orientation
                    )
                else:
                    assert label.box3d == label_ref.box3d
        else:
            assert frame.labels == frame_ref.labels
def compare_groups_results(
    result: Optional[List[FrameGroup]],
    result_compare: Optional[List[FrameGroup]],
) -> None:
    """Compare two lists of frame groups, asserting field-by-field equality.

    Both inputs must be non-None; float fields are compared with
    pytest.approx and optional fields must match in presence.
    """
    assert result is not None and result_compare is not None
    for group, group_ref in zip(result, result_compare):
        assert group.name == group_ref.name
        assert group.videoName == group_ref.videoName
        assert group.url == group_ref.url
        if group.extrinsics is not None:
            assert group_ref.extrinsics is not None
            assert group.extrinsics.location == pytest.approx(
                group_ref.extrinsics.location
            )
            assert group.extrinsics.rotation == pytest.approx(
                group_ref.extrinsics.rotation
            )
        else:
            assert group.extrinsics == group_ref.extrinsics
        if group.frames is not None:
            assert group_ref.frames is not None
            # NOTE(review): upstream iterates group.name here (not
            # group.frames) — kept as-is; looks like a latent bug.
            for frame_names, frame_names_ref in zip(
                group.name, group_ref.name
            ):
                assert frame_names == frame_names_ref
        else:
            assert group.frames == group_ref.frames
        if group.labels is not None:
            if group_ref.labels is None:
                group_ref.labels = []
                assert len(group.labels) == 0
            for label, label_ref in zip(group.labels, group_ref.labels):
                assert label.id == label_ref.id
                assert label.category == label_ref.category
                if label.box2d is not None:
                    assert label_ref.box2d is not None
                    assert label.box2d.x1 == pytest.approx(label_ref.box2d.x1)
                    assert label.box2d.y1 == pytest.approx(label_ref.box2d.y1)
                    assert label.box2d.x2 == pytest.approx(label_ref.box2d.x2)
                    assert label.box2d.y2 == pytest.approx(label_ref.box2d.y2)
                else:
                    assert label.box2d == label_ref.box2d
                if label.box3d is not None:
                    assert label_ref.box3d is not None
                    assert label.box3d.location == pytest.approx(
                        label_ref.box3d.location
                    )
                    assert label.box3d.dimension == pytest.approx(
                        label_ref.box3d.dimension
                    )
                    assert label.box3d.orientation == pytest.approx(
                        label_ref.box3d.orientation
                    )
                else:
                    assert label.box3d == label_ref.box3d
b694476f105f4ee3ea43fb77b318c1b89bb31179 | 3,496 | py | Python | src/triangulator.py | agusalex/Wireless-Sensor-Tracking | 93a39f629ab485457db7b5b447828425c846a12e | [
"MIT"
] | 3 | 2019-05-23T06:46:58.000Z | 2019-05-30T12:48:01.000Z | src/triangulator.py | agusalex/triangulation | 93a39f629ab485457db7b5b447828425c846a12e | [
"MIT"
] | 2 | 2021-05-24T19:08:33.000Z | 2021-05-24T19:24:31.000Z | src/triangulator.py | agusalex/triangulation | 93a39f629ab485457db7b5b447828425c846a12e | [
"MIT"
] | null | null | null | from src.models.packet import Packet
from src.db_utils import db_interact
from src.graph import draw
from src.models.circle import Circle
from src.models.point import Point
from src.models.sniffer import Sniffer
from src.models.packet import Packet
from src.models.triangulation import Triangulation
from itertools import islice, chain
ms_threshold = 10 # max permitted interval between packets to be triangulated
packets_list = [] # list with all the packets from all sniffers
def save_triangulation(triangulation):
    """Persist one triangulation record through the db_interact helper."""
    db_interact.open_session()
    db_interact.persist(triangulation)
    db_interact.close_session()
def group_packets():
    """Group packets of one device into timestamp-aligned triples and triangulate.

    Works on the module-level packets_list: takes all packets sharing
    the first packet's MAC, sorts/dedupes them, triangulates consecutive
    triples within ms_threshold, then recurses for the next device.
    """
    if len(packets_list) < 3:
        return
    # all the packets with the same mac_address as the first packet
    device_packets = filter(lambda packet: packet.mac_address == packets_list[0].mac_address,
                            packets_list)
    device_packets = sorted(device_packets, key=lambda packet: packet.timestamp)  # order by timestamp
    device_packets = list(dict.fromkeys(device_packets))  # remove duplicates
    for used_packet in device_packets:  # remove analyzed packets from the main list
        packets_list.remove(used_packet)
    while len(device_packets) >= 3:
        first_three = take(3, device_packets)
        if ms_diff(first_three[0], first_three[1]) > ms_threshold or ms_diff(first_three[1],
                                                                             first_three[2]) > ms_threshold:
            device_packets.pop(0)  # the permitted ms interval is exceeded
        else:
            triangulate(first_three[0], first_three[1], first_three[2])
            for triangulated_packet in first_three:
                device_packets.remove(triangulated_packet)
    # recurse to process the next device's packets
    group_packets()
def triangulate(packet1, packet2, packet3):
    """Triangulate a device position from three packets and persist it.

    NOTE(review): the actual trilateration math is missing upstream —
    `x` and `y` below are undefined, so this raises NameError if called.
    See e.g. trilateration references for the intended computation.
    """
    average_timestamp = timestamp_avg(packet1, packet2, packet3)
    sniffer1 = packet1.get_associated_sniffer
    sniffer2 = packet2.get_associated_sniffer
    sniffer3 = packet3.get_associated_sniffer
    # still missing the actual triangulation part
    # https://github.com/gheja/trilateration.js/blob/master/trilateration.js
    # https://stackoverflow.com/questions/16485370/wifi-position-triangulation
    save_triangulation(Triangulation(Point(x, y), average_timestamp, packet1.mac_address))
def ms_diff(packet1, packet2):
    """Return the absolute difference between two packet timestamps in ms."""
    diff = abs(packet1.timestamp - packet2.timestamp)
    return diff.total_seconds() * 1000
def timestamp_avg(packet1, packet2, packet3):
    """Average three packet timestamps without summing them directly.

    Avoids adding absolute timestamps, see
    https://stackoverflow.com/questions/41723105/average-of-two-timestamps-in-python
    NOTE(review): with datetime objects `t3 - t2 - t1` mixes timedelta
    and datetime and would raise TypeError — confirm the timestamp type.
    """
    timestamp1 = packet1.timestamp
    timestamp2 = packet2.timestamp
    timestamp3 = packet3.timestamp
    return timestamp1 + abs(timestamp3 - timestamp2 - timestamp1) / 3
def get_packets():
    """Collect packets from all sniffers into the module-level packets_list.

    NOTE(review): sniffers_list is a None placeholder ("look for the
    sniffers in the DB") — iterating it raises TypeError until the DB
    lookup is implemented.
    """
    sniffers_list = None  # look for the sniffers in the DB
    for sniffer in sniffers_list:
        packets_list.append(sniffer.get_associated_packets)
def take(n, iterable):
    """Return the first *n* items of *iterable* as a list (itertools recipe)."""
    return list(islice(iterable, n))
# The received JSON file from a Sniffer will contain a Sniffer ID so we can identify it
# useful link for calculating distance in meters from a RSSI value https://appelsiini.net/2017/trilateration-with-n-points/
if __name__ == '__main__':
    pass
| 38.844444 | 142 | 0.728261 |
7d31fd739532fb05b395eeb50f0a167044fe1e68 | 493 | py | Python | mbox/scripts/python/echo-server.py | mrjamiebowman/mbox | a0628d05c5e84c6b29dda7db03336dfb9b2bd197 | [
"Apache-2.0"
] | 3 | 2020-12-20T06:16:43.000Z | 2022-03-14T05:14:01.000Z | mbox/scripts/python/echo-server.py | mrjamiebowman/mbox | a0628d05c5e84c6b29dda7db03336dfb9b2bd197 | [
"Apache-2.0"
] | null | null | null | mbox/scripts/python/echo-server.py | mrjamiebowman/mbox | a0628d05c5e84c6b29dda7db03336dfb9b2bd197 | [
"Apache-2.0"
] | 1 | 2022-03-14T05:23:14.000Z | 2022-03-14T05:23:14.000Z | #!/usr/bin/env python3
import socket

HOST = '0.0.0.0'  # Bind to all interfaces (note: 0.0.0.0 is NOT loopback)
PORT = 8080       # Port to listen on (non-privileged ports are > 1023)

# Minimal blocking TCP echo server: accept one connection and echo
# everything back until the peer closes the stream.
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    s.listen()
    conn, addr = s.accept()
    with conn:
        print('Connected by', addr)
        while True:
            data = conn.recv(1024)
            if not data:
                break  # peer closed the connection
            conn.sendall(data)
0141060f8177125a4a6738d3708a8d448159be87 | 8,975 | py | Python | tests/test_data/test_datasets/test_sot_dataset.py | LJoson/mmtracking | af471f07d2d2e5b30862c39f4d576a0a0fb81e69 | [
"Apache-2.0"
] | null | null | null | tests/test_data/test_datasets/test_sot_dataset.py | LJoson/mmtracking | af471f07d2d2e5b30862c39f4d576a0a0fb81e69 | [
"Apache-2.0"
] | null | null | null | tests/test_data/test_datasets/test_sot_dataset.py | LJoson/mmtracking | af471f07d2d2e5b30862c39f4d576a0a0fb81e69 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
from mmtrack.datasets import DATASETS as DATASETS
PREFIX = osp.join(osp.dirname(__file__), '../../data')
SOT_DATA_PREFIX = f'{PREFIX}/demo_sot_data'
DATASET_INFOS = dict(
GOT10kDataset=dict(
ann_file=osp.join(
SOT_DATA_PREFIX,
'trackingnet/annotations/trackingnet_train_infos.txt'),
img_prefix=osp.join(SOT_DATA_PREFIX, 'trackingnet')),
VOTDataset=dict(
dataset_type='vot2018',
ann_file=osp.join(
SOT_DATA_PREFIX,
'trackingnet/annotations/trackingnet_train_infos.txt'),
img_prefix=osp.join(SOT_DATA_PREFIX, 'trackingnet')),
OTB100Dataset=dict(
ann_file=osp.join(
SOT_DATA_PREFIX,
'trackingnet/annotations/trackingnet_train_infos.txt'),
img_prefix=osp.join(SOT_DATA_PREFIX, 'trackingnet')),
UAV123Dataset=dict(
ann_file=osp.join(
SOT_DATA_PREFIX,
'trackingnet/annotations/trackingnet_train_infos.txt'),
img_prefix=osp.join(SOT_DATA_PREFIX, 'trackingnet')),
LaSOTDataset=dict(
ann_file=osp.join(
SOT_DATA_PREFIX,
'trackingnet/annotations/trackingnet_train_infos.txt'),
img_prefix=osp.join(SOT_DATA_PREFIX, 'trackingnet')),
TrackingNetDataset=dict(
chunks_list=[0],
ann_file=osp.join(
SOT_DATA_PREFIX,
'trackingnet/annotations/trackingnet_train_infos.txt'),
img_prefix=osp.join(SOT_DATA_PREFIX, 'trackingnet')),
SOTCocoDataset=dict(
ann_file=osp.join(PREFIX, 'demo_cocovid_data', 'ann.json'),
img_prefix=osp.join(PREFIX, 'demo_cocovid_data')),
SOTImageNetVIDDataset=dict(
ann_file=osp.join(PREFIX, 'demo_cocovid_data', 'ann.json'),
img_prefix=osp.join(PREFIX, 'demo_cocovid_data')))
@pytest.mark.parametrize('dataset', [
    'GOT10kDataset', 'VOTDataset', 'OTB100Dataset', 'UAV123Dataset',
    'LaSOTDataset', 'TrackingNetDataset', 'SOTImageNetVIDDataset',
    'SOTCocoDataset'
])
def test_load_data_infos(dataset):
    """Smoke test: constructing each SOT dataset parses its annotations."""
    dataset_class = DATASETS.get(dataset)
    dataset_class(
        **DATASET_INFOS[dataset], pipeline=[], split='train', test_mode=False)
@pytest.mark.parametrize('dataset', [
    'GOT10kDataset', 'VOTDataset', 'LaSOTDataset', 'TrackingNetDataset',
    'SOTImageNetVIDDataset', 'SOTCocoDataset'
])
def test_get_bboxes_from_video(dataset):
    """The bbox array must contain one 4-element box per frame."""
    dataset_cls = DATASETS.get(dataset)
    sot_dataset = dataset_cls(
        **DATASET_INFOS[dataset], pipeline=[], split='train',
        test_mode=False)
    bboxes = sot_dataset.get_bboxes_from_video(0)
    num_frames = sot_dataset.num_frames_per_video[0]
    assert bboxes.shape[0] == num_frames
    assert bboxes.shape[1] == 4
@pytest.mark.parametrize('dataset', [
    'GOT10kDataset', 'VOTDataset', 'LaSOTDataset', 'TrackingNetDataset',
    'SOTImageNetVIDDataset', 'SOTCocoDataset'
])
def test_get_visibility_from_video(dataset):
    """Visibility flags must cover every frame of the first video."""
    dataset_cls = DATASETS.get(dataset)
    sot_dataset = dataset_cls(
        **DATASET_INFOS[dataset], pipeline=[], split='train',
        test_mode=False)
    visibility = sot_dataset.get_visibility_from_video(0)
    assert len(visibility['visible']) == sot_dataset.num_frames_per_video[0]
@pytest.mark.parametrize('dataset', [
    'GOT10kDataset', 'TrackingNetDataset', 'SOTImageNetVIDDataset',
    'SOTCocoDataset', 'VOTDataset', 'LaSOTDataset'
])
def test_get_ann_infos_from_video(dataset):
    """Smoke test: annotation infos for the first video load cleanly."""
    dataset_cls = DATASETS.get(dataset)
    sot_dataset = dataset_cls(
        **DATASET_INFOS[dataset], pipeline=[], split='train',
        test_mode=False)
    sot_dataset.get_ann_infos_from_video(0)
@pytest.mark.parametrize('dataset', [
    'GOT10kDataset', 'TrackingNetDataset', 'SOTImageNetVIDDataset',
    'SOTCocoDataset', 'VOTDataset', 'LaSOTDataset'
])
def test_get_img_infos_from_video(dataset):
    """Smoke test: image infos for the first video load cleanly."""
    dataset_cls = DATASETS.get(dataset)
    sot_dataset = dataset_cls(
        **DATASET_INFOS[dataset], pipeline=[], split='train',
        test_mode=False)
    sot_dataset.get_img_infos_from_video(0)
@pytest.mark.parametrize(
    'dataset',
    ['GOT10kDataset', 'VOTDataset', 'LaSOTDataset', 'TrackingNetDataset'])
def test_prepare_test_data(dataset):
    """Smoke test: a single (video, frame) test sample can be prepared."""
    dataset_cls = DATASETS.get(dataset)
    sot_dataset = dataset_cls(
        **DATASET_INFOS[dataset], pipeline=[], split='train', test_mode=True)
    sot_dataset.prepare_test_data(0, 1)
@pytest.mark.parametrize('dataset', [
    'GOT10kDataset', 'TrackingNetDataset', 'SOTImageNetVIDDataset',
    'SOTCocoDataset', 'LaSOTDataset'
])
def test_prepare_train_data(dataset):
    """Smoke test: a training sample can be prepared from the first video."""
    dataset_cls = DATASETS.get(dataset)
    sot_dataset = dataset_cls(
        **DATASET_INFOS[dataset], pipeline=[], split='train',
        test_mode=False)
    sot_dataset.prepare_train_data(0)
@pytest.mark.parametrize('dataset', ['GOT10kDataset', 'TrackingNetDataset'])
def test_format_results(dataset):
    """Run format_results on fixture tracking output and clean up artifacts."""
    dataset_cls = DATASETS.get(dataset)
    sot_dataset = dataset_cls(
        **DATASET_INFOS[dataset], pipeline=[], split='train', test_mode=True)
    # Collect the raw "x1,y1,x2,y2" result lines from both fixture videos.
    raw_lines = []
    for video_name in ['video-1', 'video-2']:
        raw_lines.extend(
            mmcv.list_from_file(
                osp.join(SOT_DATA_PREFIX, 'trackingnet', 'TRAIN_0',
                         video_name, 'track_results.txt')))
    # Parse each line into a [x1, y1, x2, y2, score] array (score fixed at 0).
    track_bboxes = []
    for line in raw_lines:
        x1, y1, x2, y2 = line.split(',')
        track_bboxes.append(
            np.array([float(x1), float(y1), float(x2), float(y2), 0.]))
    track_results = dict(track_bboxes=track_bboxes)
    tmp_dir = tempfile.TemporaryDirectory()
    sot_dataset.format_results(track_results, resfile_path=tmp_dir.name)
    # Remove the result directory and any zip archive format_results produced.
    if osp.isdir(tmp_dir.name):
        tmp_dir.cleanup()
    if osp.isfile(f'{tmp_dir.name}.zip'):
        os.remove(f'{tmp_dir.name}.zip')
def test_sot_ope_evaluation():
    """Check one-pass-evaluation (OPE) metrics against fixture data.

    Two 25-frame fixture videos supply tracked boxes and ground truth;
    ``evaluate`` must reproduce the hard-coded success/precision values.
    """
    dataset_class = DATASETS.get('UAV123Dataset')
    dataset_object = dataset_class(
        **DATASET_INFOS['UAV123Dataset'],
        pipeline=[],
        split='test',
        test_mode=True)
    dataset_object.num_frames_per_video = [25, 25]
    results = []
    data_infos = []
    data_root = osp.join(SOT_DATA_PREFIX, 'trackingnet', 'TRAIN_0')
    for video_name in ['video-1', 'video-2']:
        # One comma-separated box per frame.
        bboxes = np.loadtxt(
            osp.join(data_root, video_name, 'track_results.txt'),
            delimiter=',')
        # Append a dummy confidence-score column expected by evaluate().
        scores = np.zeros((len(bboxes), 1))
        bboxes = np.concatenate((bboxes, scores), axis=-1)
        results.extend(bboxes)
        data_infos.append(
            dict(
                video_path=osp.join(data_root, video_name),
                ann_path=osp.join(data_root, video_name, 'gt_for_eval.txt'),
                start_frame_id=1,
                end_frame_id=25,
                framename_template='%06d.jpg'))
    dataset_object.data_infos = data_infos
    track_results = dict(track_bboxes=results)
    eval_results = dataset_object.evaluate(track_results, metric=['track'])
    # Expected metric values precomputed from the fixture ground truth.
    assert eval_results['success'] == 67.524
    assert eval_results['norm_precision'] == 70.0
    assert eval_results['precision'] == 50.0
def test_sot_vot_evaluation():
    """Check VOT-style (EAO/accuracy/robustness) metrics against fixtures.

    Result lines are either a single value (a VOT state flag) or a
    4-value box; both are converted to arrays with a trailing 0. score.
    """
    dataset_class = DATASETS.get('VOTDataset')
    dataset_object = dataset_class(
        **DATASET_INFOS['VOTDataset'],
        pipeline=[],
        split='test',
        test_mode=True)
    dataset_object.num_frames_per_video = [25, 25]
    data_infos = []
    results = []
    vot_root = osp.join(SOT_DATA_PREFIX, 'trackingnet', 'TRAIN_0')
    for video_name in ['video-1', 'video-2']:
        results.extend(
            mmcv.list_from_file(
                osp.join(vot_root, video_name, 'vot2018_track_results.txt')))
        data_infos.append(
            dict(
                video_path=osp.join(vot_root, video_name),
                ann_path=osp.join(vot_root, video_name,
                                  'vot2018_gt_for_eval.txt'),
                start_frame_id=1,
                end_frame_id=25,
                framename_template='%08d.jpg'))
    dataset_object.data_infos = data_infos
    track_bboxes = []
    for result in results:
        result = result.split(',')
        if len(result) == 1:
            # Single-value line: tracker state code, padded with a 0. score.
            track_bboxes.append(np.array([float(result[0]), 0.]))
        else:
            # Four-value line: bounding box, padded with a 0. score.
            track_bboxes.append(
                np.array([
                    float(result[0]),
                    float(result[1]),
                    float(result[2]),
                    float(result[3]), 0.
                ]))
    track_bboxes = dict(track_bboxes=track_bboxes)
    eval_results = dataset_object.evaluate(
        track_bboxes, interval=[1, 3], metric=['track'])
    # Expected metric values precomputed from the fixture ground truth.
    assert abs(eval_results['eao'] - 0.6661) < 0.0001
    assert round(eval_results['accuracy'], 4) == 0.5826
    assert round(eval_results['robustness'], 4) == 6.0
| 35.058594 | 79 | 0.65415 |
3a4491083c6073893000d3d86a997247a7a8ee70 | 8,088 | py | Python | src/sst/elements/memHierarchy/tests/testCachLineTrack.py | Xiaoyang-Lu/sst-elements | 7946241e9f5a57a0bfdbfbf8452deacb1c3a9051 | [
"BSD-3-Clause"
] | null | null | null | src/sst/elements/memHierarchy/tests/testCachLineTrack.py | Xiaoyang-Lu/sst-elements | 7946241e9f5a57a0bfdbfbf8452deacb1c3a9051 | [
"BSD-3-Clause"
] | null | null | null | src/sst/elements/memHierarchy/tests/testCachLineTrack.py | Xiaoyang-Lu/sst-elements | 7946241e9f5a57a0bfdbfbf8452deacb1c3a9051 | [
"BSD-3-Clause"
] | null | null | null | # Automatically generated SST Python input
import sst
# Define the simulation components
# cores with private L1/L2
# Shared distributed LLCs
# All caches have prefetchers and limit prefetching
cores = 6
caches = 3 # Number of LLCs on the network
memories = 2
coreclock = "2.4GHz"
uncoreclock = "1.4GHz"
coherence = "MESI"
network_bw = "60GB/s"
DEBUG_L1 = 0
# Create merlin network - this is just simple single router
comp_network = sst.Component("network", "merlin.hr_router")
comp_network.addParams({
"xbar_bw" : network_bw,
"link_bw" : network_bw,
"input_buf_size" : "2KiB",
"num_ports" : cores + caches + memories,
"flit_size" : "36B",
"output_buf_size" : "2KiB",
"id" : "0",
"topology" : "merlin.singlerouter"
})
# Per-core pipeline: streamCPU -> private L1 -> private L2 -> router port x.
for x in range(cores):
    comp_cpu = sst.Component("cpu" + str(x), "memHierarchy.streamCPU")
    iface = comp_cpu.setSubComponent("memory", "memHierarchy.memInterface")
    comp_cpu.addParams({
        "clock" : coreclock,
        "commFreq" : 4, # issue request every 4th cycle
        "rngseed" : 99+x,
        "do_write" : 1,
        "num_loadstore" : 1500,
        "addressoffset" : 1024, # Stream between addresses 1024 & 16384
        "memSize" : 1024*4
    })
    comp_l1cache = sst.Component("l1cache" + str(x), "memHierarchy.Cache")
    comp_l1cache.addParams({
        "cache_frequency" : coreclock,
        "access_latency_cycles" : 3,
        "tag_access_latency_cycles" : 1,
        "mshr_latency_cycles" : 2,
        "replacement_policy" : "lfu",
        "coherence_protocol" : coherence,
        "cache_size" : "2KiB", # super tiny for lots of traffic
        "associativity" : 2,
        "L1" : 1,
        "debug" : DEBUG_L1,
        "debug_level" : 10,
        # prefetcher
        "prefetcher" : "cassini.cacheLineTrack",
        #"prefetcher" : "cassini.NextBlockPrefetcher",
        #"max_outstanding_prefetch" : 2, # No more than 2 outstanding prefetches at a time; only set since L1 mshr is unlimited in size (otherwise defaults to 1/2 mshr size)
    })
    comp_l2cache = sst.Component("l2cache" + str(x), "memHierarchy.Cache")
    comp_l2cache.addParams({
        "cache_frequency" : coreclock,
        "access_latency_cycles" : 9,
        "tag_access_latency_cycles" : 2,
        "mshr_latency_cycles" : 4,
        "replacement_policy" : "nmru",
        "coherence_protocol" : coherence,
        "cache_size" : "4KiB",
        "associativity" : 4,
        "max_requests_per_cycle" : 1,
        "mshr_num_entries" : 8,
        # Prefetch parameters
        #"prefetcher" : "cassini.NextBlockPrefetcher",
        #"drop_prefetch_mshr_level" : 5, # Drop prefetch when total misses > 5
        # MemNIC parameters
        "memNIC.network_bw" : network_bw,
        "memNIC.network_input_buffer_size" : "2KiB",
        "memNIC.network_output_buffer_size" : "2KiB",
    })
    # Wire cpu <-> L1 <-> L2 <-> network (port index matches the core index).
    cpu_l1_link = sst.Link("link_cpu_cache_" + str(x))
    cpu_l1_link.connect ( (iface, "port", "500ps"), (comp_l1cache, "high_network_0", "500ps") )
    l1_l2_link = sst.Link("link_l1_l2_" + str(x))
    l1_l2_link.connect( (comp_l1cache, "low_network_0", "100ps"), (comp_l2cache, "high_network_0", "100ps") )
    l2_network_link = sst.Link("link_l2_network_" + str(x))
    l2_network_link.connect( (comp_l2cache, "cache", "100ps"), (comp_network, "port" + str(x), "100ps") )
# Shared distributed L3: `caches` slices, lines assigned round-robin.
for x in range(caches):
    comp_l3cache = sst.Component("l3cache" + str(x), "memHierarchy.Cache")
    comp_l3cache.addParams({
        "cache_frequency" : uncoreclock,
        "access_latency_cycles" : 14,
        "tag_access_latency_cycles" : 6,
        "mshr_latency_cycles" : 12,
        "replacement_policy" : "random",
        "coherence_protocol" : coherence,
        "cache_size" : "1MiB",
        "associativity" : 32,
        "mshr_num_entries" : 8,
        # Distributed cache parameters
        "num_cache_slices" : caches,
        "slice_allocation_policy" : "rr", # Round-robin
        "slice_id" : x,
        # MemNIC parameters
        "memNIC.network_bw" : network_bw,
        "memNIC.network_input_buffer_size" : "2KiB",
        "memNIC.network_output_buffer_size" : "2KiB",
    })
    # L3 slice ports come after the core ports on the router.
    portid = x + cores
    l3_network_link = sst.Link("link_l3_network_" + str(x))
    l3_network_link.connect( (comp_l3cache, "directory", "100ps"), (comp_network, "port" + str(portid), "100ps") )
# One directory controller + memory controller pair per memory; addresses
# are interleaved across memories at cache-line (64B) granularity.
for x in range(memories):
    comp_directory = sst.Component("directory" + str(x), "memHierarchy.DirectoryController")
    comp_directory.addParams({
        "clock" : uncoreclock,
        "coherence_protocol" : coherence,
        "entry_cache_size" : 32768,
        "mshr_num_entries" : 16,
        # MemNIC parameters
        "memNIC.interleave_size" : "64B",    # Interleave at line granularity between memories
        "memNIC.interleave_step" : str(memories * 64) + "B",
        "memNIC.network_bw" : network_bw,
        "memNIC.addr_range_start" : x*64,
        "memNIC.addr_range_end" : 1024*1024*1024 - ((memories - x) * 64) + 63,
        "memNIC.network_input_buffer_size" : "2KiB",
        "memNIC.network_output_buffer_size" : "2KiB",
    })
    comp_memory = sst.Component("memory" + str(x), "memHierarchy.MemController")
    comp_memory.addParams({
        "clock" : "500MHz",
        "max_requests_per_cycle" : 2,
        "backing" : "none",
        # Backend parameters
        "backend" : "memHierarchy.simpleDRAM",
        "backend.mem_size" : "512MiB",
        "backend.tCAS" : 2,
        "backend.tRCD" : 2,
        "backend.tRP" : 3,
        "backend.cycle_time" : "3ns",
        "backend.row_size" : "4KiB",
        "backend.row_policy" : "closed",
    })
    # Directory ports come after the core and L3 ports on the router.
    portid = x + caches + cores
    link_directory_network = sst.Link("link_directory_network_" + str(x))
    link_directory_network.connect( (comp_directory, "network", "100ps"), (comp_network, "port" + str(portid), "100ps") )
    link_directory_memory_network = sst.Link("link_directory_memory_" + str(x))
    link_directory_memory_network.connect( (comp_directory, "memory", "400ps"), (comp_memory, "direct_link", "400ps") )
# Enable statistics
sst.setStatisticLoadLevel(7)
sst.setStatisticOutput("sst.statOutputConsole")
# Histograms collected from every memHierarchy cache: log2-binned per-line
# read/write/age counts and a linear per-word access histogram.
sst.enableStatisticForComponentType("memHierarchy.Cache",
                                    "hist_reads_log2",
                                    {"type":"sst.HistogramStatistic",
                                     "minvalue" : "0",
                                     "numbins" : "30",
                                     "binwidth" : "1",
                                     "includeoutofbounds" : "1"
                                    })
sst.enableStatisticForComponentType("memHierarchy.Cache",
                                    "hist_writes_log2",
                                    {"type":"sst.HistogramStatistic",
                                     "minvalue" : "0",
                                     "numbins" : "30",
                                     "binwidth" : "1",
                                     "includeoutofbounds" : "1"
                                    })
sst.enableStatisticForComponentType("memHierarchy.Cache",
                                    "hist_age_log2",
                                    {"type":"sst.HistogramStatistic",
                                     "minvalue" : "0",
                                     "numbins" : "32",
                                     "binwidth" : "1",
                                     "includeoutofbounds" : "1"
                                    })
sst.enableStatisticForComponentType("memHierarchy.Cache",
                                    "hist_word_accesses",
                                    {"type":"sst.HistogramStatistic",
                                     "minvalue" : "0",
                                     "numbins" : "9",
                                     "binwidth" : "1",
                                     "includeoutofbounds" : "1"
                                    })
| 40.238806 | 173 | 0.55094 |
ed34d17ee74978b34c20c3bb478f1c4d60658ca6 | 1,333 | py | Python | mindhome_alpha/erpnext/accounts/doctype/bank_account/test_bank_account.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:55:29.000Z | 2021-04-29T14:55:29.000Z | mindhome_alpha/erpnext/accounts/doctype/bank_account/test_bank_account.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | null | null | null | mindhome_alpha/erpnext/accounts/doctype/bank_account/test_bank_account.py | Mindhome/field_service | 3aea428815147903eb9af1d0c1b4b9fc7faed057 | [
"MIT"
] | 1 | 2021-04-29T14:39:01.000Z | 2021-04-29T14:39:01.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe import ValidationError
import unittest
# test_records = frappe.get_test_records('Bank Account')
class TestBankAccount(unittest.TestCase):
    def test_validate_iban(self):
        """validate_iban tolerates an empty IBAN, accepts valid IBANs and
        rejects IBANs whose check digits were corrupted."""
        valid_ibans = (
            'GB82 WEST 1234 5698 7654 32',
            'DE91 1000 0000 0123 4567 89',
            'FR76 3000 6000 0112 3456 7890 189',
        )
        # Same IBANs with a wrong checksum (3rd place).
        invalid_ibans = (
            'GB72 WEST 1234 5698 7654 32',
            'DE81 1000 0000 0123 4567 89',
            'FR66 3000 6000 0112 3456 7890 189',
        )

        bank_account = frappe.get_doc({'doctype': 'Bank Account'})

        # A document with no IBAN set must not blow up.
        try:
            bank_account.validate_iban()
        except AttributeError:
            self.fail(msg='BankAccount.validate_iban() failed for empty IBAN')

        for iban in valid_ibans:
            bank_account.iban = iban
            try:
                bank_account.validate_iban()
            except ValidationError:
                self.fail(
                    msg='BankAccount.validate_iban() failed for valid '
                        'IBAN {}'.format(iban))

        for not_iban in invalid_ibans:
            bank_account.iban = not_iban
            msg = 'BankAccount.validate_iban() accepted invalid IBAN {}'.format(not_iban)
            with self.assertRaises(ValidationError, msg=msg):
                bank_account.validate_iban()
| 26.66 | 80 | 0.722431 |
2318f4e9045e0f232a529c136942b0067e8e4844 | 32,610 | py | Python | pymks/tools.py | wd15/pymks-clean | 97a4145c56626f6a1dea5c3a67dbaf83d3372446 | [
"MIT"
] | null | null | null | pymks/tools.py | wd15/pymks-clean | 97a4145c56626f6a1dea5c3a67dbaf83d3372446 | [
"MIT"
] | null | null | null | pymks/tools.py | wd15/pymks-clean | 97a4145c56626f6a1dea5c3a67dbaf83d3372446 | [
"MIT"
] | null | null | null | import matplotlib.colors as colors
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.axes_grid1 import make_axes_locatable
from sklearn.learning_curve import learning_curve
from .stats import _auto_correlations
from .stats import _cross_correlations
import numpy as np
import warnings
# Silence all warnings emitted during plotting at import time.
warnings.filterwarnings("ignore")
# Use the ggplot style sheet with a darker default text color.
plt.style.library['ggplot']['text.color'] = '#555555'
plt.style.use('ggplot')
def _set_colors():
    """Register the 'PyMKS' color map with matplotlib and activate it."""
    high = np.array([26, 152, 80]) / 255.
    medium = np.array([255, 255, 191]) / 255.
    low = np.array([0, 0, 0]) / 255.
    plt.register_cmap(name='PyMKS', data=_set_cdict(high, medium, low))
    plt.set_cmap('PyMKS')
def _get_response_cmap():
    """Color map for response (e.g. strain, concentration) fields.

    Returns:
        LinearSegmentedColormap shading between three green tones.
    """
    high = np.array([179, 255, 204]) / 255.
    medium = np.array([28, 137, 63]) / 255.
    low = np.array([11, 53, 24]) / 255.
    return colors.LinearSegmentedColormap(
        'coeff_cmap', _set_cdict(high, medium, low), 256)
def _get_microstructure_cmap():
    """Gray-scale color map for microstructure plots.

    Returns:
        LinearSegmentedColormap from black to light gray.
    """
    high = np.array([229, 229, 229]) / 255.
    medium = np.array([114.5, 114.5, 114.5]) / 255.
    low = np.array([0, 0, 0]) / 255.
    return colors.LinearSegmentedColormap(
        'micro_cmap', _set_cdict(high, medium, low), 256)
def _get_diff_cmap():
    """Color map for difference (error) fields.

    Returns:
        LinearSegmentedColormap shading between three orange/red tones.
    """
    high = np.array([255, 207, 181]) / 255.
    medium = np.array([238, 86, 52]) / 255.
    low = np.array([99, 35, 21]) / 255.
    return colors.LinearSegmentedColormap(
        'diff_cmap', _set_cdict(high, medium, low), 256)
def _grid_matrix_cmap():
    """Gray-scale color map for grid-search score matrices.

    Returns:
        LinearSegmentedColormap from black to light gray.
    """
    high = np.array([229, 229, 229]) / 255.
    medium = np.array([114.5, 114.5, 114.5]) / 255.
    low = np.array([0, 0, 0]) / 255.
    return colors.LinearSegmentedColormap(
        'grid_cmap', _set_cdict(high, medium, low), 256)
def _set_cdict(HighRGB, MediumRGB, LowRGB):
"""
Helper function used to set color map from 3 RGB values.
Args:
HighRGB: RGB with highest values
MediumRGB: RGB with medium values
LowRGB: RGB with lowest values
Returns:
dictionary with colors and localizations on color bar.
"""
cdict = {'red': ((0.0, LowRGB[0], LowRGB[0]),
(0.5, MediumRGB[0], MediumRGB[0]),
(1.0, HighRGB[0], HighRGB[0])),
'green': ((0.0, LowRGB[1], LowRGB[1]),
(0.5, MediumRGB[1], MediumRGB[1]),
(1.0, HighRGB[1], HighRGB[1])),
'blue': ((0.0, LowRGB[2], LowRGB[2]),
(0.5, MediumRGB[2], MediumRGB[2]),
(1.0, HighRGB[2], HighRGB[2]))}
return cdict
def _get_coeff_cmap():
    """Color map for influence-coefficient plots.

    Returns:
        LinearSegmentedColormap shading from blue through light gray to red.
    """
    high = np.array([205, 0, 29]) / 255.
    medium = np.array([240, 240, 240]) / 255.
    low = np.array([17, 55, 126]) / 255.
    return colors.LinearSegmentedColormap(
        'coeff_cmap', _set_cdict(high, medium, low), 256)
def _get_color_list(n_sets):
"""
color list for dimensionality reduction plots
Args:
n_sets: number of dataset
Returns:
list of colors for n_sets
"""
color_list = ['#1a9850', '#f46d43', '#1f78b4', '#e31a1c',
'#6a3d9a', '#b2df8a', '#fdbf6f', '#a6cee3',
'#fb9a99', '#cab2d6', '#ffff99', '#b15928']
return color_list[:n_sets]
def draw_coeff(coeff, fontsize=15, figsize=None):
    """Visualize influence coefficients, one panel per local state.

    Args:
        coeff (ND array): influence coefficients with dimensions
            (x, y, n_states)
        fontsize (int, optional): title font size
        figsize (tuple, optional): subplot grid shape forwarded to
            _draw_fields
    """
    plt.close('all')
    n_coeff = coeff.shape[-1]
    titles = [r'Influence Coefficients $l = %s$' % index
              for index in np.arange(n_coeff)]
    # Move the state axis to the front so each state becomes one field.
    _draw_fields(np.rollaxis(coeff, -1, 0), _get_coeff_cmap(),
                 fontsize=fontsize, titles=titles, figsize=figsize)
def draw_microstructure_strain(microstructure, strain):
    """
    Draw microstructure and its associated strain

    Args:
        microstructure (2D array): numpy array with dimensions (x, y)
        strain (2D array): numpy array with dimensions (x, y)
    """
    plt.close('all')
    cmap = _get_response_cmap()
    fig = plt.figure(figsize=(8, 4))
    # Left panel: microstructure in gray scale, no axis ticks.
    ax0 = plt.subplot(1, 2, 1)
    ax0.imshow(microstructure, cmap=_get_microstructure_cmap(),
               interpolation='none')
    ax0.set_xticks(())
    ax0.set_yticks(())
    # Right panel: the strain field.
    ax1 = plt.subplot(1, 2, 2)
    im1 = ax1.imshow(strain, cmap=cmap, interpolation='none')
    ax1.set_xticks(())
    ax1.set_yticks(())
    ax1.set_title(r'$\mathbf{\varepsilon_{xx}}$', fontsize=25)
    ax0.set_title('Microstructure', fontsize=20)
    # Single color bar (strain only) placed to the right of both panels.
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([1.0, 0.05, 0.05, 0.9])
    fig.colorbar(im1, cax=cbar_ax)
    plt.tight_layout()
    plt.show()
def draw_microstructures(*microstructures):
    """Draw microstructures.

    Args:
        microstructures (3D array): numpy array with dimensions
            (n_samples, x, y)

    NOTE(review): only the first positional argument is drawn; any extra
    arguments are ignored — confirm whether that is intended.
    """
    samples = microstructures[0]
    blank_titles = [' '] * samples.shape[0]
    _draw_fields(samples, _get_microstructure_cmap(), 10, blank_titles)
def draw_strains(strains, labels=None, fontsize=15):
    """Draw strain fields.

    Args:
        strains (3D array): numpy arrays with dimensions (n_samples, x, y)
        labels (list, str, optional): titles for strain fields
        fontsize (int, optional): title font size
    """
    if labels is None:
        labels = [' '] * len(strains)
    _draw_fields(strains, _get_response_cmap(), fontsize, labels)
def draw_concentrations(concentrations, labels=None, fontsize=15):
    """Draw concentration fields.

    Args:
        concentrations (list): numpy arrays with dimensions (x, y)
        labels (list, optional): titles for the concentration fields
        fontsize (int, optional): title font size
    """
    if labels is None:
        labels = [" "] * len(concentrations)
    _draw_fields(concentrations, _get_response_cmap(), fontsize, labels)
def draw_strains_compare(strain_FEM, strain_MKS, fontsize=20):
    """Draw a side-by-side comparison of FE and MKS strain fields.

    Args:
        strain_FEM (2D array): strain field (x, y) from finite element
        strain_MKS (2D array): strain field (x, y) from MKS
        fontsize (int, optional): title font size
    """
    sources = ['Finite Element', 'MKS']
    titles = [r'$\mathbf{\varepsilon_{xx}}$ - %s' % source
              for source in sources]
    _draw_fields((strain_FEM, strain_MKS), _get_response_cmap(), fontsize,
                 titles)
def draw_concentrations_compare(concentrations, labels, fontsize=15):
    """Draw concentration fields for comparison, sharing one color scale.

    Args:
        concentrations (3D array): concentration arrays with dimensions
            (x, y)
        labels (list, str): one title per concentration array
        fontsize (int, optional): title font size
    """
    _draw_fields(concentrations, _get_response_cmap(), fontsize, labels)
def draw_differences(differences, labels=None, fontsize=15):
    """Draw differences between predicted response fields.

    Args:
        differences (list, 2D arrays): difference arrays with dimensions
            (x, y)
        labels (list, str, optional): one title per difference array
        fontsize (int, optional): title font size
    """
    if labels is None:
        labels = [' '] * len(differences)
    _draw_fields(differences, _get_diff_cmap(), fontsize, labels)
def _draw_fields(fields, field_cmap, fontsize, titles, figsize=None):
    """
    Helper function used to draw fields.

    Args:
        fields - iterable object with 2D numpy arrays
        field_cmap - color map for plot
        fontsize - font size for titles and color bar text
        titles - titles for plot; must have one entry per field
        figsize - (rows, cols) subplot grid; defaults to a single row

    NOTE(review): if titles is None the zip/indexing below raises; all
    current callers pass a list — confirm before relying on titles=None.
    """
    plt.close('all')
    # Shared color scale across all panels.
    vmin = np.min(fields)
    vmax = np.max(fields)
    n_fields = len(fields)
    if titles is not None:
        n_titles = len(titles)
        if n_fields != n_titles:
            raise RuntimeError(
                "number of plots does not match number of labels.")
    plt.close('all')
    if figsize is None:
        figsize = (1, n_fields)
    fig, axs = plt.subplots(figsize[0], figsize[1],
                            figsize=(figsize[1] * 4, figsize[0] * 4))
    # plt.subplots returns a single Axes (not an array) when there is only
    # one panel, hence the two branches.
    if n_fields > 1:
        for field, ax, title in zip(fields, axs.flat, titles):
            im = ax.imshow(field,
                           cmap=field_cmap, interpolation='none',
                           vmin=vmin, vmax=vmax)
            ax.set_xticks(())
            ax.set_yticks(())
            ax.set_title(title, fontsize=fontsize)
    else:
        im = axs.imshow(fields[0], cmap=field_cmap,
                        interpolation='none', vmin=vmin, vmax=vmax)
        axs.set_xticks(())
        axs.set_yticks(())
        axs.set_title(titles[0], fontsize=fontsize)
    # One shared color bar on the right for all panels.
    fig.subplots_adjust(right=0.8)
    cbar_ax = fig.add_axes([1.0, 0.05, 0.05, 0.9])
    cbar_font = np.floor(0.8 * fontsize)
    cbar_ax.tick_params(labelsize=cbar_font)
    cbar_ax.yaxis.set_offset_position('right')
    fig.colorbar(im, cax=cbar_ax)
    plt.tight_layout()
    plt.rc('font', **{'size': str(cbar_font)})
    plt.show()
def draw_gridscores(grid_scores, param, score_label=None, colors=None,
                    data_labels=None, param_label=None, fontsize=20):
    """
    Visualize the mean score values and standard deviations from grid
    scores resulting from GridSearchCV while varying 1 parameter.

    Args:
        grid_scores (list, grid_scores): `grid_scores_` attribute from
            GridSearchCV, or a list of such attributes (one curve each)
        param (str): name of the varied parameter used in grid_scores
        score_label (str, optional): label for the score value axis
        colors (list, optional): one plot color per grid_scores entry
        data_labels (list, optional): legend labels, one per curve
        param_label (str, optional): x-axis label for the parameter
        fontsize (int, optional): axis label font size
    """
    plt.close('all')
    # Normalize a single grid_scores_ result into a one-element list.
    if type(grid_scores[0]) is not list:
        grid_scores = [grid_scores]
    if data_labels is None:
        data_labels = [None for l in range(len(grid_scores))]
    if score_label is None:
        score_label = ''
    if param_label is None:
        # Bug fix: this line previously read `param_label is ''`, a no-op
        # identity comparison, leaving param_label as None.
        param_label = ''
    if colors is None:
        colors = _get_color_list(len(grid_scores))
    if len(grid_scores) != len(data_labels) or len(data_labels) != len(colors):
        raise RuntimeError(
            "grid_scores, colors, and param_lables must have the same length.")
    mins, maxes = [], []
    for grid_score, data_label, color in zip(grid_scores, data_labels, colors):
        tmp = [[params[param], mean_score, scores.std()]
               for params, mean_score, scores in grid_score]
        _param, errors, stddev = list(zip(*tmp))
        # Shade +/- one standard deviation around the mean score.
        _mins = np.array(errors) - np.array(stddev)
        _maxes = np.array(errors) + np.array(stddev)
        plt.fill_between(_param, _mins, _maxes, alpha=0.1,
                         color=color)
        plt.plot(_param, errors, 'o-', color=color, label=data_label,
                 linewidth=2)
        mins.append(min(_mins))
        maxes.append(max(_maxes))
    if data_labels[0] is not None:
        plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.,
                   fontsize=15)
    # Pad the y-limits by 5% of the overall data range.
    _min, _max = min(mins), max(maxes)
    y_epsilon = (_max - _min) * 0.05
    plt.ylim((_min - y_epsilon, _max + y_epsilon))
    plt.ticklabel_format(style='sci', axis='y')
    plt.ylabel(score_label, fontsize=fontsize)
    plt.xlabel(param_label, fontsize=fontsize)
    plt.show()
def draw_gridscores_matrix(grid_scores, params, score_label=None,
                           param_labels=None):
    """
    Visualize the score value matrix and standard deviation matrix from grids
    scores result from GridSearchCV while varying two parameters.

    Args:
        grid_scores (list): fitted GridSearchCV object (its `grid_scores_`
            and `param_grid` attributes are read)
        params (list): two parameter names used in grid_scores
        score_label (str): label for score value axis
        param_labels (list): parameter titles to appear on plot
    """
    plt.close('all')
    if score_label is None:
        score_label = 'R-Squared'
    if param_labels is None:
        param_labels = ['', '']
    # NOTE(review): the loop variable is `parameters` but the list stores
    # `params` (the function argument), so `param` below is just the
    # argument repeated; it is never used afterwards — looks like a latent
    # bug, confirm against pymks history.
    tmp = [[params, mean_score, scores.std()]
           for parameters, mean_score, scores in grid_scores.grid_scores_]
    param, means, stddev = list(zip(*tmp))
    param_range_0 = grid_scores.param_grid[params[0]]
    param_range_1 = grid_scores.param_grid[params[1]]
    mat_size = (len(param_range_1), len(param_range_0))
    fig, axs = plt.subplots(1, 2, figsize=(10, 5))
    # Stack the mean-score matrix and the stddev matrix along a new axis.
    matrices = np.concatenate((np.array(means).reshape(mat_size)[None],
                               np.array(stddev).reshape(mat_size)[None]))
    X_cmap = _grid_matrix_cmap()
    x_label = param_labels[0]
    y_label = param_labels[1]
    plot_title = [score_label, 'Standard Deviation']
    # Left panel: mean scores; right panel: standard deviations.
    for ax, label, matrix, title in zip(axs, param_labels,
                                        np.swapaxes(matrices, -1, -2),
                                        plot_title):
        ax.set_xticklabels(param_range_0, fontsize=12)
        ax.set_yticklabels(param_range_1, fontsize=12)
        ax.set_xticks(np.arange(len(param_range_0)))
        ax.set_yticks(np.arange(len(param_range_0)))
        ax.set_xlabel(x_label, fontsize=14)
        ax.set_ylabel(y_label, fontsize=14)
        ax.grid(False)
        im = ax.imshow(matrix,
                       cmap=X_cmap, interpolation='none')
        ax.set_title(title, fontsize=22)
        # Attach a color bar directly to the right of each panel.
        divider = make_axes_locatable(ax)
        cbar_ax = divider.append_axes("right", size="10%", pad=0.05)
        cbar = plt.colorbar(im, cax=cbar_ax)
        cbar.ax.tick_params(labelsize=12)
    fig.subplots_adjust(right=1.2)
    plt.show()
def draw_component_variance(variance):
    """
    Visualize cumulative percent variance as a function of components.

    Args:
        variance (list or 1D array): variance ratios from a dimensionality
            reduction technique (e.g. PCA `explained_variance_ratio_`).
    """
    plt.close('all')
    n_components = len(variance)
    x = np.arange(1, n_components + 1)
    # Coerce to ndarray first: with a plain Python list, `variance * 100`
    # would replicate the list 100 times instead of scaling the values.
    cumulative_percent = np.cumsum(np.asarray(variance) * 100)
    plt.plot(x, cumulative_percent, 'o-', color='#1a9641', linewidth=2)
    plt.xlabel('Number of Components', fontsize=15)
    plt.xlim(0, n_components + 1)
    plt.ylabel('Percent Variance', fontsize=15)
    plt.show()
def draw_components_scatter(datasets, labels, title=None,
                            component_labels=None, view_angles=None,
                            legend_outside=False, fig_size=None):
    """
    Visualize low dimensional representations of microstructures.

    Args:
        datasets (list, 2D arrays): low dimensional data with dimensions
            [n_samples, n_components]. The length of n_components must be 2 or
            3.
        labels (list, str): list of lables for each of each array datasets
        title: main title for plot
        component_labels: labels for components
        view_angles (int,int): the elevation and azimuth angles of the axes
            to rotate the axes.
        legend_outside: specify to move legend box outside the main plot
            domain
        figsize: (width, height) figure size in inches
    """
    plt.close('all')
    if title is None:
        title = 'Low Dimensional Representation'
    # NOTE(review): n_components is a 1-element ndarray (the shape of the
    # last sample row), so the `!=`, `+ 1` and truth tests below rely on
    # single-element numpy array semantics — fragile, confirm intended.
    n_components = np.array(datasets[0][-1].shape)
    if component_labels is None:
        component_labels = range(1, n_components + 1)
    if len(datasets) != len(labels):
        raise RuntimeError('datasets and labels must have the same length')
    if n_components != len(component_labels):
        raise RuntimeError('number of components and component_labels must'
                           ' have the same length')
    # Dispatch to the 2D or 3D scatter helper based on component count.
    if n_components[-1] == 2:
        _draw_components_2D(datasets, labels, title, component_labels[:2],
                            legend_outside, fig_size)
    elif n_components[-1] == 3:
        _draw_components_3D(datasets, labels, title, component_labels,
                            view_angles, legend_outside, fig_size)
    else:
        raise RuntimeError("n_components must be 2 or 3.")
def draw_evolution(datasets, labels, title=None, component_labels=None,
                   view_angles=None, legend_outside=False, fig_size=None):
    """
    Visualize low dimensional representations of microstructures.

    Args:
        datasets (list, 2D arrays): low dimensional data with dimensions
            [n_samples, n_components]. The length of n_components must be 2 or
            3.
        labels (list, str): list of lables for each of each array datasets
        title: main title for plot
        component_labels: labels for components
        view_angles (int,int): the elevation and azimuth angles of the axes
            to rotate the axes.
        legend_outside: specify to move legend box outside the main plot
            domain
        figsize: (width, height) figure size in inches

    NOTE(review): despite the docstring, only 2-column data
    (time, component) is accepted; anything else raises.
    """
    plt.close('all')
    if title is None:
        title = 'Low Dimensional Representation'
    # NOTE(review): n_components is a 1-element ndarray; see
    # draw_components_scatter for the same fragile comparison pattern.
    n_components = np.array(datasets[0][-1].shape)
    if component_labels is None:
        component_labels = range(1, n_components + 1)
    if len(datasets) != len(labels):
        raise RuntimeError('datasets and labels must have the same length')
    if n_components != len(component_labels):
        raise RuntimeError('number of components and component_labels must'
                           ' have the same length')
    if n_components[-1] == 2:
        _draw_components_evolution(datasets, labels,
                                   title, component_labels[:2],
                                   legend_outside, fig_size)
    else:
        raise RuntimeError("time and one component must be paired")
def _draw_components_2D(X, labels, title, component_labels,
                        legend_outside, fig_size):
    """
    Helper function to plot 2 components.

    Args:
        X: Arrays with low dimensional data
        labels: labels for each of the low dimensional arrays
        title: main plot title
        component_labels: two labels for the x/y axes
        legend_outside: when not None, redraw the legend outside the axes
        fig_size: optional (width, height) in inches
    """
    n_sets = len(X)
    color_list = _get_color_list(n_sets)
    if fig_size is not None:
        fig = plt.figure(figsize=(fig_size[0], fig_size[1]))
    else:
        fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel('Component ' + str(component_labels[0]), fontsize=15)
    ax.set_ylabel('Component ' + str(component_labels[1]), fontsize=15)
    # Pad axis limits by 5% of the data range across all datasets.
    X_array = np.concatenate(X)
    x_min, x_max = [np.min(X_array[:, 0]), np.max(X_array[:, 0])]
    y_min, y_max = [np.min(X_array[:, 1]), np.max(X_array[:, 1])]
    x_epsilon = (x_max - x_min) * 0.05
    y_epsilon = (y_max - y_min) * 0.05
    ax.set_xlim([x_min - x_epsilon, x_max + x_epsilon])
    ax.set_ylim([y_min - y_epsilon, y_max + y_epsilon])
    for label, pts, color in zip(labels, X, color_list):
        ax.plot(pts[:, 0], pts[:, 1], 'o', color=color, label=label)
    lg = plt.legend(loc=1, borderaxespad=0., fontsize=15)
    # NOTE(review): callers pass legend_outside=False by default, and
    # `False is not None` is True — so the legend is always redrawn
    # outside the axes; compare with `if legend_outside:` in the 3D
    # helper. Confirm which behavior is intended.
    if legend_outside is not None:
        lg = plt.legend(bbox_to_anchor=(1.05, 1.0), loc=2,
                        borderaxespad=0., fontsize=15)
        lg.draggable()
    plt.title(title, fontsize=20)
    plt.show()
def _draw_components_evolution(X, labels, title, component_labels,
                               legend_outside, fig_size):
    """
    Helper function to plot component evolution (time vs component).

    Args:
        X: Arrays with low dimensional data; column 0 is plotted on the
            x-axis ('Time'), column 1 on the y-axis ('Components')
        labels: labels for each of the low dimensional arrays
        title: main plot title
        component_labels: accepted for symmetry with _draw_components_2D
            but not used here
        legend_outside: when not None, redraw the legend outside the axes
        fig_size: optional (width, height) in inches

    NOTE(review): near-duplicate of _draw_components_2D except for the
    axis labels — candidate for consolidation.
    """
    n_sets = len(X)
    color_list = _get_color_list(n_sets)
    if fig_size is not None:
        fig = plt.figure(figsize=(fig_size[0], fig_size[1]))
    else:
        fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlabel('Time', fontsize=15)
    ax.set_ylabel('Components ', fontsize=15)
    # Pad axis limits by 5% of the data range across all datasets.
    X_array = np.concatenate(X)
    x_min, x_max = [np.min(X_array[:, 0]), np.max(X_array[:, 0])]
    y_min, y_max = [np.min(X_array[:, 1]), np.max(X_array[:, 1])]
    x_epsilon = (x_max - x_min) * 0.05
    y_epsilon = (y_max - y_min) * 0.05
    ax.set_xlim([x_min - x_epsilon, x_max + x_epsilon])
    ax.set_ylim([y_min - y_epsilon, y_max + y_epsilon])
    for label, pts, color in zip(labels, X, color_list):
        ax.plot(pts[:, 0], pts[:, 1], 'o', color=color, label=label)
    lg = plt.legend(loc=1, borderaxespad=0., fontsize=15)
    # NOTE(review): `legend_outside is not None` is True for the default
    # False value — see the matching note in _draw_components_2D.
    if legend_outside is not None:
        lg = plt.legend(bbox_to_anchor=(1.05, 1.0), loc=2,
                        borderaxespad=0., fontsize=15)
        lg.draggable()
    plt.title(title, fontsize=20)
    plt.show()
def _draw_components_3D(X, labels, title, component_labels, view_angles,
                        legend_outside, fig_size):
    """Scatter-plot three low-dimensional components in a 3D projection,
    one color per data set.

    Args:
        X: list of 2D arrays of low dimensional data; columns 0-2 of each
            array give the x, y and z coordinates.
        labels: legend labels, one per array in X.
        title: figure title.
        component_labels: three component indices used for the axis labels.
        view_angles: optional (elevation, azimuth) pair for the 3D camera.
        legend_outside: truthy to draw the legend outside the axes.
            NOTE(review): the 2D helpers test ``is not None`` instead of
            truthiness here -- confirm the inconsistency is intended.
        fig_size: optional (width, height) pair for the figure.
    """
    n_sets = len(X)
    # One distinct color per data set.
    color_list = _get_color_list(n_sets)
    if fig_size is not None:
        fig = plt.figure(figsize=(fig_size[0], fig_size[1]))
    else:
        fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_xlabel('Component ' + str(component_labels[0]), fontsize=12)
    ax.set_ylabel('Component ' + str(component_labels[1]), fontsize=12)
    ax.set_zlabel('Component ' + str(component_labels[2]), fontsize=12)
    X_array = np.concatenate(X)
    # Pad each axis limit by 5% of the data range on each side.
    x_min, x_max = [np.min(X_array[:, 0]), np.max(X_array[:, 0])]
    y_min, y_max = [np.min(X_array[:, 1]), np.max(X_array[:, 1])]
    z_min, z_max = [np.min(X_array[:, 2]), np.max(X_array[:, 2])]
    x_epsilon = (x_max - x_min) * 0.05
    y_epsilon = (y_max - y_min) * 0.05
    z_epsilon = (z_max - z_min) * 0.05
    ax.set_xlim([x_min - x_epsilon, x_max + x_epsilon])
    ax.set_ylim([y_min - y_epsilon, y_max + y_epsilon])
    ax.set_zlim([z_min - z_epsilon, z_max + z_epsilon])
    for label, pts, color in zip(labels, X, color_list):
        ax.plot(pts[:, 0], pts[:, 1], pts[:, 2], 'o', color=color, label=label)
    plt.title(title, fontsize=15)
    if view_angles is not None:
        ax.view_init(view_angles[0], view_angles[1])
    lg = plt.legend(loc=1, borderaxespad=0., fontsize=15)
    if legend_outside:
        # Replace the in-axes legend with one anchored outside the plot.
        lg = plt.legend(bbox_to_anchor=(1.05, 1.0), loc=2,
                        borderaxespad=0., fontsize=15)
    plt.show()
def draw_goodness_of_fit(fit_data, pred_data, labels):
    """Goodness-of-fit scatter plot for MKSHomogenizationModel.

    Plots actual vs. predicted values for the fitting and prediction data
    sets, together with the identity line a perfect model would follow.

    Args:
        fit_data (2D array): row 0 holds actual and row 1 predicted values
            for the data used to fit the model.
        pred_data (2D array): row 0 holds actual and row 1 predicted values
            for the data used for prediction with the model.
        labels: legend labels for the two data sets, in the same order.
    """
    plt.close('all')
    combined = np.concatenate((fit_data, pred_data), axis=-1)
    lo, hi = np.min(combined), np.max(combined)
    center = (hi + lo) / 2.
    span = hi - lo
    # Identity line, extended 3% beyond the data range around its center.
    diag = np.linspace(center - span * 1.03 / 2,
                       center + span * 1.03 / 2, endpoint=False)
    plt.plot(diag, diag, '-', linewidth=3, color='#000000')
    plt.plot(fit_data[0], fit_data[1], 'o', color='#1a9850', label=labels[0])
    plt.plot(pred_data[0], pred_data[1], 'o',
             color='#f46d43', label=labels[1])
    plt.title('Goodness of Fit', fontsize=20)
    plt.xlabel('Actual', fontsize=18)
    plt.ylabel('Predicted', fontsize=18)
    plt.legend(loc=2, fontsize=15)
    plt.show()
def draw_components(X_comp, fontsize=15, figsize=None):
    """Display each low-dimensional component as its own labeled field plot.

    Args:
        X_comp (ND array): components to display, one per leading index.
        fontsize (int, optional): font size for the per-plot titles.
        figsize (tuple, optional): matplotlib figure size.
    """
    titles = [r'Component $%s$' % (idx + 1)
              for idx in range(X_comp.shape[0])]
    _draw_fields(X_comp, _get_coeff_cmap(), fontsize, titles, figsize=figsize)
def draw_correlations(X_corr, correlations=None):
    """Visualize spatial correlations.

    Args:
        X_corr (ND array): correlations, with the individual auto- and
            cross-correlations stacked along the last axis.
        correlations (list, optional): correlation labels; when omitted they
            are reconstructed from the number of stacked correlations.
    """
    if correlations is None:
        n_cross = X_corr.shape[-1]
        # BUG FIX: the previous code computed a float
        # ("(...).astype(int) / 2") and wrapped it in range(), which raises
        # TypeError on Python 3 and disagrees with the sibling
        # draw_autocorrelations/draw_crosscorrelations, which pass an int.
        # Invert n_cross = n_states * (n_states + 1) / 2 to recover n_states.
        n_states = int((np.sqrt(1 + 8 * n_cross) - 1) // 2)
        correlations = _auto_correlations(n_states) + \
            _cross_correlations(n_states)
    _draw_stats(X_corr, correlations=correlations)
def draw_autocorrelations(X_auto, autocorrelations=None):
    """Visualize spatial autocorrelations.

    Args:
        X_auto (ND array): autocorrelations, stacked along the last axis.
        autocorrelations (list, optional): autocorrelation labels; derived
            from the number of local states when omitted.
    """
    if autocorrelations is None:
        autocorrelations = _auto_correlations(X_auto.shape[-1])
    _draw_stats(X_auto, correlations=autocorrelations)
def draw_crosscorrelations(X_cross, crosscorrelations=None):
    """Visualize spatial crosscorrelations.

    Args:
        X_cross (ND array): cross-correlations, stacked along the last axis.
        crosscorrelations (list, optional): cross-correlation labels; when
            omitted they are reconstructed from the number of stacked
            cross-correlations.
    """
    if crosscorrelations is None:
        n_cross = X_cross.shape[-1]
        # BUG FIX: "astype(int) / 2" produced a float n_states on Python 3;
        # invert n_cross = n_states * (n_states - 1) / 2 to an int instead.
        n_states = int((np.sqrt(1 + 8 * n_cross) + 1) // 2)
        crosscorrelations = _cross_correlations(n_states)
    _draw_stats(X_cross, correlations=crosscorrelations)
def _draw_stats(X_, correlations=None):
    """Visualize the spatial correlations, one subplot per correlation.

    Args:
        X_: correlations array; the individual correlations are stacked
            along the last axis.
        correlations: list of (l, l') tuples selecting the spatial
            correlations that will be displayed. Required in practice:
            ``len(correlations)`` determines the number of subplots.
    """
    plt.close('all')
    X_cmap = _get_coeff_cmap()
    n_plots = len(correlations)
    x_loc, x_labels = _get_ticks_params(X_.shape[0])
    y_loc, y_labels = _get_ticks_params(X_.shape[1])
    fig, axs = plt.subplots(1, n_plots, figsize=(n_plots * 5, 5))
    if n_plots == 1:
        # plt.subplots returns a bare Axes for a single plot; normalize to a
        # list so the zip below works either way.
        axs = list([axs])
    for ax, label, img in zip(axs, correlations, np.rollaxis(X_, -1)):
        ax.grid(False)
        ax.set_xticks(x_loc)
        ax.set_xticklabels(x_labels, fontsize=12)
        ax.set_yticks(y_loc)
        ax.set_yticklabels(y_labels, fontsize=12)
        im = ax.imshow(img, cmap=X_cmap, interpolation='none')
        ax.set_title(r"Correlation $l = {0}$, $l' = {1}$".format(label[0],
                                                                 label[1]),
                     fontsize=15)
        fig.subplots_adjust(right=0.8)
        # Attach a colorbar of its own to the right of each subplot.
        divider = make_axes_locatable(ax)
        cbar_ax = divider.append_axes("right", size="10%", pad=0.05)
        cbar_ticks = _get_colorbar_ticks(img, 5)
        cbar_ticks_diff = cbar_ticks[-1] - cbar_ticks[0]
        cbar_top, cbar_grids = np.max(X_) * 0.005, 0.005
        if cbar_ticks_diff <= 1e-15:
            # Essentially constant image: avoid a zero-step np.arange below.
            cbar_top = 0.
            cbar_grids = 0.5
        try:
            cbar = plt.colorbar(im, cax=cbar_ax, ticks=cbar_ticks,
                                boundaries=np.arange(cbar_ticks[0],
                                                     cbar_ticks[-1] + cbar_top,
                                                     cbar_ticks_diff *
                                                     cbar_grids))
            cbar.ax.tick_params(labelsize=12)
        # BUG FIX: this was a bare "except:", which also swallows
        # KeyboardInterrupt/SystemExit; only colorbar failures are expected.
        except Exception:
            cbar = plt.colorbar(im, cax=cbar_ax, boundaries=np.unique(X_))
            cbar.ax.tick_params(labelsize=12)
    fig.subplots_adjust(right=0.8)
    plt.tight_layout()
    plt.show()
def _get_ticks_params(l):
"""Get tick locations and labels for spatial correlation plots.
>>> l = 4
>>> result = ([0, 1, 2, 3, 4], [-2, -1, 0, 1, 2])
>>> assert result == _get_ticks_params(l)
Args:
l: shape of array along the axis
"""
segments = np.roll(np.arange(4, 7, dtype=int), 1, 0)
m = segments[np.argmin(l % segments)]
n = int(max((l + 1) / m, 1))
tick_loc = list(range(0, l + n, n))
tick_labels = list(range(int(round(- (l - 1) / 2)),
int(round(int((l + 1) / 2 + n))), n))
return tick_loc, tick_labels
def _get_colorbar_ticks(X_, n_ticks):
"""
Helper function to get colorbar color tick locations.
Args:
X: sspatial correlations array
(n_samples, x, y, local_state_correlation)
"""
tick_range = np.linspace(np.min(X_), np.max(X_), n_ticks)
return tick_range.astype(float)
def draw_learning_curves(estimator, X, y, ylim=None, cv=None, n_jobs=1,
                         scoring=None, train_sizes=np.linspace(.1, 1.0, 5)):
    """Code taken from scikit-learn examples for version 0.15.
    Generate a simple plot of the test and training learning curve.
    Args:
        estimator (class): object type that implements the "fit" and "predict"
            methods
            An object of that type which is cloned for each validation.
        title (str): Used for the title for the chart.
        X (2D array): array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples and
            n_features is the number of features.
        y (1D array): array-like, shape (n_samples) or (n_samples,
            n_features), optional Target relative to X for classification or
            regression; None for unsupervised learning.
        ylim (tuple, optional): Defines minimum and maximum yvalues plotted.
        cv (int, optional): If an integer is passed, it is the number of folds
            (defaults to 3). Specific cross-validation objects can be passed,
            see sklearn.cross_validation module for the list of possible
            objects
        n_jobs(int, optional) : Number of jobs to run in parallel (default 1).
        train_sizes (float): Relative or absolute numbers of training examples
            that will be used to generate the learning curve. If the dtype is
            float, it is regarded as a fraction of the maximum size of the
            training set (that is determined by the selected validation
            method), i.e. it has to be within (0, 1]. Otherwise it is
            interpreted as absolute sizes of the training sets. Note that for
            classification the number of samples usually have to be big enough
            to contain at least one sample from each class. (default:
            np.linspace(0.1, 1.0, 5))
    Returns:
        A plot of the learning curves for both the training curve and the
        cross-validation curve.
    """
    plt.close('all')
    # Flatten each sample to 1D: learning_curve expects X to be 2D
    # (n_samples, n_features).
    flat_shape = (X.shape[0],) + (np.prod(X.shape[1:]),)
    X_flat = X.reshape(flat_shape)
    plt.figure()
    plt.title('Learning Curves', fontsize=20)
    if ylim is not None:
        plt.ylim(*ylim)
    plt.xlabel("Training examples", fontsize=15)
    plt.ylabel("Score", fontsize=15)
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X_flat, y, cv=cv, n_jobs=n_jobs,
        train_sizes=train_sizes, scoring=scoring)
    # Mean score per training size, with std across the CV folds.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands show +/- one standard deviation around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="#f46d43")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1,
                     color="#1a9641")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="#f46d43",
             linewidth=2, label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="#1a9641",
             linewidth=2, label="Cross-validation score")
    plt.legend(loc="best")
    plt.show()
| 37.353952 | 79 | 0.62398 |
594f74feef3ebb775d5dad1147ea0f3bd26329c1 | 151 | py | Python | snippets/python/utils/reshape/walk_tree.py | c6401/Snippets | a88d97005658eeda99f1a2766e3d069a64e142cb | [
"MIT"
] | null | null | null | snippets/python/utils/reshape/walk_tree.py | c6401/Snippets | a88d97005658eeda99f1a2766e3d069a64e142cb | [
"MIT"
] | null | null | null | snippets/python/utils/reshape/walk_tree.py | c6401/Snippets | a88d97005658eeda99f1a2766e3d069a64e142cb | [
"MIT"
def walk_tree(tree, key='children'):
    """Depth-first walk over a tree given as a list of node dicts.

    Yields every node in *tree*, descending into each node's ``key`` list
    (default ``'children'``) right after yielding the node itself.
    """
    for item in tree:
        yield item
        # BUG FIX: the original tested and recursed on `tree` instead of the
        # current `item`, re-walking the same subtree once per sibling (and
        # `tree[key]` on a list raises TypeError).
        if key in item:
            yield from walk_tree(item[key], key)
| 25.166667 | 48 | 0.582781 |
6b11becd79a1257eed8ba9977568850e156a6b39 | 12,337 | py | Python | tensorflow_quantum/core/ops/batch_util_test.py | lockwo/quantum | de57af9330ac7f5bf8298d202081c29fb2a956b8 | [
"Apache-2.0"
] | 2 | 2020-12-24T06:27:28.000Z | 2021-12-30T17:42:45.000Z | tensorflow_quantum/core/ops/batch_util_test.py | lockwo/quantum | de57af9330ac7f5bf8298d202081c29fb2a956b8 | [
"Apache-2.0"
] | null | null | null | tensorflow_quantum/core/ops/batch_util_test.py | lockwo/quantum | de57af9330ac7f5bf8298d202081c29fb2a956b8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test parallel Cirq simulations."""
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
import numpy as np
import tensorflow as tf
from absl.testing import parameterized
from scipy import stats
import cirq
from tensorflow_quantum.core.ops import batch_util
from tensorflow_quantum.python import util
BATCH_SIZE = 12
N_QUBITS = 5
PAULI_LENGTH = 3
SYMBOLS = ['alpha', 'beta', 'gamma']
def _get_mixed_batch(qubits, symbols, size):
    """Build a batch mixing parameterless and symbol-parameterized circuits.

    Half of the batch comes from random fixed circuits, the other half from
    random circuits parameterized over `symbols`.
    """
    half = size // 2
    plain_circuits, plain_resolvers = util.random_circuit_resolver_batch(
        qubits, half)
    symbol_circuits, symbol_resolvers = util.random_symbol_circuit_resolver_batch(
        qubits, symbols, half)
    return plain_circuits + symbol_circuits, plain_resolvers + symbol_resolvers
def _pad_state(sim, state, n):
    """Pad a simulator result out to 2**n entries with -2 sentinel values.

    The raw array is the state vector for `cirq.Simulator` results and the
    density matrix for `cirq.DensityMatrixSimulator` results; anything else
    is padded as-is.
    """
    raw = state
    if isinstance(sim, cirq.Simulator):
        raw = raw.final_state_vector
    elif isinstance(sim, cirq.DensityMatrixSimulator):
        raw = raw.final_density_matrix
    pad_width = (0, (1 << n) - raw.shape[-1])
    return np.pad(raw, pad_width, 'constant', constant_values=-2)
def _expectation_helper(sim, circuit, params, op):
    """Compute the expectation of `op` on the resolved `circuit` with `sim`.

    Returns a single-element list so results line up with the batched ops
    used by the tests.

    Raises:
        NotImplementedError: if `sim` is neither a cirq.Simulator nor a
            cirq.DensityMatrixSimulator.
    """
    if isinstance(sim, cirq.Simulator):
        state = sim.simulate(circuit,
                             params).final_state_vector.astype(np.complex128)
        return [
            op.expectation_from_state_vector(
                state,
                dict(
                    zip(sorted(circuit.all_qubits()),
                        (j for j in range(len(circuit.all_qubits())))))).real
        ]
    if isinstance(sim, cirq.DensityMatrixSimulator):
        state = sim.simulate(circuit, params).final_density_matrix
        return [
            sum(
                x._expectation_from_density_matrix_no_validation(
                    state,
                    dict(
                        zip(sorted(circuit.all_qubits()), (
                            j
                            for j in range(len(circuit.all_qubits()))))))
                for x in op)
        ]

    # BUG FIX: previously returned the NotImplemented singleton, which is
    # reserved for binary dunder methods and would silently propagate here.
    raise NotImplementedError(
        'Unsupported simulator type: {}'.format(type(sim)))
def _sample_helper(sim, state, n_qubits, n_samples):
    """Draw `n_samples` bitstring samples from a simulated `state`.

    Uses state-vector sampling for `cirq.Simulator` results and
    density-matrix sampling for `cirq.DensityMatrixSimulator` results.

    Raises:
        NotImplementedError: if `sim` is neither supported simulator type.
    """
    if isinstance(sim, cirq.Simulator):
        return cirq.sample_state_vector(state.final_state_vector,
                                        list(range(n_qubits)),
                                        repetitions=n_samples)
    if isinstance(sim, cirq.DensityMatrixSimulator):
        return cirq.sample_density_matrix(state.final_density_matrix,
                                          list(range(n_qubits)),
                                          repetitions=n_samples)

    # BUG FIX: previously returned the NotImplemented singleton, which is
    # reserved for binary dunder methods and would silently propagate here.
    raise NotImplementedError(
        'Unsupported simulator type: {}'.format(type(sim)))
class BatchUtilTest(tf.test.TestCase, parameterized.TestCase):
    """Test cases for BatchUtils main functions.

    Every test is parameterized to run against both a state-vector
    simulator (cirq.Simulator) and a density-matrix simulator
    (cirq.DensityMatrixSimulator).
    """

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.Simulator()
    }])
    def test_batch_simulate_state_vector(self, sim):
        """Test variable sized state vector output."""
        circuit_batch, resolver_batch = _get_mixed_batch(
            cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)
        results = batch_util.batch_calculate_state(circuit_batch,
                                                   resolver_batch, sim)

        # Compare each batched result against a direct cirq simulation,
        # padded to the full 2**N_QUBITS size.
        for circuit, resolver, result in zip(circuit_batch, resolver_batch,
                                             results):
            r = _pad_state(sim, sim.simulate(circuit, resolver), N_QUBITS)
            self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)

        self.assertDTypeEqual(results, np.complex64)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.Simulator()
    }])
    def test_batch_expectation(self, sim):
        """Test expectation."""
        qubits = cirq.GridQubit.rect(1, N_QUBITS)
        # The extra (9, 9) qubit never appears in any PauliSum op.
        circuit_batch, resolver_batch = _get_mixed_batch(
            qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)
        ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)

        results = batch_util.batch_calculate_expectation(
            circuit_batch, resolver_batch, [[x] for x in ops], sim)

        for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,
                                                 results, ops):
            r = _expectation_helper(sim, circuit, resolver, op)
            self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)

        self.assertDTypeEqual(results, np.float32)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.Simulator()
    }])
    def test_batch_sampled_expectation(self, sim):
        """Test expectation."""
        qubits = cirq.GridQubit.rect(1, N_QUBITS)
        circuit_batch, resolver_batch = _get_mixed_batch(
            qubits + [cirq.GridQubit(9, 9)], SYMBOLS, BATCH_SIZE)
        ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
        n_samples = [[1000] for _ in range(len(ops))]

        results = batch_util.batch_calculate_sampled_expectation(
            circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)

        # Sampling noise: use loose tolerances against the analytic value.
        for circuit, resolver, result, op in zip(circuit_batch, resolver_batch,
                                                 results, ops):
            r = _expectation_helper(sim, circuit, resolver, op)
            self.assertAllClose(r, result, rtol=1.0, atol=1e-1)

        self.assertDTypeEqual(results, np.float32)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.Simulator()
    }])
    def test_batch_sample_basic(self, sim):
        """Test sampling."""
        n_samples = 1
        n_qubits = 8
        qubits = cirq.GridQubit.rect(1, n_qubits)
        # Deterministic circuit: Z on the first half, X on the second half.
        circuit = cirq.Circuit(*cirq.Z.on_each(*qubits[:n_qubits // 2]),
                               *cirq.X.on_each(*qubits[n_qubits // 2:]))

        test_results = batch_util.batch_sample([circuit],
                                               [cirq.ParamResolver({})],
                                               n_samples, sim)

        state = sim.simulate(circuit, cirq.ParamResolver({}))
        expected_results = _sample_helper(sim, state, len(qubits), n_samples)

        self.assertAllEqual(expected_results, test_results[0])
        self.assertDTypeEqual(test_results, np.int8)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.Simulator()
    }])
    def test_batch_sample(self, sim):
        """Test sampling."""
        n_samples = 2000 * (2**N_QUBITS)

        circuit_batch, resolver_batch = _get_mixed_batch(
            cirq.GridQubit.rect(1, N_QUBITS), SYMBOLS, BATCH_SIZE)

        results = batch_util.batch_sample(circuit_batch, resolver_batch,
                                          n_samples, sim)

        # Histogram the sampled bitstrings (interpreted as integers).
        tfq_histograms = []
        for r in results:
            tfq_histograms.append(
                np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),
                             range=(0, 2**N_QUBITS),
                             bins=2**N_QUBITS)[0])

        cirq_histograms = []
        for circuit, resolver in zip(circuit_batch, resolver_batch):
            state = sim.simulate(circuit, resolver)
            r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)
            cirq_histograms.append(
                np.histogram(r.dot(1 << np.arange(r.shape[-1] - 1, -1, -1)),
                             range=(0, 2**N_QUBITS),
                             bins=2**N_QUBITS)[0])

        # Compare the two sample distributions via relative entropy; the
        # 1e-8 offset avoids log(0) on empty bins.
        for a, b in zip(tfq_histograms, cirq_histograms):
            self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.005)

        self.assertDTypeEqual(results, np.int8)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.Simulator()
    }])
    def test_empty_circuits(self, sim):
        """Test functions with empty circuits."""
        # Common preparation
        resolver_batch = [cirq.ParamResolver({}) for _ in range(BATCH_SIZE)]
        circuit_batch = [cirq.Circuit() for _ in range(BATCH_SIZE)]
        qubits = cirq.GridQubit.rect(1, N_QUBITS)
        ops = util.random_pauli_sums(qubits, PAULI_LENGTH, BATCH_SIZE)
        n_samples = [[1000] for _ in range(len(ops))]
        # If there is no op on a qubit, the expectation answer is -2.0
        true_expectation = (-2.0,)

        # (1) Test expectation
        results = batch_util.batch_calculate_expectation(
            circuit_batch, resolver_batch, [[x] for x in ops], sim)

        for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):
            self.assertAllClose(true_expectation, result, rtol=1e-5, atol=1e-5)

        self.assertDTypeEqual(results, np.float32)

        # (2) Test sampled_expectation
        results = batch_util.batch_calculate_sampled_expectation(
            circuit_batch, resolver_batch, [[x] for x in ops], n_samples, sim)

        for _, _, result, _ in zip(circuit_batch, resolver_batch, results, ops):
            self.assertAllClose(true_expectation, result, rtol=1.0, atol=1e-1)

        self.assertDTypeEqual(results, np.float32)

        # (3) Test state
        results = batch_util.batch_calculate_state(circuit_batch,
                                                   resolver_batch, sim)

        for circuit, resolver, result in zip(circuit_batch, resolver_batch,
                                             results):
            r = _pad_state(sim, sim.simulate(circuit, resolver), 0)
            self.assertAllClose(r, result, rtol=1e-5, atol=1e-5)

        self.assertDTypeEqual(results, np.complex64)

        # (4) Test sampling
        n_samples = 2000 * (2**N_QUBITS)
        results = batch_util.batch_sample(circuit_batch, resolver_batch,
                                          n_samples, sim)

        for circuit, resolver, a in zip(circuit_batch, resolver_batch, results):
            state = sim.simulate(circuit, resolver)
            r = _sample_helper(sim, state, len(circuit.all_qubits()), n_samples)
            self.assertAllClose(r, a, atol=1e-5)

        self.assertDTypeEqual(results, np.int8)

    @parameterized.parameters([{
        'sim': cirq.DensityMatrixSimulator()
    }, {
        'sim': cirq.Simulator()
    }])
    def test_no_circuit(self, sim):
        """Test functions with no circuits and empty arrays."""
        # (1) Test expectation
        results = batch_util.batch_calculate_expectation([], [], [[]], sim)
        self.assertDTypeEqual(results, np.float32)
        self.assertEqual(np.zeros(shape=(0, 0)).shape, results.shape)

        # (2) Test sampled_expectation
        results = batch_util.batch_calculate_sampled_expectation([], [], [[]],
                                                                 [[]], sim)
        self.assertDTypeEqual(results, np.float32)
        self.assertEqual(np.zeros(shape=(0, 0)).shape, results.shape)

        # (3) Test state
        results = batch_util.batch_calculate_state([], [], sim)
        self.assertDTypeEqual(results, np.complex64)
        # Density-matrix results carry one extra dimension.
        if isinstance(sim, cirq.Simulator):
            self.assertEqual(np.zeros(shape=(0, 0)).shape, results.shape)
        else:
            self.assertEqual(np.zeros(shape=(0, 0, 0)).shape, results.shape)

        # (4) Test sampling
        results = batch_util.batch_sample([], [], [], sim)
        self.assertDTypeEqual(results, np.int8)
        self.assertEqual(np.zeros(shape=(0, 0, 0)).shape, results.shape)
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    tf.test.main()
| 39.415335 | 80 | 0.596904 |
d6cb966415f00fb0883872e3a6270dd76f2fd2d3 | 910 | py | Python | idmap/version.py | tkhyn/django-idmap | 383124fc4bd537d053f9d4c0d02a498f66831baa | [
"BSD-2-Clause"
] | 1 | 2021-04-24T16:35:15.000Z | 2021-04-24T16:35:15.000Z | idmap/version.py | tkhyn/django-idmap | 383124fc4bd537d053f9d4c0d02a498f66831baa | [
"BSD-2-Clause"
] | null | null | null | idmap/version.py | tkhyn/django-idmap | 383124fc4bd537d053f9d4c0d02a498f66831baa | [
"BSD-2-Clause"
] | 1 | 2021-02-27T14:45:48.000Z | 2021-02-27T14:45:48.000Z | """
Defines __version__ from __version_info__
"""
import subprocess
__version_info__ = (1, 0, 4, 'alpha', 0)


def get_version(version=__version_info__):
    """Render a version string from a 5-tuple
    (major, minor, micro, stage, serial).

    - ``final`` stages yield the bare dotted version (micro omitted when 0)
    - ``('alpha', 0)`` yields a dev version tagged with the git changeset
    - any other pre-release yields e.g. ``1.2.3b2`` / ``1.2c1``
    """
    suffixes = {'alpha': 'a', 'beta': 'b', 'rc': 'c', 'final': ''}
    assert len(version) == 5
    assert version[3] in suffixes
    # Keep the micro component only when it is non-zero.
    parts = 3 if version[2] else 2
    base = '.'.join(str(piece) for piece in version[:parts])
    if version[3] == 'final':
        return base
    if version[3:] == ('alpha', 0):
        # Unreleased development snapshot: tag with the current changeset.
        return '%s.dev0+%s' % (base, get_git_chgset())
    return base + suffixes[version[3]] + str(version[4])
def get_git_chgset():
    """Return an abbreviated identifier for the current git changeset.

    Returns '?' when git is unavailable or the working directory is not a
    repository.
    """
    try:
        out = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'],
                                      universal_newlines=True)
        # NOTE(review): [:-1] drops the last hex digit of the short hash --
        # presumably an intentional extra shortening; confirm.
        return out.strip()[:-1]
    # BUG FIX: was a bare "except:", which also swallowed
    # KeyboardInterrupt/SystemExit. Only "git missing" (OSError) and
    # "not a repo" (CalledProcessError) are expected failures here.
    except (OSError, subprocess.CalledProcessError):
        return '?'
__version__ = get_version()
| 23.947368 | 80 | 0.541758 |
623382086a16b1764ddcb1c50183b899acd065a7 | 2,434 | py | Python | romclean-pass2.py | gatesphere/romclean | 1a2bc70c691166d8199bced19ed982a26af8b6dc | [
"Unlicense"
] | null | null | null | romclean-pass2.py | gatesphere/romclean | 1a2bc70c691166d8199bced19ed982a26af8b6dc | [
"Unlicense"
] | null | null | null | romclean-pass2.py | gatesphere/romclean | 1a2bc70c691166d8199bced19ed982a26af8b6dc | [
"Unlicense"
#!/usr/bin/env python
# Pass 2 of ROM collection cleanup: group the ROM files in the current
# directory by base title, pick the best dump per title using region and
# [!] (verified dump) heuristics, and write keep/kill/error lists for the
# later passes.

import os

mypath = os.getcwd()
kill_file = os.path.join(mypath, 'kill.lis')    # roms to delete
error_file = os.path.join(mypath, 'error.lis')  # ambiguous -- needs a human
keep_file = os.path.join(mypath, 'keep.lis')    # roms to keep

# get files in current path
files = [f for f in os.listdir(mypath) if os.path.isfile(os.path.join(mypath, f))]

matches = {}
print("Begin: parsing file list...")
for f in files:
    # The base title is everything before the first "(", "[" or "." --
    # normalize all three delimiters and split once.
    newf = f.replace("(", "||||").replace("[", "||||").replace(".", "||||")
    k = newf.split("||||")[0].strip()
    v = matches.get(k, [])
    v.append(f)
    matches[k] = v
print("File list parsed.")

kill_list = []
error_list = []
keep_list = []

print("Begin: filtering matches")
for k in matches.keys():
    # heuristics:
    # Tier 1: Prefer U > UE > UJ > W > E > J
    # Tier 2: Prefer [!] over non-[!]
    # Tier 3: user intervention required
    v = matches[k]
    if len(v) == 1:
        keep_list.append(v[0])
        continue  # only one result for that rom -- keep it
    tempkeeplist = []
    for val in v:
        if '(U)' in val:
            tempkeeplist.append(val)
    # BUG FIX: the five region fallbacks below used "= 0" (assignment --
    # a SyntaxError) instead of the "== 0" comparison.
    if len(tempkeeplist) == 0:
        for val in v:
            if '(UE)' in val or '(EU)' in val:
                tempkeeplist.append(val)
    if len(tempkeeplist) == 0:
        for val in v:
            if '(UJ)' in val or '(JU)' in val:
                tempkeeplist.append(val)
    if len(tempkeeplist) == 0:
        for val in v:
            if '(W)' in val:
                tempkeeplist.append(val)
    if len(tempkeeplist) == 0:
        for val in v:
            if '(E)' in val:
                tempkeeplist.append(val)
    if len(tempkeeplist) == 0:
        for val in v:
            if '(J)' in val:
                tempkeeplist.append(val)
    tempkeeplist2 = []
    if len(tempkeeplist) > 1:
        # Tier 2: among several same-region candidates keep verified dumps.
        for val in tempkeeplist:
            if '[!]' in val:
                tempkeeplist2.append(val)
        tempkeeplist = tempkeeplist2
    if len(tempkeeplist) == 1:
        for val in v:
            if val not in tempkeeplist:
                kill_list.append(val)
        keep_list.append(tempkeeplist[0])
    else:
        # Zero or multiple candidates left: Tier 3, punt to the user.
        for val in v:
            error_list.append(val)
print('Matches filtered.')

# BUG FIX: the three writes below indexed the join method
# ("'\n'.join[...]") instead of calling it.
with open(kill_file, 'w') as kf:
    kf.write('\n'.join(kill_list))
    kf.write('\n')
print("Kill list saved as %s" % kill_file)

with open(error_file, 'w') as ef:
    ef.write('\n'.join(error_list))
    ef.write('\n')
print("Error list saved as %s" % error_file)

with open(keep_file, 'w') as keepf:
    keepf.write('\n'.join(keep_list))
    keepf.write('\n')
print("Keep list saved as %s" % keep_file)

print("Please edit the error file to only include roms to delete before running pass3")
3a7eb3ae5b7a7f8e9a7b39ae989344289803c1ae | 2,882 | py | Python | include/__init__.py | maxfischer2781/include | d8b0404f4996b6abcd39fdebf282b31fad8bb6f5 | [
"MIT"
] | 1 | 2020-07-06T22:56:18.000Z | 2020-07-06T22:56:18.000Z | include/__init__.py | maxfischer2781/include | d8b0404f4996b6abcd39fdebf282b31fad8bb6f5 | [
"MIT"
] | 2 | 2017-06-09T12:50:31.000Z | 2022-03-28T11:28:33.000Z | include/__init__.py | maxfischer2781/include | d8b0404f4996b6abcd39fdebf282b31fad8bb6f5 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
import sys
import weakref
# weak reference to installed hooks
_IMPORT_HOOKS = weakref.WeakValueDictionary()
# must have _IMPORT_HOOKS to bootstrap hook disabling
from .inhibit import DISABLED_TYPES
def path(file_path):
    """Import and return the module whose code lives at *file_path*.

    :param file_path: path to a file containing module code
    :type file_path: str
    :return: the imported module
    :rtype: module

    Similar to ``execfile``, but honors module semantics: calling this a
    second time with the same ``file_path`` hands back the very same
    module object.

    .. code:: python

        import include
        my_config = include.path('/etc/sysconfig/app_conf.py')
    """
    from . import files
    return _import_url(module_url=file_path, include_type=files)
def source(source_code):
    """Import module code handed over directly as a string.

    :param source_code: source code of the module
    :type source_code: str
    :return: the imported module
    :rtype: module

    Similar to ``exec`` in a separate ``globals`` namespace, but honors
    module semantics: calling this a second time with the same
    ``source_code`` hands back the very same module object.

    .. code:: python

        >>> import include
        >>> my_module = include.source(
        ...     '''
        ...     def foo():
        ...         return {constant}
        ...     '''.format(constant=3))
        >>> my_module.foo() == 3
        True
    """
    from . import encoded
    return _import_url(module_url=source_code, include_type=encoded)
def _import_url(module_url, include_type):
    # Shared implementation behind path() and source(): resolve `module_url`
    # through the include type's import hook and return the loaded module.
    if include_type.IMPORT_PATH not in _IMPORT_HOOKS:
        # First use of this include type: install() is expected to register
        # its hook in _IMPORT_HOOKS under IMPORT_PATH (used by the lookup
        # below). NOTE(review): _IMPORT_HOOKS is a WeakValueDictionary, so
        # the hook could in principle be collected between install() and the
        # lookup -- confirm installers keep a strong reference.
        include_type.install()
    import_hook = _IMPORT_HOOKS[include_type.IMPORT_PATH]
    # Translate the URI (file path or raw source) into a dotted module path
    # the hook understands, then trigger a regular import of it.
    module_path = import_hook.uri2module(module_url)
    __import__(module_path)
    return sys.modules[module_path]
def disable(identifier, children_only=False):
    """Turn off an include type.

    :param identifier: module or name of the include type
    :param children_only: when True, only child processes lose the include
        type; the current process keeps it

    ``identifier`` accepts the same forms as
    :py:meth:`~.DisabledIncludeTypes.disable`; see there for details.
    """
    DISABLED_TYPES.disable(identifier=identifier, children_only=children_only)
def enable(identifier, exclude_children=False):
    """Turn a previously disabled include type back on.

    :param identifier: module or name of the include type
    :param exclude_children: when True, child processes keep the include
        type disabled while the current process regains it

    ``identifier`` accepts the same forms as
    :py:meth:`~.DisabledIncludeTypes.disable`; see there for details.
    """
    DISABLED_TYPES.enable(identifier=identifier, exclude_children=exclude_children)
| 31.326087 | 114 | 0.70923 |
da2dd30df2066e05833a12c3dc08e2974c0bd71b | 5,945 | py | Python | venv/Lib/site-packages/pandas/tests/frame/methods/test_asof.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/frame/methods/test_asof.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/frame/methods/test_asof.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | import numpy as np
import pytest
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
DataFrame,
Period,
Series,
Timestamp,
date_range,
period_range,
to_datetime,
)
import pandas._testing as tm
@pytest.fixture
def date_range_frame():
"""
Fixture for DataFrame of ints with date_range index
Columns are ['A', 'B'].
"""
N = 50
rng = date_range("1/1/1990", periods=N, freq="53s")
return DataFrame({"A": np.arange(N), "B": np.arange(N)}, index=rng)
class TestFrameAsof:
    """Tests for DataFrame.asof: as-of merges against a datetime index."""

    def test_basic(self, date_range_frame):
        # asof should forward-fill over the NaN window both for an Index
        # input and for a plain list input.
        df = date_range_frame
        N = 50
        df.loc[df.index[15:30], "A"] = np.nan
        dates = date_range("1/1/1990", periods=N * 3, freq="25s")

        result = df.asof(dates)
        assert result.notna().all(1).all()
        lb = df.index[14]
        ub = df.index[30]

        dates = list(dates)

        result = df.asof(dates)
        assert result.notna().all(1).all()

        # Everything inside the NaN window is carried forward from row 14.
        mask = (result.index >= lb) & (result.index < ub)
        rs = result[mask]
        assert (rs == 14).all(1).all()

    def test_subset(self, date_range_frame):
        N = 10
        df = date_range_frame.iloc[:N].copy()
        df.loc[df.index[4:8], "A"] = np.nan
        dates = date_range("1/1/1990", periods=N * 3, freq="25s")

        # with a subset of A should be the same
        result = df.asof(dates, subset="A")
        expected = df.asof(dates)
        tm.assert_frame_equal(result, expected)

        # same with A/B
        result = df.asof(dates, subset=["A", "B"])
        expected = df.asof(dates)
        tm.assert_frame_equal(result, expected)

        # B gives df.asof
        result = df.asof(dates, subset="B")
        expected = df.resample("25s", closed="right").ffill().reindex(dates)
        expected.iloc[20:] = 9
        tm.assert_frame_equal(result, expected)

    def test_missing(self, date_range_frame):
        # GH 15118
        # no match found - `where` value before earliest date in index
        N = 10
        df = date_range_frame.iloc[:N].copy()

        result = df.asof("1989-12-31")

        expected = Series(
            index=["A", "B"], name=Timestamp("1989-12-31"), dtype=np.float64
        )
        tm.assert_series_equal(result, expected)

        result = df.asof(to_datetime(["1989-12-31"]))
        expected = DataFrame(
            index=to_datetime(["1989-12-31"]), columns=["A", "B"], dtype="float64"
        )
        tm.assert_frame_equal(result, expected)

        # Check that we handle PeriodIndex correctly, dont end up with
        # period.ordinal for series name
        df = df.to_period("D")
        result = df.asof("1989-12-31")
        assert isinstance(result.name, Period)

    def test_all_nans(self, date_range_frame):
        # GH 15713
        # DataFrame is all nans
        result = DataFrame([np.nan]).asof([0])
        expected = DataFrame([np.nan])
        tm.assert_frame_equal(result, expected)

        # testing non-default indexes, multiple inputs
        N = 150
        rng = date_range_frame.index
        dates = date_range("1/1/1990", periods=N, freq="25s")
        result = DataFrame(np.nan, index=rng, columns=["A"]).asof(dates)
        expected = DataFrame(np.nan, index=dates, columns=["A"])
        tm.assert_frame_equal(result, expected)

        # testing multiple columns
        dates = date_range("1/1/1990", periods=N, freq="25s")
        result = DataFrame(np.nan, index=rng, columns=["A", "B", "C"]).asof(dates)
        expected = DataFrame(np.nan, index=dates, columns=["A", "B", "C"])
        tm.assert_frame_equal(result, expected)

        # testing scalar input
        result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof([3])
        expected = DataFrame(np.nan, index=[3], columns=["A", "B"])
        tm.assert_frame_equal(result, expected)

        result = DataFrame(np.nan, index=[1, 2], columns=["A", "B"]).asof(3)
        expected = Series(np.nan, index=["A", "B"], name=3)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "stamp,expected",
        [
            (
                Timestamp("2018-01-01 23:22:43.325+00:00"),
                Series(2.0, name=Timestamp("2018-01-01 23:22:43.325+00:00")),
            ),
            (
                Timestamp("2018-01-01 22:33:20.682+01:00"),
                Series(1.0, name=Timestamp("2018-01-01 22:33:20.682+01:00")),
            ),
        ],
    )
    def test_time_zone_aware_index(self, stamp, expected):
        # GH21194
        # Testing awareness of DataFrame index considering different
        # UTC and timezone
        df = DataFrame(
            data=[1, 2],
            index=[
                Timestamp("2018-01-01 21:00:05.001+00:00"),
                Timestamp("2018-01-01 22:35:10.550+00:00"),
            ],
        )

        result = df.asof(stamp)
        tm.assert_series_equal(result, expected)

    def test_is_copy(self, date_range_frame):
        # GH-27357, GH-30784: ensure the result of asof is an actual copy and
        # doesn't track the parent dataframe / doesn't give SettingWithCopy warnings
        df = date_range_frame
        N = 50
        df.loc[df.index[15:30], "A"] = np.nan
        dates = date_range("1/1/1990", periods=N * 3, freq="25s")

        result = df.asof(dates)

        with tm.assert_produces_warning(None):
            result["C"] = 1

    def test_asof_periodindex_mismatched_freq(self):
        N = 50
        rng = period_range("1/1/1990", periods=N, freq="H")
        df = DataFrame(np.random.randn(N), index=rng)

        # Mismatched freq
        msg = "Input has different freq"
        with pytest.raises(IncompatibleFrequency, match=msg):
            df.asof(rng.asfreq("D"))
| 33.212291 | 85 | 0.557443 |
5a162008785960f2d37040da497d21ab459be11e | 2,472 | py | Python | build/lib/emojipasta/moderation.py | musca1997/emojipasta-bot | 408794733c988260176a0b41325e079342e146d3 | [
"MIT"
] | 10 | 2018-04-09T17:15:40.000Z | 2020-10-16T01:06:53.000Z | build/lib/emojipasta/moderation.py | musca1997/emojipasta-bot | 408794733c988260176a0b41325e079342e146d3 | [
"MIT"
] | 17 | 2018-04-15T01:14:20.000Z | 2019-08-04T11:19:37.000Z | build/lib/emojipasta/moderation.py | musca1997/emojipasta-bot | 408794733c988260176a0b41325e079342e146d3 | [
"MIT"
] | 13 | 2018-04-15T00:57:45.000Z | 2022-01-15T02:04:47.000Z | import discord
from discord.ext import commands
class Moderation:
    """Server-moderation command cog (ban, kick, nickname management).

    Every command verifies the invoker's permission flag and then compares
    role hierarchy, so moderators cannot act on members ranked at or above
    themselves. The broad except around each action reports the most common
    runtime failure: the bot's own top role being below the target's.
    """

    def __init__(self, client):
        # The bot instance; used to perform moderation actions and to reply
        # in the invoking channel.
        self.client = client

    @commands.command(pass_context=True)
    async def ban(self, ctx, target: discord.User, *reason):
        """Ban ``target``, echoing an optional free-form reason."""
        try:
            if ctx.message.author.server_permissions.ban_members:
                if ctx.message.author.top_role > target.top_role:
                    await self.client.ban(target)
                    reason = " ".join(map(str, reason))
                    await self.client.say("Banned {0} {1}".format(target, reason))
            else:
                await self.client.say("You don't have the required permissions, {}".format(ctx.message.author))
        except Exception:
            # Typically raised when the bot's role is not above the target's.
            await self.client.say("Failed. My role is not higher than that person.")

    @commands.command(pass_context=True)
    async def kick(self, ctx, target: discord.User, *reason):
        """Kick ``target``, echoing an optional free-form reason."""
        try:
            # Bug fix: this previously tested ban_members (copy-paste from
            # the ban command); kicking only requires kick_members.
            if ctx.message.author.server_permissions.kick_members:
                if ctx.message.author.top_role > target.top_role:
                    await self.client.kick(target)
                    reason = " ".join(map(str, reason))
                    await self.client.say("Kicked {0} {1}".format(target, reason))
            else:
                await self.client.say("You don't have the required permissions, {}".format(ctx.message.author))
        except Exception:
            # Typically raised when the bot's role is not above the target's.
            await self.client.say("Failed. My role is not higher than that person.")

    @commands.command(pass_context=True)
    async def nick(self, ctx, target: discord.User, *, nickname):
        """Change ``target``'s nickname.

        Moderators with manage_nicknames may rename anyone below them;
        members with change_nickname may rename only themselves.
        """
        try:
            if ctx.message.author.server_permissions.manage_nicknames:
                if ctx.message.author.top_role > target.top_role:
                    await self.client.change_nickname(target, nickname)
                    await self.client.say("Done.")
            elif (ctx.message.author.server_permissions.change_nickname
                    and str(target) == str(ctx.message.author)):
                await self.client.change_nickname(target, nickname)
                await self.client.say("Done, " + str(target))
            else:
                await self.client.say("You don't have the required permissions, {}".format(ctx.message.author))
        except Exception:
            # Typically raised when the bot's role is not above the target's.
            await self.client.say("Failed. My role is not higher than that person.")
def setup(client):
    """Extension entry point: register the Moderation cog with the bot."""
    client.add_cog(Moderation(client))
| 47.538462 | 128 | 0.60801 |
c121b5b2674454f3b739d07cf2ead6a0d9b4aa46 | 14,448 | py | Python | data_as_code/_recipe.py | Mikuana/data_as_code | 339f0c6bcd4e318ad925ceb4ccd6149f980d4032 | [
"MIT"
] | 2 | 2021-05-18T22:04:22.000Z | 2021-07-24T19:52:49.000Z | data_as_code/_recipe.py | Mikuana/data_as_code | 339f0c6bcd4e318ad925ceb4ccd6149f980d4032 | [
"MIT"
] | 1 | 2021-03-12T22:56:45.000Z | 2021-03-12T22:56:45.000Z | data_as_code/_recipe.py | Mikuana/data_as_code | 339f0c6bcd4e318ad925ceb4ccd6149f980d4032 | [
"MIT"
] | null | null | null | import difflib
import gzip
import json
import logging
import os
import tarfile
from enum import Enum, auto
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Union, Dict, Type, List
from data_as_code._metadata import validate_metadata
from data_as_code._step import Step
__all__ = ['Recipe', 'Role']
log = logging.getLogger(__name__)
class Role(Enum):
    """
    Step Role

    This enumerator codifies the distinct roles that a step can play in a
    Recipe. The identification of these roles controls behavior related to
    default retention of artifacts, mandatory path designation, and whether a
    step can be skipped when a recipe is executed as a pickup.
    """

    SOURCE = auto()
    """A step with no ingredients; it introduces external data into the recipe."""
    INTERMEDIARY = auto()
    """A step consumed by another step which is not itself a final result."""
    PRODUCT = auto()
    """A step that no other step consumes; its artifact is the recipe output."""
class Recipe:
    """
    Recipe

    Responsible for managing the session details involved in a series of Steps
    that generate data artifacts. This Recipe acts both as a container of
    individual steps, and an orchestrator to ensure appropriate conditions are
    met. Recipe initially creates all artifacts in temporary directories,
    then moves the artifacts to the destination, according to the various
    settings that control the retention of artifacts.

    :param destination: the path to the project folder where any artifacts that
        should be retained by the recipe will be output. Defaults to the
        "current" directory on initialization.
    :param keep: (optional) controls whether to keep source, intermediate, and
        final product artifacts. Values set here are overwritten by those set in
        individual Step settings.
    :param trust_cache: (optional) controls whether to trust the artifacts which
        may already exist in the destination folder. If set to `true` and the
        anticipated fingerprint of the metadata matches the Step, then the Step
        will skip execution and return the cached data and metadata instead.
        Values set here are overwritten by those set in individual Step
        settings.
    :param pickup: (optional) controls behavior of execution to work backwards
        for each product, to determine the latest cached Step in their
        ingredients. The end result is that the recipe will attempt to build the
        products using the least number of steps possible.
    """

    keep: List[Role] = [Role.PRODUCT]
    """Controls whether to keep source, intermediate, and final product
    artifacts. Values set here can be overwritten by the `keep`
    parameter during construction, or by those set in individual Step settings.
    Defaults to retaining all products.
    """

    trust_cache = True
    """Controls whether to trust the artifacts which may already exist in the
    destination folder. If set to `true` and the anticipated fingerprint of the
    metadata matches the Step, then the Step will skip execution and return the
    cached data and metadata instead. Values set here can be overwritten by the
    `trust_cache` parameter during construction, or by those set in individual
    Step settings.
    """

    pickup = False
    """Controls behavior of execution to work backwards for each Product to
    determine the latest cached Step in their ingredients.
    """

    _workspace: Union[str, Path]
    _td: TemporaryDirectory
    _results: Dict[str, Step]

    def __init__(
            self, destination: Union[str, Path] = '.',
            keep: Union[str, List[str]] = None,
            trust_cache: bool = None,
            pickup: bool = None
    ):
        self.destination = Path(destination) if isinstance(destination, str) \
            else destination
        self.keep = ([keep] if isinstance(keep, str) else keep) or self.keep
        self.trust_cache = trust_cache or self.trust_cache
        self.pickup = pickup or self.pickup

        self._step_check()
        self._target = self._get_targets()

    def execute(self):
        """Run every (required) step, then export metadata and finalize."""
        self._begin()
        self._results = {}
        for name, step in self._stepper().items():
            self._results[name] = step._execute(self._workspace)
        self._export_metadata()
        self._end()

    def reproducible(self) -> bool:
        """
        Verify package contents

        Execute the recipe in a separate workspace to verify that identical
        contents can be produced.

        - check metadata against all files to verify checksums
          - data without metadata warns
          - data with mismatched checksum warns
        - check metadata against verified files
          - destination metadata without verification match warns
          - verification metadata without destination match warns
          - diff in contents between matching metadata warns
        - optional switch to verify only what exists (from point of last
          available)?

        :return: a boolean indicator of whether the contents of the package is
            reproducible from scratch.
        """
        with TemporaryDirectory() as container:
            r = self.__class__(container)
            r.execute()
            return self._compare(container)

    @classmethod
    def _check_it(cls, step_name: str, steps: dict) -> set:
        """
        Iterate through ingredients of each step to determine which antecedents
        are required, if the cache is not available.
        """
        required = {step_name}
        s = steps[step_name]
        if s.check_cache() is False:
            for (x, y) in s.collect_ingredients().values():
                required = required.union(cls._check_it(x, steps))
        return required

    def _stepper(self) -> Dict[str, Step]:
        """
        TODO...

        Start with all products of a recipe, and check the cache for valid
        artifacts. If the product is missing a valid artifact in the cache,
        iterate through the ingredients of that product and check their cache
        status, continuing indefinitely until a valid cache exists.

        The idea is to be able to generate a product from the cache with the
        least number of steps possible, potentially even when some of the data
        used in certain steps is completely unavailable at the time of execution
        of the recipe.
        """
        steps = {}
        roles = self._determine_roles()
        for name, step in self._steps().items():
            # NOTE: keep/trust_cache are assigned onto the Step *class*
            # attributes here when unset, inheriting the recipe defaults.
            if step.keep is None:
                step.keep = roles[name] in self.keep
            if step.trust_cache is None:
                step.trust_cache = self.trust_cache
            steps[name] = step(self._target.folder, {k: v.metadata for k, v in steps.items()})

        if self.pickup is True:  # identify pick steps
            pickups = set()
            for k in [k for k, v in roles.items() if v is Role.PRODUCT]:
                pickups = pickups.union(self._check_it(k, steps))
            return {k: v for k, v in steps.items() if k in pickups}
        else:
            return steps

    def _begin(self):
        """
        Begin Recipe

        Prepare to start the recipe by determining if the data package
        destination is valid, then opening a workspace for temporary artifacts
        to be stored. The workspace is a temporary directory, which does not
        exist until this method is call.
        """
        for v in self._target.results():
            if v.exists() and False is True:  # TODO: make a control for this
                raise FileExistsError(
                    f"{v.as_posix()} exists and `keep.existing == True`."
                    "\nChange the keep.existing setting to False to overwrite."
                )

        self._target.folder.mkdir(exist_ok=True)
        self._td = TemporaryDirectory()
        self._workspace = Path(self._td.name)

    def _end(self):
        """
        End Recipe

        Complete the recipe by building the data package from the identified
        products, then removing the workspace (unless otherwise instructed in
        the keep parameter).
        """
        cwd = os.getcwd()
        try:
            os.chdir(self._target.folder)
            # TODO: re-enable when I figure out why this runs so slowly
            # self._package()
            self._td.cleanup()

            # TODO: add a parameter to optionally control removal of unexpected files
            expect = self._target.results() + self._target.results(metadata=True)
            for folder in [self._target.data, self._target.metadata]:
                for file in [x for x in folder.rglob('*') if x.is_file()]:
                    if file not in expect:
                        log.warning(f"Removing unexpected file {file}")
                        file.unlink()
        finally:
            os.chdir(cwd)

    @classmethod
    def _steps(cls) -> Dict[str, Type[Step]]:
        """Return every Step subclass declared on this recipe, in order."""
        return {
            k: v for k, v in cls.__dict__.items()
            if (isinstance(v, type) and issubclass(v, Step))
        }

    @classmethod
    def _products(cls) -> Dict[str, Type[Step]]:
        """Return only the steps whose role is PRODUCT."""
        x = [k for k, v in cls._determine_roles().items() if v is Role.PRODUCT]
        return {k: v for k, v in cls._steps().items() if k in x}

    @classmethod
    def _step_check(cls):
        """Assert that every ingredient refers to a *preceding* step name."""
        steps = cls._steps()
        for ix, (k, step) in enumerate(steps.items()):
            priors = list(steps.keys())[:ix]
            for x in step.collect_ingredients().values():
                ingredient = x[0]
                msg = (
                    f"Step '{k}' references ingredient '{ingredient}', but"
                    f" there is no preceding Step with that name in the recipe."
                    f" Valid values are: \n {priors}"
                )
                assert ingredient in priors, msg

    @classmethod
    def _determine_roles(cls) -> Dict[str, Role]:
        """
        Role assigner

        Determines the role that a Step result plays by looking at the links to
        other steps, then assigning that role.

        The logic breaks down this way:

         - if a Step has no ingredients, it is a source
         - if a Step is not an ingredient for any other step, then it is a
           product (overwriting previous Source assignment if applicable)
         - if a Step is neither a source or product, then it is an intermediary
        """
        steps = cls._steps()
        ingredient_list = set(
            v[0] for sublist in steps.values()
            for k, v in sublist.collect_ingredients().items()
        )

        roles = {}
        for k, step in steps.items():
            if not step.collect_ingredients():
                roles[k] = Role.SOURCE
            if k not in ingredient_list:
                roles[k] = Role.PRODUCT
            if roles.get(k) is None:
                roles[k] = Role.INTERMEDIARY
        return roles

    def _get_targets(self):
        """Build a namespace of destination paths for this recipe's output."""
        fold = self.destination.absolute()

        class Target:
            folder = fold
            data = Path(fold, 'data')
            metadata = Path(fold, 'metadata')
            recipe = Path(fold, 'recipe.py')
            archive = Path(fold, fold.name + '.tar')
            gzip = Path(fold, fold.name + '.tar.gz')

            @classmethod
            def results(cls, metadata=False):
                # Flat list of Paths for every kept step result; `self` is
                # captured from the enclosing _get_targets call.
                lol = [
                    [x._make_relative_path(z[1].path, metadata) for z in x._get_results()]
                    for x in self._steps().values() if x.keep is True
                ]
                return [Path(fold, item) for sublist in lol for item in sublist]

        return Target

    def _package(self):
        """Bundle all kept results into a gzipped tar in the destination."""
        # TODO: re-enable using something other than the keep param
        # if self.keep.get('archive', True) is True:
        with tarfile.open(self._target.archive, "w") as tar:
            # Bug fix: results() returns a flat list of Paths, so the old
            # `for k, v in ...` tuple unpacking raised on first iteration.
            for v in self._target.results():
                if v.is_file():
                    tar.add(v, v.relative_to(self._target.folder))
                else:
                    for file in v.rglob('*'):
                        tar.add(file, file.relative_to(self._target.folder))

        with gzip.open(self._target.gzip, 'wb') as f_out:
            f_out.write(self._target.archive.read_bytes())
        self._target.archive.unlink()

    def _export_metadata(self):
        """Write validated JSON metadata for every kept step result."""
        for result in self._results.values():
            if result.keep is True:
                for k, v in result.metadata.items():
                    p = Path(self._target.metadata, v.codified.path)
                    p.parent.mkdir(parents=True, exist_ok=True)
                    d = v.to_dict()
                    validate_metadata(d)
                    j = json.dumps(d, indent=2)
                    Path(p.as_posix() + '.json').write_text(j)

    def _compare(self, compare_to: Path):
        """
        Compare the contents of two separate folders to verify that they match.
        """
        match = True
        compare_to = Path(compare_to)
        meta_a = {
            x.relative_to(self.destination): x.read_text()
            for x in Path(self.destination, 'metadata').rglob('*')
            if x.is_file()
        }
        meta_b = {
            x.relative_to(compare_to): x.read_text()
            for x in Path(compare_to, 'metadata').rglob('*')
            if x.is_file()
        }

        only_in_b = set(meta_b.keys()).difference(set(meta_a.keys()))
        if only_in_b:
            match = False
            log.info(f"Comparison contains files(s) not in this package:\n")
            for x in only_in_b:
                log.info(' - ' + x.as_posix())

        only_in_a = set(meta_a.keys()).difference(set(meta_b.keys()))
        if only_in_a:
            match = False
            log.info(f"Package contains file(s) not in the comparison:\n")
            for x in only_in_a:
                log.info(' - ' + x.as_posix())

        # difference in intersecting metadata
        for meta in set(meta_a.keys()).intersection(meta_b.keys()):
            log.info(meta.as_posix())
            if meta_a[meta] != meta_b[meta]:
                match = False
                # Bug fix: unified_diff expects sequences of *lines*; passing
                # whole strings produced a character-by-character diff, and
                # logging the raw generator printed its repr, not the diff.
                diff = difflib.unified_diff(
                    meta_a[meta].splitlines(keepends=True),
                    meta_b[meta].splitlines(keepends=True),
                    'Package', 'Comparison'
                )
                log.info(''.join(diff))
        return match
| 37.333333 | 94 | 0.60389 |
b67604ceeeecd28156adea9318f23e10be675b04 | 22,284 | py | Python | glance/api/v2/image_data.py | shangxinjinxingzhong/glance | 3084dadc47c4f120854f9e9216c77f0ae53b2504 | [
"Apache-2.0"
] | null | null | null | glance/api/v2/image_data.py | shangxinjinxingzhong/glance | 3084dadc47c4f120854f9e9216c77f0ae53b2504 | [
"Apache-2.0"
] | null | null | null | glance/api/v2/image_data.py | shangxinjinxingzhong/glance | 3084dadc47c4f120854f9e9216c77f0ae53b2504 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cursive import exception as cursive_exception
import glance_store
from glance_store import backend
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
import six
import webob.exc
import glance.api.policy
from glance.common import exception
from glance.common import trust_auth
from glance.common import utils
from glance.common import wsgi
import glance.db
import glance.gateway
from glance.i18n import _, _LE, _LI
import glance.notifier
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class ImageDataController(object):
    """v2 API controller for uploading, staging, and downloading image data.

    Wires the configured db/store/notifier/policy components into a Gateway
    and exposes the data-plane operations; the private helpers roll an image
    back to a safe status when an operation fails partway.
    """

    def __init__(self, db_api=None, store_api=None,
                 policy_enforcer=None, notifier=None):
        # Each collaborator falls back to the globally-configured default
        # when not injected (injection is used by the tests, presumably).
        db_api = db_api or glance.db.get_api()
        store_api = store_api or glance_store
        notifier = notifier or glance.notifier.Notifier()
        self.policy = policy_enforcer or glance.api.policy.Enforcer()
        self.gateway = glance.gateway.Gateway(db_api, store_api,
                                              notifier, self.policy)

    def _restore(self, image_repo, image):
        """
        Restore the image to queued status.

        :param image_repo: The instance of ImageRepo
        :param image: The image will be restored
        """
        try:
            if image_repo and image:
                image.status = 'queued'
                image_repo.save(image)
        except Exception as e:
            # Best-effort rollback: failure to restore is logged, not raised,
            # because this runs while another exception is being propagated.
            msg = (_LE("Unable to restore image %(image_id)s: %(e)s") %
                   {'image_id': image.image_id,
                    'e': encodeutils.exception_to_unicode(e)})
            LOG.exception(msg)

    def _unstage(self, image_repo, image, staging_store):
        """
        Restore the image to queued status and remove data from staging.

        :param image_repo: The instance of ImageRepo
        :param image: The image will be restored
        :param staging_store: The store used for staging
        """
        loc = glance_store.location.get_location_from_uri(str(
            CONF.node_staging_uri + '/' + image.image_id))
        try:
            staging_store.delete(loc)
        except glance_store.exceptions.NotFound:
            # Nothing was staged yet; still roll the status back below.
            pass
        finally:
            self._restore(image_repo, image)

    def _delete(self, image_repo, image):
        """Delete the image.

        :param image_repo: The instance of ImageRepo
        :param image: The image that will be deleted
        """
        try:
            if image_repo and image:
                image.status = 'killed'
                image_repo.save(image)
        except Exception as e:
            # Best-effort: log and swallow so the original error propagates.
            msg = (_LE("Unable to delete image %(image_id)s: %(e)s") %
                   {'image_id': image.image_id,
                    'e': encodeutils.exception_to_unicode(e)})
            LOG.exception(msg)

    @utils.mutating
    def upload(self, req, image_id, data, size):
        """Store uploaded data for an image and transition it to 'active'.

        Translates every known failure mode into the appropriate HTTP error,
        rolling the image back to 'queued' (or deleting it) as required.
        """
        backend = None
        if CONF.enabled_backends:
            # Multi-store deployment: the client may pick a backend by header.
            backend = req.headers.get('x-image-meta-store',
                                      CONF.glance_store.default_backend)

            try:
                glance_store.get_store_from_store_identifier(backend)
            except glance_store.UnknownScheme as exc:
                raise webob.exc.HTTPBadRequest(explanation=exc.msg,
                                               request=req,
                                               content_type='text/plain')

        image_repo = self.gateway.get_repo(req.context)
        image = None
        refresher = None
        cxt = req.context
        try:
            self.policy.enforce(cxt, 'upload_image', {})
            image = image_repo.get(image_id)
            image.status = 'saving'
            try:
                if CONF.data_api == 'glance.db.registry.api':
                    # create a trust if backend is registry
                    try:
                        # request user plugin for current token
                        user_plugin = req.environ.get('keystone.token_auth')
                        roles = []
                        # use roles from request environment because they
                        # are not transformed to lower-case unlike cxt.roles
                        for role_info in req.environ.get(
                                'keystone.token_info')['token']['roles']:
                            roles.append(role_info['name'])
                        refresher = trust_auth.TokenRefresher(user_plugin,
                                                              cxt.tenant,
                                                              roles)
                    except Exception as e:
                        LOG.info(_LI("Unable to create trust: %s "
                                     "Use the existing user token."),
                                 encodeutils.exception_to_unicode(e))

                image_repo.save(image, from_state='queued')
                image.set_data(data, size, backend=backend)

                try:
                    image_repo.save(image, from_state='saving')
                except exception.NotAuthenticated:
                    if refresher is not None:
                        # request a new token to update an image in database
                        cxt.auth_token = refresher.refresh_token()
                        image_repo = self.gateway.get_repo(req.context)
                        image_repo.save(image, from_state='saving')
                    else:
                        raise

                try:
                    # release resources required for re-auth
                    if refresher is not None:
                        refresher.release_resources()
                except Exception as e:
                    LOG.info(_LI("Unable to delete trust %(trust)s: %(msg)s"),
                             {"trust": refresher.trust_id,
                              "msg": encodeutils.exception_to_unicode(e)})

            except (glance_store.NotFound,
                    exception.ImageNotFound,
                    exception.Conflict):
                msg = (_("Image %s could not be found after upload. "
                         "The image may have been deleted during the "
                         "upload, cleaning up the chunks uploaded.") %
                       image_id)
                LOG.warn(msg)
                # NOTE(sridevi): Cleaning up the uploaded chunks.
                try:
                    image.delete()
                except exception.ImageNotFound:
                    # NOTE(sridevi): Ignore this exception
                    pass
                raise webob.exc.HTTPGone(explanation=msg,
                                         request=req,
                                         content_type='text/plain')
            except exception.NotAuthenticated:
                msg = (_("Authentication error - the token may have "
                         "expired during file upload. Deleting image data for "
                         "%s.") % image_id)
                LOG.debug(msg)
                try:
                    image.delete()
                except exception.NotAuthenticated:
                    # NOTE: Ignore this exception
                    pass
                raise webob.exc.HTTPUnauthorized(explanation=msg,
                                                 request=req,
                                                 content_type='text/plain')
        except ValueError as e:
            LOG.debug("Cannot save data for image %(id)s: %(e)s",
                      {'id': image_id,
                       'e': encodeutils.exception_to_unicode(e)})
            self._restore(image_repo, image)
            raise webob.exc.HTTPBadRequest(
                explanation=encodeutils.exception_to_unicode(e))

        except glance_store.StoreAddDisabled:
            msg = _("Error in store configuration. Adding images to store "
                    "is disabled.")
            LOG.exception(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPGone(explanation=msg, request=req,
                                     content_type='text/plain')

        except exception.InvalidImageStatusTransition as e:
            msg = encodeutils.exception_to_unicode(e)
            LOG.exception(msg)
            raise webob.exc.HTTPConflict(explanation=e.msg, request=req)

        except exception.Forbidden as e:
            msg = ("Not allowed to upload image data for image %s" %
                   image_id)
            LOG.debug(msg)
            raise webob.exc.HTTPForbidden(explanation=msg, request=req)

        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

        except glance_store.StorageFull as e:
            msg = _("Image storage media "
                    "is full: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                      request=req)

        except exception.StorageQuotaFull as e:
            msg = _("Image exceeds the storage "
                    "quota: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                      request=req)

        except exception.ImageSizeLimitExceeded as e:
            msg = _("The incoming image is "
                    "too large: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                      request=req)

        except glance_store.StorageWriteDenied as e:
            msg = _("Insufficient permissions on image "
                    "storage media: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._restore(image_repo, image)
            raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                                   request=req)

        except cursive_exception.SignatureVerificationError as e:
            msg = (_LE("Signature verification failed for image %(id)s: %(e)s")
                   % {'id': image_id,
                      'e': encodeutils.exception_to_unicode(e)})
            LOG.error(msg)
            # A bad signature kills the image outright instead of re-queueing.
            self._delete(image_repo, image)
            raise webob.exc.HTTPBadRequest(explanation=msg)

        except webob.exc.HTTPGone as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to upload image data due to HTTP error"))

        except webob.exc.HTTPError as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to upload image data due to HTTP error"))
                self._restore(image_repo, image)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE("Failed to upload image data due to "
                              "internal error"))
                self._restore(image_repo, image)

    @utils.mutating
    def stage(self, req, image_id, data, size):
        """Write uploaded data to the local staging area (glance-direct).

        The image moves 'queued' -> 'uploading'; a later import call will
        move the staged bytes into a real backend store.
        """
        image_repo = self.gateway.get_repo(req.context)
        image = None

        # NOTE(jokke): this is horrible way to do it but as long as
        # glance_store is in a shape it is, the only way. Don't hold me
        # accountable for it.
        # TODO(abhishekk): After removal of backend module from glance_store
        # need to change this to use multi_backend module.
        def _build_staging_store():
            # Builds a throwaway filesystem store rooted at node_staging_uri
            # (the [7:] slice strips the leading 'file://' scheme).
            conf = cfg.ConfigOpts()
            try:
                backend.register_opts(conf)
            except cfg.DuplicateOptError:
                pass

            conf.set_override('filesystem_store_datadir',
                              CONF.node_staging_uri[7:],
                              group='glance_store')
            staging_store = backend._load_store(conf, 'file')

            try:
                staging_store.configure()
            except AttributeError:
                msg = _("'node_staging_uri' is not set correctly. Could not "
                        "load staging store.")
                raise exception.BadStoreUri(message=msg)
            return staging_store

        staging_store = _build_staging_store()

        try:
            image = image_repo.get(image_id)
            image.status = 'uploading'
            image_repo.save(image, from_state='queued')
            try:
                staging_store.add(
                    image_id, utils.LimitingReader(
                        utils.CooperativeReader(data), CONF.image_size_cap), 0)
            except glance_store.Duplicate as e:
                msg = _("The image %s has data on staging") % image_id
                raise webob.exc.HTTPConflict(explanation=msg)

        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)

        except glance_store.StorageFull as e:
            msg = _("Image storage media "
                    "is full: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._unstage(image_repo, image, staging_store)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                      request=req)

        except exception.StorageQuotaFull as e:
            msg = _("Image exceeds the storage "
                    "quota: %s") % encodeutils.exception_to_unicode(e)
            LOG.debug(msg)
            self._unstage(image_repo, image, staging_store)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                      request=req)

        except exception.ImageSizeLimitExceeded as e:
            msg = _("The incoming image is "
                    "too large: %s") % encodeutils.exception_to_unicode(e)
            LOG.debug(msg)
            self._unstage(image_repo, image, staging_store)
            raise webob.exc.HTTPRequestEntityTooLarge(explanation=msg,
                                                      request=req)

        except glance_store.StorageWriteDenied as e:
            msg = _("Insufficient permissions on image "
                    "storage media: %s") % encodeutils.exception_to_unicode(e)
            LOG.error(msg)
            self._unstage(image_repo, image, staging_store)
            raise webob.exc.HTTPServiceUnavailable(explanation=msg,
                                                   request=req)

        except exception.InvalidImageStatusTransition as e:
            msg = encodeutils.exception_to_unicode(e)
            LOG.debug(msg)
            raise webob.exc.HTTPConflict(explanation=e.msg, request=req)

        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Failed to stage image data due to "
                                  "internal error"))
                self._restore(image_repo, image)

    def download(self, req, image_id):
        """Return the image domain object for streaming by the serializer.

        Deactivated images are only downloadable by admins.
        """
        image_repo = self.gateway.get_repo(req.context)
        try:
            image = image_repo.get(image_id)
            if image.status == 'deactivated' and not req.context.is_admin:
                msg = _('The requested image has been deactivated. '
                        'Image data download is forbidden.')
                raise exception.Forbidden(message=msg)
        except exception.NotFound as e:
            raise webob.exc.HTTPNotFound(explanation=e.msg)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to download image '%s'", image_id)
            raise webob.exc.HTTPForbidden(explanation=e.msg)

        return image
class RequestDeserializer(wsgi.JSONRequestDeserializer):
    """Extract the raw octet-stream payload for upload/stage requests."""

    def _extract_payload(self, request):
        """Validate content type/encoding and expose the request body stream.

        :returns: dict with the declared ``size`` (or None) and the readable
            ``data`` stream.
        :raises webob.exc.HTTPUnsupportedMediaType: for any content type
            other than application/octet-stream.
        """
        try:
            request.get_content_type(('application/octet-stream',))
        except exception.InvalidContentType as e:
            raise webob.exc.HTTPUnsupportedMediaType(explanation=e.msg)

        if self.is_valid_encoding(request) and self.is_valid_method(request):
            request.is_body_readable = True

        return {'size': request.content_length or None,
                'data': request.body_file}

    def upload(self, request):
        return self._extract_payload(request)

    def stage(self, request):
        # Staging is only meaningful when the glance-direct import method is
        # enabled for this deployment.
        if "glance-direct" not in CONF.enabled_import_methods:
            msg = _("'glance-direct' method is not available at this site.")
            raise webob.exc.HTTPNotFound(explanation=msg)

        return self._extract_payload(request)
class ResponseSerializer(wsgi.JSONResponseSerializer):
    """Serialize image-data responses: streamed downloads and 204 acks."""

    def download(self, response, image):
        """Stream image bytes, honouring Range/Content-Range requests.

        Computes the byte ``offset`` and ``chunk_size`` from the request's
        range header (if any), sets 206 for partial content, and wires the
        store's data iterator onto the response.
        """
        offset, chunk_size = 0, None
        # NOTE(dharinic): In case of a malformed range header,
        # glance/common/wsgi.py will raise HTTPRequestRangeNotSatisfiable
        # (setting status_code to 416)
        range_val = response.request.get_range_from_request(image.size)

        if range_val:
            if isinstance(range_val, webob.byterange.Range):
                response_end = image.size - 1
                # NOTE(dharinic): webob parsing is zero-indexed.
                # i.e.,to download first 5 bytes of a 10 byte image,
                # request should be "bytes=0-4" and the response would be
                # "bytes 0-4/10".
                # Range if validated, will never have 'start' object as None.
                if range_val.start >= 0:
                    offset = range_val.start
                else:
                    # NOTE(dharinic): Negative start values needs to be
                    # processed to allow suffix-length for Range request
                    # like "bytes=-2" as per rfc7233.
                    if abs(range_val.start) < image.size:
                        offset = image.size + range_val.start

                if range_val.end is not None and range_val.end < image.size:
                    chunk_size = range_val.end - offset
                    response_end = range_val.end - 1
                else:
                    chunk_size = image.size - offset

            # NOTE(dharinic): For backward compatibility reasons, we maintain
            # support for 'Content-Range' in requests even though it's not
            # correct to use it in requests.
            elif isinstance(range_val, webob.byterange.ContentRange):
                response_end = range_val.stop - 1
                # NOTE(flaper87): if not present, both, start
                # and stop, will be None.
                offset = range_val.start
                chunk_size = range_val.stop - offset

            response.status_int = 206

        response.headers['Content-Type'] = 'application/octet-stream'

        try:
            # NOTE(markwash): filesystem store (and maybe others?) cause a
            # problem with the caching middleware if they are not wrapped in
            # an iterator very strange
            response.app_iter = iter(image.get_data(offset=offset,
                                                    chunk_size=chunk_size))
            # NOTE(dharinic): In case of a full image download, when
            # chunk_size was none, reset it to image.size to set the
            # response header's Content-Length.
            if chunk_size is not None:
                response.headers['Content-Range'] = 'bytes %s-%s/%s'\
                                                    % (offset,
                                                       response_end,
                                                       image.size)
            else:
                chunk_size = image.size
        except glance_store.NotFound as e:
            raise webob.exc.HTTPNoContent(explanation=e.msg)
        except glance_store.RemoteServiceUnavailable as e:
            raise webob.exc.HTTPServiceUnavailable(explanation=e.msg)
        except (glance_store.StoreGetNotSupported,
                glance_store.StoreRandomGetNotSupported) as e:
            raise webob.exc.HTTPBadRequest(explanation=e.msg)
        except exception.Forbidden as e:
            LOG.debug("User not permitted to download image '%s'", image)
            raise webob.exc.HTTPForbidden(explanation=e.msg)
        # NOTE(saschpe): "response.app_iter = ..." currently resets Content-MD5
        # (https://github.com/Pylons/webob/issues/86), so it should be set
        # afterwards for the time being.
        if image.checksum:
            response.headers['Content-MD5'] = image.checksum
        # NOTE(markwash): "response.app_iter = ..." also erroneously resets the
        # content-length
        response.headers['Content-Length'] = six.text_type(chunk_size)

    def upload(self, response, result):
        """Acknowledge a completed upload with 204 No Content."""
        response.status_int = 204

    def stage(self, response, result):
        """Acknowledge completed staging with 204 No Content."""
        response.status_int = 204
def create_resource():
    """Image data resource factory method"""
    body_deserializer = RequestDeserializer()
    body_serializer = ResponseSerializer()
    data_controller = ImageDataController()
    return wsgi.Resource(data_controller, body_deserializer, body_serializer)
| 43.354086 | 79 | 0.562601 |
9668a75dddb231e172724d0723819151cf1eb710 | 2,169 | py | Python | sunnysouth/marketplace/migrations/0003_auto_20210922_0754.py | EdwinBaeza05/django-genricsl-app | a8759d609957e80883cca79f0694d494364775a4 | [
"MIT"
] | null | null | null | sunnysouth/marketplace/migrations/0003_auto_20210922_0754.py | EdwinBaeza05/django-genricsl-app | a8759d609957e80883cca79f0694d494364775a4 | [
"MIT"
] | null | null | null | sunnysouth/marketplace/migrations/0003_auto_20210922_0754.py | EdwinBaeza05/django-genricsl-app | a8759d609957e80883cca79f0694d494364775a4 | [
"MIT"
] | null | null | null | # Generated by Django 3.1 on 2021-09-22 07:54
from django.db import migrations, models
import django.db.models.deletion
import sunnysouth.lib.models
import sunnysouth.marketplace.models.assets
# Auto-generated Django migration: drops Profile.picture, retypes the
# Address generic-FK content-type column, and introduces the generic
# Asset attachment model.  Do not edit field definitions by hand.
class Migration(migrations.Migration):
    # Must run after the contenttypes framework migration and the
    # marketplace migration that originally added Profile.picture.
    dependencies = [
        ('contenttypes', '0002_remove_content_type_name'),
        ('marketplace', '0002_profile_picture'),
    ]
    operations = [
        # Profile pictures are superseded by the generic Asset model below.
        migrations.RemoveField(
            model_name='profile',
            name='picture',
        ),
        migrations.AlterField(
            model_name='address',
            name='addressable_content_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype'),
        ),
        # Asset: a file/image attachable to any model via a generic FK
        # (attachable_content_type + attachable_object_id).
        migrations.CreateModel(
            name='Asset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('uuid', models.CharField(default=sunnysouth.lib.models.generate_uuid, max_length=300, unique=True)),
                ('created', models.DateTimeField(auto_now_add=True, help_text='Date time on which the object was created.', verbose_name='created at')),
                ('modified', models.DateTimeField(auto_now=True, help_text='Date time on which the object was last modified.', verbose_name='modified at')),
                ('name', models.CharField(max_length=300)),
                ('description', models.CharField(max_length=300)),
                ('type', models.CharField(max_length=300)),
                ('is_active', models.BooleanField(default=True, verbose_name='active')),
                ('image', models.FileField(null=True, upload_to=sunnysouth.marketplace.models.assets.resolve_asset_directory_path)),
                ('attachable_object_id', models.IntegerField()),
                ('attachable_content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.contenttype')),
            ],
            options={
                'ordering': ['-created', '-modified'],
                'get_latest_by': 'created',
                'abstract': False,
            },
        ),
    ]
| 45.1875 | 156 | 0.623329 |
b705aebee84e2fd3b89730029d8c40a871a9c22f | 1,251 | py | Python | hse2/__init__.py | smoyergh/hse-python | b56a5477e69d890591f80e94554a0c92c06ae4e6 | [
"Apache-2.0"
] | null | null | null | hse2/__init__.py | smoyergh/hse-python | b56a5477e69d890591f80e94554a0c92c06ae4e6 | [
"Apache-2.0"
] | 6 | 2021-11-10T20:48:07.000Z | 2021-12-06T16:48:59.000Z | hse2/__init__.py | smoyergh/hse-python | b56a5477e69d890591f80e94554a0c92c06ae4e6 | [
"Apache-2.0"
] | 1 | 2021-08-24T00:26:35.000Z | 2021-08-24T00:26:35.000Z | # SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2020 Micron Technology, Inc. All rights reserved.
"""
The HSE library is generally described in other places. The documentation here is
geared towards describing the structure of the HSE API and the specifics of each
entry point's operation.
Terminology:
KVS - Key-value store, containing zero or more key-value (KV)
pairs
KVDB - Key-value database, comprised of one or more KVSs and
defining a transaction domain
key - A byte string used to uniquely identify values for
storage, retrieval, and deletion in a KVS
multi-segment key - A key that is logically divided into N segments (N >= 2),
arranged to group related KV pairs when keys are sorted
lexicographically
key prefix - For multi-segment keys, the first K segments (1 <= K < N)
that group related KV pairs when keys are sorted lexi-
cographically
key prefix length - For multi-segment keys, the length of a key prefix (bytes)
unsegmented key - A key that is not logically divided into segments
"""
__all__ = ["hse", "limits", "version"]
| 35.742857 | 81 | 0.646683 |
1743614a178a9bb483503e78afb3fa7cc2478f0d | 147 | py | Python | src/bee/log/__init__.py | awenhaowenchao/bee | cac7522f8994aa3067c6e0a1bb3613de5c577129 | [
"MIT"
] | 4 | 2019-11-12T05:01:42.000Z | 2022-02-23T01:52:11.000Z | src/bee/log/__init__.py | awenhaowenchao/bee | cac7522f8994aa3067c6e0a1bb3613de5c577129 | [
"MIT"
] | 6 | 2021-03-19T08:13:39.000Z | 2022-03-02T15:00:19.000Z | src/bee/log/__init__.py | awenhaowenchao/bee | cac7522f8994aa3067c6e0a1bb3613de5c577129 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'wenchao.hao'
"""
log package.
For now, Python's built-in logging module can be used instead.
to be continued...
"""
| 14.7 | 28 | 0.619048 |
35dcfe624306896d15a993f5e703adb35549ebd3 | 777 | py | Python | python/setup.py | Billcountry/eson-python | 5a09231357eeb01ba1f62dd04fec85f395b0c2ab | [
"MIT"
] | null | null | null | python/setup.py | Billcountry/eson-python | 5a09231357eeb01ba1f62dd04fec85f395b0c2ab | [
"MIT"
] | 7 | 2020-05-10T05:20:28.000Z | 2022-03-26T16:28:08.000Z | python/setup.py | Billcountry/eson | 5a09231357eeb01ba1f62dd04fec85f395b0c2ab | [
"MIT"
] | null | null | null | import setuptools
# The PyPI long description is taken verbatim from the README.
with open("README.md", "r") as readme:
    long_description = readme.read()
# The package version is maintained in a standalone RELEASE file so it
# can be bumped without touching setup.py.
with open("RELEASE", "r") as release:
    version = release.read().strip()
setuptools.setup(
    name="eson",
    version=version,
    author="Billcountry",
    author_email="me@billcountry.tech",
    description="Extendable JSON to support different formats of data across languages. By default supports date and datetime objects",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Billcountry/eson",
    packages=['eson', 'eson.extensions'],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ]
)
7eaaabcbf9b18ae697bd47590e4b4cbe5735b8f1 | 8,360 | py | Python | ferrit/rest.py | gryf/ferrit | e65a5d28a391cd3dfa8f89afe2c7d718dc2c09ac | [
"CC-BY-2.0"
] | 3 | 2019-10-29T21:38:10.000Z | 2020-04-19T10:25:22.000Z | ferrit/rest.py | gryf/ferrit | e65a5d28a391cd3dfa8f89afe2c7d718dc2c09ac | [
"CC-BY-2.0"
] | null | null | null | ferrit/rest.py | gryf/ferrit | e65a5d28a391cd3dfa8f89afe2c7d718dc2c09ac | [
"CC-BY-2.0"
] | null | null | null | import json
import logging
import os
import sys
import time
import bottle
# This global variable meant to be set in module, which imports this one
FIFO = 'ferrit.fifo'
LOG_PATH = './'
LOG = logging.getLogger('bottle')
LOG.setLevel(logging.DEBUG)
handler = logging.FileHandler(os.path.join(LOG_PATH, 'ferrit-http.log'))
handler.setFormatter(logging.Formatter('%(message)s'))
LOG.addHandler(handler)
JDOE = {"email": "j.doe@nonexistent.com",
"name": "John Doe",
"username": "jdoe"}
PATCHSET_CREATED = {"change": {"branch": "master",
"commitMessage": "commit msg",
"id": "I1",
"number": "1",
"owner": JDOE,
"project": "example",
"status": "NEW",
"subject": "create new patch",
"url": "http://localhost:8181/1"},
"changeKey": {"id": "I1"},
"eventCreatedOn": int(time.time()),
"patchSet": {"author": JDOE,
"createdOn": int(time.time()) - 1000,
"isDraft": False,
"kind": "REWORK",
"number": "1",
"parents": ["2"],
"ref": "refs/changes/77/1/1",
"revision": "e3c8ac50",
"sizeDeletions": -15,
"sizeInsertions": 29,
"uploader": JDOE},
"project": "example",
"refName": "refs/heads/master",
"type": "patchset-created",
"uploader": JDOE}
RECHECK = {"approvals": [{"description": "Verified",
"type": "Verified",
"value": "0"},
{"description": "Code-Review",
"type": "Code-Review",
"value": "0"},
{"description": "Workflow",
"type": "Workflow",
"value": "0"}],
"author": JDOE,
"change": {"branch": "master",
"commitMessage": "commit msg",
"id": "I1",
"number": "690077",
"owner": JDOE,
"project": "example",
"status": "NEW",
"subject": "create new patch",
"topic": "test",
"url": "http://localhost:8181/1"},
"changeKey": {"id": "I1"},
"comment": "Patch Set 1:\n\nrecheck",
"eventCreatedOn": int(time.time()),
"patchSet": {"author": JDOE,
"createdOn": int(time.time()) - 1000,
"isDraft": False,
"kind": "NO_CODE_CHANGE",
"number": "1",
"parents": ["2"],
"ref": "refs/changes/77/1/1",
"revision": "e3c8ac50",
"sizeDeletions": -15,
"sizeInsertions": 29,
"uploader": JDOE},
"project": "example",
"refName": "refs/heads/master",
"type": "comment-added"}
MERGED = {"change": {"branch": "master",
"commitMessage": "commit msg",
"id": "I1",
"number": "1",
"owner": JDOE,
"project": "example",
"status": "MERGED",
"subject": "create new patch",
"topic": "test",
"url": "http://localhost:8181/3"},
"changeKey": {"id": "I1"},
"eventCreatedOn": int(time.time()),
"newRev": "0ce5beac",
"patchSet": {
"author": JDOE,
"createdOn": int(time.time()) - 1000,
"isDraft": False,
"kind": "REWORK",
"number": "3",
"parents": ["1"],
"ref": "refs/changes/77/1/1",
"revision": "e3c8ac50",
"sizeDeletions": -8,
"sizeInsertions": 83,
"uploader": JDOE},
"project": "example",
"refName": "refs/heads/master",
"submitter": {"name": "CI",
"username": "ci"},
"type": "change-merged"}
class App(bottle.Bottle):
    """Minimal fake of the Gerrit REST API used for exercising Ferrit.

    Implements just the endpoints an event consumer and a CI reporter
    touch.  ``POST /make/event`` is a test hook that writes a canned
    Gerrit stream event into the FIFO read by the event listener.
    """

    def __init__(self):
        super(App, self).__init__()
        self.route('/Documentation/<whatever>', callback=self._documentation)
        self.route('/plugins/events-log/', callback=self._events_log)
        self.route('/a/plugins/events-log/events/', callback=self._events)
        self.route('/a/projects/', callback=self._projects)
        self.post('/a/changes/<project>~<branch>~<id>/revisions/<commit_id>'
                  '/review', callback=self._changes)
        self.post('/make/event', callback=self._mk_event)

    def _mk_event(self):
        """Emit a canned stream event into the FIFO.

        POST form fields:
            type:    one of the canned event types (required).
            project: optional project override (change + top level).
            branch:  optional branch override.
        """
        events = {PATCHSET_CREATED['type']: PATCHSET_CREATED,
                  RECHECK['type']: RECHECK,
                  MERGED['type']: MERGED}

        if bottle.request.forms.get('type') not in events:
            return

        # BUGFIX: deep-copy via a JSON round trip.  The previous shallow
        # dict() copy aliased the nested 'change' dict, so the overrides
        # below permanently mutated the shared module-level templates.
        data = json.loads(
            json.dumps(events[bottle.request.forms.get('type')]))
        if 'project' in bottle.request.forms:
            data['change']['project'] = bottle.request.forms['project']
            data['project'] = bottle.request.forms['project']
        if 'branch' in bottle.request.forms:
            data['change']['branch'] = bottle.request.forms['branch']

        with open(FIFO, 'w') as fobj:
            fobj.write(json.dumps(data))
            fobj.write('\n')

    def _documentation(self, whatever, params=None):
        """Stub: Gerrit documentation pages; returns an empty body."""
        return

    def _events_log(self, params=None):
        """Stub: events-log plugin root; returns an empty body."""
        return

    def _events(self):
        """Stub: events-log query endpoint; returns an empty body."""
        return

    def _projects(self, params=None):
        """Return a canned project listing.

        BUGFIX: the method was missing ``self``, so when bottle invoked
        the bound callback the App instance was silently captured by
        ``params``.  It happened to work because ``params`` is unused.

        Possible params (accessible via bottle.request.params) is 'd'.
        """
        return {"All-Projects": {"id": "All-Projects",
                                 "description": "all projects",
                                 "state": "ACTIVE",
                                 "web_links": [{"name": "browse",
                                                "url":
                                                "/plugins/gitiles/All-"
                                                "Projects",
                                                "target": "_blank"}]},
                "All-Users": {"id": "All-Users",
                              "description": "users",
                              "state": "ACTIVE",
                              "web_links": [{"name": "browse",
                                             "url": "/plugins/gitiles/"
                                             "All-Users",
                                             "target": "_blank"}]},
                # BUGFIX: "ptoject" -> "project" in the served payload.
                "example": {"id": "example",
                            "description": "example project",
                            "state": "ACTIVE",
                            "web_links": [{"name": "browse",
                                           "url": "/plugins/gitiles/example",
                                           "target": "_blank"}]}}

    def _changes(self, project, branch, id, commit_id):
        """Log CI review results posted back on a change.

        "Build Started" notifications are ignored; only the final result
        comment is recorded to the log.
        """
        if bottle.request.json.get('message', '').startswith('Build Started'):
            return
        LOG.info(json.dumps(bottle.request.json))
def main():
    """Run the fake Gerrit server quietly on localhost:8181."""
    App().run(port=8181, host='localhost', debug=False, quiet=True)
if __name__ == "__main__":
    # Development entry point, meant to be run as a stand-alone module:
    #     python -m ferrit.rest
    # Mirrors log output to stdout (with timestamps) in addition to the
    # file handler configured at import time, and enables bottle debug.
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
    LOG.addHandler(handler)
    app = App()
    app.run(port=8181, host='localhost', debug=True)
| 40.386473 | 78 | 0.419019 |
5db4325121b9c3a9ff4363541a260307742e7a63 | 120 | py | Python | capitulo-02/ex03.py | bryan-lima/exercicios-livro-introd-prog-python-3ed | b6bc26dced9728510865704a80cb0d97f81f756b | [
"MIT"
] | 3 | 2021-11-09T17:54:10.000Z | 2022-01-30T22:32:25.000Z | capitulo-02/ex03.py | bryan-lima/exercicios-livro-introd-prog-python-3ed | b6bc26dced9728510865704a80cb0d97f81f756b | [
"MIT"
] | null | null | null | capitulo-02/ex03.py | bryan-lima/exercicios-livro-introd-prog-python-3ed | b6bc26dced9728510865704a80cb0d97f81f756b | [
"MIT"
] | null | null | null | # Faça um programa que exiba seu nome na tela
# name = str(input('Digite seu nome: '))
name = 'Bryan Lima'
print(name)
| 20 | 45 | 0.683333 |
4117deb1d3fcd0525e8d2756e4551a89c6aaed53 | 26,469 | py | Python | schema_salad/schema.py | drjrm3/schema_salad | b6a752fccf79883e96e5d47298b5987fed8c5e8c | [
"Apache-2.0"
] | null | null | null | schema_salad/schema.py | drjrm3/schema_salad | b6a752fccf79883e96e5d47298b5987fed8c5e8c | [
"Apache-2.0"
] | null | null | null | schema_salad/schema.py | drjrm3/schema_salad | b6a752fccf79883e96e5d47298b5987fed8c5e8c | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import copy
import hashlib
import logging
from typing import (IO, Any, AnyStr, Dict, List, MutableMapping,
MutableSequence, Set, Tuple, TypeVar, Union, cast)
import avro
import avro.schema # pylint: disable=no-name-in-module,import-error
from avro.schema import Names # pylint: disable=no-name-in-module,import-error
from avro.schema import SchemaParseException
from pkg_resources import resource_stream
from ruamel import yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from six import iteritems, string_types
from six.moves import urllib
from typing_extensions import Text # pylint: disable=unused-import
# move to a regular typing import when Python 3.3-3.6 is no longer supported
from schema_salad.utils import (add_dictlist, aslist, convert_to_dict, flatten,
json_dumps)
from . import jsonld_context, ref_resolver, validate
from .ref_resolver import Loader
from .sourceline import (SourceLine, add_lc_filename, bullets, relname,
strip_dup_lineno)
_logger = logging.getLogger("salad")

# Resource files bundled with the package that make up the Salad
# metaschema; each is loaded into the metaschema loader's cache by
# get_metaschema().  BUGFIX: removed a duplicated 'vocab_res.yml' entry
# which caused the file to be fetched and cached twice.
salad_files = ('metaschema.yml',
               'metaschema_base.yml',
               'salad.md',
               'field_name.yml',
               'import_include.md',
               'link_res.yml',
               'ident_res.yml',
               'vocab_res.yml',
               'field_name_schema.yml',
               'field_name_src.yml',
               'field_name_proc.yml',
               'ident_res_schema.yml',
               'ident_res_src.yml',
               'ident_res_proc.yml',
               'link_res_schema.yml',
               'link_res_src.yml',
               'link_res_proc.yml',
               'vocab_res_schema.yml',
               'vocab_res_src.yml',
               'vocab_res_proc.yml',
               'map_res.yml',
               'map_res_schema.yml',
               'map_res_src.yml',
               'map_res_proc.yml',
               'typedsl_res.yml',
               'typedsl_res_schema.yml',
               'typedsl_res_src.yml',
               'typedsl_res_proc.yml')
def get_metaschema():
# type: () -> Tuple[Names, List[Dict[Text, Any]], Loader]
loader = ref_resolver.Loader({
"Any": "https://w3id.org/cwl/salad#Any",
"ArraySchema": "https://w3id.org/cwl/salad#ArraySchema",
"Array_symbol": "https://w3id.org/cwl/salad#ArraySchema/type/Array_symbol",
"DocType": "https://w3id.org/cwl/salad#DocType",
"Documentation": "https://w3id.org/cwl/salad#Documentation",
"Documentation_symbol": "https://w3id.org/cwl/salad#Documentation/type/Documentation_symbol",
"Documented": "https://w3id.org/cwl/salad#Documented",
"EnumSchema": "https://w3id.org/cwl/salad#EnumSchema",
"Enum_symbol": "https://w3id.org/cwl/salad#EnumSchema/type/Enum_symbol",
"JsonldPredicate": "https://w3id.org/cwl/salad#JsonldPredicate",
"NamedType": "https://w3id.org/cwl/salad#NamedType",
"PrimitiveType": "https://w3id.org/cwl/salad#PrimitiveType",
"RecordField": "https://w3id.org/cwl/salad#RecordField",
"RecordSchema": "https://w3id.org/cwl/salad#RecordSchema",
"Record_symbol": "https://w3id.org/cwl/salad#RecordSchema/type/Record_symbol",
"SaladEnumSchema": "https://w3id.org/cwl/salad#SaladEnumSchema",
"SaladRecordField": "https://w3id.org/cwl/salad#SaladRecordField",
"SaladRecordSchema": "https://w3id.org/cwl/salad#SaladRecordSchema",
"SchemaDefinedType": "https://w3id.org/cwl/salad#SchemaDefinedType",
"SpecializeDef": "https://w3id.org/cwl/salad#SpecializeDef",
"_container": "https://w3id.org/cwl/salad#JsonldPredicate/_container",
"_id": {
"@id": "https://w3id.org/cwl/salad#_id",
"@type": "@id",
"identity": True
},
"_type": "https://w3id.org/cwl/salad#JsonldPredicate/_type",
"abstract": "https://w3id.org/cwl/salad#SaladRecordSchema/abstract",
"array": "https://w3id.org/cwl/salad#array",
"boolean": "http://www.w3.org/2001/XMLSchema#boolean",
"dct": "http://purl.org/dc/terms/",
"default": {
"@id": "https://w3id.org/cwl/salad#default",
"noLinkCheck": True
},
"doc": "rdfs:comment",
"docAfter": {
"@id": "https://w3id.org/cwl/salad#docAfter",
"@type": "@id"
},
"docChild": {
"@id": "https://w3id.org/cwl/salad#docChild",
"@type": "@id"
},
"docParent": {
"@id": "https://w3id.org/cwl/salad#docParent",
"@type": "@id"
},
"documentRoot": "https://w3id.org/cwl/salad#SchemaDefinedType/documentRoot",
"documentation": "https://w3id.org/cwl/salad#documentation",
"double": "http://www.w3.org/2001/XMLSchema#double",
"enum": "https://w3id.org/cwl/salad#enum",
"extends": {
"@id": "https://w3id.org/cwl/salad#extends",
"@type": "@id",
"refScope": 1
},
"fields": {
"@id": "https://w3id.org/cwl/salad#fields",
"mapPredicate": "type",
"mapSubject": "name"
},
"float": "http://www.w3.org/2001/XMLSchema#float",
"identity": "https://w3id.org/cwl/salad#JsonldPredicate/identity",
"inVocab": "https://w3id.org/cwl/salad#NamedType/inVocab",
"int": "http://www.w3.org/2001/XMLSchema#int",
"items": {
"@id": "https://w3id.org/cwl/salad#items",
"@type": "@vocab",
"refScope": 2
},
"jsonldPredicate": "sld:jsonldPredicate",
"long": "http://www.w3.org/2001/XMLSchema#long",
"mapPredicate": "https://w3id.org/cwl/salad#JsonldPredicate/mapPredicate",
"mapSubject": "https://w3id.org/cwl/salad#JsonldPredicate/mapSubject",
"name": "@id",
"noLinkCheck": "https://w3id.org/cwl/salad#JsonldPredicate/noLinkCheck",
"null": "https://w3id.org/cwl/salad#null",
"rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
"rdfs": "http://www.w3.org/2000/01/rdf-schema#",
"record": "https://w3id.org/cwl/salad#record",
"refScope": "https://w3id.org/cwl/salad#JsonldPredicate/refScope",
"sld": "https://w3id.org/cwl/salad#",
"specialize": {
"@id": "https://w3id.org/cwl/salad#specialize",
"mapPredicate": "specializeTo",
"mapSubject": "specializeFrom"
},
"specializeFrom": {
"@id": "https://w3id.org/cwl/salad#specializeFrom",
"@type": "@id",
"refScope": 1
},
"specializeTo": {
"@id": "https://w3id.org/cwl/salad#specializeTo",
"@type": "@id",
"refScope": 1
},
"string": "http://www.w3.org/2001/XMLSchema#string",
"subscope": "https://w3id.org/cwl/salad#JsonldPredicate/subscope",
"symbols": {
"@id": "https://w3id.org/cwl/salad#symbols",
"@type": "@id",
"identity": True
},
"type": {
"@id": "https://w3id.org/cwl/salad#type",
"@type": "@vocab",
"refScope": 2,
"typeDSL": True
},
"typeDSL": "https://w3id.org/cwl/salad#JsonldPredicate/typeDSL",
"xsd": "http://www.w3.org/2001/XMLSchema#"
})
for f in salad_files:
rs = resource_stream(__name__, 'metaschema/' + f)
loader.cache["https://w3id.org/cwl/" + f] = rs.read()
rs.close()
rs = resource_stream(__name__, 'metaschema/metaschema.yml')
loader.cache["https://w3id.org/cwl/salad"] = rs.read()
rs.close()
j = yaml.round_trip_load(loader.cache["https://w3id.org/cwl/salad"])
add_lc_filename(j, "metaschema.yml")
j, _ = loader.resolve_all(j, "https://w3id.org/cwl/salad#")
(sch_names, sch_obj) = make_avro_schema(j, loader)
if isinstance(sch_names, Exception):
_logger.error("Metaschema error, avro was:\n%s",
json_dumps(sch_obj, indent=4))
raise sch_names
validate_doc(sch_names, j, loader, strict=True)
return (sch_names, j, loader)
def add_namespaces(metadata, namespaces):
    # type: (MutableMapping[Text, Any], MutableMapping[Text, Text]) -> None
    """Merge the namespace mappings from *metadata* into *namespaces*.

    Mutates *namespaces* in place.

    Raises:
        validate.ValidationException: if a prefix is already present with
            a different expansion.
    """
    for k, v in metadata.items():
        if k not in namespaces:
            namespaces[k] = v
        elif namespaces[k] != v:
            # BUGFIX: the message was previously never %-formatted -- the
            # substitutions were passed as stray constructor arguments and
            # the conflicting prefix itself was missing from them.
            raise validate.ValidationException(
                "Namespace prefix '%s' has conflicting definitions '%s'"
                " and '%s'" % (k, namespaces[k], v))
def collect_namespaces(metadata):  # type: (MutableMapping[Text, Any]) -> MutableMapping[Text, Text]
    """Gather all $namespaces declarations, recursing into $import_metadata."""
    result = {}  # type: Dict[Text, Text]
    for imported in metadata.get("$import_metadata", {}).values():
        add_namespaces(collect_namespaces(imported), result)
    if "$namespaces" in metadata:
        add_namespaces(metadata["$namespaces"], result)
    return result
def load_schema(schema_ref,  # type: Union[CommentedMap, CommentedSeq, Text]
                cache=None  # type: Dict
                ):
    # type: (...) -> Tuple[Loader, Union[Names, SchemaParseException], Dict[Text, Any], Loader]
    """Load a schema that can be used to validate documents using load_and_validate.

    :param schema_ref: a resolved schema document, or a reference to one.
    :param cache: optional pre-populated URL -> content cache shared with
        the metaschema loader and the resulting document loader.
    :returns: (document_loader, avsc_names, schema_metadata,
        metaschema_loader)
    """
    # Resolve and validate the schema itself against the Salad metaschema.
    metaschema_names, metaschema_doc, metaschema_loader = get_metaschema()
    if cache is not None:
        metaschema_loader.cache.update(cache)
    schema_doc, schema_metadata = metaschema_loader.resolve_ref(schema_ref, "")
    if not isinstance(schema_doc, MutableSequence):
        raise ValueError("Schema reference must resolve to a list.")
    validate_doc(metaschema_names, schema_doc, metaschema_loader, True)
    # Build the JSON-LD context for the schema, merging in any namespaces
    # declared by the schema document (and its imports).
    metactx = schema_metadata.get("@context", {})
    metactx.update(collect_namespaces(schema_metadata))
    (schema_ctx, rdfs) = jsonld_context.salad_to_jsonld_context(
        schema_doc, metactx)
    # Create the loader that will be used to load the target document.
    document_loader = Loader(schema_ctx, cache=cache)
    # Make the Avro validation that will be used to validate the target
    # document
    (avsc_names, avsc_obj) = make_avro_schema(schema_doc, document_loader)
    return document_loader, avsc_names, schema_metadata, metaschema_loader
def load_and_validate(document_loader,                 # type: Loader
                      avsc_names,                      # type: Names
                      document,                        # type: Union[CommentedMap, Text]
                      strict,                          # type: bool
                      strict_foreign_properties=False  # type: bool
                      ):
    # type: (...) -> Tuple[Any, Dict[Text, Any]]
    """Load a document and validate it with the provided schema.

    *document* may be an already-parsed CommentedMap (with an "id" key)
    or a reference (URL/path) to load.  Validation failures are re-raised
    with duplicate line-number noise stripped from the message.

    return data, metadata
    """
    try:
        if isinstance(document, CommentedMap):
            # Already parsed: resolve identifiers/links in place.
            source = document["id"]
            data, metadata = document_loader.resolve_all(
                document, document["id"], checklinks=True,
                strict_foreign_properties=strict_foreign_properties)
        else:
            # A reference: fetch and resolve it.
            source = document
            data, metadata = document_loader.resolve_ref(
                document, checklinks=True,
                strict_foreign_properties=strict_foreign_properties)

        validate_doc(avsc_names, data, document_loader, strict, source=source,
                     strict_foreign_properties=strict_foreign_properties)
        return data, metadata
    except validate.ValidationException as v:
        raise validate.ValidationException(strip_dup_lineno(str(v)))
def validate_doc(schema_names, # type: Names
doc, # type: Union[Dict[Text, Any], List[Dict[Text, Any]], Text, None]
loader, # type: Loader
strict, # type: bool
source=None,
strict_foreign_properties=False # type: bool
):
# type: (...) -> None
has_root = False
for r in schema_names.names.values():
if ((hasattr(r, 'get_prop') and r.get_prop(u"documentRoot")) or (
u"documentRoot" in r.props)):
has_root = True
break
if not has_root:
raise validate.ValidationException(
"No document roots defined in the schema")
if isinstance(doc, MutableSequence):
validate_doc = doc
elif isinstance(doc, CommentedMap):
validate_doc = CommentedSeq([doc])
validate_doc.lc.add_kv_line_col(0, [doc.lc.line, doc.lc.col])
validate_doc.lc.filename = doc.lc.filename
else:
raise validate.ValidationException("Document must be dict or list")
roots = []
for r in schema_names.names.values():
if ((hasattr(r, "get_prop") and r.get_prop(u"documentRoot")) or (
r.props.get(u"documentRoot"))):
roots.append(r)
anyerrors = []
for pos, item in enumerate(validate_doc):
sl = SourceLine(validate_doc, pos, Text)
success = False
for r in roots:
success = validate.validate_ex(
r, item, loader.identifiers, strict,
foreign_properties=loader.foreign_properties,
raise_ex=False, skip_foreign_properties=loader.skip_schemas,
strict_foreign_properties=strict_foreign_properties)
if success:
break
if not success:
errors = [] # type: List[Text]
for r in roots:
if hasattr(r, "get_prop"):
name = r.get_prop(u"name")
elif hasattr(r, "name"):
name = r.name
try:
validate.validate_ex(
r, item, loader.identifiers, strict,
foreign_properties=loader.foreign_properties,
raise_ex=True, skip_foreign_properties=loader.skip_schemas,
strict_foreign_properties=strict_foreign_properties)
except validate.ClassValidationException as e:
errors = [sl.makeError(u"tried `%s` but\n%s" % (
name, validate.indent(str(e), nolead=False)))]
break
except validate.ValidationException as e:
errors.append(sl.makeError(u"tried `%s` but\n%s" % (
name, validate.indent(str(e), nolead=False))))
objerr = sl.makeError(u"Invalid")
for ident in loader.identifiers:
if ident in item:
objerr = sl.makeError(
u"Object `%s` is not valid because"
% (relname(item[ident])))
break
anyerrors.append(u"%s\n%s" %
(objerr, validate.indent(bullets(errors, "- "))))
if len(anyerrors) > 0:
raise validate.ValidationException(
strip_dup_lineno(bullets(anyerrors, "* ")))
def get_anon_name(rec):
    # type: (MutableMapping[Text, Any]) -> Text
    """Return rec's name, or derive a stable anonymous one from its content.

    Enums hash their concatenated symbols, records their concatenated
    field names; arrays get an empty name.
    """
    if "name" in rec:
        return rec["name"]
    rec_type = rec['type']
    if rec_type in ('enum', 'https://w3id.org/cwl/salad#enum'):
        joined = "".join(rec["symbols"])
        return "enum_" + hashlib.sha1(joined.encode("UTF-8")).hexdigest()
    if rec_type in ('record', 'https://w3id.org/cwl/salad#record'):
        joined = "".join(field["name"] for field in rec["fields"])
        return "record_" + hashlib.sha1(joined.encode("UTF-8")).hexdigest()
    if rec_type in ('array', 'https://w3id.org/cwl/salad#array'):
        return ""
    raise validate.ValidationException("Expected enum or record, was %s" % rec_type)
def replace_type(items, spec, loader, found, find_embeds=True, deepen=True):
    # type: (Any, Dict[Text, Any], Loader, Set[Text], bool, bool) -> Any
    """ Go through and replace types in the 'spec' mapping.

    Recursively walks *items* (a type expression: mapping, list, or type
    name string) substituting any type named as a key of *spec* with its
    replacement.  *found* accumulates type names already visited so a
    named record/enum is expanded only once (later occurrences become a
    plain name reference).  With deepen=False, a mapping is returned
    untouched after being registered in *found*.
    """
    if isinstance(items, MutableMapping):
        # recursively check these fields for types to replace
        if items.get("type") in ("record", "enum") and items.get("name"):
            if items["name"] in found:
                # Already expanded elsewhere: reference it by name.
                return items["name"]
            else:
                found.add(items["name"])
        if not deepen:
            return items
        # Shallow-copy before mutating so callers' structures are intact.
        items = copy.copy(items)
        if not items.get("name"):
            items["name"] = get_anon_name(items)
        for n in ("type", "items", "fields"):
            if n in items:
                # NOTE(review): deepen is set to find_embeds here (not to
                # deepen) -- presumably intentional, so nested types only
                # deepen when embedding is requested; confirm upstream.
                items[n] = replace_type(items[n], spec, loader, found,
                                        find_embeds=find_embeds, deepen=find_embeds)
                if isinstance(items[n], MutableSequence):
                    items[n] = flatten(items[n])
        return items
    elif isinstance(items, MutableSequence):
        # recursively transform list
        return [replace_type(i, spec, loader, found, find_embeds=find_embeds, deepen=deepen) for i in items]
    elif isinstance(items, string_types):
        # found a string which is a symbol corresponding to a type.
        replace_with = None
        if items in loader.vocab:
            # If it's a vocabulary term, first expand it to its fully qualified
            # URI
            items = loader.vocab[items]
        if items in spec:
            # Look up in specialization map
            replace_with = spec[items]
        if replace_with:
            return replace_type(replace_with, spec, loader, found, find_embeds=find_embeds)
        else:
            found.add(items)
        return items
def avro_name(url):  # type: (AnyStr) -> AnyStr
    """Reduce a URI to a bare Avro name.

    Returns the last slash-separated segment of the URI fragment, the
    whole fragment if it has no slash, or the input unchanged if there
    is no fragment.
    """
    _, fragment = urllib.parse.urldefrag(url)
    if fragment == '':
        return url
    return fragment.rsplit('/', 1)[-1] if '/' in fragment else fragment
Avro = TypeVar('Avro', Dict[Text, Any], List[Any], Text)
def make_valid_avro(items, # type: Avro
alltypes, # type: Dict[Text, Dict[Text, Any]]
found, # type: Set[Text]
union=False # type: bool
):
# type: (...) -> Union[Avro, Dict, Text]
if isinstance(items, MutableMapping):
items = copy.copy(items)
if items.get("name") and items.get("inVocab", True):
items["name"] = avro_name(items["name"])
if "type" in items and items["type"] in ("https://w3id.org/cwl/salad#record", "https://w3id.org/cwl/salad#enum", "record", "enum"):
if (hasattr(items, "get") and items.get("abstract")) or ("abstract"
in items):
return items
if items["name"] in found:
return cast(Text, items["name"])
else:
found.add(items["name"])
for n in ("type", "items", "values", "fields"):
if n in items:
items[n] = make_valid_avro(
items[n], alltypes, found, union=True)
if "symbols" in items:
items["symbols"] = [avro_name(sym) for sym in items["symbols"]]
return items
if isinstance(items, MutableSequence):
ret = []
for i in items:
ret.append(make_valid_avro(i, alltypes, found, union=union)) # type: ignore
return ret
if union and isinstance(items, string_types):
if items in alltypes and avro_name(items) not in found:
return cast(Dict, make_valid_avro(alltypes[items], alltypes, found,
union=union))
items = avro_name(items)
return items
def deepcopy_strip(item):  # type: (Any) -> Any
    """Make a deep copy of list and dict objects.

    Intentionally do not copy attributes. This is to discard CommentedMap and
    CommentedSeq metadata which is very expensive with regular copy.deepcopy.
    """
    if isinstance(item, MutableMapping):
        # Uses dict.items() directly instead of six.iteritems(); the two
        # are equivalent here on both Python 2 and 3, and this removes a
        # needless six indirection.
        return {k: deepcopy_strip(v) for k, v in item.items()}
    elif isinstance(item, MutableSequence):
        return [deepcopy_strip(k) for k in item]
    else:
        return item
def extend_and_specialize(items, loader):
# type: (List[Dict[Text, Any]], Loader) -> List[Dict[Text, Any]]
"""Apply 'extend' and 'specialize' to fully materialize derived record
types."""
items = deepcopy_strip(items)
types = {t["name"]: t for t in items} # type: Dict[Text, Any]
n = []
for t in items:
if "extends" in t:
spec = {} # type: Dict[Text, Text]
if "specialize" in t:
for sp in aslist(t["specialize"]):
spec[sp["specializeFrom"]] = sp["specializeTo"]
exfields = [] # type: List[Text]
exsym = [] # type: List[Text]
for ex in aslist(t["extends"]):
if ex not in types:
raise Exception("Extends %s in %s refers to invalid base type" % (
t["extends"], t["name"]))
basetype = copy.copy(types[ex])
if t["type"] == "record":
if len(spec) > 0:
basetype["fields"] = replace_type(
basetype.get("fields", []), spec, loader, set())
for f in basetype.get("fields", []):
if "inherited_from" not in f:
f["inherited_from"] = ex
exfields.extend(basetype.get("fields", []))
elif t["type"] == "enum":
exsym.extend(basetype.get("symbols", []))
if t["type"] == "record":
t = copy.copy(t)
exfields.extend(t.get("fields", []))
t["fields"] = exfields
fieldnames = set() # type: Set[Text]
for field in t["fields"]:
if field["name"] in fieldnames:
raise validate.ValidationException(
"Field name %s appears twice in %s" % (field["name"], t["name"]))
else:
fieldnames.add(field["name"])
elif t["type"] == "enum":
t = copy.copy(t)
exsym.extend(t.get("symbols", []))
t["symbol"] = exsym
types[t["name"]] = t
n.append(t)
ex_types = {}
for t in n:
ex_types[t["name"]] = t
extended_by = {} # type: Dict[Text, Text]
for t in n:
if "extends" in t:
for ex in aslist(t["extends"]):
if ex_types[ex].get("abstract"):
add_dictlist(extended_by, ex, ex_types[t["name"]])
add_dictlist(extended_by, avro_name(ex), ex_types[ex])
for t in n:
if t.get("abstract") and t["name"] not in extended_by:
raise validate.ValidationException(
"%s is abstract but missing a concrete subtype" % t["name"])
for t in n:
if "fields" in t:
t["fields"] = replace_type(t["fields"], extended_by, loader, set())
return n
def AvroSchemaFromJSONData(j, names):  # type: (Any, avro.schema.Names) -> Any
    """Build an avro schema object from plain JSON-like data.

    Compatibility shim: converts ruamel containers to plain dicts/lists
    before handing them to avro's parser, registering names in *names*.
    """
    return avro.schema.make_avsc_object(convert_to_dict(j), names)
def make_avro_schema(i,  # type: List[Dict[Text, Any]]
                     loader  # type: Loader
                     ):
    # type: (...) -> Tuple[Union[Names, SchemaParseException], MutableSequence[MutableMapping[Text, Any]]]
    """Convert a resolved Salad schema into avro Names.

    Returns (names, avro_json) on success; on a parse failure the
    exception object is returned in place of the Names (callers check
    the first element's type).
    """
    names = avro.schema.Names()
    # Materialize 'extends'/'specialize' into concrete record types.
    j = extend_and_specialize(i, loader)
    name_dict = {}  # type: Dict[Text, Dict[Text, Any]]
    for t in j:
        name_dict[t["name"]] = t
    # Rewrite names/references into avro-legal form.
    j2 = make_valid_avro(j, name_dict, set())
    # Abstract types and documentation entries have no avro counterpart.
    j3 = [t for t in j2 if isinstance(t, MutableMapping) and not t.get(
        "abstract") and t.get("type") != "documentation"]
    try:
        AvroSchemaFromJSONData(j3, names)
    except avro.schema.SchemaParseException as e:
        return (e, j3)
    return (names, j3)
def shortname(inputid):
    # type: (Text) -> Text
    """Return the short display name of an identifier URI.

    The last path segment of the fragment is used when a fragment is
    present; otherwise the last segment of the URI path.
    """
    parsed = urllib.parse.urlparse(inputid)
    source = parsed.fragment if parsed.fragment else parsed.path
    return source.split(u"/")[-1]
def print_inheritance(doc, stream):
    # type: (List[Dict[Text, Any]], IO) -> None
    """Write a Graphviz dot graph of record inheritance to *stream*.

    Each record becomes a node (ellipse if abstract, box otherwise)
    labeled with its field names; 'extends' relations become edges.
    """
    stream.write("digraph {\n")
    for d in doc:
        if d["type"] == "record":
            label = shortname(d["name"])
            if len(d.get("fields", [])) > 0:
                # Graphviz left-justified lines: \l escapes in the label.
                label += "\\n* %s\\l" % ("\\l* ".join(shortname(f["name"]) for f in d.get("fields", [])))
            stream.write("\"%s\" [shape=%s label=\"%s\"];\n" % (shortname(d["name"]), "ellipse" if d.get("abstract") else "box", label))
            if "extends" in d:
                for e in aslist(d["extends"]):
                    stream.write("\"%s\" -> \"%s\";\n" % (shortname(e), shortname(d["name"])))
    stream.write("}\n")
def print_fieldrefs(doc, loader, stream):
# type: (List[Dict[Text, Any]], Loader, IO) -> None
j = extend_and_specialize(doc, loader)
primitives = set(("http://www.w3.org/2001/XMLSchema#string",
"http://www.w3.org/2001/XMLSchema#boolean",
"http://www.w3.org/2001/XMLSchema#int",
"http://www.w3.org/2001/XMLSchema#long",
"https://w3id.org/cwl/salad#null",
"https://w3id.org/cwl/salad#enum",
"https://w3id.org/cwl/salad#array",
"https://w3id.org/cwl/salad#record",
"https://w3id.org/cwl/salad#Any"
))
stream.write("digraph {\n")
for d in j:
if d.get("abstract"):
continue
if d["type"] == "record":
label = shortname(d["name"])
for f in d.get("fields", []):
found = set() # type: Set[Text]
replace_type(f["type"], {}, loader, found, find_embeds=False)
for each_type in found:
if each_type not in primitives:
stream.write("\"%s\" -> \"%s\" [label=\"%s\"];\n" % (label, shortname(each_type), shortname(f["name"])))
stream.write("}\n")
| 40.349085 | 139 | 0.557142 |
21dccd0598383c8565b9afa9c6de240ea6ef7638 | 686 | py | Python | env/Lib/site-packages/plotly/validators/cone/_legendgrouptitle.py | andresgreen-byte/Laboratorio-1--Inversion-de-Capital | 8a4707301d19c3826c31026c4077930bcd6a8182 | [
"MIT"
] | 11,750 | 2015-10-12T07:03:39.000Z | 2022-03-31T20:43:15.000Z | venv/Lib/site-packages/plotly/validators/cone/_legendgrouptitle.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,951 | 2015-10-12T00:41:25.000Z | 2022-03-31T22:19:26.000Z | venv/Lib/site-packages/plotly/validators/cone/_legendgrouptitle.py | wakisalvador/constructed-misdirection | 74779e9ec640a11bc08d5d1967c85ac4fa44ea5e | [
"Unlicense"
] | 2,623 | 2015-10-15T14:40:27.000Z | 2022-03-28T16:05:50.000Z | import _plotly_utils.basevalidators
class LegendgrouptitleValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="legendgrouptitle", parent_name="cone", **kwargs):
super(LegendgrouptitleValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Legendgrouptitle"),
data_docs=kwargs.pop(
"data_docs",
"""
font
Sets this legend group's title font.
text
Sets the title of the legend group.
""",
),
**kwargs
)
| 32.666667 | 85 | 0.593294 |
de9f135586bd6268088ee2af0a6be149d2a21104 | 2,165 | py | Python | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/response/AlipayEcoMycarParkingOrderPayResponse.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | 32 | 2018-05-24T08:40:15.000Z | 2019-04-04T20:54:55.000Z | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/response/AlipayEcoMycarParkingOrderPayResponse.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | 7 | 2018-05-24T08:42:59.000Z | 2020-09-06T23:18:46.000Z | payment_lib_examples/alipay-sdk-python/virtual_environment/lib/python3.4/site-packages/alipay/aop/api/response/AlipayEcoMycarParkingOrderPayResponse.py | cuhk-mobitec/S3KVetter | 9ae79a242afbe6edae27c17065a88feca2896cf6 | [
"Apache-2.0"
] | 13 | 2018-04-25T11:27:58.000Z | 2021-03-15T12:22:21.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
class AlipayEcoMycarParkingOrderPayResponse(AlipayResponse):
def __init__(self):
super(AlipayEcoMycarParkingOrderPayResponse, self).__init__()
self._fund_bill_list = None
self._gmt_payment = None
self._out_trade_no = None
self._total_fee = None
self._trade_no = None
self._user_id = None
@property
def fund_bill_list(self):
return self._fund_bill_list
@fund_bill_list.setter
def fund_bill_list(self, value):
self._fund_bill_list = value
@property
def gmt_payment(self):
return self._gmt_payment
@gmt_payment.setter
def gmt_payment(self, value):
self._gmt_payment = value
@property
def out_trade_no(self):
return self._out_trade_no
@out_trade_no.setter
def out_trade_no(self, value):
self._out_trade_no = value
@property
def total_fee(self):
return self._total_fee
@total_fee.setter
def total_fee(self, value):
self._total_fee = value
@property
def trade_no(self):
return self._trade_no
@trade_no.setter
def trade_no(self, value):
self._trade_no = value
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def parse_response_content(self, response_content):
response = super(AlipayEcoMycarParkingOrderPayResponse, self).parse_response_content(response_content)
if 'fund_bill_list' in response:
self.fund_bill_list = response['fund_bill_list']
if 'gmt_payment' in response:
self.gmt_payment = response['gmt_payment']
if 'out_trade_no' in response:
self.out_trade_no = response['out_trade_no']
if 'total_fee' in response:
self.total_fee = response['total_fee']
if 'trade_no' in response:
self.trade_no = response['trade_no']
if 'user_id' in response:
self.user_id = response['user_id']
| 28.486842 | 110 | 0.658661 |
5d3a57fcf355cb67f68a110e36c87f05637db089 | 2,974 | py | Python | backend/omdb/lista_filmes/apps.py | tiagodomp/tiagoflix | 87b36450e55d0c9b8de9321b6cb4d44bb9fe31ce | [
"MIT"
] | null | null | null | backend/omdb/lista_filmes/apps.py | tiagodomp/tiagoflix | 87b36450e55d0c9b8de9321b6cb4d44bb9fe31ce | [
"MIT"
] | 9 | 2020-06-05T20:17:54.000Z | 2022-02-26T21:54:45.000Z | backend/omdb/lista_filmes/apps.py | tiagodomp/tiagoflix | 87b36450e55d0c9b8de9321b6cb4d44bb9fe31ce | [
"MIT"
] | null | null | null | from datetime import date
import gzip
import requests
import shutil
import os
import json
extension = '.gz'
hoje = date.today()
filmes_ids = 'movie_ids'
# {"adult":false,"id":3924,"original_title":"Blondie","popularity":2.063,"video":false}
series_ids = 'tv_series_ids'
# {"id":2,"original_name":"Clerks: The Animated Series","popularity":7.037}
filmes_url = 'http://files.tmdb.org/p/exports/' + filmes_ids + hoje.strftime('_%m_%d_%Y') + '.json' + extension
series_url = 'http://files.tmdb.org/p/exports/' + series_ids + hoje.strftime('_%m_%d_%Y') + '.json' + extension
def busca(tipo, quantidade):
if tipo == 'filme':
t = filmes_ids
url = filmes_url
elif tipo == 'serie':
t = series_ids
url = series_url
else:
return None
arquivo = t + hoje.strftime('_%m_%d_%Y') #+ '.json'
if os.path.isfile(arquivo):
return ler_arquivo(tipo, quantidade)
else:
dirs = os.listdir('./')
for file in dirs:
print(file.endswith('_2020.json'))
print(file)
exit()
if get_arquivo(tipo):
return ler_arquivo(tipo, quantidade)
def ler_arquivo(tipo, quantidade):
if tipo == 'filme':
t = filmes_ids
url = filmes_url
elif tipo == 'serie':
t = series_ids
url = series_url
else:
return False
arquivo = t + hoje.strftime('_%m_%d_%Y') + '.json'
if os.path.isfile(arquivo):
data = {}
d = {}
with open(arquivo, 'rb') as arq:
for i, item in enumerate(arq.readlines()):
linha = json.loads(item)
if linha['popularity'] >= 100.0: # PONTUAÇÂO DA LISTA DE FILMES
if tipo == 'filme':
d['titulo'] = linha['original_title']
if tipo == 'serie':
d['titulo'] = linha['original_name']
d['pontos'] = linha['popularity']
data[linha['id']] = d
if len(data) >= quantidade:
return data
return {}
def get_arquivo(tipo):
if tipo == 'filme':
t = filmes_ids
url = filmes_url
elif tipo == 'serie':
t = series_ids
url = series_url
else:
return False
#Aplicar esse método dentro de uma queue, chama-lo por um cron no servidor rodando todo dia as 08h00
try:
arq = requests.get(url)
with gzip.open(t + hoje.strftime('_%m_%d_%Y') + '.json' + extension, 'rb') as entrada:
with open(t + hoje.strftime('_%m_%d_%Y') + '.json', 'wb') as saida:
shutil.copyfileobj(entrada, saida) # seria interessante ao invés de manter em texto subir os dados para o REDIS
os.remove(t + hoje.strftime('_%m_%d_%Y') + '.json' + extension)#incluir validação e resultado em LOG -
return True
except:
return False
| 29.156863 | 127 | 0.548083 |
0cd2f7908597e57d8dbc256abb9c3a1e71c48059 | 5,834 | py | Python | configs/development.py | trooster/PowerDNS-Admin | 0cdf4bc560d1fd5a8e084448ea63ad76202602c6 | [
"MIT"
] | null | null | null | configs/development.py | trooster/PowerDNS-Admin | 0cdf4bc560d1fd5a8e084448ea63ad76202602c6 | [
"MIT"
] | null | null | null | configs/development.py | trooster/PowerDNS-Admin | 0cdf4bc560d1fd5a8e084448ea63ad76202602c6 | [
"MIT"
] | null | null | null | import os
basedir = os.path.abspath(os.path.abspath(os.path.dirname(__file__)))
### BASIC APP CONFIG
SALT = '$2b$12$yLUMTIfl21FKJQpTkRQXCu'
SECRET_KEY = 'e951e5a1f4b94151b360f47edf596dd2'
BIND_ADDRESS = '0.0.0.0'
PORT = 9191
OFFLINE_MODE = False
### DATABASE CONFIG
SQLA_DB_USER = 'pda'
SQLA_DB_PASSWORD = 'changeme'
SQLA_DB_HOST = '127.0.0.1'
SQLA_DB_NAME = 'pda'
SQLALCHEMY_TRACK_MODIFICATIONS = True
### DATABASE - MySQL
# SQLALCHEMY_DATABASE_URI = 'mysql://' + SQLA_DB_USER + ':' + SQLA_DB_PASSWORD + '@' + SQLA_DB_HOST + '/' + SQLA_DB_NAME
### DATABASE - SQLite
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'pdns.db')
### SMTP config
# MAIL_SERVER = 'localhost'
# MAIL_PORT = 25
# MAIL_DEBUG = False
# MAIL_USE_TLS = False
# MAIL_USE_SSL = False
# MAIL_USERNAME = None
# MAIL_PASSWORD = None
# MAIL_DEFAULT_SENDER = ('PowerDNS-Admin', 'noreply@domain.ltd')
# SAML Authnetication
SAML_ENABLED = False
# SAML_DEBUG = True
# SAML_PATH = os.path.join(os.path.dirname(__file__), 'saml')
# ##Example for ADFS Metadata-URL
# SAML_METADATA_URL = 'https://<hostname>/FederationMetadata/2007-06/FederationMetadata.xml'
# #Cache Lifetime in Seconds
# SAML_METADATA_CACHE_LIFETIME = 1
# # SAML SSO binding format to use
# ## Default: library default (urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect)
# #SAML_IDP_SSO_BINDING = 'urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST'
# ## EntityID of the IdP to use. Only needed if more than one IdP is
# ## in the SAML_METADATA_URL
# ### Default: First (only) IdP in the SAML_METADATA_URL
# ### Example: https://idp.example.edu/idp
# #SAML_IDP_ENTITY_ID = 'https://idp.example.edu/idp'
# ## NameID format to request
# ### Default: The SAML NameID Format in the metadata if present,
# ### otherwise urn:oasis:names:tc:SAML:1.1:nameid-format:unspecified
# ### Example: urn:oid:0.9.2342.19200300.100.1.1
# #SAML_NAMEID_FORMAT = 'urn:oid:0.9.2342.19200300.100.1.1'
# Following parameter defines RequestedAttributes section in SAML metadata
# since certain iDPs require explicit attribute request. If not provided section
# will not be available in metadata.
#
# Possible attributes:
# name (mandatory), nameFormat, isRequired, friendlyName
#
# NOTE: This parameter requires to be entered in valid JSON format as displayed below
# and multiple attributes can given
#
# Following example:
#
# SAML_SP_REQUESTED_ATTRIBUTES = '[ \
# {"name": "urn:oid:0.9.2342.19200300.100.1.3", "nameFormat": "urn:oasis:names:tc:SAML:2.0:attrname-format:uri", "isRequired": true, "friendlyName": "email"}, \
# {"name": "mail", "isRequired": false, "friendlyName": "test-field"} \
# ]'
#
# produces following metadata section:
# <md:AttributeConsumingService index="1">
# <md:RequestedAttribute Name="urn:oid:0.9.2342.19200300.100.1.3" NameFormat="urn:oasis:names:tc:SAML:2.0:attrname-format:uri" FriendlyName="email" isRequired="true"/>
# <md:RequestedAttribute Name="mail" FriendlyName="test-field"/>
# </md:AttributeConsumingService>
# ## Attribute to use for Email address
# ### Default: email
# ### Example: urn:oid:0.9.2342.19200300.100.1.3
# #SAML_ATTRIBUTE_EMAIL = 'urn:oid:0.9.2342.19200300.100.1.3'
# ## Attribute to use for Given name
# ### Default: givenname
# ### Example: urn:oid:2.5.4.42
# #SAML_ATTRIBUTE_GIVENNAME = 'urn:oid:2.5.4.42'
# ## Attribute to use for Surname
# ### Default: surname
# ### Example: urn:oid:2.5.4.4
# #SAML_ATTRIBUTE_SURNAME = 'urn:oid:2.5.4.4'
# ## Attribute to use for username
# ### Default: Use NameID instead
# ### Example: urn:oid:0.9.2342.19200300.100.1.1
# #SAML_ATTRIBUTE_USERNAME = 'urn:oid:0.9.2342.19200300.100.1.1'
# ## Attribute to get admin status from
# ### Default: Don't control admin with SAML attribute
# ### Example: https://example.edu/pdns-admin
# ### If set, look for the value 'true' to set a user as an administrator
# ### If not included in assertion, or set to something other than 'true',
# ### the user is set as a non-administrator user.
# #SAML_ATTRIBUTE_ADMIN = 'https://example.edu/pdns-admin'
# ## Attribute to get account names from
# ### Default: Don't control accounts with SAML attribute
# ### If set, the user will be added and removed from accounts to match
# ### what's in the login assertion. Accounts that don't exist will
# ### be created and the user added to them.
# SAML_ATTRIBUTE_ACCOUNT = 'https://example.edu/pdns-account'
# SAML_SP_ENTITY_ID = 'http://<SAML SP Entity ID>'
# SAML_SP_CONTACT_NAME = '<contact name>'
# SAML_SP_CONTACT_MAIL = '<contact mail>'
# Configures the path to certificate file and it's respective private key file
# This pair is used for signing metadata, encrypting tokens and all other signing/encryption
# tasks during communication between iDP and SP
# NOTE: if this two parameters aren't explicitly provided, self-signed certificate-key pair
# will be generated in "PowerDNS-Admin" root directory
# ###########################################################################################
# CAUTION: For production use, usage of self-signed certificates it's highly discouraged.
# Use certificates from trusted CA instead
# ###########################################################################################
# SAML_CERT_FILE = '/etc/pki/powerdns-admin/cert.crt'
# SAML_CERT_KEY = '/etc/pki/powerdns-admin/key.pem'
# Cofigures if SAML tokens should be encrypted.
# SAML_SIGN_REQUEST = False
# #Use SAML standard logout mechanism retreived from idp metadata
# #If configured false don't care about SAML session on logout.
# #Logout from PowerDNS-Admin only and keep SAML session authenticated.
# SAML_LOGOUT = False
# #Configure to redirect to a different url then PowerDNS-Admin login after SAML logout
# #for example redirect to google.com after successful saml logout
# #SAML_LOGOUT_URL = 'https://google.com'
# #SAML_ASSERTION_ENCRYPTED = True
| 40.513889 | 167 | 0.711347 |
69c55b9ac425b351038dda39e7f6cb2837da2ff0 | 3,310 | py | Python | teraserver/python/tests/clients/test_websocket_user_client.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | 10 | 2020-03-16T14:46:06.000Z | 2022-02-11T16:07:38.000Z | teraserver/python/tests/clients/test_websocket_user_client.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | 114 | 2019-09-16T13:02:50.000Z | 2022-03-22T19:17:36.000Z | teraserver/python/tests/clients/test_websocket_user_client.py | introlab/opentera | bfc4de672c9de40b7c9a659be2138731e7ee4e94 | [
"Apache-2.0"
] | null | null | null | import unittest
import os
from requests import get
import json
import websocket
import ssl
from opentera.messages.python.UserRegisterToEvent_pb2 import UserRegisterToEvent
from opentera.messages.python.TeraModuleMessage_pb2 import TeraModuleMessage
from google.protobuf.json_format import MessageToJson
from google.protobuf.json_format import Parse, ParseError
from google.protobuf.any_pb2 import Any
import datetime
class WebSocketUserClient(unittest.TestCase):
host = 'localhost'
port = 40075
login_endpoint = '/api/user/login'
ws = None
token = None
uuid = None
def _make_url(self, hostname, port, endpoint):
return 'https://' + hostname + ':' + str(port) + endpoint
def _http_auth(self, username, password):
url = self._make_url(self.host, self.port, self.login_endpoint)
result = get(url=url, verify=False, auth=(username, password))
self.assertEqual(result.status_code, 200)
self.assertEqual(result.headers['Content-Type'], 'application/json')
json_data = result.json()
self.assertGreater(len(json_data), 0)
# Validate fields in json response
self.assertTrue(json_data.__contains__('websocket_url'))
self.assertTrue(json_data.__contains__('user_uuid'))
self.assertTrue(json_data.__contains__('user_token'))
self.assertGreater(len(json_data['websocket_url']), 0)
self.assertGreater(len(json_data['user_uuid']), 0)
self.assertGreater(len(json_data['user_token']), 0)
return json_data
def _create_tera_message(self, dest='websocket', seq=0):
tera_message = TeraModuleMessage()
tera_message.head.version = 1
tera_message.head.time = datetime.datetime.now().timestamp()
tera_message.head.seq = seq
tera_message.head.source = 'WebSocketUserClient'
tera_message.head.dest = 'websocket.user.' + self.uuid
return tera_message
def _register_event(self, action, event_type):
event = UserRegisterToEvent()
event.action = action
event.event_type = event_type
message = self._create_tera_message()
# Need to use Any container
any_message = Any()
any_message.Pack(event)
message.data.extend([any_message])
# Send to websocket
json_data = MessageToJson(message, including_default_value_fields=True)
ret = self.ws.send(json_data)
# Wait for answer
val = self.ws.recv()
return val
def setUp(self):
# Using siteadmin default user information
json_data = self._http_auth('siteadmin', 'siteadmin')
# Create websocket
self.ws = websocket.WebSocket(sslopt={"cert_reqs": ssl.CERT_NONE})
self.ws.connect(json_data['websocket_url'])
self.token = json_data['user_token']
self.uuid = json_data['user_uuid']
self.assertTrue(self.ws.connected)
def tearDown(self):
if self.ws:
self.ws.close()
pass
def test_events_valid_user_httpauth(self):
# websocket is created in setUp function.
# Test register event
self._register_event(UserRegisterToEvent.ACTION_REGISTER, UserRegisterToEvent.EVENT_USER_CONNECTED)
ret2 = self.ws.recv()
print(ret2)
| 32.45098 | 107 | 0.68006 |
08ecc670e762b16246dd1fe743722629ca6a21dd | 13,208 | py | Python | hw/ip/rom_ctrl/util/scramble_image.py | gezalore/opentitan | b1ddd9b3d649dd050ff3567c02d9c891c0d90e07 | [
"Apache-2.0"
] | null | null | null | hw/ip/rom_ctrl/util/scramble_image.py | gezalore/opentitan | b1ddd9b3d649dd050ff3567c02d9c891c0d90e07 | [
"Apache-2.0"
] | null | null | null | hw/ip/rom_ctrl/util/scramble_image.py | gezalore/opentitan | b1ddd9b3d649dd050ff3567c02d9c891c0d90e07 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
'''Script for scrambling a ROM image'''
import argparse
from typing import Dict, List
import hjson # type: ignore
from mem import MemChunk, MemFile
# Byte address 0x8000 is where the ROM is mapped; store it as a word index.
ROM_BASE_WORD = 0x8000 // 4
# Size of the physical ROM in 32-bit words.
ROM_SIZE_WORDS = 4096
# Forward 4-bit S-Box of the PRINCE block cipher.
PRINCE_SBOX4 = [
    0xb, 0xf, 0x3, 0x2,
    0xa, 0xc, 0x9, 0x1,
    0x6, 0x7, 0x8, 0x0,
    0xe, 0x5, 0xd, 0x4
]
# Inverse of PRINCE_SBOX4.
PRINCE_SBOX4_INV = [
    0xb, 0x7, 0x3, 0x2,
    0xf, 0xd, 0x8, 0x9,
    0xa, 0x6, 0x4, 0x0,
    0x5, 0xe, 0xc, 0x1
]
# Forward 4-bit S-Box of the PRESENT cipher (used by the substitution /
# permutation network modelled in subst_perm_enc / subst_perm_dec).
PRESENT_SBOX4 = [
    0xc, 0x5, 0x6, 0xb,
    0x9, 0x0, 0xa, 0xd,
    0x3, 0xe, 0xf, 0x8,
    0x4, 0x7, 0x1, 0x2
]
# Inverse of PRESENT_SBOX4.
PRESENT_SBOX4_INV = [
    0x5, 0xe, 0xf, 0x8,
    0xc, 0x1, 0x2, 0xd,
    0xb, 0x4, 0x6, 0x3,
    0x0, 0x7, 0x9, 0xa
]
# Nibble permutation applied to the 64-bit state by prince_shiftrows
# (entry i gives the source nibble index for destination nibble i).
PRINCE_SHIFT_ROWS64 = [
    0x4, 0x9, 0xe, 0x3,
    0x8, 0xd, 0x2, 0x7,
    0xc, 0x1, 0x6, 0xb,
    0x0, 0x5, 0xa, 0xf
]
# Inverse permutation of PRINCE_SHIFT_ROWS64.
PRINCE_SHIFT_ROWS64_INV = [
    0xc, 0x9, 0x6, 0x3,
    0x0, 0xd, 0xa, 0x7,
    0x4, 0x1, 0xe, 0xb,
    0x8, 0x5, 0x2, 0xf
]
# PRINCE round constants RC0..RC11 (64-bit each).
PRINCE_ROUND_CONSTS = [
    0x0000000000000000,
    0x13198a2e03707344,
    0xa4093822299f31d0,
    0x082efa98ec4e6c89,
    0x452821e638d01377,
    0xbe5466cf34e90c6c,
    0x7ef84f78fd955cb1,
    0x85840851f1ac43aa,
    0xc882d32f25323c54,
    0x64a51195e0e3610d,
    0xd3b5a399ca0c2399,
    0xc0ac29b7c97c50dd
]
# 16-bit masks selecting which input nibbles feed each output nibble of the
# M' multiplication (see prince_mult_prime).
PRINCE_SHIFT_ROWS_CONSTS = [0x7bde, 0xbde7, 0xde7b, 0xe7bd]
# Loose alias for dictionaries parsed from hjson (keys/values untyped).
_UDict = Dict[object, object]
def sbox(data: int, width: int, coeffs: List[int]) -> int:
    '''Substitute each complete 4-bit nibble of data through coeffs.

    coeffs is a 16-entry lookup table. When width is not a multiple of
    four, the leftover high bits pass through unchanged.
    '''
    assert 0 <= width
    assert 0 <= data < (1 << width)
    num_nibbles = width // 4
    # Preserve the (width % 4) bits above the last complete nibble.
    result = (data >> (4 * num_nibbles)) << (4 * num_nibbles)
    for pos in range(0, 4 * num_nibbles, 4):
        result |= coeffs[(data >> pos) & 0xf] << pos
    return result
def subst_perm_enc(data: int, key: int, width: int, num_rounds: int) -> int:
    '''A model of prim_subst_perm in encrypt mode

    Each round XORs in the key, applies the PRESENT S-Box nibble-wise,
    bit-reverses the state, and then applies a butterfly permutation that
    sends even-indexed bits to the low half and odd-indexed bits to the
    high half. A final key XOR follows the last round. The exact inverse
    is subst_perm_dec.
    '''
    assert 0 <= width
    assert 0 <= data < (1 << width)
    assert 0 <= key < (1 << width)
    full_mask = (1 << width) - 1
    bfly_mask = (1 << (2 * (width // 2))) - 1
    for rnd in range(num_rounds):
        data_xor = data ^ key
        # SBox layer
        data_sbox = sbox(data_xor, width, PRESENT_SBOX4)
        # Reverse the vector
        data_rev = 0
        for i in range(width):
            bit = (data_sbox >> i) & 1
            data_rev |= bit << (width - 1 - i)
        # Butterfly. For odd widths the top bit passes through unchanged.
        data_bfly = data_rev & (full_mask & ~bfly_mask)
        for i in range(width // 2):
            # data_bfly[i] = data_rev[2i]
            bit = (data_rev >> (2 * i)) & 1
            data_bfly |= bit << i
            # data_bfly[width/2 + i] = data_rev[2i+1]
            bit = (data_rev >> (2 * i + 1)) & 1
            data_bfly |= bit << (width // 2 + i)
        data = data_bfly
    return data ^ key
def subst_perm_dec(data: int, key: int, width: int, num_rounds: int) -> int:
    '''A model of prim_subst_perm in decrypt mode

    Undoes subst_perm_enc: each round XORs in the key, inverts the
    butterfly permutation, bit-reverses the state and applies the inverse
    PRESENT S-Box, with a final key XOR after the last round.
    '''
    assert 0 <= width
    assert 0 <= data < (1 << width)
    assert 0 <= key < (1 << width)
    full_mask = (1 << width) - 1
    bfly_mask = (1 << (2 * (width // 2))) - 1
    for rnd in range(num_rounds):
        data_xor = data ^ key
        # Butterfly (inverse of the one in subst_perm_enc). For odd widths
        # the top bit passes through unchanged.
        data_bfly = data_xor & (full_mask & ~bfly_mask)
        for i in range(width // 2):
            # data_bfly[2i] = data_xor[i]
            bit = (data_xor >> i) & 1
            data_bfly |= bit << (2 * i)
            # data_bfly[2i+1] = data_xor[i + width // 2]
            bit = (data_xor >> (i + width // 2)) & 1
            data_bfly |= bit << (2 * i + 1)
        # Reverse the vector
        data_rev = 0
        for i in range(width):
            bit = (data_bfly >> i) & 1
            data_rev |= bit << (width - 1 - i)
        # Inverse SBox layer
        data = sbox(data_rev, width, PRESENT_SBOX4_INV)
    return data ^ key
def prince_nibble_red16(data: int) -> int:
    '''XOR-reduce the four nibbles of a 16-bit value to a single nibble.'''
    assert 0 <= data < (1 << 16)
    acc = 0
    for shift in (0, 4, 8, 12):
        acc ^= (data >> shift) & 0xf
    return acc
def prince_mult_prime(data: int) -> int:
    '''Model of the M' (M-prime) matrix multiplication step of PRINCE.

    The 64-bit state is handled as four 16-bit half-words. Each output
    nibble is the XOR-reduction of the input half-word masked by one of
    PRINCE_SHIFT_ROWS_CONSTS; the mask schedule starts one position later
    for the two middle half-words (start_sr_idx below).
    '''
    assert 0 <= data < (1 << 64)
    ret = 0
    for blk_idx in range(4):
        # Extract the 16-bit half-word for this block.
        data_hw = (data >> (16 * blk_idx)) & 0xffff
        # Outer blocks (0, 3) start the mask rotation at 0, inner ones at 1.
        start_sr_idx = 0 if blk_idx in [0, 3] else 1
        for nibble_idx in range(4):
            sr_idx = (start_sr_idx + 3 - nibble_idx) % 4
            sr_const = PRINCE_SHIFT_ROWS_CONSTS[sr_idx]
            nibble = prince_nibble_red16(data_hw & sr_const)
            ret |= nibble << (16 * blk_idx + 4 * nibble_idx)
    return ret
def prince_shiftrows(data: int, inv: bool) -> int:
    '''Permute the sixteen nibbles of a 64-bit state (PRINCE ShiftRows).

    When inv is True the inverse permutation table is used.
    '''
    assert 0 <= data < (1 << 64)
    perm = PRINCE_SHIFT_ROWS64_INV if inv else PRINCE_SHIFT_ROWS64
    out = 0
    for dst, src in enumerate(perm):
        out |= ((data >> (4 * src)) & 0xf) << (4 * dst)
    return out
def prince_fwd_round(rc: int, key: int, data: int) -> int:
    '''One forward PRINCE round: S-layer, M' multiply, ShiftRows, then
    XOR of round constant and round key.'''
    assert 0 <= rc < (1 << 64)
    assert 0 <= key < (1 << 64)
    assert 0 <= data < (1 << 64)
    state = sbox(data, 64, PRINCE_SBOX4)
    state = prince_mult_prime(state)
    state = prince_shiftrows(state, False)
    return state ^ rc ^ key
def prince_inv_round(rc: int, key: int, data: int) -> int:
    '''One inverse PRINCE round; exactly undoes prince_fwd_round.'''
    assert 0 <= rc < (1 << 64)
    assert 0 <= key < (1 << 64)
    assert 0 <= data < (1 << 64)
    state = data ^ key ^ rc
    state = prince_shiftrows(state, True)
    state = prince_mult_prime(state)
    return sbox(state, 64, PRINCE_SBOX4_INV)
def prince(data: int, key: int, num_rounds_half: int) -> int:
    '''Run the PRINCE cipher

    This uses the new keyschedule proposed by Dinur in "Cryptanalytic
    Time-Memory-Data Tradeoffs for FX-Constructions with Applications to PRINCE
    and PRIDE".

    data is the 64-bit plaintext, key the 128-bit key and num_rounds_half
    (0..5) the number of forward/inverse round pairs around the middle
    layer (5 gives the full 12-round cipher).
    '''
    assert 0 <= data < (1 << 64)
    assert 0 <= key < (1 << 128)
    assert 0 <= num_rounds_half <= 5
    # TODO: This matches the RTL in prim_prince.sv, but seems to be the other
    # way around in the original paper.
    k1 = key & ((1 << 64) - 1)
    k0 = key >> 64
    # k0' is k0 rotated right by one, XORed with its top bit (FX whitening).
    k0_rot1 = ((k0 & 1) << 63) | (k0 >> 1)
    k0_prime = k0_rot1 ^ (k0 >> 63)
    # Input whitening.
    data ^= k0
    data ^= k1
    data ^= PRINCE_ROUND_CONSTS[0]
    # Forward rounds (round indices 1..num_rounds_half), alternating keys.
    for hri in range(num_rounds_half):
        round_idx = 1 + hri
        rc = PRINCE_ROUND_CONSTS[round_idx]
        rk = k0 if round_idx & 1 else k1
        data = prince_fwd_round(rc, rk, data)
    # Middle (keyless) layer: S, M', S^-1.
    data = sbox(data, 64, PRINCE_SBOX4)
    data = prince_mult_prime(data)
    data = sbox(data, 64, PRINCE_SBOX4_INV)
    # Inverse rounds (round indices 11-num_rounds_half .. 10).
    for hri in range(num_rounds_half):
        round_idx = 11 - num_rounds_half + hri
        rc = PRINCE_ROUND_CONSTS[round_idx]
        rk = k1 if round_idx & 1 else k0
        data = prince_inv_round(rc, rk, data)
    # Output whitening.
    data ^= PRINCE_ROUND_CONSTS[11]
    data ^= k1
    data ^= k0_prime
    return data
class Scrambler:
    '''Models the ROM scrambling scheme of rom_ctrl.

    Addresses are descrambled with a keyed substitution/permutation
    network and data words are XORed with a PRINCE keystream, then passed
    through an unkeyed S&P layer.
    '''
    def __init__(self, nonce: int, key: int, rom_size_words: int):
        assert 0 <= nonce < (1 << 64)
        assert 0 <= key < (1 << 128)
        assert 0 < rom_size_words < (1 << 64)

        self.nonce = nonce
        self.key = key
        self.rom_size_words = rom_size_words
        # Number of address bits needed to index rom_size_words words.
        self._addr_width = (rom_size_words - 1).bit_length()

    @staticmethod
    def _get_rom_ctrl(modules: List[object]) -> _UDict:
        '''Return the single module entry with type rom_ctrl.'''
        rom_ctrls = []  # type: List[_UDict]
        for entry in modules:
            assert isinstance(entry, dict)
            entry_type = entry.get('type')
            assert isinstance(entry_type, str)
            if entry_type == 'rom_ctrl':
                rom_ctrls.append(entry)
        assert len(rom_ctrls) == 1
        return rom_ctrls[0]

    @staticmethod
    def _get_params(module: _UDict) -> Dict[str, _UDict]:
        '''Index the module's param_list by parameter name.'''
        params = module.get('param_list')
        assert isinstance(params, list)
        named_params = {}  # type: Dict[str, _UDict]
        for param in params:
            name = param.get('name')
            assert isinstance(name, str)
            assert name not in named_params
            named_params[name] = param
        return named_params

    @staticmethod
    def _get_param_value(params: Dict[str, _UDict],
                         name: str,
                         width: int) -> int:
        '''Read a parameter's default value as an integer < 2**width.'''
        param = params.get(name)
        assert isinstance(param, dict)
        default = param.get('default')
        assert isinstance(default, str)
        int_val = int(default, 0)
        assert 0 <= int_val < (1 << width)
        return int_val

    @staticmethod
    def from_hjson_path(path: str, rom_size_words: int) -> 'Scrambler':
        '''Construct a Scrambler from the RndCnstScr{Nonce,Key} parameters
        of the rom_ctrl module in a top-level hjson config.'''
        assert 0 < rom_size_words
        with open(path) as handle:
            top = hjson.load(handle, use_decimal=True)
        assert isinstance(top, dict)
        modules = top.get('module')
        assert isinstance(modules, list)
        rom_ctrl = Scrambler._get_rom_ctrl(modules)
        params = Scrambler._get_params(rom_ctrl)
        nonce = Scrambler._get_param_value(params, 'RndCnstScrNonce', 64)
        key = Scrambler._get_param_value(params, 'RndCnstScrKey', 128)
        return Scrambler(nonce, key, rom_size_words)

    def flatten(self, mem: MemFile) -> MemFile:
        '''Flatten and pad mem up to the correct size

        This adds 8 trailing zero words as space to store the expected hash.
        These are (obviously!) not the right hash: we inject them properly
        later.
        '''
        digest_size_words = 8
        initial_len = self.rom_size_words - digest_size_words
        seed = self.key + self.nonce

        flattened = mem.flatten(initial_len, seed)
        assert len(flattened.chunks) == 1
        assert len(flattened.chunks[0].words) == initial_len

        # Add the 8 trailing zero words. We do it here, rather than passing
        # rom_size_words to mem.flatten, to make sure that we see the error if
        # mem is too big.
        flattened.chunks[0].words += [0] * digest_size_words
        return flattened

    def scramble40(self, mem: MemFile) -> MemFile:
        '''Scramble a flattened 40-bit-wide MemFile, returning a new one.'''
        assert len(mem.chunks) == 1
        assert len(mem.chunks[0].words) == self.rom_size_words
        word_width = 40

        # Write addr_sp, data_sp for the S&P networks for address and data,
        # respectively. Write clr[i] for unscrambled data word i and scr[i] for
        # scrambled data word i. We need to construct scr[0], scr[1], ...,
        # scr[self.rom_size_words].
        #
        # Then, for all i, we have:
        #
        #   clr[i] = PRINCE(i) ^ data_sp(scr[addr_sp(i)])
        #
        # Change coordinates by evaluating at addr_sp_inv(i):
        #
        #   clr[addr_sp_inv(i)] = PRINCE(addr_sp_inv(i)) ^ data_sp(scr[i])
        #
        # so
        #
        #   scr[i] = data_sp_inv(clr[addr_sp_inv(i)] ^ PRINCE(addr_sp_inv(i)))
        subst_perm_rounds = 2
        num_rounds_half = 2

        assert word_width <= 64
        word_mask = (1 << word_width) - 1

        # Split the 64-bit nonce: the low bits scramble data, the top
        # _addr_width bits scramble addresses.
        data_nonce_width = 64 - self._addr_width
        data_scr_nonce = self.nonce & ((1 << data_nonce_width) - 1)
        addr_scr_nonce = self.nonce >> data_nonce_width

        scrambled = []
        for phy_addr in range(self.rom_size_words):
            # NOTE(review): subst_perm_dec yields any value < 2**_addr_width,
            # so this assertion relies on rom_size_words being a power of two.
            log_addr = subst_perm_dec(phy_addr, addr_scr_nonce,
                                      self._addr_width, subst_perm_rounds)
            assert 0 <= log_addr < self.rom_size_words

            to_scramble = (data_scr_nonce << self._addr_width) | log_addr
            keystream = prince(to_scramble, self.key, num_rounds_half)
            keystream_trunc = keystream & word_mask

            clr_data = mem.chunks[0].words[log_addr]
            # The valid range of a word_width-bit word includes the all-ones
            # value word_mask, so the bound must be inclusive (a strict '<'
            # here could fire spuriously on legal data).
            assert 0 <= clr_data <= word_mask

            sp_scr_data = keystream_trunc ^ clr_data
            scr_data = subst_perm_enc(sp_scr_data, 0, word_width, subst_perm_rounds)
            # Likewise: the S&P network can legitimately output word_mask.
            assert 0 <= scr_data <= word_mask

            scrambled.append(scr_data)

        return MemFile(mem.width, [MemChunk(0, scrambled)])
def main() -> None:
    '''Entry point: scramble an ELF ROM image into a .vmem file.'''
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('hjson')
    arg_parser.add_argument('infile', type=argparse.FileType('rb'))
    arg_parser.add_argument('outfile', type=argparse.FileType('w'))
    opts = arg_parser.parse_args()

    # The scrambling key and nonce come from the top-level hjson config.
    scr = Scrambler.from_hjson_path(opts.hjson, ROM_SIZE_WORDS)

    # Load the input ELF file.
    cleartext = MemFile.load_elf32(opts.infile, 4 * ROM_BASE_WORD)

    # Flatten the file, padding with pseudo-random data and ensuring it's
    # exactly scr.rom_size_words words long.
    flat = scr.flatten(cleartext)

    # Widen each 32-bit word to 39 bits by adding Hsiao (39,32) ECC bits,
    # then zero-extend by one more bit to the physical ROM width.
    flat.add_ecc32()
    assert flat.width == 39
    flat.width = 40

    # Scramble the memory.
    scrambled = scr.scramble40(flat)

    # TODO: Calculate and insert the expected hash here.
    scrambled.write_vmem(opts.outfile)


if __name__ == '__main__':
    main()
| 28.713043 | 84 | 0.589945 |
030a2cc8d94803e071b75638137af8c13ebb4780 | 11,278 | py | Python | model.py | quanghuy17111999/LMTracker | 221ff8ff97e8d81ad03c0bab187ef1dbbad42c12 | [
"MIT"
] | null | null | null | model.py | quanghuy17111999/LMTracker | 221ff8ff97e8d81ad03c0bab187ef1dbbad42c12 | [
"MIT"
] | 1 | 2022-02-22T01:40:43.000Z | 2022-02-22T01:40:43.000Z | model.py | quanghuy17111999/LMTracker | 221ff8ff97e8d81ad03c0bab187ef1dbbad42c12 | [
"MIT"
] | 1 | 2022-02-22T01:09:28.000Z | 2022-02-22T01:09:28.000Z | from distutils.log import WARN
from pdb import post_mortem
from typing import Dict, List, Optional, Tuple
import torch
from torch import Tensor
from torch.nn import Embedding
from torch.utils.data import DataLoader
from torch_sparse import SparseTensor
from torch_geometric.typing import EdgeType, NodeType, OptTensor
EPS = 1e-15
class MetaPath2Vec(torch.nn.Module):
    r"""The MetaPath2Vec model from the `"metapath2vec: Scalable Representation
    Learning for Heterogeneous Networks"
    <https://ericdongyx.github.io/papers/
    KDD17-dong-chawla-swami-metapath2vec.pdf>`_ paper where random walks based
    on a given :obj:`metapath` are sampled in a heterogeneous graph, and node
    embeddings are learned via negative sampling optimization.

    .. note::
        For an example of using MetaPath2Vec, see
        `examples/hetero/metapath2vec.py
        <https://github.com/pyg-team/pytorch_geometric/blob/master/examples/
        hetero/metapath2vec.py>`_.

    Args:
        edge_index_dict (Dict[Tuple[str, str, str], Tensor]): Dictionary
            holding edge indices for each
            :obj:`(src_node_type, rel_type, dst_node_type)` present in the
            heterogeneous graph.
        embedding_dim (int): The size of each embedding vector.
        metapath (List[Tuple[str, str, str]]): The metapath described as a list
            of :obj:`(src_node_type, rel_type, dst_node_type)` tuples.
        walk_length (int): The walk length.
        context_size (int): The actual context size which is considered for
            positive samples. This parameter increases the effective sampling
            rate by reusing samples across different source nodes.
        walks_per_node (int, optional): The number of walks to sample for each
            node. (default: :obj:`1`)
        num_negative_samples (int, optional): The number of negative samples to
            use for each positive sample. (default: :obj:`1`)
        num_nodes_dict (Dict[str, int], optional): Dictionary holding the
            number of nodes for each node type. (default: :obj:`None`)
        sparse (bool, optional): If set to :obj:`True`, gradients w.r.t. to the
            weight matrix will be sparse. (default: :obj:`False`)
    """
    def __init__(
        self,
        edge_index_dict: Dict[EdgeType, Tensor],
        embedding_dim: int,
        metapath: List[EdgeType],
        walk_length: int,
        context_size: int,
        walks_per_node: int = 1,
        num_negative_samples: int = 1,
        num_nodes_dict: Optional[Dict[NodeType, int]] = None,
        sparse: bool = False,
    ):
        super().__init__()

        # Infer the node count per type from the largest index seen on either
        # endpoint of every edge type, if counts were not supplied explicitly.
        if num_nodes_dict is None:
            num_nodes_dict = {}
            for keys, edge_index in edge_index_dict.items():
                key = keys[0]
                N = int(edge_index[0].max() + 1)
                num_nodes_dict[key] = max(N, num_nodes_dict.get(key, N))
                key = keys[-1]
                N = int(edge_index[1].max() + 1)
                num_nodes_dict[key] = max(N, num_nodes_dict.get(key, N))

        # One CPU-resident sparse adjacency per edge type; used for walking.
        adj_dict = {}
        for keys, edge_index in edge_index_dict.items():
            sizes = (num_nodes_dict[keys[0]], num_nodes_dict[keys[-1]])
            row, col = edge_index
            adj = SparseTensor(row=row, col=col, sparse_sizes=sizes)
            adj = adj.to('cpu')
            adj_dict[keys] = adj

        assert walk_length + 1 >= context_size
        # A walk longer than the metapath must be able to wrap around, which
        # requires the metapath to end at the node type it started from.
        if walk_length > len(metapath) and metapath[0][0] != metapath[-1][-1]:
            raise AttributeError(
                "The 'walk_length' is longer than the given 'metapath', but "
                "the 'metapath' does not denote a cycle")

        self.adj_dict = adj_dict
        self.embedding_dim = embedding_dim
        self.metapath = metapath
        self.walk_length = walk_length
        self.context_size = context_size
        self.walks_per_node = walks_per_node
        self.num_negative_samples = num_negative_samples
        self.num_nodes_dict = num_nodes_dict

        # All node types are packed into one flat embedding table; start/end
        # record each type's slice boundaries within that table.
        types = set([x[0] for x in metapath]) | set([x[-1] for x in metapath])
        types = sorted(list(types))
        count = 0
        self.start, self.end = {}, {}
        for key in types:
            self.start[key] = count
            count += num_nodes_dict[key]
            self.end[key] = count

        # Per-step offsets translating type-local node ids (produced while
        # walking) into indices of the flat embedding table.
        offset = [self.start[metapath[0][0]]]
        offset += [self.start[keys[-1]] for keys in metapath
                   ] * int((walk_length / len(metapath)) + 1)
        offset = offset[:walk_length + 1]
        assert len(offset) == walk_length + 1
        self.offset = torch.tensor(offset)

        # + 1 denotes a dummy node used to link to for isolated nodes.
        self.embedding = Embedding(count + 1, embedding_dim, sparse=sparse)
        self.dummy_idx = count

        self.reset_parameters()

    def reset_parameters(self):
        """Reinitializes the embedding weights."""
        self.embedding.reset_parameters()

    def forward(self, node_type: str, batch: OptTensor = None) -> Tensor:
        r"""Returns the embeddings for the nodes in :obj:`batch` of type
        :obj:`node_type`."""
        emb = self.embedding.weight[self.start[node_type]:self.end[node_type]]
        return emb if batch is None else emb.index_select(0, batch)

    def loader(self, **kwargs):
        r"""Returns the data loader that creates both positive and negative
        random walks on the heterogeneous graph.

        Args:
            **kwargs (optional): Arguments of
                :class:`torch.utils.data.DataLoader`, such as
                :obj:`batch_size`, :obj:`shuffle`, :obj:`drop_last` or
                :obj:`num_workers`.
        """
        return DataLoader(range(self.num_nodes_dict[self.metapath[0][0]]),
                          collate_fn=self._sample, **kwargs)

    def _pos_sample(self, batch: Tensor) -> Tensor:
        """Samples positive context windows from metapath-guided random walks
        starting at the nodes in :obj:`batch`."""
        batch = batch.repeat(self.walks_per_node)

        rws = [batch]
        for i in range(self.walk_length):
            keys = self.metapath[i % len(self.metapath)]
            adj = self.adj_dict[keys]
            batch = sample(adj, batch, num_neighbors=1,
                           dummy_idx=self.dummy_idx).view(-1)
            rws.append(batch)

        rw = torch.stack(rws, dim=-1)
        rw.add_(self.offset.view(1, -1))
        # Anything shifted past the table end (walks through dummy nodes)
        # is clamped back onto the single dummy embedding slot.
        rw[rw > self.dummy_idx] = self.dummy_idx

        # Slide a window of `context_size` over each walk.
        walks = []
        num_walks_per_rw = 1 + self.walk_length + 1 - self.context_size
        for j in range(num_walks_per_rw):
            walks.append(rw[:, j:j + self.context_size])
        return torch.cat(walks, dim=0)

    def _neg_sample(self, batch: Tensor) -> Tensor:
        """Samples negative context windows by drawing uniformly random nodes
        of the correct type at every walk step."""
        batch = batch.repeat(self.walks_per_node * self.num_negative_samples)

        rws = [batch]
        for i in range(self.walk_length):
            keys = self.metapath[i % len(self.metapath)]
            batch = torch.randint(0, self.num_nodes_dict[keys[-1]],
                                  (batch.size(0), ), dtype=torch.long)
            rws.append(batch)

        rw = torch.stack(rws, dim=-1)
        rw.add_(self.offset.view(1, -1))

        walks = []
        num_walks_per_rw = 1 + self.walk_length + 1 - self.context_size
        for j in range(num_walks_per_rw):
            walks.append(rw[:, j:j + self.context_size])
        return torch.cat(walks, dim=0)

    def _sample(self, batch: List[int]) -> Tuple[Tensor, Tensor]:
        """DataLoader collate function: positive and negative walks for a batch
        of start node indices."""
        if not isinstance(batch, Tensor):
            batch = torch.tensor(batch, dtype=torch.long)
        return self._pos_sample(batch), self._neg_sample(batch)

    def loss(self, pos_rw: Tensor, neg_rw: Tensor) -> Tensor:
        r"""Computes the loss given positive and negative random walks."""
        # Positive loss.
        start, rest = pos_rw[:, 0], pos_rw[:, 1:].contiguous()

        h_start = self.embedding(start).view(pos_rw.size(0), 1,
                                             self.embedding_dim)
        h_rest = self.embedding(rest.view(-1)).view(pos_rw.size(0), -1,
                                                    self.embedding_dim)

        out = (h_start * h_rest).sum(dim=-1).view(-1)
        pos_loss = -torch.log(torch.sigmoid(out) + EPS).mean()

        # Negative loss.
        start, rest = neg_rw[:, 0], neg_rw[:, 1:].contiguous()

        h_start = self.embedding(start).view(neg_rw.size(0), 1,
                                             self.embedding_dim)
        h_rest = self.embedding(rest.view(-1)).view(neg_rw.size(0), -1,
                                                    self.embedding_dim)

        out = (h_start * h_rest).sum(dim=-1).view(-1)
        neg_loss = -torch.log(1 - torch.sigmoid(out) + EPS).mean()

        return pos_loss + neg_loss

    def get_embedding(self, pos_rw: Tensor) -> Tensor:
        r"""Returns an aggregate :obj:`embedding_dim`-sized vector for a single
        random walk :obj:`pos_rw` by averaging the element-wise products of the
        start node's embedding with the embeddings of the remaining nodes."""
        start, rest = pos_rw[0], pos_rw[1:].contiguous()
        h_start = self.embedding(start)
        h_rest = self.embedding(rest.view(-1))
        # Broadcasts (dim,) * (walk_len, dim) and averages over the walk axis.
        out = (h_start * h_rest).mean(dim=0)
        return out

    def test(self, train_z: Tensor, train_y: Tensor, test_z: Tensor,
             test_y: Tensor, solver: str = "lbfgs", multi_class: str = "auto",
             *args, **kwargs) -> float:
        r"""Evaluates latent space quality via a logistic regression downstream
        task."""
        from sklearn.linear_model import LogisticRegression
        clf = LogisticRegression(solver=solver, multi_class=multi_class, *args,
                                 **kwargs).fit(train_z.detach().cpu().numpy(),
                                               train_y.detach().cpu().numpy())
        return clf.score(test_z.detach().cpu().numpy(),
                         test_y.detach().cpu().numpy())

    def __repr__(self) -> str:
        # Report the table size without the trailing dummy-node row.
        return (f'{self.__class__.__name__}('
                f'{self.embedding.weight.size(0) - 1}, '
                f'{self.embedding.weight.size(1)})')
def sample(src: SparseTensor, subset: Tensor, num_neighbors: int,
           dummy_idx: int) -> Tensor:
    """Draw `num_neighbors` uniformly random neighbors for each node in
    `subset` from the sparse adjacency `src`.

    Nodes that are the dummy node, or that have no outgoing edges, are
    mapped to `dummy_idx` so that random walks can continue past them.
    """
    real = subset < dummy_idx
    degree = torch.zeros_like(subset)
    degree[real] = src.storage.rowcount()[subset[real]]
    real = real & (degree > 0)

    row_start = torch.zeros_like(subset)
    row_start[real] = src.storage.rowptr()[subset[real]]

    # Uniform position inside each node's CSR neighbor slice.
    pick = torch.rand((degree.size(0), num_neighbors), device=subset.device)
    pick = (pick * degree.to(pick.dtype).view(-1, 1)).to(torch.long)
    pick = pick + row_start.view(-1, 1)

    out = src.storage.col()[pick]
    out[~real] = dummy_idx
    return out
afaafc15130cd12eab09089a5be343df5eda3766 | 1,417 | py | Python | mmocr/models/textdet/detectors/ocr_mask_rcnn.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | mmocr/models/textdet/detectors/ocr_mask_rcnn.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | mmocr/models/textdet/detectors/ocr_mask_rcnn.py | xyzhu8/mmocr | f62b4513f5411bde9f24e1902b1cb1945340022a | [
"Apache-2.0"
] | null | null | null | from mmdet.models.builder import DETECTORS
from mmdet.models.detectors import MaskRCNN
from mmocr.models.textdet.detectors.text_detector_mixin import \
TextDetectorMixin
@DETECTORS.register_module()
class OCRMaskRCNN(TextDetectorMixin, MaskRCNN):
    """Mask RCNN tailored for OCR.

    Combines mmdet's MaskRCNN detector with the text-detector mixin and
    post-processes instance masks into text boundaries.
    """

    def __init__(self,
                 backbone,
                 rpn_head,
                 roi_head,
                 train_cfg,
                 test_cfg,
                 neck=None,
                 pretrained=None,
                 text_repr_type='quad',
                 show_score=False,
                 init_cfg=None):
        # Both bases are initialized explicitly (not via super()) so each
        # receives exactly its own arguments; keep this call order.
        TextDetectorMixin.__init__(self, show_score)
        MaskRCNN.__init__(
            self,
            backbone=backbone,
            neck=neck,
            rpn_head=rpn_head,
            roi_head=roi_head,
            train_cfg=train_cfg,
            test_cfg=test_cfg,
            pretrained=pretrained,
            init_cfg=init_cfg)
        # Boundary representation: axis-aligned quadrilateral or polygon.
        assert text_repr_type in ['quad', 'poly']
        self.text_repr_type = text_repr_type

    def simple_test(self, img, img_metas, proposals=None, rescale=False):
        """Run single-image inference and return text boundaries as a list."""
        results = super().simple_test(img, img_metas, proposals, rescale)

        # Convert the raw detection/mask results into text boundaries and
        # normalize the return value to a list.
        boundaries = self.get_boundary(results[0])
        boundaries = boundaries if isinstance(boundaries,
                                              list) else [boundaries]
        return boundaries
| 31.488889 | 73 | 0.579393 |
168614e6e514dd1acb0e4f7ed2ff26c80e3ca0de | 3,473 | py | Python | echoremote.py | petermoon/echoir | dac36ab518a014b227b8ba74d61e1a7c8932bfc4 | [
"Apache-2.0"
] | null | null | null | echoremote.py | petermoon/echoir | dac36ab518a014b227b8ba74d61e1a7c8932bfc4 | [
"Apache-2.0"
] | null | null | null | echoremote.py | petermoon/echoir | dac36ab518a014b227b8ba74d61e1a7c8932bfc4 | [
"Apache-2.0"
] | null | null | null | #!/home/pi/.virtualenvs/python34/bin/python
import boto3
import boto3.session
import json
from subprocess import call
from time import sleep
import os.path
from botocore.exceptions import ClientError
from botocore.exceptions import BotoCoreError
import logging
# All activity is appended to a single on-device log file at INFO level.
LOG_FILENAME = '/home/pi/log/echoremote.log'
logging.basicConfig(filename=LOG_FILENAME, level=logging.INFO)
def log(msg):
    """Record *msg* in the module log at INFO level."""
    logging.info(msg)
# Named activities mapped to the ordered list of IR signal files to send.
sequences = {'all_off': ['tv_off.txt', 'receiver_off.txt',
                         'xbox_off.txt', 'apple_tv_home.txt'],
             'apple_tv': ['tv_on.txt',
                          'receiver_apple_tv.txt',
                          'apple_tv_home.txt'],
             'xbox': ['tv_on.txt', 'receiver_xbox.txt', 'xbox_on.txt'],
             'raspberry_pie': ['tv_on.txt', 'receiver_raspberry_pie.txt'],
             'bluetooth': ['receiver_bluetooth.txt']}

# Slot names of the (up to six) free-form action words in an 'Action' intent.
keys = ('ActionA', 'ActionB', 'ActionC', 'ActionD', 'ActionE', 'ActionF')

# Directory holding the captured IR signal files sent via igclient.
signals_path = '/home/pi/scribbles/python/echoir/signals/'
def handle_seq(sequence):
    """Send each IR signal file in *sequence*, pausing briefly between them."""
    for signal_file in sequence:
        send_signal(signal_file)
        # Short gap so the device can register consecutive codes.
        sleep(0.2)
def send_signal(signal):
    """Transmit one IR signal file via igclient, if the file exists.

    Parameters
    ----------
    signal : str
        File name (relative to ``signals_path``) of the captured IR code.
    """
    signal_full = os.path.join(signals_path, signal)
    if os.path.isfile(signal_full):
        call(['/usr/bin/igclient', '--send', signal_full])
        log("  Sent: {}".format(signal))
    else:
        # Log the full path so a missing or renamed signal file is easy to
        # spot. (Previously the code logged 'SENT' before checking existence,
        # claiming success even when nothing was transmitted.)
        log("  NOT SENT (missing file): {}".format(signal_full))
def process_msg(msg):
    """Decode one SQS message holding an Alexa intent and dispatch it."""
    log(msg.body)
    intent = json.loads(msg.body)

    # Intent name -> handler; unknown intents are silently ignored.
    handlers = {
        'Power': handle_power,
        'Volume': handle_volume,
        'Launch': handle_launch,
        'Action': handle_action,
    }
    handler = handlers.get(intent['name'])
    if handler is not None:
        handler(intent['slots'])

    # Always remove the message so it is not re-delivered.
    msg.delete()
def handle_power(slots):
    """Turn a single device (or everything) on or off."""
    device = slots['Device']['value'].replace('the ', '').replace(' ', '_')
    state = slots['OnOff']['value']

    if device == 'everything':
        handle_seq(sequences['all_off'])
        return

    send_signal('{}_{}.txt'.format(device, state))
def handle_volume(slots):
    """Send one or more receiver volume up/down IR signals.

    ``slots['UpDown']`` selects the direction; the optional ``Repeat`` slot
    gives the number of presses and defaults to a single press when it is
    absent, unset, or not a number.
    """
    signal = 'receiver_volume_{}.txt'.format(slots['UpDown']['value'])
    try:
        reps = int(slots['Repeat']['value'])
        sequence = [signal] * reps
    except (KeyError, TypeError, ValueError):
        # Alexa omits the 'Repeat' slot (KeyError) or leaves its value unset
        # (TypeError) when the user did not say a count; the original code
        # only caught ValueError and crashed in those cases.
        sequence = [signal]
    handle_seq(sequence)
def handle_launch(slots):
    """Start a named activity by replaying its signal sequence, if known."""
    activity = slots['Activity']['value'].replace(' ', '_')
    seq = sequences.get(activity)
    if seq is not None:
        handle_seq(seq)
def handle_action(slots):
    """Send a free-form sequence of button presses to one device."""
    device = slots['Device']['value'].replace('the ', '').replace(' ', '_')
    device = device.lower()

    # Collect the action words (ActionA..ActionF) that were actually spoken.
    actions = [
        slots[k]['value'].replace(' ', '_').lower()
        for k in keys
        if 'value' in slots[k] and slots[k]['value'] is not None
    ]

    handle_seq(['{}_{}.txt'.format(device, a) for a in actions])
# SQS queue that the Alexa skill backend pushes intents into.
url = 'https://queue.amazonaws.com/720549148055/echoRemote'
s = boto3.session.Session(region_name='us-east-1')
queue = s.resource('sqs').Queue(url)

# Poll forever; long-poll (20 s) keeps the request count low.
while True:
    try:
        log("Polling for messages...")
        messages = queue.receive_messages(WaitTimeSeconds=20)
        for message in messages:
            log("  Processing a message.")
            process_msg(message)
    except (ClientError, BotoCoreError) as e:
        # Transient AWS/network failure: back off, then resume polling.
        log("Request failed. Sleeping for a minute.")
        log(str(e))
        sleep(60)
945f7b1da8ba1f49eedd1458f2a47378bd4ccd27 | 1,173 | py | Python | flowtorch/bijectors/sigmoid.py | brianjo/flowtorch | 88dcc5cf0a7e1899de18d0fc88157690a1d189ac | [
"MIT"
] | 5 | 2020-12-09T22:05:16.000Z | 2020-12-17T23:43:15.000Z | flowtorch/bijectors/sigmoid.py | stefanwebb/flowtorch-old | 7dfc0ccb1ba43d0b0190611f27e966f55ef7784e | [
"MIT"
] | 12 | 2020-12-09T21:03:36.000Z | 2020-12-17T23:36:30.000Z | flowtorch/bijectors/sigmoid.py | stefanwebb/flowtorch-old | 7dfc0ccb1ba43d0b0190611f27e966f55ef7784e | [
"MIT"
] | null | null | null | # Copyright (c) FlowTorch Development Team. All Rights Reserved
# SPDX-License-Identifier: MIT
from typing import Optional
import torch
import torch.distributions.constraints as constraints
import torch.nn.functional as F
import flowtorch
from flowtorch.utils import clipped_sigmoid
class Sigmoid(flowtorch.Bijector):
    """Elementwise sigmoid bijector mapping real values into (0, 1)."""

    codomain = constraints.unit_interval

    def _forward(
        self,
        x: torch.Tensor,
        params: Optional[flowtorch.ParamsModule] = None,
        context: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # The clipped variant keeps outputs strictly inside (0, 1), which
        # stabilizes the inverse.
        return clipped_sigmoid(x)

    def _inverse(
        self,
        y: torch.Tensor,
        params: Optional[flowtorch.ParamsModule] = None,
        context: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Pull y away from the boundaries before applying logit(y).
        finfo = torch.finfo(y.dtype)
        clamped = torch.clamp(y, min=finfo.tiny, max=1.0 - finfo.eps)
        return torch.log(clamped) - torch.log1p(-clamped)

    def _log_abs_det_jacobian(
        self,
        x: torch.Tensor,
        y: torch.Tensor,
        params: Optional[flowtorch.ParamsModule] = None,
        context: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # d/dx sigmoid(x) = sigmoid(x) * sigmoid(-x), so the log-derivative
        # is -softplus(-x) - softplus(x).
        return -(F.softplus(x) + F.softplus(-x))
| 27.27907 | 63 | 0.649616 |
4131c0081ccef7fe6bf1466162270137efd76ed3 | 4,738 | py | Python | squamish/squamish/main.py | FabianHinder/drifting-features-analysis | ac02c2c872057f08f35730cc5a9ac394267ce5d4 | [
"MIT"
] | 2 | 2020-12-02T08:55:13.000Z | 2021-06-19T05:27:03.000Z | squamish/squamish/main.py | FabianHinder/drifting-features-analysis | ac02c2c872057f08f35730cc5a9ac394267ce5d4 | [
"MIT"
] | null | null | null | squamish/squamish/main.py | FabianHinder/drifting-features-analysis | ac02c2c872057f08f35730cc5a9ac394267ce5d4 | [
"MIT"
] | null | null | null | import logging
from copy import deepcopy
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.exceptions import NotFittedError
from sklearn.preprocessing import scale
from sklearn.utils import check_random_state
from squamish.algorithm import FeatureSorter
from squamish.utils import create_support_AR
from . import models
from .stat import Stats
logger = logging.getLogger(__name__)
class Main(BaseEstimator):
    """Feature-relevance estimator.

    Combines an all-relevant Boruta feature set with a minimal random-forest
    feature subset and then sorts the candidates into strongly relevant,
    weakly relevant and irrelevant classes.
    """

    def __init__(
        self,
        problem_type="classification",
        n_resampling=50,
        fpr=1e-6,
        random_state=None,
        n_jobs=-1,
        debug=True,
    ):
        """
        Parameters
        ----------
        problem_type : string
            "classification", "regression" or "ranking"
        n_resampling : int
            Number of samples used in statistics creation.
        fpr : float
            Parameter for t-statistic to control strictness of acceptance.
            Lower is more strict, higher allows more false positives.
        random_state : object or int
            Numpy random state object or int to set seed.
        n_jobs : int
            Number of parallel threads used.
            '-1' makes automatic choice depending on avail. CPUs.
        debug : bool
            Enable debug output.
        """
        self.n_jobs = n_jobs
        self.problem_type = problem_type
        self.n_resampling = n_resampling
        self.fpr = fpr
        self.random_state = check_random_state(random_state)
        self.debug = debug
        # Initialized here so that relevance_classes_ raises NotFittedError
        # (not AttributeError) when accessed before fit().
        self._relevance_classes = None
        if debug:
            logger.setLevel(logging.DEBUG)

    def _get_support_mask(self):
        """Return a boolean mask of the selected (relevant) features.

        Returns
        -------
        self.support_ : ndarray of bool
            True for relevant features (weakly or strongly relevant),
            False for irrelevant ones.
        """
        return self.support_

    def fit(self, X, y):
        """Determine the relevance class of every feature in X.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Data matrix; it is standardized internally.
        y : array-like of shape (n_samples,)
            Target vector.

        Returns
        -------
        self : object
            The fitted estimator (scikit-learn convention; enables chaining
            and use inside pipelines).
        """
        X = scale(X)
        n, d = X.shape

        # All-relevant candidate feature set using Boruta.
        m = models.MyBoruta(
            self.problem_type, random_state=self.random_state, n_jobs=self.n_jobs
        ).fit(X, y)
        fset = m.fset(X, y)
        AR = np.where(fset)[0]

        # Fit a simple Random Forest to get a minimal feature subset.
        m = models.RF(
            self.problem_type, random_state=self.random_state, n_jobs=self.n_jobs
        ).fit(X, y)
        self.score_ = m.score(X, y)
        logger.debug(f"RF score {self.score_}")
        logger.debug(f"importances {m.estimator.feature_importances_}")
        self.rfmodel = deepcopy(m)

        # Importance statistics used to decide feature set membership.
        self.stat_ = Stats(
            m,
            X,
            y,
            n_resampling=self.n_resampling,
            fpr=self.fpr,
            random_state=self.random_state,
            check_importances=True,
            debug=self.debug
        )

        fset = self.rfmodel.fset(X, y, self.stat_)
        fset = np.where(fset)
        MR = fset[0]
        logger.debug(f"Features from Boruta: {AR}")
        logger.debug(f"Features from RF: {MR}")
        if len(AR) < 1:
            raise Exception("No features were selected in AR model. Is model properly fit? (score ok?)")

        # Sort features iteratively into strongly (S) and weakly (W) sets.
        self.fsorter = FeatureSorter(
            self.problem_type,
            X,
            y,
            MR,
            AR,
            self.random_state,
            self.stat_,
            n_jobs=self.n_jobs,
            debug=self.debug
        )
        self.fsorter.check_each_feature()

        # Turn index sets into a support vector
        # (2 strongly relevant, 1 weakly relevant, 0 irrelevant).
        all_rel_support = create_support_AR(d, self.fsorter.S, self.fsorter.W)
        self._relevance_classes = all_rel_support
        logger.info(f"Relevance Classes: {self.relevance_classes_}")

        # Simple boolean vector treating all relevant features as one set
        # (1 relevant, 0 irrelevant).
        self.support_ = self._relevance_classes > 0
        return self

    @property
    def relevance_classes_(self):
        """Returns vector of relevance classes. 0 = irrelevant, 1 = weakly relevant, 2 = strongly relevant"""
        if self._relevance_classes is None:
            raise NotFittedError("Call fit first.")
        return self._relevance_classes

    def score(self, X, y):
        """
        Score of internal random forest model.

        Parameters
        ----------
        X : matrix
            Data matrix
        y : vector
            target vector

        Returns
        -------
        score:
            score on data matrix
        """
        return self.rfmodel.score(X, y)

    def predict(self, X):
        """Predict targets for X using the internal random forest model."""
        return self.rfmodel.predict(X)
| 30.766234 | 111 | 0.588645 |
dfd35a15e58332d7e9d3a89539f6ffe4e552fb1e | 1,279 | py | Python | xlsxwriter/test/comparison/test_cond_format02.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_cond_format02.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_cond_format02.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2022, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.

    """

    def setUp(self):
        self.set_filename('cond_format02.xlsx')

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file with conditional formatting."""

        workbook = Workbook(self.got_filename)
        worksheet = workbook.add_worksheet()

        # Write 10, 20, 30, 40 down column A (rows 0-3).
        for row, value in enumerate((10, 20, 30, 40)):
            worksheet.write(row, 0, value)

        # Apply a conditional format with a null (None) format object.
        worksheet.conditional_format('A1',
                                     {'type': 'cell',
                                      'format': None,
                                      'criteria': '<',
                                      'value': 5
                                      })

        workbook.close()

        self.assertExcelEqual()
| 27.212766 | 88 | 0.520719 |
0c403ac3c6eac6252468c7d9a0b1ef40d3930797 | 2,028 | py | Python | corehq/ex-submodules/pillowtop/dao/couch.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/ex-submodules/pillowtop/dao/couch.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | corehq/ex-submodules/pillowtop/dao/couch.py | dborowiecki/commcare-hq | f2f4fa67faec09040a98502f5657444075b63f2e | [
"BSD-3-Clause"
] | null | null | null | import six
from couchdbkit import ResourceNotFound
from dimagi.utils.couch.database import iter_docs
from .interface import DocumentStore
from pillowtop.dao.exceptions import DocumentMissingError, DocumentDeletedError, DocumentNotFoundError
ID_CHUNK_SIZE = 10000  # batch size for paginated document-id iteration
class CouchDocumentStore(DocumentStore):
    """DocumentStore implementation backed by a CouchDB database."""

    def __init__(self, couch_db, domain=None, doc_type=None):
        self._couch_db = couch_db
        self.domain = domain
        self.doc_type = doc_type

    def get_document(self, doc_id):
        """Fetch a document, translating couchdbkit errors to store errors."""
        try:
            return self._couch_db.get(doc_id)
        except ResourceNotFound as err:
            # couchdbkit reports 'missing' for ids that never existed;
            # any other reason means the document was deleted.
            if six.text_type(err) == 'missing':
                raise DocumentMissingError()
            raise DocumentDeletedError()

    def save_document(self, doc_id, document):
        document['_id'] = doc_id
        self._couch_db.save_doc(document)

    def delete_document(self, doc_id):
        try:
            return self._couch_db.delete_doc(doc_id)
        except ResourceNotFound:
            raise DocumentNotFoundError()

    def iter_document_ids(self, last_id=None):
        """Iterate doc ids for the store's domain/doc_type, resuming after
        ``last_id`` when given."""
        from corehq.apps.domain.dbaccessors import iterate_doc_ids_in_domain_by_type

        if not (self.domain and self.doc_type):
            raise ValueError('This function requires a domain and doc_type set!')

        start_key = None
        if last_id:
            # Resume pagination: key on (domain, doc_type[, doc date]) of the
            # last document that was already processed.
            previous = self.get_document(last_id)
            start_key = [self.domain, self.doc_type]
            if self.doc_type in _DATE_MAP:
                start_key.append(previous[_DATE_MAP[self.doc_type]])

        return iterate_doc_ids_in_domain_by_type(
            self.domain,
            self.doc_type,
            chunk_size=ID_CHUNK_SIZE,
            database=self._couch_db,
            startkey=start_key,
            startkey_docid=last_id,
        )

    def iter_documents(self, ids):
        return iter_docs(self._couch_db, ids, chunksize=500)
# Doc type -> name of the date field used to resume paginated iteration.
_DATE_MAP = {
    'XFormInstance': 'received_on',
    'CommCareCase': 'opened_on',
}
| 30.727273 | 102 | 0.656805 |
6a31bc0228e04f005a50e9f3543a459c724e929b | 578 | py | Python | participant_profile/migrations/0037_alter_studentfile_ijazah.py | zidandff/primaseru | a14fa7326098af220e0671c3bf3870b4016ab4bf | [
"MIT"
] | null | null | null | participant_profile/migrations/0037_alter_studentfile_ijazah.py | zidandff/primaseru | a14fa7326098af220e0671c3bf3870b4016ab4bf | [
"MIT"
] | null | null | null | participant_profile/migrations/0037_alter_studentfile_ijazah.py | zidandff/primaseru | a14fa7326098af220e0671c3bf3870b4016ab4bf | [
"MIT"
] | 2 | 2021-08-10T09:09:58.000Z | 2021-08-10T10:54:51.000Z | # Generated by Django 3.2.5 on 2021-09-21 02:13
from django.db import migrations, models
import participant_profile.models
class Migration(migrations.Migration):
dependencies = [
('participant_profile', '0036_auto_20210921_0912'),
]
operations = [
migrations.AlterField(
model_name='studentfile',
name='ijazah',
field=models.FileField(blank=True, help_text='Ijazah SMP/MTS dapat menyusul.', null=True, upload_to=participant_profile.models.user_directory_path, verbose_name='Ijazah SMP/MTS'),
),
]
| 28.9 | 191 | 0.683391 |
d2653607dca3849a1920a884a9c7a429ceabbcb8 | 13,914 | py | Python | orio/module/loop/ast_lib/common_lib.py | parsabee/Orio | 64de0f7ee422483b60a9f02793472e20e864aa08 | [
"MIT"
] | 24 | 2015-01-26T03:14:19.000Z | 2021-09-27T23:10:12.000Z | orio/module/loop/ast_lib/common_lib.py | parsabee/Orio | 64de0f7ee422483b60a9f02793472e20e864aa08 | [
"MIT"
] | 30 | 2015-04-17T18:14:27.000Z | 2021-05-30T15:01:47.000Z | orio/module/loop/ast_lib/common_lib.py | parsabee/Orio | 64de0f7ee422483b60a9f02793472e20e864aa08 | [
"MIT"
] | 20 | 2015-02-11T08:20:19.000Z | 2022-01-15T17:55:00.000Z | #
# Contain a class that provides a set of common library functions for AST processing
#
import sys
import orio.module.loop.ast
import orio.main.util.globals as g
from functools import reduce
#-----------------------------------------------------------
class CommonLib:
'''A common library set for AST processing'''
def __init__(self):
'''To instantiate a common library object'''
pass
#-------------------------------------------------------
    def replaceIdent(self, tnode, iname_from, iname_to):
        '''Replace the names of all matching identifiers with the given name.

        Walks the AST rooted at tnode, renaming every IdentExp whose name
        equals iname_from to iname_to. The tree is mutated in place and the
        (possibly updated) node is returned. TransformStmt nodes and any
        unrecognized node type are reported as internal errors via g.err.
        '''
        # Literals contain no identifiers; return unchanged.
        if isinstance(tnode, orio.module.loop.ast.NumLitExp):
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.StringLitExp):
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.IdentExp):
            # The actual rename happens here; all other cases just recurse.
            if tnode.name == iname_from:
                tnode.name = iname_to
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.ArrayRefExp):
            tnode.exp = self.replaceIdent(tnode.exp, iname_from, iname_to)
            tnode.sub_exp = self.replaceIdent(tnode.sub_exp, iname_from, iname_to)
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.FunCallExp):
            tnode.exp = self.replaceIdent(tnode.exp, iname_from, iname_to)
            tnode.args = [self.replaceIdent(a, iname_from, iname_to) for a in tnode.args]
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.UnaryExp):
            tnode.exp = self.replaceIdent(tnode.exp, iname_from, iname_to)
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.BinOpExp):
            tnode.lhs = self.replaceIdent(tnode.lhs, iname_from, iname_to)
            tnode.rhs = self.replaceIdent(tnode.rhs, iname_from, iname_to)
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.ParenthExp):
            tnode.exp = self.replaceIdent(tnode.exp, iname_from, iname_to)
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.ExpStmt):
            # exp may be None for an empty statement.
            if tnode.exp:
                tnode.exp = self.replaceIdent(tnode.exp, iname_from, iname_to)
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.CompStmt):
            tnode.stmts = [self.replaceIdent(s, iname_from, iname_to) for s in tnode.stmts]
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.IfStmt):
            tnode.test = self.replaceIdent(tnode.test, iname_from, iname_to)
            tnode.true_stmt = self.replaceIdent(tnode.true_stmt, iname_from, iname_to)
            if tnode.false_stmt:
                tnode.false_stmt = self.replaceIdent(tnode.false_stmt, iname_from, iname_to)
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.ForStmt):
            # init/test/iter are each optional in a C-style for loop.
            if tnode.init:
                tnode.init = self.replaceIdent(tnode.init, iname_from, iname_to)
            if tnode.test:
                tnode.test = self.replaceIdent(tnode.test, iname_from, iname_to)
            if tnode.iter:
                tnode.iter = self.replaceIdent(tnode.iter, iname_from, iname_to)
            tnode.stmt = self.replaceIdent(tnode.stmt, iname_from, iname_to)
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.TransformStmt):
            # Transform statements are not expected at this stage.
            g.err('orio.module.loop.ast_lib.common_lib internal error: unexpected AST type: "%s"' % tnode.__class__.__name__)

        elif isinstance(tnode, orio.module.loop.ast.NewAST):
            return tnode

        elif isinstance(tnode, orio.module.loop.ast.Comment):
            return tnode

        else:
            g.err('orio.module.loop.ast_lib.common_lib internal error: unexpected AST type: "%s"' % tnode.__class__.__name__)
#-------------------------------------------------------
def containIdentName(self, exp, iname):
'''
Check if the given expression contains an identifier whose name matches to the given name
'''
if exp == None:
return False
if isinstance(exp, orio.module.loop.ast.NumLitExp):
return False
elif isinstance(exp, orio.module.loop.ast.StringLitExp):
return False
elif isinstance(exp, orio.module.loop.ast.IdentExp):
return exp.name == iname
elif isinstance(exp, orio.module.loop.ast.ArrayRefExp):
return self.containIdentName(exp.exp, iname) or self.containIdentName(exp.sub_exp, iname)
elif isinstance(exp, orio.module.loop.ast.FunCallExp):
has_match = reduce(lambda x,y: x or y,
[self.containIdentName(a, iname) for a in exp.args],
False)
return self.containIdentName(exp.exp, iname) or has_match
elif isinstance(exp, orio.module.loop.ast.UnaryExp):
return self.containIdentName(exp.exp, iname)
elif isinstance(exp, orio.module.loop.ast.BinOpExp):
return self.containIdentName(exp.lhs, iname) or self.containIdentName(exp.rhs, iname)
elif isinstance(exp, orio.module.loop.ast.ParenthExp):
return self.containIdentName(exp.exp, iname)
elif isinstance(exp, orio.module.loop.ast.NewAST):
return False
elif isinstance(exp, orio.module.loop.ast.Comment):
return False
else:
g.err('orio.module.loop.ast_lib.common_lib internal error: unexpected AST type: "%s"' % exp.__class__.__name__)
#-------------------------------------------------------
def isComplexExp(self, exp):
'''
To determine if the given expression is complex. Simple expressions contain only a variable
or a number or a string.
'''
if isinstance(exp, orio.module.loop.ast.NumLitExp):
return False
# a rare case
elif isinstance(exp, orio.module.loop.ast.StringLitExp):
return False
elif isinstance(exp, orio.module.loop.ast.IdentExp):
return False
# a rare case
elif isinstance(exp, orio.module.loop.ast.ArrayRefExp):
return True
elif isinstance(exp, orio.module.loop.ast.FunCallExp):
return True
elif isinstance(exp, orio.module.loop.ast.UnaryExp):
return self.isComplexExp(exp.exp)
elif isinstance(exp, orio.module.loop.ast.BinOpExp):
return True
elif isinstance(exp, orio.module.loop.ast.ParenthExp):
return self.isComplexExp(exp.exp)
# a rare case
elif isinstance(exp, orio.module.loop.ast.NewAST):
return True
elif isinstance(exp, orio.module.loop.ast.Comment):
return False
else:
g.err('orio.module.loop.ast_lib.common_lib internal error: unexpected AST type: "%s"' % exp.__class__.__name__)
#------------------------------------------------------------------------------------------------------------------
    def collectNode(self, f, n):
        ''' Collect within the given node a list using the given function: pre-order traversal.

        Applies f to the node itself first, then concatenates the results of
        recursing into its children; f must return a list.
        '''
        if isinstance(n, orio.module.loop.ast.NumLitExp):
            return f(n)
        elif isinstance(n, orio.module.loop.ast.StringLitExp):
            return f(n)
        elif isinstance(n, orio.module.loop.ast.IdentExp):
            return f(n)
        elif isinstance(n, orio.module.loop.ast.ArrayRefExp):
            return f(n) + self.collectNode(f, n.exp) + self.collectNode(f, n.sub_exp)
        elif isinstance(n, orio.module.loop.ast.FunCallExp):
            # Fold the per-argument collections onto the callee's collection.
            return reduce(lambda x,y: x + y,
                          [self.collectNode(f, a) for a in n.args],
                          f(n))
        elif isinstance(n, orio.module.loop.ast.CastExpr):
            return f(n) + self.collectNode(f, n.expr)
        elif isinstance(n, orio.module.loop.ast.UnaryExp):
            return f(n) + self.collectNode(f, n.exp)
        elif isinstance(n, orio.module.loop.ast.BinOpExp):
            return f(n) + self.collectNode(f, n.lhs) + self.collectNode(f, n.rhs)
        elif isinstance(n, orio.module.loop.ast.ParenthExp):
            return f(n) + self.collectNode(f, n.exp)
        elif isinstance(n, orio.module.loop.ast.Comment):
            # NOTE(review): recursing into n.text looks suspicious — if text
            # is a plain string (not an AST node) this falls through to the
            # error branch below. TODO confirm against the orio AST classes.
            return f(n) + self.collectNode(f, n.text)
        elif isinstance(n, orio.module.loop.ast.ExpStmt):
            return f(n) + self.collectNode(f, n.exp)
        elif isinstance(n, orio.module.loop.ast.GotoStmt):
            # NOTE(review): same concern as Comment — n.target may be a label
            # string rather than an AST node; verify before relying on this.
            return f(n) + self.collectNode(f, n.target)
        elif isinstance(n, orio.module.loop.ast.VarDecl):
            return f(n)
        elif isinstance(n, orio.module.loop.ast.VarDeclInit):
            return f(n)
        elif isinstance(n, orio.module.loop.ast.CompStmt):
            return reduce(lambda x,y: x + y,
                          [self.collectNode(f, a) for a in n.stmts],
                          f(n))
        elif isinstance(n, orio.module.loop.ast.IfStmt):
            # The else-branch is optional.
            result = self.collectNode(f, n.test) + self.collectNode(f, n.true_stmt)
            if n.false_stmt:
                result += self.collectNode(f, n.false_stmt)
            return result
        elif isinstance(n, orio.module.loop.ast.ForStmt):
            # NOTE(review): unlike the other statement cases, f(n) is not
            # included for the ForStmt node itself — confirm this is intended.
            result = []
            if n.init:
                result += self.collectNode(f, n.init)
            if n.test:
                result += self.collectNode(f, n.test)
            if n.iter:
                result += self.collectNode(f, n.iter)
            result += self.collectNode(f, n.stmt)
            return result
        elif isinstance(n, orio.module.loop.ast.AssignStmt):
            return f(n) + self.collectNode(f, n.var) + self.collectNode(f, n.exp)
        elif isinstance(n, orio.module.loop.ast.TransformStmt):
            return f(n) + self.collectNode(f, n.name) + self.collectNode(f, n.args) + self.collectNode(f, n.stmt)
        else:
            g.err('orio.module.loop.ast_lib.common_lib.collectNode: unexpected AST type: "%s"' % n.__class__.__name__)
#-------------------------------------------------------
    def rewriteNode(self, r, n):
        '''Rewrite AST node n (and its children) with rewrite function r.

        Traversal is post-order and in-place: each child attribute of n is
        replaced by its rewritten form first, then r is applied to n itself
        and r's result is returned to the caller.  Leaf node types get r
        applied directly.

        NOTE(review): the order of the isinstance branches matters if any of
        these AST classes subclass one another (cf. collectNode above, which
        lists VarDecl before VarDeclInit) -- do not reorder them.
        '''
        # Leaf nodes: nothing to recurse into, rewrite the node itself.
        if isinstance(n, orio.module.loop.ast.NumLitExp):
            return r(n)
        elif isinstance(n, orio.module.loop.ast.StringLitExp):
            return r(n)
        elif isinstance(n, orio.module.loop.ast.IdentExp):
            return r(n)
        elif isinstance(n, orio.module.loop.ast.VarDecl):
            return r(n)
        # Composite nodes: rewrite children first (post-order), then the node.
        elif isinstance(n, orio.module.loop.ast.ArrayRefExp):
            n.exp = self.rewriteNode(r, n.exp)
            n.sub_exp = self.rewriteNode(r, n.sub_exp)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.FunCallExp):
            n.exp = self.rewriteNode(r, n.exp)
            n.args = [self.rewriteNode(r, x) for x in n.args]
            return r(n)
        elif isinstance(n, orio.module.loop.ast.UnaryExp):
            n.exp = self.rewriteNode(r, n.exp)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.BinOpExp):
            n.lhs = self.rewriteNode(r, n.lhs)
            n.rhs = self.rewriteNode(r, n.rhs)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.ParenthExp):
            n.exp = self.rewriteNode(r, n.exp)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.Comment):
            # NOTE(review): if Comment.text is a plain string (not an AST
            # node), this recursion falls through to the error branch below --
            # confirm Comment.text's type against the ast module.
            n.text = self.rewriteNode(r, n.text)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.ExpStmt):
            n.exp = self.rewriteNode(r, n.exp)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.GotoStmt):
            n.target = self.rewriteNode(r, n.target)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.CompStmt):
            n.stmts = [self.rewriteNode(r, x) for x in n.stmts]
            return r(n)
        elif isinstance(n, orio.module.loop.ast.IfStmt):
            n.test = self.rewriteNode(r, n.test)
            n.true_stmt = self.rewriteNode(r, n.true_stmt)
            if n.false_stmt:
                n.false_stmt = self.rewriteNode(r, n.false_stmt)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.ForStmt):
            # init/test/iter are optional; only rewrite the ones present.
            if n.init:
                n.init = self.rewriteNode(r, n.init)
            if n.test:
                n.test = self.rewriteNode(r, n.test)
            if n.iter:
                n.iter = self.rewriteNode(r, n.iter)
            n.stmt = self.rewriteNode(r, n.stmt)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.AssignStmt):
            n.var = self.rewriteNode(r, n.var)
            n.exp = self.rewriteNode(r, n.exp)
            return r(n)
        elif isinstance(n, orio.module.loop.ast.TransformStmt):
            n.name = self.rewriteNode(r, n.name)
            n.args = self.rewriteNode(r, n.args)
            n.stmt = self.rewriteNode(r, n.stmt)
            return r(n)
        else:
            # Unknown node type: report through the global error handler.
            g.err('orio.module.loop.ast_lib.common_lib.rewriteNode: unexpected AST type: "%s"' % n.__class__.__name__)
#-----------------------------------------------------------------------------------------------------------------------
| 39.416431 | 126 | 0.549662 |
10f7cca5047346f34ad6d0cbb3a98441a903e025 | 65,877 | py | Python | Lib/test/test_coroutines.py | 6r9/cpython | 69524821a87251b7aee966f6e46b3810ff5aaa64 | [
"PSF-2.0"
] | 2 | 2019-01-04T23:21:29.000Z | 2020-02-26T23:30:31.000Z | Lib/test/test_coroutines.py | 6r9/cpython | 69524821a87251b7aee966f6e46b3810ff5aaa64 | [
"PSF-2.0"
] | null | null | null | Lib/test/test_coroutines.py | 6r9/cpython | 69524821a87251b7aee966f6e46b3810ff5aaa64 | [
"PSF-2.0"
] | 2 | 2020-12-03T15:10:21.000Z | 2022-02-05T17:12:11.000Z | import contextlib
import copy
import inspect
import pickle
import re
import sys
import types
import unittest
import warnings
from test import support
from test.support.script_helper import assert_python_ok
class AsyncYieldFrom:
    """Awaitable that delegates to an arbitrary iterable via ``yield from``.

    Awaiting an instance yields every item of *obj*; if *obj* is a generator,
    its return value becomes the result of the ``await`` expression.
    (Reconstructed: the dumped source had all indentation stripped and did
    not parse.)
    """

    def __init__(self, obj):
        # The iterable/generator to delegate to when awaited.
        self.obj = obj

    def __await__(self):
        yield from self.obj
class AsyncYield:
    """Awaitable that yields exactly one value when awaited.

    (Reconstructed: the dumped source had all indentation stripped and did
    not parse.)
    """

    def __init__(self, value):
        # The single value to yield out of __await__.
        self.value = value

    def __await__(self):
        yield self.value
def run_async(coro):
    """Drive a coroutine or generator to completion by sending None repeatedly.

    Returns a 2-tuple ``(yielded, result)`` where ``yielded`` is the list of
    every value the coroutine yielded (in order) and ``result`` is its return
    value (None for a bare ``return``/exhaustion).
    (Reconstructed: the dumped source had all indentation stripped and did
    not parse.)
    """
    assert coro.__class__ in {types.GeneratorType, types.CoroutineType}

    buffer = []    # values yielded by the coroutine, in order
    result = None  # the StopIteration payload, i.e. the return value
    while True:
        try:
            buffer.append(coro.send(None))
        except StopIteration as ex:
            # A plain `return` carries no args and maps to None.
            result = ex.args[0] if ex.args else None
            break
    return buffer, result
def run_async__await__(coro):
    """Drive a coroutine to completion through its ``__await__()`` iterator.

    Alternates between ``aw.send(None)`` (even steps) and ``next(aw)`` (odd
    steps) to exercise both advancement protocols of the await-iterator.
    Returns ``(yielded, result)`` exactly like :func:`run_async`.
    (Reconstructed: the dumped source had all indentation stripped and did
    not parse.)
    """
    assert coro.__class__ is types.CoroutineType

    aw = coro.__await__()
    buffer = []    # values produced by the await-iterator
    result = None  # the coroutine's return value
    i = 0
    while True:
        try:
            if i % 2:
                buffer.append(next(aw))
            else:
                buffer.append(aw.send(None))
            i += 1
        except StopIteration as ex:
            result = ex.args[0] if ex.args else None
            break
    return buffer, result
@contextlib.contextmanager
def silence_coro_gc():
    """Context manager that silences warnings raised while collecting garbage.

    Used to suppress the "coroutine ... was never awaited" RuntimeWarning:
    the block's body runs with all warnings ignored, then a forced garbage
    collection reclaims any dangling coroutines inside the silenced scope.
    (Reconstructed: the dumped source had all indentation stripped and did
    not parse.)
    """
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        yield
        # Collect while the filter is still active so GC-time warnings
        # (e.g. un-awaited coroutine finalizers) are swallowed too.
        support.gc_collect()
class AsyncBadSyntaxTest(unittest.TestCase):
def test_badsyntax_1(self):
samples = [
"""def foo():
await something()
""",
"""await something()""",
"""async def foo():
yield from []
""",
"""async def foo():
await await fut
""",
"""async def foo(a=await something()):
pass
""",
"""async def foo(a:await something()):
pass
""",
"""async def foo():
def bar():
[i async for i in els]
""",
"""async def foo():
def bar():
[await i for i in els]
""",
"""async def foo():
def bar():
[i for i in els
async for b in els]
""",
"""async def foo():
def bar():
[i for i in els
for c in b
async for b in els]
""",
"""async def foo():
def bar():
[i for i in els
async for b in els
for c in b]
""",
"""async def foo():
def bar():
[i for i in els
for b in await els]
""",
"""async def foo():
def bar():
[i for i in els
for b in els
if await b]
""",
"""async def foo():
def bar():
[i for i in await els]
""",
"""async def foo():
def bar():
[i for i in els if await i]
""",
"""def bar():
[i async for i in els]
""",
"""def bar():
{i: i async for i in els}
""",
"""def bar():
{i async for i in els}
""",
"""def bar():
[await i for i in els]
""",
"""def bar():
[i for i in els
async for b in els]
""",
"""def bar():
[i for i in els
for c in b
async for b in els]
""",
"""def bar():
[i for i in els
async for b in els
for c in b]
""",
"""def bar():
[i for i in els
for b in await els]
""",
"""def bar():
[i for i in els
for b in els
if await b]
""",
"""def bar():
[i for i in await els]
""",
"""def bar():
[i for i in els if await i]
""",
"""async def foo():
await
""",
"""async def foo():
def bar(): pass
await = 1
""",
"""async def foo():
def bar(): pass
await = 1
""",
"""async def foo():
def bar(): pass
if 1:
await = 1
""",
"""def foo():
async def bar(): pass
if 1:
await a
""",
"""def foo():
async def bar(): pass
await a
""",
"""def foo():
def baz(): pass
async def bar(): pass
await a
""",
"""def foo():
def baz(): pass
# 456
async def bar(): pass
# 123
await a
""",
"""async def foo():
def baz(): pass
# 456
async def bar(): pass
# 123
await = 2
""",
"""def foo():
def baz(): pass
async def bar(): pass
await a
""",
"""async def foo():
def baz(): pass
async def bar(): pass
await = 2
""",
"""async def foo():
def async(): pass
""",
"""async def foo():
def await(): pass
""",
"""async def foo():
def bar():
await
""",
"""async def foo():
return lambda async: await
""",
"""async def foo():
return lambda a: await
""",
"""await a()""",
"""async def foo(a=await b):
pass
""",
"""async def foo(a:await b):
pass
""",
"""def baz():
async def foo(a=await b):
pass
""",
"""async def foo(async):
pass
""",
"""async def foo():
def bar():
def baz():
async = 1
""",
"""async def foo():
def bar():
def baz():
pass
async = 1
""",
"""def foo():
async def bar():
async def baz():
pass
def baz():
42
async = 1
""",
"""async def foo():
def bar():
def baz():
pass\nawait foo()
""",
"""def foo():
def bar():
async def baz():
pass\nawait foo()
""",
"""async def foo(await):
pass
""",
"""def foo():
async def bar(): pass
await a
""",
"""def foo():
async def bar():
pass\nawait a
"""]
for code in samples:
with self.subTest(code=code), self.assertRaises(SyntaxError):
compile(code, "<test>", "exec")
def test_badsyntax_2(self):
samples = [
"""def foo():
await = 1
""",
"""class Bar:
def async(): pass
""",
"""class Bar:
async = 1
""",
"""class async:
pass
""",
"""class await:
pass
""",
"""import math as await""",
"""def async():
pass""",
"""def foo(*, await=1):
pass"""
"""async = 1""",
"""print(await=1)"""
]
for code in samples:
with self.subTest(code=code), self.assertRaises(SyntaxError):
compile(code, "<test>", "exec")
def test_badsyntax_3(self):
with self.assertRaises(SyntaxError):
compile("async = 1", "<test>", "exec")
def test_badsyntax_4(self):
samples = [
'''def foo(await):
async def foo(): pass
async def foo():
pass
return await + 1
''',
'''def foo(await):
async def foo(): pass
async def foo(): pass
return await + 1
''',
'''def foo(await):
async def foo(): pass
async def foo(): pass
return await + 1
''',
'''def foo(await):
"""spam"""
async def foo(): \
pass
# 123
async def foo(): pass
# 456
return await + 1
''',
'''def foo(await):
def foo(): pass
def foo(): pass
async def bar(): return await_
await_ = await
try:
bar().send(None)
except StopIteration as ex:
return ex.args[0] + 1
'''
]
for code in samples:
with self.subTest(code=code), self.assertRaises(SyntaxError):
compile(code, "<test>", "exec")
class TokenizerRegrTest(unittest.TestCase):
    """Tokenizer regression test: many one-line defs followed by `async def`.

    (Reconstructed: the dumped source had all indentation stripped and did
    not parse.)
    """

    def test_oneline_defs(self):
        buf = []
        for i in range(500):
            buf.append('def i{i}(): return {i}'.format(i=i))
        buf = '\n'.join(buf)

        # Test that 500 consequent, one-line defs is OK
        ns = {}
        exec(buf, ns, ns)
        self.assertEqual(ns['i499'](), 499)

        # Test that 500 consequent, one-line defs *and*
        # one 'async def' following them is OK
        buf += '\nasync def foo():\n return'
        ns = {}
        exec(buf, ns, ns)
        self.assertEqual(ns['i499'](), 499)
        self.assertTrue(inspect.iscoroutinefunction(ns['foo']))
class CoroutineTest(unittest.TestCase):
def test_gen_1(self):
def gen(): yield
self.assertFalse(hasattr(gen, '__await__'))
def test_func_1(self):
async def foo():
return 10
f = foo()
self.assertIsInstance(f, types.CoroutineType)
self.assertTrue(bool(foo.__code__.co_flags & inspect.CO_COROUTINE))
self.assertFalse(bool(foo.__code__.co_flags & inspect.CO_GENERATOR))
self.assertTrue(bool(f.cr_code.co_flags & inspect.CO_COROUTINE))
self.assertFalse(bool(f.cr_code.co_flags & inspect.CO_GENERATOR))
self.assertEqual(run_async(f), ([], 10))
self.assertEqual(run_async__await__(foo()), ([], 10))
def bar(): pass
self.assertFalse(bool(bar.__code__.co_flags & inspect.CO_COROUTINE))
def test_func_2(self):
async def foo():
raise StopIteration
with self.assertRaisesRegex(
RuntimeError, "coroutine raised StopIteration"):
run_async(foo())
def test_func_3(self):
async def foo():
raise StopIteration
coro = foo()
self.assertRegex(repr(coro), '^<coroutine object.* at 0x.*>$')
coro.close()
def test_func_4(self):
async def foo():
raise StopIteration
coro = foo()
check = lambda: self.assertRaisesRegex(
TypeError, "'coroutine' object is not iterable")
with check():
list(coro)
with check():
tuple(coro)
with check():
sum(coro)
with check():
iter(coro)
with check():
for i in coro:
pass
with check():
[i for i in coro]
coro.close()
def test_func_5(self):
@types.coroutine
def bar():
yield 1
async def foo():
await bar()
check = lambda: self.assertRaisesRegex(
TypeError, "'coroutine' object is not iterable")
coro = foo()
with check():
for el in coro:
pass
coro.close()
# the following should pass without an error
for el in bar():
self.assertEqual(el, 1)
self.assertEqual([el for el in bar()], [1])
self.assertEqual(tuple(bar()), (1,))
self.assertEqual(next(iter(bar())), 1)
def test_func_6(self):
@types.coroutine
def bar():
yield 1
yield 2
async def foo():
await bar()
f = foo()
self.assertEqual(f.send(None), 1)
self.assertEqual(f.send(None), 2)
with self.assertRaises(StopIteration):
f.send(None)
def test_func_7(self):
async def bar():
return 10
coro = bar()
def foo():
yield from coro
with self.assertRaisesRegex(
TypeError,
"cannot 'yield from' a coroutine object in "
"a non-coroutine generator"):
list(foo())
coro.close()
def test_func_8(self):
@types.coroutine
def bar():
return (yield from coro)
async def foo():
return 'spam'
coro = foo()
self.assertEqual(run_async(bar()), ([], 'spam'))
coro.close()
def test_func_9(self):
async def foo():
pass
with self.assertWarnsRegex(
RuntimeWarning,
r"coroutine '.*test_func_9.*foo' was never awaited"):
foo()
support.gc_collect()
with self.assertWarnsRegex(
RuntimeWarning,
r"coroutine '.*test_func_9.*foo' was never awaited"):
with self.assertRaises(TypeError):
# See bpo-32703.
for _ in foo():
pass
support.gc_collect()
def test_func_10(self):
N = 0
@types.coroutine
def gen():
nonlocal N
try:
a = yield
yield (a ** 2)
except ZeroDivisionError:
N += 100
raise
finally:
N += 1
async def foo():
await gen()
coro = foo()
aw = coro.__await__()
self.assertIs(aw, iter(aw))
next(aw)
self.assertEqual(aw.send(10), 100)
self.assertEqual(N, 0)
aw.close()
self.assertEqual(N, 1)
coro = foo()
aw = coro.__await__()
next(aw)
with self.assertRaises(ZeroDivisionError):
aw.throw(ZeroDivisionError, None, None)
self.assertEqual(N, 102)
def test_func_11(self):
async def func(): pass
coro = func()
# Test that PyCoro_Type and _PyCoroWrapper_Type types were properly
# initialized
self.assertIn('__await__', dir(coro))
self.assertIn('__iter__', dir(coro.__await__()))
self.assertIn('coroutine_wrapper', repr(coro.__await__()))
coro.close() # avoid RuntimeWarning
def test_func_12(self):
async def g():
i = me.send(None)
await foo
me = g()
with self.assertRaisesRegex(ValueError,
"coroutine already executing"):
me.send(None)
def test_func_13(self):
async def g():
pass
coro = g()
with self.assertRaisesRegex(
TypeError,
"can't send non-None value to a just-started coroutine"):
coro.send('spam')
coro.close()
def test_func_14(self):
@types.coroutine
def gen():
yield
async def coro():
try:
await gen()
except GeneratorExit:
await gen()
c = coro()
c.send(None)
with self.assertRaisesRegex(RuntimeError,
"coroutine ignored GeneratorExit"):
c.close()
def test_func_15(self):
# See http://bugs.python.org/issue25887 for details
async def spammer():
return 'spam'
async def reader(coro):
return await coro
spammer_coro = spammer()
with self.assertRaisesRegex(StopIteration, 'spam'):
reader(spammer_coro).send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
reader(spammer_coro).send(None)
def test_func_16(self):
# See http://bugs.python.org/issue25887 for details
@types.coroutine
def nop():
yield
async def send():
await nop()
return 'spam'
async def read(coro):
await nop()
return await coro
spammer = send()
reader = read(spammer)
reader.send(None)
reader.send(None)
with self.assertRaisesRegex(Exception, 'ham'):
reader.throw(Exception('ham'))
reader = read(spammer)
reader.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
reader.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
reader.throw(Exception('wat'))
def test_func_17(self):
# See http://bugs.python.org/issue25887 for details
async def coroutine():
return 'spam'
coro = coroutine()
with self.assertRaisesRegex(StopIteration, 'spam'):
coro.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
coro.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
coro.throw(Exception('wat'))
# Closing a coroutine shouldn't raise any exception even if it's
# already closed/exhausted (similar to generators)
coro.close()
coro.close()
def test_func_18(self):
# See http://bugs.python.org/issue25887 for details
async def coroutine():
return 'spam'
coro = coroutine()
await_iter = coro.__await__()
it = iter(await_iter)
with self.assertRaisesRegex(StopIteration, 'spam'):
it.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
it.send(None)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
# Although the iterator protocol requires iterators to
# raise another StopIteration here, we don't want to do
# that. In this particular case, the iterator will raise
# a RuntimeError, so that 'yield from' and 'await'
# expressions will trigger the error, instead of silently
# ignoring the call.
next(it)
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
it.throw(Exception('wat'))
with self.assertRaisesRegex(RuntimeError,
'cannot reuse already awaited coroutine'):
it.throw(Exception('wat'))
# Closing a coroutine shouldn't raise any exception even if it's
# already closed/exhausted (similar to generators)
it.close()
it.close()
def test_func_19(self):
CHK = 0
@types.coroutine
def foo():
nonlocal CHK
yield
try:
yield
except GeneratorExit:
CHK += 1
async def coroutine():
await foo()
coro = coroutine()
coro.send(None)
coro.send(None)
self.assertEqual(CHK, 0)
coro.close()
self.assertEqual(CHK, 1)
for _ in range(3):
# Closing a coroutine shouldn't raise any exception even if it's
# already closed/exhausted (similar to generators)
coro.close()
self.assertEqual(CHK, 1)
def test_coro_wrapper_send_tuple(self):
async def foo():
return (10,)
result = run_async__await__(foo())
self.assertEqual(result, ([], (10,)))
def test_coro_wrapper_send_stop_iterator(self):
async def foo():
return StopIteration(10)
result = run_async__await__(foo())
self.assertIsInstance(result[1], StopIteration)
self.assertEqual(result[1].value, 10)
def test_cr_await(self):
@types.coroutine
def a():
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_RUNNING)
self.assertIsNone(coro_b.cr_await)
yield
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_RUNNING)
self.assertIsNone(coro_b.cr_await)
async def c():
await a()
async def b():
self.assertIsNone(coro_b.cr_await)
await c()
self.assertIsNone(coro_b.cr_await)
coro_b = b()
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_CREATED)
self.assertIsNone(coro_b.cr_await)
coro_b.send(None)
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_SUSPENDED)
self.assertEqual(coro_b.cr_await.cr_await.gi_code.co_name, 'a')
with self.assertRaises(StopIteration):
coro_b.send(None) # complete coroutine
self.assertEqual(inspect.getcoroutinestate(coro_b), inspect.CORO_CLOSED)
self.assertIsNone(coro_b.cr_await)
def test_corotype_1(self):
ct = types.CoroutineType
self.assertIn('into coroutine', ct.send.__doc__)
self.assertIn('inside coroutine', ct.close.__doc__)
self.assertIn('in coroutine', ct.throw.__doc__)
self.assertIn('of the coroutine', ct.__dict__['__name__'].__doc__)
self.assertIn('of the coroutine', ct.__dict__['__qualname__'].__doc__)
self.assertEqual(ct.__name__, 'coroutine')
async def f(): pass
c = f()
self.assertIn('coroutine object', repr(c))
c.close()
def test_await_1(self):
async def foo():
await 1
with self.assertRaisesRegex(TypeError, "object int can.t.*await"):
run_async(foo())
def test_await_2(self):
async def foo():
await []
with self.assertRaisesRegex(TypeError, "object list can.t.*await"):
run_async(foo())
def test_await_3(self):
async def foo():
await AsyncYieldFrom([1, 2, 3])
self.assertEqual(run_async(foo()), ([1, 2, 3], None))
self.assertEqual(run_async__await__(foo()), ([1, 2, 3], None))
def test_await_4(self):
async def bar():
return 42
async def foo():
return await bar()
self.assertEqual(run_async(foo()), ([], 42))
def test_await_5(self):
class Awaitable:
def __await__(self):
return
async def foo():
return (await Awaitable())
with self.assertRaisesRegex(
TypeError, "__await__.*returned non-iterator of type"):
run_async(foo())
def test_await_6(self):
class Awaitable:
def __await__(self):
return iter([52])
async def foo():
return (await Awaitable())
self.assertEqual(run_async(foo()), ([52], None))
def test_await_7(self):
class Awaitable:
def __await__(self):
yield 42
return 100
async def foo():
return (await Awaitable())
self.assertEqual(run_async(foo()), ([42], 100))
def test_await_8(self):
class Awaitable:
pass
async def foo(): return await Awaitable()
with self.assertRaisesRegex(
TypeError, "object Awaitable can't be used in 'await' expression"):
run_async(foo())
def test_await_9(self):
def wrap():
return bar
async def bar():
return 42
async def foo():
db = {'b': lambda: wrap}
class DB:
b = wrap
return (await bar() + await wrap()() + await db['b']()()() +
await bar() * 1000 + await DB.b()())
async def foo2():
return -await bar()
self.assertEqual(run_async(foo()), ([], 42168))
self.assertEqual(run_async(foo2()), ([], -42))
def test_await_10(self):
async def baz():
return 42
async def bar():
return baz()
async def foo():
return await (await bar())
self.assertEqual(run_async(foo()), ([], 42))
def test_await_11(self):
def ident(val):
return val
async def bar():
return 'spam'
async def foo():
return ident(val=await bar())
async def foo2():
return await bar(), 'ham'
self.assertEqual(run_async(foo2()), ([], ('spam', 'ham')))
def test_await_12(self):
async def coro():
return 'spam'
c = coro()
class Awaitable:
def __await__(self):
return c
async def foo():
return await Awaitable()
with self.assertRaisesRegex(
TypeError, r"__await__\(\) returned a coroutine"):
run_async(foo())
c.close()
def test_await_13(self):
class Awaitable:
def __await__(self):
return self
async def foo():
return await Awaitable()
with self.assertRaisesRegex(
TypeError, "__await__.*returned non-iterator of type"):
run_async(foo())
def test_await_14(self):
class Wrapper:
# Forces the interpreter to use CoroutineType.__await__
def __init__(self, coro):
assert coro.__class__ is types.CoroutineType
self.coro = coro
def __await__(self):
return self.coro.__await__()
class FutureLike:
def __await__(self):
return (yield)
class Marker(Exception):
pass
async def coro1():
try:
return await FutureLike()
except ZeroDivisionError:
raise Marker
async def coro2():
return await Wrapper(coro1())
c = coro2()
c.send(None)
with self.assertRaisesRegex(StopIteration, 'spam'):
c.send('spam')
c = coro2()
c.send(None)
with self.assertRaises(Marker):
c.throw(ZeroDivisionError)
def test_await_15(self):
@types.coroutine
def nop():
yield
async def coroutine():
await nop()
async def waiter(coro):
await coro
coro = coroutine()
coro.send(None)
with self.assertRaisesRegex(RuntimeError,
"coroutine is being awaited already"):
waiter(coro).send(None)
def test_await_16(self):
# See https://bugs.python.org/issue29600 for details.
async def f():
return ValueError()
async def g():
try:
raise KeyError
except:
return await f()
_, result = run_async(g())
self.assertIsNone(result.__context__)
def test_with_1(self):
class Manager:
def __init__(self, name):
self.name = name
async def __aenter__(self):
await AsyncYieldFrom(['enter-1-' + self.name,
'enter-2-' + self.name])
return self
async def __aexit__(self, *args):
await AsyncYieldFrom(['exit-1-' + self.name,
'exit-2-' + self.name])
if self.name == 'B':
return True
async def foo():
async with Manager("A") as a, Manager("B") as b:
await AsyncYieldFrom([('managers', a.name, b.name)])
1/0
f = foo()
result, _ = run_async(f)
self.assertEqual(
result, ['enter-1-A', 'enter-2-A', 'enter-1-B', 'enter-2-B',
('managers', 'A', 'B'),
'exit-1-B', 'exit-2-B', 'exit-1-A', 'exit-2-A']
)
async def foo():
async with Manager("A") as a, Manager("C") as c:
await AsyncYieldFrom([('managers', a.name, c.name)])
1/0
with self.assertRaises(ZeroDivisionError):
run_async(foo())
def test_with_2(self):
class CM:
def __aenter__(self):
pass
async def foo():
async with CM():
pass
with self.assertRaisesRegex(AttributeError, '__aexit__'):
run_async(foo())
def test_with_3(self):
class CM:
def __aexit__(self):
pass
async def foo():
async with CM():
pass
with self.assertRaisesRegex(AttributeError, '__aenter__'):
run_async(foo())
def test_with_4(self):
class CM:
def __enter__(self):
pass
def __exit__(self):
pass
async def foo():
async with CM():
pass
with self.assertRaisesRegex(AttributeError, '__aexit__'):
run_async(foo())
def test_with_5(self):
# While this test doesn't make a lot of sense,
# it's a regression test for an early bug with opcodes
# generation
class CM:
async def __aenter__(self):
return self
async def __aexit__(self, *exc):
pass
async def func():
async with CM():
assert (1, ) == 1
with self.assertRaises(AssertionError):
run_async(func())
def test_with_6(self):
class CM:
def __aenter__(self):
return 123
def __aexit__(self, *e):
return 456
async def foo():
async with CM():
pass
with self.assertRaisesRegex(
TypeError,
"'async with' received an object from __aenter__ "
"that does not implement __await__: int"):
# it's important that __aexit__ wasn't called
run_async(foo())
def test_with_7(self):
class CM:
async def __aenter__(self):
return self
def __aexit__(self, *e):
return 444
# Exit with exception
async def foo():
async with CM():
1/0
try:
run_async(foo())
except TypeError as exc:
self.assertRegex(
exc.args[0],
"'async with' received an object from __aexit__ "
"that does not implement __await__: int")
self.assertTrue(exc.__context__ is not None)
self.assertTrue(isinstance(exc.__context__, ZeroDivisionError))
else:
self.fail('invalid asynchronous context manager did not fail')
def test_with_8(self):
CNT = 0
class CM:
async def __aenter__(self):
return self
def __aexit__(self, *e):
return 456
# Normal exit
async def foo():
nonlocal CNT
async with CM():
CNT += 1
with self.assertRaisesRegex(
TypeError,
"'async with' received an object from __aexit__ "
"that does not implement __await__: int"):
run_async(foo())
self.assertEqual(CNT, 1)
# Exit with 'break'
async def foo():
nonlocal CNT
for i in range(2):
async with CM():
CNT += 1
break
with self.assertRaisesRegex(
TypeError,
"'async with' received an object from __aexit__ "
"that does not implement __await__: int"):
run_async(foo())
self.assertEqual(CNT, 2)
# Exit with 'continue'
async def foo():
nonlocal CNT
for i in range(2):
async with CM():
CNT += 1
continue
with self.assertRaisesRegex(
TypeError,
"'async with' received an object from __aexit__ "
"that does not implement __await__: int"):
run_async(foo())
self.assertEqual(CNT, 3)
# Exit with 'return'
async def foo():
nonlocal CNT
async with CM():
CNT += 1
return
with self.assertRaisesRegex(
TypeError,
"'async with' received an object from __aexit__ "
"that does not implement __await__: int"):
run_async(foo())
self.assertEqual(CNT, 4)
def test_with_9(self):
CNT = 0
class CM:
async def __aenter__(self):
return self
async def __aexit__(self, *e):
1/0
async def foo():
nonlocal CNT
async with CM():
CNT += 1
with self.assertRaises(ZeroDivisionError):
run_async(foo())
self.assertEqual(CNT, 1)
def test_with_10(self):
CNT = 0
class CM:
async def __aenter__(self):
return self
async def __aexit__(self, *e):
1/0
async def foo():
nonlocal CNT
async with CM():
async with CM():
raise RuntimeError
try:
run_async(foo())
except ZeroDivisionError as exc:
self.assertTrue(exc.__context__ is not None)
self.assertTrue(isinstance(exc.__context__, ZeroDivisionError))
self.assertTrue(isinstance(exc.__context__.__context__,
RuntimeError))
else:
self.fail('exception from __aexit__ did not propagate')
def test_with_11(self):
CNT = 0
class CM:
async def __aenter__(self):
raise NotImplementedError
async def __aexit__(self, *e):
1/0
async def foo():
nonlocal CNT
async with CM():
raise RuntimeError
try:
run_async(foo())
except NotImplementedError as exc:
self.assertTrue(exc.__context__ is None)
else:
self.fail('exception from __aenter__ did not propagate')
def test_with_12(self):
CNT = 0
class CM:
async def __aenter__(self):
return self
async def __aexit__(self, *e):
return True
async def foo():
nonlocal CNT
async with CM() as cm:
self.assertIs(cm.__class__, CM)
raise RuntimeError
run_async(foo())
def test_with_13(self):
CNT = 0
class CM:
async def __aenter__(self):
1/0
async def __aexit__(self, *e):
return True
async def foo():
nonlocal CNT
CNT += 1
async with CM():
CNT += 1000
CNT += 10000
with self.assertRaises(ZeroDivisionError):
run_async(foo())
self.assertEqual(CNT, 1)
def test_for_1(self):
aiter_calls = 0
class AsyncIter:
def __init__(self):
self.i = 0
def __aiter__(self):
nonlocal aiter_calls
aiter_calls += 1
return self
async def __anext__(self):
self.i += 1
if not (self.i % 10):
await AsyncYield(self.i * 10)
if self.i > 100:
raise StopAsyncIteration
return self.i, self.i
buffer = []
async def test1():
async for i1, i2 in AsyncIter():
buffer.append(i1 + i2)
yielded, _ = run_async(test1())
# Make sure that __aiter__ was called only once
self.assertEqual(aiter_calls, 1)
self.assertEqual(yielded, [i * 100 for i in range(1, 11)])
self.assertEqual(buffer, [i*2 for i in range(1, 101)])
buffer = []
async def test2():
nonlocal buffer
async for i in AsyncIter():
buffer.append(i[0])
if i[0] == 20:
break
else:
buffer.append('what?')
buffer.append('end')
yielded, _ = run_async(test2())
# Make sure that __aiter__ was called only once
self.assertEqual(aiter_calls, 2)
self.assertEqual(yielded, [100, 200])
self.assertEqual(buffer, [i for i in range(1, 21)] + ['end'])
buffer = []
async def test3():
nonlocal buffer
async for i in AsyncIter():
if i[0] > 20:
continue
buffer.append(i[0])
else:
buffer.append('what?')
buffer.append('end')
yielded, _ = run_async(test3())
# Make sure that __aiter__ was called only once
self.assertEqual(aiter_calls, 3)
self.assertEqual(yielded, [i * 100 for i in range(1, 11)])
self.assertEqual(buffer, [i for i in range(1, 21)] +
['what?', 'end'])
def test_for_2(self):
tup = (1, 2, 3)
refs_before = sys.getrefcount(tup)
async def foo():
async for i in tup:
print('never going to happen')
with self.assertRaisesRegex(
TypeError, "async for' requires an object.*__aiter__.*tuple"):
run_async(foo())
self.assertEqual(sys.getrefcount(tup), refs_before)
def test_for_3(self):
class I:
def __aiter__(self):
return self
aiter = I()
refs_before = sys.getrefcount(aiter)
async def foo():
async for i in aiter:
print('never going to happen')
with self.assertRaisesRegex(
TypeError,
r"that does not implement __anext__"):
run_async(foo())
self.assertEqual(sys.getrefcount(aiter), refs_before)
def test_for_4(self):
class I:
def __aiter__(self):
return self
def __anext__(self):
return ()
aiter = I()
refs_before = sys.getrefcount(aiter)
async def foo():
async for i in aiter:
print('never going to happen')
with self.assertRaisesRegex(
TypeError,
"async for' received an invalid object.*__anext__.*tuple"):
run_async(foo())
self.assertEqual(sys.getrefcount(aiter), refs_before)
def test_for_6(self):
I = 0
class Manager:
async def __aenter__(self):
nonlocal I
I += 10000
async def __aexit__(self, *args):
nonlocal I
I += 100000
class Iterable:
def __init__(self):
self.i = 0
def __aiter__(self):
return self
async def __anext__(self):
if self.i > 10:
raise StopAsyncIteration
self.i += 1
return self.i
##############
manager = Manager()
iterable = Iterable()
mrefs_before = sys.getrefcount(manager)
irefs_before = sys.getrefcount(iterable)
async def main():
nonlocal I
async with manager:
async for i in iterable:
I += 1
I += 1000
with warnings.catch_warnings():
warnings.simplefilter("error")
# Test that __aiter__ that returns an asynchronous iterator
# directly does not throw any warnings.
run_async(main())
self.assertEqual(I, 111011)
self.assertEqual(sys.getrefcount(manager), mrefs_before)
self.assertEqual(sys.getrefcount(iterable), irefs_before)
##############
async def main():
nonlocal I
async with Manager():
async for i in Iterable():
I += 1
I += 1000
async with Manager():
async for i in Iterable():
I += 1
I += 1000
run_async(main())
self.assertEqual(I, 333033)
##############
async def main():
nonlocal I
async with Manager():
I += 100
async for i in Iterable():
I += 1
else:
I += 10000000
I += 1000
async with Manager():
I += 100
async for i in Iterable():
I += 1
else:
I += 10000000
I += 1000
run_async(main())
self.assertEqual(I, 20555255)
def test_for_7(self):
CNT = 0
class AI:
def __aiter__(self):
1/0
async def foo():
nonlocal CNT
async for i in AI():
CNT += 1
CNT += 10
with self.assertRaises(ZeroDivisionError):
run_async(foo())
self.assertEqual(CNT, 0)
def test_for_8(self):
CNT = 0
class AI:
def __aiter__(self):
1/0
async def foo():
nonlocal CNT
async for i in AI():
CNT += 1
CNT += 10
with self.assertRaises(ZeroDivisionError):
with warnings.catch_warnings():
warnings.simplefilter("error")
# Test that if __aiter__ raises an exception it propagates
# without any kind of warning.
run_async(foo())
self.assertEqual(CNT, 0)
def test_for_11(self):
class F:
def __aiter__(self):
return self
def __anext__(self):
return self
def __await__(self):
1 / 0
async def main():
async for _ in F():
pass
with self.assertRaisesRegex(TypeError,
'an invalid object from __anext__') as c:
main().send(None)
err = c.exception
self.assertIsInstance(err.__cause__, ZeroDivisionError)
    def test_for_tuple(self):
        """'async for' over a tuple subclass that implements the async
        iteration protocol on itself."""
        class Done(Exception): pass
        class AIter(tuple):
            # Class-level cursor; each instance shadows it on first increment.
            i = 0
            def __aiter__(self):
                return self
            async def __anext__(self):
                if self.i >= len(self):
                    raise StopAsyncIteration
                self.i += 1
                return self[self.i - 1]
        result = []
        async def foo():
            async for i in AIter([42]):
                result.append(i)
            raise Done
        with self.assertRaises(Done):
            foo().send(None)
        self.assertEqual(result, [42])
    def test_for_stop_iteration(self):
        """'async for' over a StopIteration subclass works; the exception
        machinery must not confuse the iterable with StopIteration itself."""
        class Done(Exception): pass
        class AIter(StopIteration):
            i = 0
            def __aiter__(self):
                return self
            async def __anext__(self):
                if self.i:
                    raise StopAsyncIteration
                self.i += 1
                # StopIteration(42).value == 42
                return self.value
        result = []
        async def foo():
            async for i in AIter(42):
                result.append(i)
            raise Done
        with self.assertRaises(Done):
            foo().send(None)
        self.assertEqual(result, [42])
    def test_comp_1(self):
        """'await' is allowed inside list/set/dict comprehensions in a coroutine."""
        async def f(i):
            return i
        async def run_list():
            return [await c for c in [f(1), f(41)]]
        async def run_set():
            return {await c for c in [f(1), f(41)]}
        async def run_dict1():
            return {await c: 'a' for c in [f(1), f(41)]}
        async def run_dict2():
            return {i: await c for i, c in enumerate([f(1), f(41)])}
        # run_async returns (yielded values, result); nothing is yielded here.
        self.assertEqual(run_async(run_list()), ([], [1, 41]))
        self.assertEqual(run_async(run_set()), ([], {1, 41}))
        self.assertEqual(run_async(run_dict1()), ([], {1: 'a', 41: 'a'}))
        self.assertEqual(run_async(run_dict2()), ([], {0: 1, 1: 41}))
    def test_comp_2(self):
        """'await' in nested (multi-'for') comprehension clauses."""
        async def f(i):
            return i
        async def run_list():
            return [s for c in [f(''), f('abc'), f(''), f(['de', 'fg'])]
                    for s in await c]
        self.assertEqual(
            run_async(run_list()),
            ([], ['a', 'b', 'c', 'de', 'fg']))
        async def run_set():
            # Two levels of awaiting: the outer list of coroutines, then each
            # inner coroutine's list.
            return {d
                    for c in [f([f([10, 30]),
                                 f([20])])]
                    for s in await c
                    for d in await s}
        self.assertEqual(
            run_async(run_set()),
            ([], {10, 20, 30}))
        async def run_set2():
            return {await s
                    for c in [f([f(10), f(20)])]
                    for s in await c}
        self.assertEqual(
            run_async(run_set2()),
            ([], {10, 20}))
    def test_comp_3(self):
        """'async for' comprehensions (list/set/dict/genexp) over an async generator."""
        async def f(it):
            for i in it:
                yield i
        async def run_list():
            return [i + 1 async for i in f([10, 20])]
        self.assertEqual(
            run_async(run_list()),
            ([], [11, 21]))
        async def run_set():
            return {i + 1 async for i in f([10, 20])}
        self.assertEqual(
            run_async(run_set()),
            ([], {11, 21}))
        async def run_dict():
            return {i + 1: i + 2 async for i in f([10, 20])}
        self.assertEqual(
            run_async(run_dict()),
            ([], {11: 12, 21: 22}))
        async def run_gen():
            # An async generator expression can itself be async-iterated.
            gen = (i + 1 async for i in f([10, 20]))
            return [g + 100 async for g in gen]
        self.assertEqual(
            run_async(run_gen()),
            ([], [111, 121]))
    def test_comp_4(self):
        """'async for' comprehensions with an 'if' filter clause."""
        async def f(it):
            for i in it:
                yield i
        async def run_list():
            return [i + 1 async for i in f([10, 20]) if i > 10]
        self.assertEqual(
            run_async(run_list()),
            ([], [21]))
        async def run_set():
            return {i + 1 async for i in f([10, 20]) if i > 10}
        self.assertEqual(
            run_async(run_set()),
            ([], {21}))
        async def run_dict():
            return {i + 1: i + 2 async for i in f([10, 20]) if i > 10}
        self.assertEqual(
            run_async(run_dict()),
            ([], {21: 22}))
        async def run_gen():
            gen = (i + 1 async for i in f([10, 20]) if i > 10)
            return [g + 100 async for g in gen]
        self.assertEqual(
            run_async(run_gen()),
            ([], [121]))
    def test_comp_4_2(self):
        """'async for' comprehensions with a compound (chained) 'if' condition."""
        async def f(it):
            for i in it:
                yield i
        async def run_list():
            return [i + 10 async for i in f(range(5)) if 0 < i < 4]
        self.assertEqual(
            run_async(run_list()),
            ([], [11, 12, 13]))
        async def run_set():
            return {i + 10 async for i in f(range(5)) if 0 < i < 4}
        self.assertEqual(
            run_async(run_set()),
            ([], {11, 12, 13}))
        async def run_dict():
            return {i + 10: i + 100 async for i in f(range(5)) if 0 < i < 4}
        self.assertEqual(
            run_async(run_dict()),
            ([], {11: 101, 12: 102, 13: 103}))
        async def run_gen():
            gen = (i + 10 async for i in f(range(5)) if 0 < i < 4)
            return [g + 100 async for g in gen]
        self.assertEqual(
            run_async(run_gen()),
            ([], [111, 112, 113]))
    def test_comp_5(self):
        """A plain 'for' clause may be followed by a nested 'async for' clause."""
        async def f(it):
            for i in it:
                yield i
        async def run_list():
            # Outer sync filter keeps only [30, 40]; inner async filter keeps 40.
            return [i + 1 for pair in ([10, 20], [30, 40]) if pair[0] > 10
                    async for i in f(pair) if i > 30]
        self.assertEqual(
            run_async(run_list()),
            ([], [41]))
    def test_comp_6(self):
        """An 'async for' outer clause may be followed by a plain 'for' clause."""
        async def f(it):
            for i in it:
                yield i
        async def run_list():
            return [i + 1 async for seq in f([(10, 20), (30,)])
                    for i in seq]
        self.assertEqual(
            run_async(run_list()),
            ([], [11, 21, 31]))
    def test_comp_7(self):
        """An exception raised inside the async generator propagates out of
        the comprehension that consumes it."""
        async def f():
            yield 1
            yield 2
            raise Exception('aaa')
        async def run_list():
            return [i async for i in f()]
        with self.assertRaisesRegex(Exception, 'aaa'):
            run_async(run_list())
    def test_comp_8(self):
        """A plain synchronous comprehension still works inside a coroutine."""
        async def f():
            return [i for i in [1, 2, 3]]
        self.assertEqual(
            run_async(f()),
            ([], [1, 2, 3]))
    def test_comp_9(self):
        """The result of an async comprehension can feed a sync comprehension."""
        async def gen():
            yield 1
            yield 2
        async def f():
            l = [i async for i in gen()]
            return [i for i in l]
        self.assertEqual(
            run_async(f()),
            ([], [1, 2]))
    def test_comp_10(self):
        """Sync set and dict comprehensions compose inside a coroutine."""
        async def f():
            xx = {i for i in [1, 2, 3]}
            return {x: x for x in xx}
        self.assertEqual(
            run_async(f()),
            ([], {1: 1, 2: 2, 3: 3}))
    def test_copy(self):
        """Coroutine objects and their __await__ wrappers reject copy.copy()."""
        async def func(): pass
        coro = func()
        with self.assertRaises(TypeError):
            copy.copy(coro)
        aw = coro.__await__()
        try:
            with self.assertRaises(TypeError):
                copy.copy(aw)
        finally:
            # Close the wrapper so the never-awaited coroutine doesn't warn.
            aw.close()
    def test_pickle(self):
        """Coroutines and __await__ wrappers are unpicklable under every protocol."""
        async def func(): pass
        coro = func()
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            with self.assertRaises((TypeError, pickle.PicklingError)):
                pickle.dumps(coro, proto)
        aw = coro.__await__()
        try:
            for proto in range(pickle.HIGHEST_PROTOCOL + 1):
                with self.assertRaises((TypeError, pickle.PicklingError)):
                    pickle.dumps(aw, proto)
        finally:
            aw.close()
    def test_fatal_coro_warning(self):
        # Issue 27811: the "never awaited" RuntimeWarning is raised during GC,
        # where exceptions cannot propagate; even with warnings-as-errors the
        # interpreter must not crash and the message must reach stderr.
        async def func(): pass
        with warnings.catch_warnings(), support.captured_stderr() as stderr:
            warnings.filterwarnings("error")
            func()
            support.gc_collect()
        self.assertIn("was never awaited", stderr.getvalue())
    def test_for_assign_raising_stop_async_iteration(self):
        """StopAsyncIteration raised by the *assignment target* of an
        'async for' must not be mistaken for normal end-of-iteration."""
        class BadTarget:
            def __setitem__(self, key, value):
                raise StopAsyncIteration(42)
        tgt = BadTarget()
        async def source():
            yield 10
        async def run_for():
            # In a plain coroutine the exception escapes the loop unchanged.
            with self.assertRaises(StopAsyncIteration) as cm:
                async for tgt[0] in source():
                    pass
            self.assertEqual(cm.exception.args, (42,))
            return 'end'
        self.assertEqual(run_async(run_for()), ([], 'end'))
        async def run_list():
            with self.assertRaises(StopAsyncIteration) as cm:
                return [0 async for tgt[0] in source()]
            self.assertEqual(cm.exception.args, (42,))
            return 'end'
        self.assertEqual(run_async(run_list()), ([], 'end'))
        async def run_gen():
            # Inside an async generator it is wrapped in RuntimeError (PEP 479
            # analogue), with the original exception chained as the cause.
            gen = (0 async for tgt[0] in source())
            a = gen.asend(None)
            with self.assertRaises(RuntimeError) as cm:
                await a
            self.assertIsInstance(cm.exception.__cause__, StopAsyncIteration)
            self.assertEqual(cm.exception.__cause__.args, (42,))
            return 'end'
        self.assertEqual(run_async(run_gen()), ([], 'end'))
    def test_for_assign_raising_stop_async_iteration_2(self):
        """Same as above, but StopAsyncIteration comes from tuple-unpacking
        the loop variable (the yielded value's __iter__ raises)."""
        class BadIterable:
            def __iter__(self):
                raise StopAsyncIteration(42)
        async def badpairs():
            yield BadIterable()
        async def run_for():
            with self.assertRaises(StopAsyncIteration) as cm:
                async for i, j in badpairs():
                    pass
            self.assertEqual(cm.exception.args, (42,))
            return 'end'
        self.assertEqual(run_async(run_for()), ([], 'end'))
        async def run_list():
            with self.assertRaises(StopAsyncIteration) as cm:
                return [0 async for i, j in badpairs()]
            self.assertEqual(cm.exception.args, (42,))
            return 'end'
        self.assertEqual(run_async(run_list()), ([], 'end'))
        async def run_gen():
            # Async generators wrap the leaked StopAsyncIteration in RuntimeError.
            gen = (0 async for i, j in badpairs())
            a = gen.asend(None)
            with self.assertRaises(RuntimeError) as cm:
                await a
            self.assertIsInstance(cm.exception.__cause__, StopAsyncIteration)
            self.assertEqual(cm.exception.__cause__.args, (42,))
            return 'end'
        self.assertEqual(run_async(run_gen()), ([], 'end'))
class CoroAsyncIOCompatTest(unittest.TestCase):
    """Smoke test that native coroutines interoperate with asyncio."""
    def test_asyncio_1(self):
        """'async with' runs __aexit__ with the in-flight exception's type."""
        # asyncio cannot be imported when Python is compiled without thread
        # support
        asyncio = support.import_module('asyncio')
        class MyException(Exception):
            pass
        buffer = []
        class CM:
            async def __aenter__(self):
                buffer.append(1)
                await asyncio.sleep(0.01)
                buffer.append(2)
                return self
            async def __aexit__(self, exc_type, exc_val, exc_tb):
                await asyncio.sleep(0.01)
                buffer.append(exc_type.__name__)
        async def f():
            async with CM() as c:
                await asyncio.sleep(0.01)
                raise MyException
            buffer.append('unreachable')
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(f())
        except MyException:
            pass
        finally:
            loop.close()
            asyncio.set_event_loop(None)
        # __aexit__ saw MyException and did not suppress it.
        self.assertEqual(buffer, [1, 2, 'MyException'])
class SysSetCoroWrapperTest(unittest.TestCase):
    """Tests for the deprecated sys.set_coroutine_wrapper() machinery.

    Every get/set call is expected to emit a DeprecationWarning.
    """
    def test_set_wrapper_1(self):
        """The wrapper is invoked for native coroutines and can be cleared."""
        async def foo():
            return 'spam'
        wrapped = None
        def wrap(gen):
            nonlocal wrapped
            wrapped = gen
            return gen
        with self.assertWarns(DeprecationWarning):
            self.assertIsNone(sys.get_coroutine_wrapper())
        with self.assertWarns(DeprecationWarning):
            sys.set_coroutine_wrapper(wrap)
        with self.assertWarns(DeprecationWarning):
            self.assertIs(sys.get_coroutine_wrapper(), wrap)
        try:
            f = foo()
            self.assertTrue(wrapped)
            self.assertEqual(run_async(f), ([], 'spam'))
        finally:
            with self.assertWarns(DeprecationWarning):
                sys.set_coroutine_wrapper(None)
            f.close()
        with self.assertWarns(DeprecationWarning):
            self.assertIsNone(sys.get_coroutine_wrapper())
        # After clearing, new coroutines are not wrapped any more.
        wrapped = None
        coro = foo()
        self.assertFalse(wrapped)
        coro.close()
    def test_set_wrapper_2(self):
        """Non-callable wrappers are rejected with TypeError."""
        with self.assertWarns(DeprecationWarning):
            self.assertIsNone(sys.get_coroutine_wrapper())
        with self.assertRaisesRegex(TypeError, "callable expected, got int"):
            with self.assertWarns(DeprecationWarning):
                sys.set_coroutine_wrapper(1)
        with self.assertWarns(DeprecationWarning):
            self.assertIsNone(sys.get_coroutine_wrapper())
    def test_set_wrapper_3(self):
        """A wrapper that itself creates a coroutine triggers recursion detection."""
        async def foo():
            return 'spam'
        def wrapper(coro):
            async def wrap(coro):
                return await coro
            return wrap(coro)
        with self.assertWarns(DeprecationWarning):
            sys.set_coroutine_wrapper(wrapper)
        try:
            with silence_coro_gc(), self.assertRaisesRegex(
                RuntimeError,
                r"coroutine wrapper.*\.wrapper at 0x.*attempted to "
                r"recursively wrap .* wrap .*"):
                foo()
        finally:
            with self.assertWarns(DeprecationWarning):
                sys.set_coroutine_wrapper(None)
    def test_set_wrapper_4(self):
        """Generator-based coroutines are not passed through the wrapper."""
        @types.coroutine
        def foo():
            return 'spam'
        wrapped = None
        def wrap(gen):
            nonlocal wrapped
            wrapped = gen
            return gen
        with self.assertWarns(DeprecationWarning):
            sys.set_coroutine_wrapper(wrap)
        try:
            foo()
            self.assertIs(
                wrapped, None,
                "generator-based coroutine was wrapped via "
                "sys.set_coroutine_wrapper")
        finally:
            with self.assertWarns(DeprecationWarning):
                sys.set_coroutine_wrapper(None)
class OriginTrackingTest(unittest.TestCase):
    """Tests for sys.set_coroutine_origin_tracking_depth() and cr_origin."""
    def here(self):
        """Return (filename, lineno) of the caller's call site."""
        info = inspect.getframeinfo(inspect.currentframe().f_back)
        return (info.filename, info.lineno)
    def test_origin_tracking(self):
        """cr_origin records the creation traceback, truncated to the depth."""
        orig_depth = sys.get_coroutine_origin_tracking_depth()
        try:
            async def corofn():
                pass
            # Depth 0: tracking disabled, cr_origin is None.
            sys.set_coroutine_origin_tracking_depth(0)
            self.assertEqual(sys.get_coroutine_origin_tracking_depth(), 0)
            with contextlib.closing(corofn()) as coro:
                self.assertIsNone(coro.cr_origin)
            # Depth 1: only the immediate creation frame is recorded.
            sys.set_coroutine_origin_tracking_depth(1)
            self.assertEqual(sys.get_coroutine_origin_tracking_depth(), 1)
            fname, lineno = self.here()
            with contextlib.closing(corofn()) as coro:
                self.assertEqual(coro.cr_origin,
                                 ((fname, lineno + 1, "test_origin_tracking"),))
            # Depth 2: the caller of the creator is recorded as well.
            sys.set_coroutine_origin_tracking_depth(2)
            self.assertEqual(sys.get_coroutine_origin_tracking_depth(), 2)
            def nested():
                return (self.here(), corofn())
            fname, lineno = self.here()
            ((nested_fname, nested_lineno), coro) = nested()
            with contextlib.closing(coro):
                self.assertEqual(coro.cr_origin,
                                 ((nested_fname, nested_lineno, "nested"),
                                  (fname, lineno + 1, "test_origin_tracking")))
            # Check we handle running out of frames correctly
            sys.set_coroutine_origin_tracking_depth(1000)
            with contextlib.closing(corofn()) as coro:
                self.assertTrue(2 < len(coro.cr_origin) < 1000)
            # We can't set depth negative
            with self.assertRaises(ValueError):
                sys.set_coroutine_origin_tracking_depth(-1)
            # And trying leaves it unchanged
            self.assertEqual(sys.get_coroutine_origin_tracking_depth(), 1000)
        finally:
            sys.set_coroutine_origin_tracking_depth(orig_depth)
    def test_origin_tracking_warning(self):
        """The 'never awaited' warning message embeds the creation traceback."""
        async def corofn():
            pass
        a1_filename, a1_lineno = self.here()
        def a1():
            return corofn()  # comment in a1
        # The 'return corofn()' line is two lines below the here() call.
        a1_lineno += 2
        a2_filename, a2_lineno = self.here()
        def a2():
            return a1()  # comment in a2
        a2_lineno += 2
        def check(depth, msg):
            sys.set_coroutine_origin_tracking_depth(depth)
            with self.assertWarns(RuntimeWarning) as cm:
                a2()
                support.gc_collect()
            self.assertEqual(msg, str(cm.warning))
        orig_depth = sys.get_coroutine_origin_tracking_depth()
        try:
            # NOTE(review): 'msg' is unused — check() returns None.
            msg = check(0, f"coroutine '{corofn.__qualname__}' was never awaited")
            check(1, "".join([
                f"coroutine '{corofn.__qualname__}' was never awaited\n",
                "Coroutine created at (most recent call last)\n",
                f'  File "{a1_filename}", line {a1_lineno}, in a1\n',
                f'    return corofn()  # comment in a1',
            ]))
            check(2, "".join([
                f"coroutine '{corofn.__qualname__}' was never awaited\n",
                "Coroutine created at (most recent call last)\n",
                f'  File "{a2_filename}", line {a2_lineno}, in a2\n',
                f'    return a1()  # comment in a2\n',
                f'  File "{a1_filename}", line {a1_lineno}, in a1\n',
                f'    return corofn()  # comment in a1',
            ]))
        finally:
            sys.set_coroutine_origin_tracking_depth(orig_depth)
    def test_unawaited_warning_when_module_broken(self):
        # Make sure we don't blow up too bad if
        # warnings._warn_unawaited_coroutine is broken somehow (e.g. because
        # of shutdown problems)
        async def corofn():
            pass
        orig_wuc = warnings._warn_unawaited_coroutine
        try:
            # Hook raises: the error is swallowed and reported on stderr.
            warnings._warn_unawaited_coroutine = lambda coro: 1/0
            with support.captured_stderr() as stream:
                corofn()
                support.gc_collect()
            self.assertIn("Exception ignored in", stream.getvalue())
            self.assertIn("ZeroDivisionError", stream.getvalue())
            self.assertIn("was never awaited", stream.getvalue())
            # Hook missing entirely: the plain warning text still appears.
            del warnings._warn_unawaited_coroutine
            with support.captured_stderr() as stream:
                corofn()
                support.gc_collect()
            self.assertIn("was never awaited", stream.getvalue())
        finally:
            warnings._warn_unawaited_coroutine = orig_wuc
class UnawaitedWarningDuringShutdownTest(unittest.TestCase):
    # https://bugs.python.org/issue32591#msg310726
    def test_unawaited_warning_during_shutdown(self):
        """Coroutines alive at interpreter shutdown must not crash the exit."""
        code = ("import asyncio\n"
                "async def f(): pass\n"
                "asyncio.gather(f())\n")
        assert_python_ok("-c", code)
        # Coroutine kept alive by a module attribute.
        code = ("import sys\n"
                "async def f(): pass\n"
                "sys.coro = f()\n")
        assert_python_ok("-c", code)
        # Coroutine kept alive inside a reference cycle.
        code = ("import sys\n"
                "async def f(): pass\n"
                "sys.corocycle = [f()]\n"
                "sys.corocycle.append(sys.corocycle)\n")
        assert_python_ok("-c", code)
@support.cpython_only
class CAPITest(unittest.TestCase):
    """Tests for the tp_as_async.am_await slot via _testcapi.awaitType."""
    def test_tp_await_1(self):
        """Awaiting a C-level awaitable yields its iterator's values."""
        from _testcapi import awaitType as at
        async def foo():
            future = at(iter([1]))
            return (await future)
        self.assertEqual(foo().send(None), 1)
    def test_tp_await_2(self):
        # Test tp_await to __await__ mapping
        from _testcapi import awaitType as at
        future = at(iter([1]))
        self.assertEqual(next(future.__await__()), 1)
    def test_tp_await_3(self):
        """A non-iterator returned from __await__ raises TypeError."""
        from _testcapi import awaitType as at
        async def foo():
            future = at(1)
            return (await future)
        with self.assertRaisesRegex(
                TypeError, "__await__.*returned non-iterator of type 'int'"):
            self.assertEqual(foo().send(None), 1)
# Run the test suite when this module is executed directly.
# (PEP 8: spaces around the comparison operator.)
if __name__ == "__main__":
    unittest.main()
| 27.44875 | 85 | 0.482945 |
9041a86d0c08efff42e7c167642c30acaa4473f6 | 4,284 | py | Python | test/functional/p2p_add_connections.py | eleccoin/eleccoin | 95f86f28019fe8666816e75e1dc82f1edeee3b31 | [
"MIT"
] | 3 | 2020-04-24T08:03:09.000Z | 2020-06-24T00:53:03.000Z | test/functional/p2p_add_connections.py | eleccoin/eleccoin | 95f86f28019fe8666816e75e1dc82f1edeee3b31 | [
"MIT"
] | 8 | 2021-02-06T16:15:10.000Z | 2022-02-20T20:08:45.000Z | test/functional/p2p_add_connections.py | eleccoin/eleccoin | 95f86f28019fe8666816e75e1dc82f1edeee3b31 | [
"MIT"
] | 7 | 2020-02-26T22:08:49.000Z | 2021-02-06T12:35:40.000Z | #!/usr/bin/env python3
# Copyright (c) 2021 The Eleccoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test add_outbound_p2p_connection test framework functionality"""
from test_framework.p2p import P2PInterface
from test_framework.test_framework import EleccoinTestFramework
from test_framework.util import assert_equal
def check_node_connections(*, node, num_in, num_out):
    """Assert that *node* reports exactly the expected connection counts."""
    network_info = node.getnetworkinfo()
    for key, expected in (("connections_in", num_in), ("connections_out", num_out)):
        assert_equal(network_info[key], expected)
class P2PAddConnections(EleccoinTestFramework):
    """Exercises add_outbound_p2p_connection() of the test framework itself:
    opening, disconnecting, and re-opening outbound/block-relay-only/inbound
    mock connections, including across a node restart."""
    def set_test_params(self):
        self.num_nodes = 2
    def setup_network(self):
        self.setup_nodes()
        # Don't connect the nodes
    def run_test(self):
        """Sequentially open connections and verify the reported counts."""
        self.log.info("Add 8 outbounds to node 0")
        for i in range(8):
            self.log.info(f"outbound: {i}")
            self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="outbound-full-relay")
        self.log.info("Add 2 block-relay-only connections to node 0")
        for i in range(2):
            self.log.info(f"block-relay-only: {i}")
            # set p2p_idx based on the outbound connections already open to the
            # node, so add 8 to account for the previous full-relay connections
            self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 8, connection_type="block-relay-only")
        self.log.info("Add 2 block-relay-only connections to node 1")
        for i in range(2):
            self.log.info(f"block-relay-only: {i}")
            self.nodes[1].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i, connection_type="block-relay-only")
        self.log.info("Add 5 inbound connections to node 1")
        for i in range(5):
            self.log.info(f"inbound: {i}")
            self.nodes[1].add_p2p_connection(P2PInterface())
        self.log.info("Add 8 outbounds to node 1")
        for i in range(8):
            self.log.info(f"outbound: {i}")
            # bump p2p_idx to account for the 2 existing outbounds on node 1
            self.nodes[1].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 2)
        self.log.info("Check the connections opened as expected")
        check_node_connections(node=self.nodes[0], num_in=0, num_out=10)
        check_node_connections(node=self.nodes[1], num_in=5, num_out=10)
        self.log.info("Disconnect p2p connections & try to re-open")
        self.nodes[0].disconnect_p2ps()
        check_node_connections(node=self.nodes[0], num_in=0, num_out=0)
        self.log.info("Add 8 outbounds to node 0")
        for i in range(8):
            self.log.info(f"outbound: {i}")
            self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i)
        check_node_connections(node=self.nodes[0], num_in=0, num_out=8)
        self.log.info("Add 2 block-relay-only connections to node 0")
        for i in range(2):
            self.log.info(f"block-relay-only: {i}")
            # bump p2p_idx to account for the 8 existing outbounds on node 0
            self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 8, connection_type="block-relay-only")
        check_node_connections(node=self.nodes[0], num_in=0, num_out=10)
        self.log.info("Restart node 0 and try to reconnect to p2ps")
        self.restart_node(0)
        self.log.info("Add 4 outbounds to node 0")
        for i in range(4):
            self.log.info(f"outbound: {i}")
            self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i)
        check_node_connections(node=self.nodes[0], num_in=0, num_out=4)
        self.log.info("Add 2 block-relay-only connections to node 0")
        for i in range(2):
            self.log.info(f"block-relay-only: {i}")
            # bump p2p_idx to account for the 4 existing outbounds on node 0
            self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=i + 4, connection_type="block-relay-only")
        check_node_connections(node=self.nodes[0], num_in=0, num_out=6)
        # Node 1's connections were untouched by the node-0 churn above.
        check_node_connections(node=self.nodes[1], num_in=5, num_out=10)
# Run the functional test when executed directly.
if __name__ == '__main__':
    P2PAddConnections().main()
| 44.164948 | 120 | 0.6669 |
d06235fde05a9287b7070befc763ea1258f3d849 | 5,078 | py | Python | config/settings.py | pawelszopa/django_api_menu | 292c117aa4fea57aed80bbfc9cc2bece5c0da434 | [
"Beerware"
] | null | null | null | config/settings.py | pawelszopa/django_api_menu | 292c117aa4fea57aed80bbfc9cc2bece5c0da434 | [
"Beerware"
] | null | null | null | config/settings.py | pawelszopa/django_api_menu | 292c117aa4fea57aed80bbfc9cc2bece5c0da434 | [
"Beerware"
] | null | null | null | """
Django settings for config project.
Generated by 'django-admin startproject' using Django 3.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG env var is "0"/"1"; the int() cast makes it truthy/falsy.
DEBUG = int(os.environ.get("DEBUG", default=0))
# Space-separated host list, e.g. "localhost 127.0.0.1".
# NOTE(review): with the env var unset this yields [''], which Django
# treats as no allowed hosts — confirm that is intended.
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS", '').split(" ")
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites',
    # 3rd party
    'rest_framework',
    'rest_framework.authtoken',
    'allauth',
    'allauth.account',
    'dj_rest_auth',
    'dj_rest_auth.registration',
    'drf_yasg',
    'debug_toolbar',
    'django_filters',
    # local
    'users.apps.UsersConfig',
    'menu.apps.MenuConfig',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'config.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
# Defaults to a local SQLite file; a Postgres-style config is assembled
# from SQL_* env vars when they are provided (e.g. under docker-compose).
DATABASES = {
    "default": {
        "ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
        "NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
        "USER": os.environ.get("SQL_USER", "user"),
        "PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
        "HOST": os.environ.get("SQL_HOST", "localhost"),
        "PORT": os.environ.get("SQL_PORT", "5432"),
    }
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = os.environ.get("UTC_ZONE", "UTC")
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Django REST Framework: session + token auth, authenticated-only by default.
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ],
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.IsAuthenticated'
    ],
    'DEFAULT_FILTER_BACKENDS': ['django_filters.rest_framework.DjangoFilterBackend'],
    'DATETIME_FORMAT': "%Y-%m-%d",
}
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
SITE_ID = 1
# Outgoing mail goes through SendGrid; key comes from the environment.
EMAIL_BACKEND = 'sendgrid_backend.SendgridBackend'
SENDGRID_API_KEY = os.environ.get("SENDGRID_API_KEY")
SENDGRID_SANDBOX_MODE_IN_DEBUG = False
EMAIL_HOST_USER = os.environ.get("EMAIL_HOST_USER")
# Daily scheduler trigger time (used by the app's background job).
SCHEDULER_HOUR = int(os.environ.get("SCHEDULER_HOUR", 22))
SCHEDULER_MINUTE = int(os.environ.get("SCHEDULER_MINUTE", 45))
# MIME types accepted for image uploads.
IMAGE_TYPES = ['image/jpeg', 'image/png']
AUTH_USER_MODEL = 'users.CustomUser'
# Show the debug toolbar only when DEBUG is on.
DEBUG_TOOLBAR_CONFIG = {
    'SHOW_TOOLBAR_CALLBACK': lambda request: DEBUG
}
| 28.211111 | 91 | 0.703427 |
df342d9e71f0138c976b305d6a53086a0fa0f6ea | 296 | py | Python | tests/demo/zh/demo_pos.py | v-smwang/HanLP | 98db7a649110fca4307acbd6a26f2b5bb1159efc | [
"Apache-2.0"
] | 2 | 2020-07-08T07:29:47.000Z | 2021-04-01T02:51:57.000Z | tests/demo/zh/demo_pos.py | v-smwang/HanLP | 98db7a649110fca4307acbd6a26f2b5bb1159efc | [
"Apache-2.0"
] | 4 | 2020-11-13T19:00:05.000Z | 2022-02-10T02:03:54.000Z | tests/demo/zh/demo_pos.py | v-smwang/HanLP | 98db7a649110fca4307acbd6a26f2b5bb1159efc | [
"Apache-2.0"
] | 1 | 2021-12-27T01:04:42.000Z | 2021-12-27T01:04:42.000Z | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-28 21:25
import hanlp
from hanlp.pretrained.pos import CTB5_POS_RNN_FASTTEXT_ZH
tagger = hanlp.load(CTB5_POS_RNN_FASTTEXT_ZH)
print(tagger.predict(['我', '的', '希望', '是', '希望', '和平']))
print(tagger.predict([['支持', '批处理'], ['速度', '更', '快']]))
| 32.888889 | 57 | 0.655405 |
d917d9060bd2c46d04ff8d7392a3953188065eef | 636 | py | Python | tests/settings.py | jhselvik/blogging_for_humans | 5376d1d505d0a06c9468cce624c64dac1ddcfa4c | [
"MIT"
] | null | null | null | tests/settings.py | jhselvik/blogging_for_humans | 5376d1d505d0a06c9468cce624c64dac1ddcfa4c | [
"MIT"
] | null | null | null | tests/settings.py | jhselvik/blogging_for_humans | 5376d1d505d0a06c9468cce624c64dac1ddcfa4c | [
"MIT"
] | null | null | null | # -*- coding: utf-8
from __future__ import unicode_literals, absolute_import
import django
DEBUG = True
USE_TZ = True
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "cuu0#7he^njm!s92=f83csr^se)q!3*bm#c6f41u$m=hf+bi#!"
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": ":memory:",
}
}
ROOT_URLCONF = "tests.urls"
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"blogging_for_humans",
]
SITE_ID = 1
if django.VERSION >= (1, 10):
MIDDLEWARE = ()
else:
MIDDLEWARE_CLASSES = ()
| 18.705882 | 66 | 0.657233 |
0133c48560e131c9f392791ed0e2697f360ae6a7 | 2,275 | py | Python | engine/components/clause.py | ariyo21/Expert-System | 75a76940f9ad12c45a51b4cd34df361857a2a741 | [
"MIT"
] | 5 | 2020-11-07T14:41:16.000Z | 2022-01-18T19:50:52.000Z | engine/components/clause.py | ariyo21/Expert-System | 75a76940f9ad12c45a51b4cd34df361857a2a741 | [
"MIT"
] | null | null | null | engine/components/clause.py | ariyo21/Expert-System | 75a76940f9ad12c45a51b4cd34df361857a2a741 | [
"MIT"
] | 4 | 2020-11-19T11:24:44.000Z | 2021-05-11T11:16:59.000Z | """
Clauses parsing from the clause.json file. This refers to questions
asked by the engine. Answers are two types
positive : on rule match
negative : on no rule match
"""
class Clause:
    """
    Clause refers to a question asked. The rules are matched with the
    answers given by the user for the clause.
    The answer can be statements separated by the SEP in `constants` file

    Attributes
    ----------
    __clause : str or None
        the original question (None until addClause() is called)
    __negative : str or None
        negative answer for the question
    __positive : str or None
        positive answer for the question
    """
    def __init__(self):
        # All fields start unset; populate them via addClause().
        self.__clause = None
        self.__negative = None
        self.__positive = None
    def addClause(self, clause, negative, positive):
        """
        Creating a clause by taking the question in the input and
        using the positive and negative statements to populate the
        Clause object

        Parameters
        ----------
        clause : str
            question
        negative : str
            negative answer for the clause
        positive : str
            positive answer for the clause
        """
        self.__clause = clause
        self.__negative = negative
        self.__positive = positive
    def updateClause(self, clause):
        """
        Utility function to update the clause question

        Parameters
        ----------
        clause : str
            new question
        """
        self.__clause = clause
    def delClause(self):
        """
        Deleting the question (answers are kept unchanged)
        """
        self.__clause = None
    def getClause(self):
        """
        Get the clause, question

        Returns
        -------
        str or None
            question, or None when unset
        """
        return self.__clause
    def getPositive(self):
        """
        Get the positive answer

        Returns
        -------
        str or None
            positive answer
        """
        return self.__positive
    def getNegative(self):
        """
        Get the negative answer

        Returns
        -------
        str or None
            negative answer
        """
        return self.__negative
    def __str__(self):
        """
        Printable form of the clause.

        Returns
        -------
        str
            the question, or '' when no clause is set.
            BUGFIX: previously returned None when unset, which made
            str(clause) raise "TypeError: __str__ returned non-string".
        """
        return self.__clause if self.__clause is not None else ""
| 20.495495 | 73 | 0.540659 |
9bae97c40a69420a7b57ada76b0fff2fe2e53752 | 7,934 | py | Python | sandbox/ours/algos/ModelMAML/model_maml_vpg.py | jackwilkinson255/mbmpo_master | e9e0eaf542c7895764dcb0bfee28752818124ff2 | [
"MIT"
] | 28 | 2018-11-15T14:14:23.000Z | 2022-01-10T01:53:43.000Z | sandbox/ours/algos/ModelMAML/model_maml_vpg.py | hongzimao/model_ensemble_meta_learning | 8b1351df94dfe530efaff1118022315c8d877774 | [
"MIT"
] | 3 | 2019-05-05T23:39:01.000Z | 2021-06-15T15:28:06.000Z | sandbox/ours/algos/ModelMAML/model_maml_vpg.py | hongzimao/model_ensemble_meta_learning | 8b1351df94dfe530efaff1118022315c8d877774 | [
"MIT"
] | 14 | 2018-11-15T16:47:02.000Z | 2021-05-28T14:58:01.000Z |
from rllab.misc import logger
from rllab_maml.misc import ext
from rllab_maml.misc.overrides import overrides
from sandbox.ours.algos.MAML.batch_maml_polopt import BatchMAMLPolopt
from sandbox_maml.rocky.tf.optimizers.first_order_optimizer import FirstOrderOptimizer
from sandbox_maml.rocky.tf.misc import tensor_utils
from rllab_maml.core.serializable import Serializable
import tensorflow as tf
class MAMLVPG(BatchMAMLPolopt, Serializable):
"""
Vanilla Policy Gradient.
"""
    def __init__(
            self,
            env,
            policy,
            baseline,
            optimizer=None,
            optimizer_args=None,
            use_maml=True,
            **kwargs):
        """Model-based MAML vanilla policy gradient.

        Parameters
        ----------
        env : rllab environment
        policy : MAML policy to be meta-trained
        baseline : baseline used for advantage estimation
        optimizer : outer-loop optimizer; defaults to a FirstOrderOptimizer
            configured for one full-batch epoch per meta step
        optimizer_args : dict, optional
            overrides merged on top of the default optimizer arguments
        use_maml : bool
            whether to perform the MAML inner-loop adaptation
        **kwargs : forwarded to BatchMAMLPolopt
        """
        Serializable.quick_init(self, locals())
        if optimizer is None:
            default_args = dict(
                batch_size=None,
                max_epochs=1,
            )
            if optimizer_args is None:
                optimizer_args = default_args
            else:
                # Caller-supplied args take precedence over the defaults.
                optimizer_args = dict(default_args, **optimizer_args)
            optimizer = FirstOrderOptimizer(**optimizer_args)
        self.optimizer = optimizer
        self.opt_info = None
        self.use_maml = use_maml
        super(MAMLVPG, self).__init__(env=env, policy=policy, baseline=baseline, use_maml=use_maml, **kwargs)
    def make_vars(self, stepnum='0'):
        """Create per-task TF placeholders for one adaptation step.

        Parameters
        ----------
        stepnum : str
            gradient-step index embedded in the placeholder names so that
            placeholders of different inner steps do not collide

        Returns
        -------
        tuple of three lists (observations, actions, advantages), each of
        length ``self.meta_batch_size``
        """
        # lists over the meta_batch_size
        obs_vars, action_vars, adv_vars = [], [], []
        for i in range(self.meta_batch_size):
            obs_vars.append(self.env.observation_space.new_tensor_variable(
                'obs' + stepnum + '_' + str(i),
                extra_dims=1,
            ))
            action_vars.append(self.env.action_space.new_tensor_variable(
                'action' + stepnum + '_' + str(i),
                extra_dims=1,
            ))
            adv_vars.append(tensor_utils.new_tensor(
                name='advantage' + stepnum + '_' + str(i),
                ndim=1, dtype=tf.float32,
            ))
        return obs_vars, action_vars, adv_vars
@overrides
def init_opt(self):
# TODO Commented out all KL stuff for now, since it is only used for logging
# To see how it can be turned on, see maml_npo.py
is_recurrent = int(self.policy.recurrent)
assert not is_recurrent # not supported right now.
dist = self.policy.distribution
old_dist_info_vars, old_dist_info_vars_list = [], []
for i in range(self.meta_batch_size):
old_dist_info_vars.append({
k: tf.placeholder(tf.float32, shape=[None] + list(shape), name='old_%s_%s' % (i, k))
for k, shape in dist.dist_info_specs
})
old_dist_info_vars_list += [old_dist_info_vars[i][k] for k in dist.dist_info_keys]
state_info_vars, state_info_vars_list = {}, []
all_surr_objs, input_list = [], []
new_params = None
for j in range(self.num_grad_updates):
obs_vars, action_vars, adv_vars = self.make_vars(str(j))
surr_objs = []
_surr_objs_ph = []
cur_params = new_params
new_params = []
for i in range(self.meta_batch_size):
if j == 0:
dist_info_vars, params = self.policy.dist_info_sym(obs_vars[i], state_info_vars, all_params=self.policy.all_params)
else:
dist_info_vars, params = self.policy.updated_dist_info_sym(i, all_surr_objs[-1][i], obs_vars[i], params_dict=cur_params[i])
new_params.append(params)
logli = dist.log_likelihood_sym(action_vars[i], dist_info_vars)
# formulate as a minimization problem
# The gradient of the surrogate objective is the policy gradient
surr_objs.append(- tf.reduce_mean(logli * adv_vars[i]))
if j == 0:
_dist_info_vars, _ = self.policy.dist_info_sym(obs_vars[i], state_info_vars,
all_params=self.policy.all_params_ph[i])
_logli = dist.log_likelihood_sym(action_vars[i], _dist_info_vars)
_surr_objs_ph.append(- tf.reduce_mean(_logli * adv_vars[i]))
input_list += obs_vars + action_vars + adv_vars + state_info_vars_list
if j == 0:
# For computing the fast update for sampling
self.policy.set_init_surr_obj(input_list, _surr_objs_ph)
init_input_list = input_list
all_surr_objs.append(surr_objs)
obs_vars, action_vars, adv_vars = self.make_vars('test')
surr_objs = []
kls = []
for i in range(self.meta_batch_size):
dist_info_vars, _ = self.policy.updated_dist_info_sym(i, all_surr_objs[-1][i], obs_vars[i], params_dict=new_params[i])
logli = dist.log_likelihood_sym(action_vars[i], dist_info_vars)
surr_objs.append(- tf.reduce_mean(logli * adv_vars[i]))
kls.append(dist.kl_sym(old_dist_info_vars[i], dist_info_vars))
surr_obj = tf.reduce_mean(tf.stack(surr_objs, 0))
mean_kl = tf.reduce_mean(tf.concat(kls, 0))
max_kl = tf.reduce_max(tf.concat(kls, 0))
input_list += obs_vars + action_vars + adv_vars
if self.use_maml:
self.optimizer.update_opt(loss=surr_obj, target=self.policy, inputs=input_list)
else: # baseline method of just training initial policy
self.optimizer.update_opt(loss=tf.reduce_mean(tf.stack(all_surr_objs[0],0)), target=self.policy,
inputs=init_input_list)
f_kl = tensor_utils.compile_function(
inputs=input_list + old_dist_info_vars_list,
outputs=[mean_kl, max_kl],
)
self.opt_info = dict(
f_kl=f_kl,
)
#f_kl = tensor_utils.compile_function(
# inputs=input_list + old_dist_info_vars_list,
# outputs=[mean_kl, max_kl],
#)
#self.opt_info = dict(
# f_kl=f_kl,
#)
@overrides
def optimize_policy(self, itr, all_samples_data):
logger.log("optimizing policy")
assert len(all_samples_data) == self.num_grad_updates + 1
if not self.use_maml:
all_samples_data = [all_samples_data[0]]
input_list = []
for step in range(len(all_samples_data)):
obs_list, action_list, adv_list = [], [], []
for i in range(self.meta_batch_size):
inputs = ext.extract(
all_samples_data[step][i],
"observations", "actions", "advantages"
)
obs_list.append(inputs[0])
action_list.append(inputs[1])
adv_list.append(inputs[2])
input_list += obs_list + action_list + adv_list
if step == 0:
init_inputs = input_list
loss_before = self.optimizer.loss(input_list)
self.optimizer.optimize(input_list)
loss_after = self.optimizer.loss(input_list)
logger.record_tabular("LossBefore", loss_before)
logger.record_tabular("LossAfter", loss_after)
dist_info_list = []
for i in range(self.meta_batch_size):
agent_infos = all_samples_data[-1][i]['agent_infos']
dist_info_list += [agent_infos[k] for k in self.policy.distribution.dist_info_keys]
if self.use_maml:
mean_kl, max_kl = self.opt_info['f_kl'](*(list(input_list) + dist_info_list))
logger.record_tabular('MeanKL', mean_kl)
logger.record_tabular('MaxKL', max_kl)
@overrides
def get_itr_snapshot(self, itr, samples_data):
return dict(
itr=itr,
policy=self.policy,
baseline=self.baseline,
env=self.env,
)
| 38.892157 | 143 | 0.594026 |
b67833270026f6660c7a1d463d07927c3b5e71c3 | 1,152 | py | Python | hardwareControl2/main.py | untrobotics/IEEE-2019-R5 | 799bffc95b7be1c939d1ad1858b10faabb3cc842 | [
"MIT"
] | null | null | null | hardwareControl2/main.py | untrobotics/IEEE-2019-R5 | 799bffc95b7be1c939d1ad1858b10faabb3cc842 | [
"MIT"
] | 6 | 2019-03-06T01:10:24.000Z | 2020-06-17T05:04:43.000Z | hardwareControl2/main.py | untrobotics/IEEE-2019-R5 | 799bffc95b7be1c939d1ad1858b10faabb3cc842 | [
"MIT"
] | 3 | 2019-03-01T05:11:39.000Z | 2019-11-22T15:01:02.000Z | from lidar import lidarControl
from hardwareControl import hardwareControl
from yaw import yaw
import threading
import time
import math
# Competition round being run; rounds differ in how many blocks and
# obstacles are placed on the field.
ROUND = 1

# (number of blocks, number of obstacles) for each round; any round beyond
# 2 uses the largest layout.
_ROUND_LAYOUT = {1: (2, 5), 2: (4, 10)}
NUMOFBLOCKS, NUMOFOBS = _ROUND_LAYOUT.get(ROUND, (6, 15))
# Shared hardware interfaces: yaw sensor, LIDAR, and the motor controller
# that coordinates both.
yawObj = yaw()
lidarObj = lidarControl()
controller = hardwareControl(lidarObj, yawObj)
def backgroundLoop():
    """Continuously service the yaw sensor, polling at roughly 100 Hz."""
    while True:
        yawObj.loop()
        time.sleep(.01)
def foreground():
    """Foreground task: issue a single drive command to the controller."""
    # Earlier experiments, kept for reference:
    #   print("LIDAR: ", lidarObj.getReading())
    #   controller.initScan()   # call the c++ file to write to code
    #   controller.readandMove()
    left_speed = 1
    right_speed = 1
    controller.drive(left_speed, right_speed)
# Run the yaw-sensor polling loop in the background while the foreground
# thread issues drive commands.
b = threading.Thread(name='background', target=backgroundLoop)
f = threading.Thread(name='foreground', target=foreground)
# NOTE(review): threads are non-daemon, so the process cannot exit while the
# background loop runs — confirm this is intended.
b.start()
f.start()
#controller.move(1,2)
# 360 scan
# detect objects
# MAYBE: openCV
# for each block
# make path
# nav to 1st block
# pickup block procedure
# make path to mothership
# drop blocks off
# line up with first bin
# foreach block
#line up bin
# drop block
# move left
# RTH
| 15.157895 | 62 | 0.646701 |
87416f44f0b814f4a645e9085242e3b86d3a1030 | 4,709 | py | Python | src/sage/coding/codes_catalog.py | fchapoton/sage | 765c5cb3e24dd134708eca97e4c52e0221cd94ba | [
"BSL-1.0"
] | 4 | 2020-07-17T04:49:44.000Z | 2020-07-29T06:33:51.000Z | src/sage/coding/codes_catalog.py | Ivo-Maffei/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | [
"BSL-1.0"
] | null | null | null | src/sage/coding/codes_catalog.py | Ivo-Maffei/sage | 467fbc70a08b552b3de33d9065204ee9cbfb02c7 | [
"BSL-1.0"
] | 3 | 2020-03-29T17:13:36.000Z | 2021-05-03T18:11:28.000Z | r"""
Index of code constructions
The ``codes`` object may be used to access the codes that Sage can build.
Families of Codes (Rich representation)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: @
:meth:`~sage.coding.parity_check_code.ParityCheckCode` @ Parity check codes
:meth:`~sage.coding.cyclic_code.CyclicCode` @ Cyclic codes
:meth:`~sage.coding.bch_code.BCHCode` @ BCH Codes
:meth:`~sage.coding.grs_code.GeneralizedReedSolomonCode` @ Generalized Reed-Solomon codes
:meth:`~sage.coding.grs_code.ReedSolomonCode` @ Reed-Solomon codes
:meth:`~sage.coding.reed_muller_code.BinaryReedMullerCode` @ Binary Reed-Muller codes
:meth:`~sage.coding.reed_muller_code.ReedMullerCode` @ q-ary Reed-Muller codes
:meth:`~sage.coding.hamming_code.HammingCode` @ Hamming codes
:meth:`~sage.coding.golay_code.GolayCode` @ Golay codes
:meth:`~sage.coding.goppa_code.GoppaCode` @ Goppa codes
Families of Codes (Generator matrix representation)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: @
:meth:`~sage.coding.code_constructions.DuadicCodeEvenPair` @ Duadic codes, even pair
:meth:`~sage.coding.code_constructions.DuadicCodeOddPair` @ Duadic codes, odd pair
:meth:`~sage.coding.code_constructions.QuadraticResidueCode` @ Quadratic residue codes
:meth:`~sage.coding.code_constructions.ExtendedQuadraticResidueCode` @ Extended quadratic residue codes
:meth:`~sage.coding.code_constructions.QuadraticResidueCodeEvenPair` @ Even-like quadratic residue codes
:meth:`~sage.coding.code_constructions.QuadraticResidueCodeOddPair` @ Odd-like quadratic residue codes
:meth:`~sage.coding.guava.QuasiQuadraticResidueCode` @ Quasi quadratic residue codes (Requires GAP/Guava)
:meth:`~sage.coding.code_constructions.ToricCode` @ Toric codes
:meth:`~sage.coding.code_constructions.WalshCode` @ Walsh codes
:meth:`~sage.coding.code_constructions.from_parity_check_matrix` @ Construct a code from a parity check matrix
:meth:`~sage.coding.code_constructions.random_linear_code` @ Construct a random linear code
:meth:`~sage.coding.guava.RandomLinearCodeGuava` @ Construct a random linear code through Guava (Requires GAP/Guava)
Derived Codes
^^^^^^^^^^^^^
.. csv-table::
:class: contentstable
:widths: 30, 70
:delim: @
:meth:`~sage.coding.subfield_subcode.SubfieldSubcode` @ Subfield subcodes
:meth:`~sage.coding.extended_code.ExtendedCode` @ Extended codes
:meth:`~sage.coding.punctured_code.PuncturedCode` @ Puncturedcodes
.. NOTE::
To import these names into the global namespace, use:
sage: from sage.coding.codes_catalog import *
"""
#*****************************************************************************
# Copyright (C) 2009 David Lucas <david.lucas@inria.fr>
#
# Distributed under the terms of the GNU General Public License (GPL),
# version 2 or later (at your preference).
#
# https://www.gnu.org/licenses/
#*****************************************************************************
from __future__ import absolute_import
# This module is imported as "codes" in all.py so that codes.<tab> is
# available in the global namespace.
from sage.misc.lazy_import import lazy_import as _lazy_import
from .linear_code import LinearCode
_lazy_import('sage.coding.code_constructions',
['DuadicCodeEvenPair', 'DuadicCodeOddPair',
'ExtendedQuadraticResidueCode', 'from_parity_check_matrix',
'QuadraticResidueCode', 'QuadraticResidueCodeEvenPair',
'QuadraticResidueCodeOddPair', 'random_linear_code',
'ToricCode', 'WalshCode'])
_lazy_import('sage.coding.subfield_subcode', 'SubfieldSubcode')
_lazy_import('sage.coding.extended_code', 'ExtendedCode')
_lazy_import('sage.coding.punctured_code', 'PuncturedCode')
_lazy_import('sage.coding.parity_check_code', 'ParityCheckCode')
_lazy_import('sage.coding.cyclic_code', 'CyclicCode')
_lazy_import('sage.coding.bch_code', 'BCHCode')
_lazy_import('sage.coding.grs_code', ['GeneralizedReedSolomonCode', 'ReedSolomonCode'])
_lazy_import('sage.coding.reed_muller_code', ['BinaryReedMullerCode', 'ReedMullerCode'])
_lazy_import('sage.coding.hamming_code', 'HammingCode')
_lazy_import('sage.coding.golay_code', 'GolayCode')
_lazy_import('sage.coding.goppa_code', 'GoppaCode')
_lazy_import('sage.coding.guava', ['QuasiQuadraticResidueCode', 'RandomLinearCodeGuava'])
from . import decoders_catalog as decoders
from . import encoders_catalog as encoders
from . import bounds_catalog as bounds
_lazy_import('sage.coding','databases')
| 42.423423 | 120 | 0.71289 |
f8c0051562c92b9aa22af6c040179e439d9e292a | 107,027 | py | Python | pysb/macros.py | maurosilber/pysb | 30eb4158b9d28eff704b883c2139387a89d9c5eb | [
"BSD-2-Clause"
] | 1 | 2019-10-23T16:29:29.000Z | 2019-10-23T16:29:29.000Z | pysb/macros.py | zhwycsz/pysb | d1afd8bed83cc09476ea871ffcc106b18498dc7f | [
"BSD-2-Clause"
] | null | null | null | pysb/macros.py | zhwycsz/pysb | d1afd8bed83cc09476ea871ffcc106b18498dc7f | [
"BSD-2-Clause"
] | null | null | null | """
A collection of generally useful modeling macros.
These macros are written to be as generic and reusable as possible, serving as a
collection of best practices and implementation ideas. They conform to the
following general guidelines:
* All components created by the macro are implicitly added to the current model
and explicitly returned in a ComponentSet.
* Parameters may be passed as Parameter or Expression objects, or as plain
numbers for which Parameter objects will be automatically created using an
appropriate naming convention.
* Arguments which accept a MonomerPattern should also accept Monomers, which are
to be interpreted as MonomerPatterns on that Monomer with an empty condition
list. This is typically implemented by having the macro apply the "call"
(parentheses) operator to the argument with an empty argument list and using
the resulting value instead of the original argument when creating Rules, e.g.
``arg = arg()``. Calling a Monomer will return a MonomerPattern, and calling a
MonomerPattern will return a copy of itself, so calling either is guaranteed
to return a MonomerPattern.
The _macro_rule helper function contains much of the logic needed to follow
these guidelines. Every macro in this module either uses _macro_rule directly or
calls another macro which does.
Another useful function is _verify_sites which will raise an exception if a
Monomer or MonomerPattern does not possess every one of a given list of sites.
This can be used to trigger such errors up front rather than letting an
exception occur at the point where the macro tries to use the invalid site in a
pattern, which can be harder for the caller to debug.
"""
import inspect
from pysb import *
import pysb.core
from pysb.core import ComponentSet, as_reaction_pattern, as_complex_pattern, MonomerPattern, ComplexPattern
import numbers
import functools
import itertools
# Public API of this module.
__all__ = ['equilibrate',
           'bind', 'bind_table',
           'catalyze', 'catalyze_state', 'catalyze_complex',
           'catalyze_one_step', 'catalyze_one_step_reversible',
           'synthesize', 'degrade', 'synthesize_degrade_table',
           'assemble_pore_sequential', 'pore_transport', 'pore_bind', 'assemble_chain_sequential_base',
           'bind_complex', 'bind_table_complex', 'drug_binding']
# Suppress ModelExistsWarnings in our doctests.
_pysb_doctest_suppress_modelexistswarning = True
# Internal helper functions
# =========================
def _complex_pattern_label(cp):
"""Return a string label for a ComplexPattern."""
if cp is None:
return ''
mp_labels = [_monomer_pattern_label(mp) for mp in cp.monomer_patterns]
return ''.join(mp_labels)
def _monomer_pattern_label(mp):
"""Return a string label for a MonomerPattern."""
site_values = [str(x) for x in mp.site_conditions.values()
if x is not None
and not isinstance(x, list)
and not isinstance(x, tuple)
and not isinstance(x, numbers.Real)]
return mp.monomer.name + ''.join(site_values)
def _rule_name_generic(rule_expression):
    """Return a generic '<reactants>_to_<products>' label for a
    RuleExpression."""
    def _side_label(reaction_pattern):
        # Join the labels of every complex on one side of the rule.
        return '_'.join(_complex_pattern_label(cp)
                        for cp in reaction_pattern.complex_patterns)
    lhs = _side_label(rule_expression.reactant_pattern)
    rhs = _side_label(rule_expression.product_pattern)
    return '%s_to_%s' % (lhs, rhs)
def _macro_rule(rule_prefix, rule_expression, klist, ksuffixes,
                name_func=_rule_name_generic):
    """
    A helper function for writing macros that generates a single rule.
    Parameters
    ----------
    rule_prefix : string
        The prefix that is prepended to the (automatically generated) name for
        the rule.
    rule_expression : RuleExpression
        An expression specifying the form of the rule; gets passed directly
        to the Rule constructor.
    klist : list of Parameters or Expressions, or list of numbers
        If the rule is unidirectional, the list must contain one element
        (either a Parameter/Expression or number); if the rule is reversible,
        it must contain two elements. If the rule is reversible, the first
        element in the list is taken to be the forward rate, and the second
        element is taken as the reverse rate.
    ksuffixes : list of strings
        If klist contains numbers rather than Parameters or Expressions, the
        strings in ksuffixes are used to automatically generate the necessary
        Parameter objects. The suffixes are appended to the rule name to
        generate the associated parameter name. ksuffixes must contain one
        element if the rule is unidirectional, two if it is reversible.
    name_func : function, optional
        A function which takes a RuleExpression and returns a string label for
        it, to be called as part of the automatic rule name generation. If not
        provided, a built-in default naming function will be used.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains the generated Rule and up to two
        generated Parameter objects (if klist was given as numbers).
    Notes
    -----
    The default naming scheme (if `name_func` is not passed) follows the form::
        '%s_%s_to_%s' % (rule_prefix, lhs_label, rhs_label)
    where lhs_label and rhs_label are each concatenations of the Monomer names
    and specified sites in the ComplexPatterns on each side of the
    RuleExpression. The actual implementation is in the function
    _rule_name_generic, which in turn calls _complex_pattern_label and
    _monomer_pattern_label. For some specialized reactions it may be helpful to
    devise a custom naming scheme rather than rely on this default.
    Examples
    --------
    Using distinct Monomers for substrate and product::
        >>> from pysb import *
        >>> from pysb.macros import _macro_rule
        >>>
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('A', ['s'])
        Monomer('A', ['s'])
        >>> Monomer('B', ['s'])
        Monomer('B', ['s'])
        >>>
        >>> _macro_rule('bind', A(s=None) + B(s=None) | A(s=1) % B(s=1),
        ...             [1e6, 1e-1], ['kf', 'kr']) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_A_B_to_AB', A(s=None) + B(s=None) | A(s=1) % B(s=1),
             bind_A_B_to_AB_kf, bind_A_B_to_AB_kr),
         Parameter('bind_A_B_to_AB_kf', 1000000.0),
         Parameter('bind_A_B_to_AB_kr', 0.1),
         ])
    """
    r_name = '%s_%s' % (rule_prefix, name_func(rule_expression))
    # If rule is unidirectional, make sure we only have one parameter
    if (not rule_expression.is_reversible):
        if len(klist) != 1 or len(ksuffixes) != 1:
            raise ValueError("A unidirectional rule must have one parameter.")
    # If rule is bidirectional, make sure we have two parameters
    else:
        if len(klist) != 2 or len(ksuffixes) != 2:
            raise ValueError("A bidirectional rule must have two parameters.")
    # Rates given as Parameter/Expression objects are used as-is.
    if all(isinstance(x, (Parameter, Expression)) for x in klist):
        k1 = klist[0]
        if rule_expression.is_reversible:
            k2 = klist[1]
        params_created = ComponentSet()
    # if klist is numbers, generate the Parameters
    # (note: constructing a Parameter implicitly registers it with the
    # current model via pysb's self-export mechanism)
    elif all(isinstance(x, numbers.Real) for x in klist):
        k1 = Parameter('%s_%s' % (r_name, ksuffixes[0]), klist[0])
        params_created = ComponentSet([k1])
        if rule_expression.is_reversible:
            k2 = Parameter('%s_%s' % (r_name, ksuffixes[1]),
                           klist[1])
            params_created.add(k2)
    else:
        raise ValueError("klist must contain Parameters, Expressions, or numbers.")
    if rule_expression.is_reversible:
        r = Rule(r_name, rule_expression, k1, k2)
    else:
        r = Rule(r_name, rule_expression, k1)
    # Build a set of components that were created
    return ComponentSet([r]) | params_created
def _verify_sites(m, *site_list):
    """
    Check that the monomer (or complex) m contains all sites in site_list.

    Parameters
    ----------
    m : Monomer, MonomerPattern, or ComplexPattern
        The monomer or complex to check.
    site1, site2, ... : string
        One or more site names to check on m.

    Returns
    -------
    True if m is a monomer containing all the sites; for a ComplexPattern,
    the per-monomer site mapping from _verify_sites_complex.

    Raises
    ------
    ValueError
        If any of the sites are not found.
    """
    if isinstance(m, ComplexPattern):
        # Complexes need a per-monomer site search.
        return _verify_sites_complex(m, *site_list)
    # Calling a Monomer/MonomerPattern yields a MonomerPattern either way.
    monomer = m().monomer
    missing = [s for s in site_list if s not in monomer.sites]
    if missing:
        raise ValueError("Monomer '%s' must contain the site '%s'" %
                         (monomer.name, missing[0]))
    return True
def _verify_sites_complex(c, *site_list):
"""
Checks that the complex c contains all of the sites in site_list.
Parameters
----------
c : ComplexPattern
The complex to check.
site1, site2, ... : string
One or more site names to check on c
Returns
-------
If all sites are found within the complex, a dictionary of monomers and the sites within site_list they contain. Raises a ValueError if one or more sites not in the complex.
Raises
------
ValueError
If any of the sites are not found within the complex.
"""
allsitesdict = {}
for mon in c.monomer_patterns:
allsitesdict[mon] = mon.monomer.sites
for site in site_list:
specsitesdict = {}
for monomer, li in allsitesdict.items():
for s in li:
if site in li:
specsitesdict[monomer] = site
if len(specsitesdict) == 0:
raise ValueError("Site '%s' not found in complex '%s'" % (site, c))
return specsitesdict
# Unimolecular patterns
# =====================
def equilibrate(s1, s2, klist):
    """
    Generate the unimolecular reversible equilibrium reaction S1 <-> S2.
    Parameters
    ----------
    s1, s2 : Monomer or MonomerPattern
        S1 and S2 in the above reaction.
    klist : list of 2 Parameters or list of 2 numbers
        Forward (S1 -> S2) and reverse rate constants (in that order). If
        Parameters are passed, they will be used directly in the generated
        Rules. If numbers are passed, Parameters will be created with
        automatically generated names based on the names and states of S1 and S2
        and these parameters will be included at the end of the returned
        component list.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains one reversible Rule and optionally
        two Parameters if klist was given as plain numbers.
    Examples
    --------
    Simple two-state equilibrium between A and B::
        Model()
        Monomer('A')
        Monomer('B')
        equilibrate(A(), B(), [1, 1])
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('A')
        Monomer('A')
        >>> Monomer('B')
        Monomer('B')
        >>> equilibrate(A(), B(), [1, 1]) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('equilibrate_A_to_B', A() | B(), equilibrate_A_to_B_kf, equilibrate_A_to_B_kr),
         Parameter('equilibrate_A_to_B_kf', 1.0),
         Parameter('equilibrate_A_to_B_kr', 1.0),
         ])
    """
    # The | operator builds the reversible RuleExpression; bare Monomers are
    # accepted because pattern operators promote them to MonomerPatterns.
    return _macro_rule('equilibrate', s1 | s2, klist, ['kf', 'kr'])
# Binding
# =======
def bind(s1, site1, s2, site2, klist):
    """
    Generate the reversible binding reaction S1 + S2 | S1:S2.
    Parameters
    ----------
    s1, s2 : Monomer or MonomerPattern
        Monomers participating in the binding reaction.
    site1, site2 : string
        The names of the sites on s1 and s2 used for binding.
    klist : list of 2 Parameters or list of 2 numbers
        Forward and reverse rate constants (in that order). If Parameters are
        passed, they will be used directly in the generated Rules. If numbers
        are passed, Parameters will be created with automatically generated
        names based on the names and states of S1 and S2 and these parameters
        will be included at the end of the returned component list.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains the bidirectional binding Rule
        and optionally two Parameters if klist was given as numbers.
    Examples
    --------
    Binding between A and B::
        Model()
        Monomer('A', ['x'])
        Monomer('B', ['y'])
        bind(A, 'x', B, 'y', [1e-4, 1e-1])
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('A', ['x'])
        Monomer('A', ['x'])
        >>> Monomer('B', ['y'])
        Monomer('B', ['y'])
        >>> bind(A, 'x', B, 'y', [1e-4, 1e-1]) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_A_B', A(x=None) + B(y=None) | A(x=1) % B(y=1), bind_A_B_kf, bind_A_B_kr),
         Parameter('bind_A_B_kf', 0.0001),
         Parameter('bind_A_B_kr', 0.1),
         ])
    """
    _verify_sites(s1, site1)
    _verify_sites(s2, site2)
    # Unbound reactants on the left; bond number 1 joins them on the right.
    free_s1 = s1(**{site1: None})
    free_s2 = s2(**{site2: None})
    bound = s1(**{site1: 1}) % s2(**{site2: 1})
    return _macro_rule('bind', free_s1 + free_s2 | bound,
                       klist, ['kf', 'kr'], name_func=bind_name_func)
def bind_name_func(rule_expression):
    """Name a binding rule from its reactant-side complexes only."""
    labels = [_complex_pattern_label(cp)
              for cp in rule_expression.reactant_pattern.complex_patterns]
    return '_'.join(labels)
def bind_complex(s1, site1, s2, site2, klist, m1=None, m2=None):
    """
    Generate the reversible binding reaction ``S1 + S2 | S1:S2``,
    with optional complexes attached to either
    ``S1`` (``C1:S1 + S2 | C1:S1:S2``), ``S2`` (``S1 + C2:S2 | C2:S2:S1``),
    or both (``C1:S1 + C2:S2 | C1:S1:S2:C2``).
    Parameters
    ----------
    s1, s2 : Monomer, MonomerPattern, or ComplexPattern
        Monomers or complexes participating in the binding reaction.
    site1, site2 : string
        The names of the sites on s1 and s2 used for binding.
    klist : list of 2 Parameters or list of 2 numbers
        Forward and reverse rate constants (in that order). If Parameters are
        passed, they will be used directly in the generated Rules. If numbers
        are passed, Parameters will be created with automatically generated
        names based on the names and states of S1 and S2 and these parameters
        will be included at the end of the returned component list.
    m1, m2 : Monomer or MonomerPattern
        If s1 or s2 binding site is present in multiple monomers
        within a complex, the specific monomer desired for binding must be specified.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains the bidirectional binding Rule
        and optionally two Parameters if klist was given as numbers.
    Examples
    --------
    Binding between ``A:B`` and ``C:D``:
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' ...>
        >>> Monomer('A', ['a', 'b'])
        Monomer('A', ['a', 'b'])
        >>> Monomer('B', ['c', 'd'])
        Monomer('B', ['c', 'd'])
        >>> Monomer('C', ['e', 'f'])
        Monomer('C', ['e', 'f'])
        >>> Monomer('D', ['g', 'h'])
        Monomer('D', ['g', 'h'])
        >>> bind_complex(A(a=1) % B(c=1), 'b', C(e=2) % D(g=2), 'h', [1e-4, \
            1e-1]) #doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_AB_DC', A(a=1, b=None) % B(c=1) + D(g=3, h=None) % C(e=3)
             | A(a=1, b=50) % B(c=1) % D(g=3, h=50) % C(e=3), bind_AB_DC_kf,
             bind_AB_DC_kr),
         Parameter('bind_AB_DC_kf', 0.0001),
         Parameter('bind_AB_DC_kr', 0.1),
         ])
    Execution:
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' ...>
        >>> Monomer('A', ['a', 'b'])
        Monomer('A', ['a', 'b'])
        >>> Monomer('B', ['c', 'd'])
        Monomer('B', ['c', 'd'])
        >>> Monomer('C', ['e', 'f'])
        Monomer('C', ['e', 'f'])
        >>> Monomer('D', ['g', 'h'])
        Monomer('D', ['g', 'h'])
        >>> bind(A, 'a', B, 'c', [1e4, 1e-1]) #doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_A_B',
             A(a=None) + B(c=None) | A(a=1) % B(c=1),
             bind_A_B_kf, bind_A_B_kr),
         Parameter('bind_A_B_kf', 10000.0),
         Parameter('bind_A_B_kr', 0.1),
         ])
        >>> bind(C, 'e', D, 'g', [1e4, 1e-1]) #doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_C_D',
             C(e=None) + D(g=None) | C(e=1) % D(g=1),
             bind_C_D_kf, bind_C_D_kr),
         Parameter('bind_C_D_kf', 10000.0),
         Parameter('bind_C_D_kr', 0.1),
         ])
        >>> bind_complex(A(a=1) % B(c=1), 'b', C(e=2) % D(g=2), 'h', [1e-4, \
            1e-1]) #doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_AB_DC',
             A(a=1, b=None) % B(c=1) + D(g=3, h=None) % C(e=3) | A(a=1,
             b=50) % B(c=1) % D(g=3, h=50) % C(e=3),
             bind_AB_DC_kf, bind_AB_DC_kr),
         Parameter('bind_AB_DC_kf', 0.0001),
         Parameter('bind_AB_DC_kr', 0.1),
         ])
    """
    # Normalize bare Monomers to MonomerPatterns.
    if isinstance(m1, Monomer):
        m1 = m1()
    if isinstance(m2, Monomer):
        m2 = m2()
    #Define some functions for checking complex sites, building complexes up from monomers, and creating rules.
    def comp_mono_func(s1, site1, s2, site2, m1):
        # Handle the complex-plus-monomer case: s1 is the complex, s2 the
        # free monomer. Bond number 50 is used as the new bond label.
        _verify_sites(s2, site2)
        #Retrieve a dictionary specifying the MonomerPattern within the complex that contains the given binding site.
        specsites = list(_verify_sites_complex(s1, site1))
        s1complexpatub, s1complexpatb = check_sites_comp_build(s1, site1, m1, specsites)
        return create_rule(s1complexpatub, s1complexpatb, s2({site2:None}), s2({site2: 50}))
    def check_sites_comp_build(s1, site1, m1, specsites):
        # Returns (unbound, bound) ComplexPatterns for s1 with site1 set to
        # None and to bond 50, respectively.
        #Return error if binding site exists on multiple monomers and a monomer for binding (m1) hasn't been specified.
        if len(specsites) > 1 and m1==None:
            raise ValueError("Binding site '%s' present in more than one monomer in complex '%s'. Specify variable m1, the monomer used for binding within the complex." % (site1, s1))
        if not s1.is_concrete:
            raise ValueError("Complex '%s' must be concrete." % (s1))
        #If the given binding site is only present in one monomer in the complex:
        if m1==None:
            #Build up ComplexPattern for use in rule (with state of given binding site specified).
            s1complexpatub = specsites[0]({site1:None})
            s1complexpatb = specsites[0]({site1:50})
            for monomer in s1.monomer_patterns:
                if monomer not in specsites:
                    s1complexpatub %= monomer
                    s1complexpatb %= monomer
        #If the binding site is present on more than one monomer in the complex, the monomer must be specified by the user. Use specified m1 to build ComplexPattern.
        else:
            #Make sure binding states of MonomerPattern m1 match those of the monomer within the ComplexPattern s1 (ComplexPattern monomer takes precedence if not).
            i = 0
            identical_monomers = []
            other_monomers = []
            for mon in s1.monomer_patterns:
                #Only change the binding site for the first monomer that matches. Keep any others unchanged to add to final complex that is returned.
                if mon.monomer.name == m1.monomer.name and mon.site_conditions==m1.site_conditions:
                    i += 1
                    if i == 1:
                        s1complexpatub = mon({site1:None})
                        s1complexpatb = mon({site1:50})
                    else:
                        identical_monomers.append(mon)
                else:
                    other_monomers.append(mon)
            #Throw an error if no monomer pattern in the complex matched the pattern given for m1
            if i == 0:
                raise ValueError("No monomer pattern in complex '%s' matches the pattern given for m1, '%s'." % (s1, m1))
            #Build up ComplexPattern for use in rule (with state of given binding site on m1 specified).
            for mon in other_monomers:
                s1complexpatub %= mon
                s1complexpatb %= mon
            if identical_monomers:
                for i in range(len(identical_monomers)):
                    s1complexpatub %= identical_monomers[i]
                    s1complexpatb %= identical_monomers[i]
        return s1complexpatub, s1complexpatb
    #Create rules.
    def create_rule(s1ub, s1b, s2ub, s2b):
        return _macro_rule('bind',
                           s1ub + s2ub |
                           s1b % s2b,
                           klist, ['kf', 'kr'], name_func=bind_name_func)
    #If no complexes given, revert to normal bind macro.
    if (isinstance(s1, MonomerPattern) or isinstance(s1, Monomer)) and (isinstance(s2, MonomerPattern) or isinstance(s2, Monomer)):
        _verify_sites(s1, site1)
        _verify_sites(s2, site2)
        return bind(s1, site1, s2, site2, klist)
    #Create rules if only one complex or the other is present.
    elif isinstance(s1, ComplexPattern) and (isinstance(s2, MonomerPattern) or isinstance(s2, Monomer)):
        return comp_mono_func(s1, site1, s2, site2, m1)
    elif (isinstance(s1, MonomerPattern) or isinstance(s1, Monomer)) and isinstance(s2, ComplexPattern):
        return comp_mono_func(s2, site2, s1, site1, m2)
    #Create rule when both s1 and s2 are complexes.
    else:
        #Retrieve a dictionary specifying the MonomerPattern within
        #the complex that contains the given binding site. Convert to list.
        specsites1 = list(_verify_sites_complex(s1, site1))
        specsites2 = list(_verify_sites_complex(s2, site2))
        #Return error if binding site exists on multiple monomers and a monomer for binding (m1/m2) hasn't been specified.
        if len(specsites1) > 1 and m1==None:
            raise ValueError("Binding site '%s' present in more than one monomer in complex '%s'. Specify variable m1, the monomer used for binding within the complex." % (site1, s1))
        if len(specsites2) > 1 and m2==None:
            raise ValueError("Binding site '%s' present in more than one monomer in complex '%s'. Specify variable m2, the monomer used for binding within the complex." % (site2, s2))
        if not s1.is_concrete:
            raise ValueError("Complex '%s' must be concrete." % (s1))
        if not s2.is_concrete:
            raise ValueError("Complex '%s' must be concrete." % (s2))
        #To avoid creating rules with multiple bonds to the same site when combining the two complexes, check for the maximum bond integer in s1 and add to all s2 bond integers.
        maxint = 0
        for monomer in s1.monomer_patterns:
            for stateint in monomer.site_conditions.values():
                if isinstance(stateint, int):
                    if stateint > maxint:
                        maxint = stateint
        match = 'N'
        # NOTE(review): this loop mutates monomer.site_conditions of s2's
        # patterns *in place* (and rebinds m2.site_conditions to the shifted
        # dict), so the caller's pattern objects are modified — confirm this
        # is intended, as re-using s2 after this call would see shifted bonds.
        for monomer in s2.monomer_patterns:
            if m2 is not None:
                if m2.site_conditions == monomer.site_conditions and m2.monomer.name == monomer.monomer.name:
                    match = 'Y'
            for site, stateint in monomer.site_conditions.items():
                if isinstance(stateint, int):
                    monomer.site_conditions[site] += maxint
            if match == 'Y':
                m2.site_conditions = monomer.site_conditions
                match = 'N'
        #Actually create rules
        s1complexpatub, s1complexpatb = check_sites_comp_build(s1, site1, m1, specsites1)
        s2complexpatub, s2complexpatb = check_sites_comp_build(s2, site2, m2, specsites2)
        return create_rule(s1complexpatub, s1complexpatb, s2complexpatub, s2complexpatb)
def bind_table(bindtable, row_site, col_site, kf=None):
    """
    Generate a table of reversible binding reactions.
    Given two lists of species R and C, calls the `bind` macro on each pairwise
    combination (R[i], C[j]). The species lists and the parameter values are
    passed as a list of lists (i.e. a table) with elements of R passed as the
    "row headers", elements of C as the "column headers", and forward / reverse
    rate pairs (in that order) as tuples in the "cells". For example with two
    elements in each of R and C, the table would appear as follows (note that
    the first row has one fewer element than the subsequent rows)::
        [[              C1,           C2],
         [R1, (1e-4, 1e-1), (2e-4, 2e-1)],
         [R2, (3e-4, 3e-1), (4e-4, 4e-1)]]
    Each parameter tuple may contain Parameters or numbers. If Parameters are
    passed, they will be used directly in the generated Rules. If numbers are
    passed, Parameters will be created with automatically generated names based
    on the names and states of the relevant species and these parameters will be
    included at the end of the returned component list. To omit any individual
    reaction, pass None in place of the corresponding parameter tuple.
    Alternately, single kd values (dissociation constant, kr/kf) may be
    specified instead of (kf, kr) tuples. If kds are used, a single shared kf
    Parameter or number must be passed as an extra `kf` argument. kr values for
    each binding reaction will be calculated as kd*kf. It is important to
    remember that the forward rate constant is a single parameter shared across
    the entire bind table, as this may have implications for parameter fitting.
    Parameters
    ----------
    bindtable : list of lists
        Table of reactants and rates, as described above.
    row_site, col_site : string
        The names of the sites on the elements of R and C, respectively, used
        for binding.
    kf : Parameter or number, optional
        If the "cells" in bindtable are given as single kd values, this is the
        shared kf used to calculate the kr values.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains the bidirectional binding Rules and
        optionally the Parameters for any parameters given as numbers.
    Examples
    --------
    Binding table for two species types (R and C), each with two members::
        Model()
        Monomer('R1', ['x'])
        Monomer('R2', ['x'])
        Monomer('C1', ['y'])
        Monomer('C2', ['y'])
        bind_table([[              C1,           C2],
                    [R1, (1e-4, 1e-1), (2e-4, 2e-1)],
                    [R2, (3e-4, 3e-1),         None]],
                   'x', 'y')
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('R1', ['x'])
        Monomer('R1', ['x'])
        >>> Monomer('R2', ['x'])
        Monomer('R2', ['x'])
        >>> Monomer('C1', ['y'])
        Monomer('C1', ['y'])
        >>> Monomer('C2', ['y'])
        Monomer('C2', ['y'])
        >>> bind_table([[              C1,           C2],
        ...             [R1, (1e-4, 1e-1), (2e-4, 2e-1)],
        ...             [R2, (3e-4, 3e-1),         None]],
        ...            'x', 'y') # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_R1_C1', R1(x=None) + C1(y=None) | R1(x=1) % C1(y=1),
             bind_R1_C1_kf, bind_R1_C1_kr),
         Parameter('bind_R1_C1_kf', 0.0001),
         Parameter('bind_R1_C1_kr', 0.1),
         Rule('bind_R1_C2', R1(x=None) + C2(y=None) | R1(x=1) % C2(y=1),
             bind_R1_C2_kf, bind_R1_C2_kr),
         Parameter('bind_R1_C2_kf', 0.0002),
         Parameter('bind_R1_C2_kr', 0.2),
         Rule('bind_R2_C1', R2(x=None) + C1(y=None) | R2(x=1) % C1(y=1),
             bind_R2_C1_kf, bind_R2_C1_kr),
         Parameter('bind_R2_C1_kf', 0.0003),
         Parameter('bind_R2_C1_kr', 0.3),
         ])
    """
    # Split the table into row species, column species and the rate matrix.
    # The first row holds the column headers; each subsequent row starts with
    # its row header followed by that row's rate cells.
    row_species = [entry[0] for entry in bindtable[1:]]
    col_species = bindtable[0]
    rate_matrix = [entry[1:] for entry in bindtable[1:]]
    # A bare number in any cell is a kd and requires the shared kf argument
    # from which the corresponding kr can be derived.
    if kf is None and any(isinstance(cell, numbers.Real) for cell in
                          itertools.chain.from_iterable(rate_matrix)):
        raise ValueError("must specify kf when using single kd values")
    # Call `bind` for every (row, column) pair that has a rate entry.
    components = ComponentSet()
    for rate_row, row_sp in zip(rate_matrix, row_species):
        for cell, col_sp in zip(rate_row, col_species):
            if cell is None:
                # None marks an omitted interaction.
                continue
            if isinstance(cell, numbers.Real):
                # Single kd given; expand to the (kf, kr) pair with kr = kd*kf.
                cell = (kf, cell * kf)
            components |= bind(row_sp(), row_site, col_sp(), col_site, cell)
    return components
def bind_table_complex(bindtable, row_site, col_site, m1=None, m2=None, kf=None):
    """
    Generate a table of reversible binding reactions when either the row or column species (or both) have a complex bound to them.
    Given two lists of species R and C (which can be complexes or monomers),
    calls the `bind_complex` macro on each pairwise
    combination (R[i], C[j]). The species lists and the parameter values are
    passed as a list of lists (i.e. a table) with elements of R passed as the
    "row headers", elements of C as the "column headers", and forward / reverse
    rate pairs (in that order) as tuples in the "cells". For example with two
    elements in each of R and C, the table would appear as follows (note that
    the first row has one fewer element than the subsequent rows)::
        [[              C1,           C2],
         [R1, (1e-4, 1e-1), (2e-4, 2e-1)],
         [R2, (3e-4, 3e-1), (4e-4, 4e-1)]]
    Each parameter tuple may contain Parameters or numbers. If Parameters are
    passed, they will be used directly in the generated Rules. If numbers are
    passed, Parameters will be created with automatically generated names based
    on the names and states of the relevant species and these parameters will be
    included at the end of the returned component list. To omit any individual
    reaction, pass None in place of the corresponding parameter tuple.
    Alternately, single kd values (dissociation constant, kr/kf) may be
    specified instead of (kf, kr) tuples. If kds are used, a single shared kf
    Parameter or number must be passed as an extra `kf` argument. kr values for
    each binding reaction will be calculated as kd*kf. It is important to
    remember that the forward rate constant is a single parameter shared across
    the entire bind table, as this may have implications for parameter fitting.
    Parameters
    ----------
    bindtable : list of lists
        Table of reactants and rates, as described above.
    row_site, col_site : string
        The names of the sites on the elements of R and C, respectively, used
        for binding.
    m1 : Monomer or MonomerPattern, optional
        Monomer in row complex for binding. Must be specified if there are multiple monomers
        that have the row_site within a complex.
    m2 : Monomer or MonomerPattern, optional
        Monomer in column complex for binding. Must be specified if there are multiple monomers
        that have the col_site within a complex.
    kf : Parameter or number, optional
        If the "cells" in bindtable are given as single kd values, this is the
        shared kf used to calculate the kr values.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains the bidirectional binding Rules and
        optionally the Parameters for any parameters given as numbers.
    Examples
    --------
    Binding table for two species types (R and C, which can be complexes or monomers)::
        Model()
        Monomer('R1', ['x', 'c1'])
        Monomer('R2', ['x', 'c1'])
        Monomer('C1', ['y', 'c2'])
        Monomer('C2', ['y', 'c2'])
        bind(C1(y=None), 'c2', C1(y=None), 'c2', (1e-3, 1e-2))
        bind(R1(x=None), 'c1', R2(x=None), 'c1', (1e-3, 1e-2))
        bind_table_complex([[  C1(c2=1, y=None)%C1(c2=1),             C2],
                           [R1()%R2(), (1e-4, 1e-1), (2e-4, 2e-1)],
                           [R2, (3e-4, 3e-1),         None]],
                           'x', 'y', m1=R1(), m2=C1(y=None, c2=1))
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('R1', ['x', 'c1'])
        Monomer('R1', ['x', 'c1'])
        >>> Monomer('R2', ['x', 'c1'])
        Monomer('R2', ['x', 'c1'])
        >>> Monomer('C1', ['y', 'c2'])
        Monomer('C1', ['y', 'c2'])
        >>> Monomer('C2', ['y', 'c2'])
        Monomer('C2', ['y', 'c2'])
        >>> bind(C1(y=None), 'c2', C1(y=None), 'c2', (1e-3, 1e-2)) #doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
        Rule('bind_C1_C1', C1(y=None, c2=None) + C1(y=None, c2=None) | C1(y=None, c2=1) % C1(y=None, c2=1), bind_C1_C1_kf, bind_C1_C1_kr),
        Parameter('bind_C1_C1_kf', 0.001),
        Parameter('bind_C1_C1_kr', 0.01),
        ])
        >>> bind(R1(x=None), 'c1', R2(x=None), 'c1', (1e-3, 1e-2)) #doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
        Rule('bind_R1_R2', R1(x=None, c1=None) + R2(x=None, c1=None) | R1(x=None, c1=1) % R2(x=None, c1=1), bind_R1_R2_kf, bind_R1_R2_kr),
        Parameter('bind_R1_R2_kf', 0.001),
        Parameter('bind_R1_R2_kr', 0.01),
        ])
        >>> bind_table_complex([[  C1(c2=1, y=None)%C1(c2=1),             C2],
        ...                    [R1()%R2(), (1e-4, 1e-1), (2e-4, 2e-1)],
        ...                    [R2, (3e-4, 3e-1),         None]],
        ...                    'x', 'y', m1=R1(), m2=C1(y=None, c2=1)) #doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_R1R2_C1C1', R1(x=None) % R2() + C1(y=None, c2=1) % C1(c2=1) | R1(x=50) % R2() % C1(y=50, c2=1) % C1(c2=1), bind_R1R2_C1C1_kf, bind_R1R2_C1C1_kr),
         Parameter('bind_R1R2_C1C1_kf', 0.0001),
         Parameter('bind_R1R2_C1C1_kr', 0.1),
         Rule('bind_R1R2_C2', R1(x=None) % R2() + C2(y=None) | R1(x=50) % R2() % C2(y=50), bind_R1R2_C2_kf, bind_R1R2_C2_kr),
         Parameter('bind_R1R2_C2_kf', 0.0002),
         Parameter('bind_R1R2_C2_kr', 0.2),
         Rule('bind_C1C1_R2', C1(y=None, c2=1) % C1(c2=1) + R2(x=None) | C1(y=50, c2=1) % C1(c2=1) % R2(x=50), bind_C1C1_R2_kf, bind_C1C1_R2_kr),
         Parameter('bind_C1C1_R2_kf', 0.0003),
         Parameter('bind_C1C1_R2_kr', 0.3),
         ])
    """
    # Split the table into row species, column species and the rate matrix
    # (first row is the column headers, first element of each later row is
    # that row's species).
    row_species = [entry[0] for entry in bindtable[1:]]
    col_species = bindtable[0]
    rate_matrix = [entry[1:] for entry in bindtable[1:]]
    # A bare number in any cell is a kd and requires the shared kf argument.
    if kf is None and any(isinstance(cell, numbers.Real) for cell in
                          itertools.chain.from_iterable(rate_matrix)):
        raise ValueError("must specify kf when using single kd values")
    # Call `bind_complex` for every (row, column) pair that has a rate entry.
    components = ComponentSet()
    for rate_row, row_sp in zip(rate_matrix, row_species):
        for cell, col_sp in zip(rate_row, col_species):
            if cell is None:
                # None marks an omitted interaction.
                continue
            if isinstance(cell, numbers.Real):
                # Single kd given; expand to the (kf, kr) pair with kr = kd*kf.
                cell = (kf, cell * kf)
            components |= bind_complex(row_sp, row_site, col_sp, col_site,
                                       cell, m1, m2)
    return components
def create_t_obs():
    """
    Generate a rule to simulate passing of time and create a time observable
    that can be used in complex Expression rates.
    .. warning::
        This macro is usually used to create rate laws that depend on time.
        Time tracking rate laws using this macro only work for deterministic simulations.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains the time monomer, Parameter rate of time creation,
        Rule to simulate passing of time, ime Observable.
    Examples
    --------
    Create rule to simulate passing of time and time observable::
        Model()
        create_t_obs()
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> create_t_obs()
        ComponentSet([
         Rule('synthesize___t', None >> __t(), __k_t),
         Monomer('__t'),
         Parameter('__k_t', 1.0),
         Observable('t', __t()),
         ])
    """
    # A dummy monomer synthesized at constant unit rate acts as a clock: its
    # copy number (exposed through the 't' observable) tracks simulation time
    # in a deterministic simulation.
    clock_monomer = Monomer('__t')
    clock_rate = Parameter('__k_t', 1)
    clock_obs = Observable('t', clock_monomer())
    components = synthesize(clock_monomer(), clock_rate)
    components |= [clock_monomer, clock_rate, clock_obs]
    return components
def drug_binding(drug, d_site, substrate, s_site, t_action, klist):
    """
    Generate the reversible binding reaction DRUG + SUBSTRATE | DRUG:SUBSTRATE
    that only gets triggered when the simulation reaches the time point t_action.
    The idea of this macro is to mimic experimental settings when a reaction is
    started and later on some kind of perturbation is added to the system.
    .. warning::
        This macro only works when a model is simulated using a deterministic simulator.
    Parameters
    ----------
    drug, substrate: Monomer or MonomerPattern
        Monomers participating in the binding reaction.
    d_site, s_site: string
        The names of the sites on s1 and s2 used for binding.
    t_action: float
        Time of the simulation at which the drug is added
    klist: list of 2 Parameters or list of 2 numbers
        Forward and reverse rate constants (in that order). If Parameters are
        passed, they will be used directly in the generated Rules. If numbers
        are passed, Parameters will be created with automatically generated
        names based on the names and states of S1 and S2 and these parameters
        will be included at the end of the returned component list.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains the bidirectional binding Rule,
        the time monomer, Parameter rate of time creation, Rule to simulate passing of time,
        time Observable, two Expression rates that take into account when the interaction
        between the drug and that substrate start to occur and optionally two Parameters
        if klist was given as numbers
        as numbers
    Examples
    --------
    Binding between drug and substrate::
        Model()
        Monomer('drug', ['b'])
        Monomer('substrate', ['b'])
        drug_binding(drug(), 'b', substrate(), 'b', 10, [2,4])
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('drug', ['b'])
        Monomer('drug', ['b'])
        >>> Monomer('substrate', ['b'])
        Monomer('substrate', ['b'])
        >>> drug_binding(drug(), 'b', substrate(), 'b', 10, [0.1, 0.01])
        ComponentSet([
         Rule('bind_drug_substrate_to_drugsubstrate', drug(b=None) + substrate(b=None) | drug(b=1) % substrate(b=1), kf_expr_drug_substrate, kr_expr_drug_substrate),
         Parameter('kf_drug_substrate', 0.1),
         Parameter('kr_drug_substrate', 0.01),
         Rule('synthesize___t', None >> __t(), __k_t),
         Monomer('__t'),
         Parameter('__k_t', 1.0),
         Observable('t', __t()),
         Expression('kf_expr_drug_substrate', (t > 10)*kf_drug_substrate),
         Expression('kr_expr_drug_substrate', (t > 10)*kr_drug_substrate),
         ])
    """
    # Validate that both species actually carry the named binding sites.
    _verify_sites(drug, d_site)
    _verify_sites(substrate, s_site)
    # Create a time observable using the create_t_obs macro
    components_time_obs = create_t_obs()
    # 't' is the Observable tracking simulation time (deterministic only).
    time_obs = components_time_obs.t
    # Set up some aliases to the patterns we'll use in the rules
    drug_free = drug({d_site: None})
    # retain any existing state for substrate's s_site, otherwise set it to None
    if s_site in substrate.site_conditions:
        substrate_free = substrate()
        # (existing_state, bond) tuple keeps the caller's state while binding.
        s_state = (substrate.site_conditions[s_site], 1)
    else:
        substrate_free = substrate({s_site: None})
        s_state = 1
    # Bound drug:substrate complex, joined through bond number 1.
    ds_complex = drug({d_site: 1}) % substrate({s_site: s_state})
    # Monomer names are used to build the auto-generated parameter/expression
    # names below.
    substrate_monomer_name = substrate.monomer.name
    drug_monomer_name = drug.monomer.name
    # Accept either ready-made Parameters/Expressions or raw numbers for the
    # forward/reverse rates; raw numbers get wrapped in new Parameters which
    # are also returned to the caller.
    if all(isinstance(x, (Parameter, Expression)) for x in klist):
        k1 = klist[0]
        k2 = klist[1]
        params_created = ComponentSet()
    elif all(isinstance(x, numbers.Real) for x in klist):
        k1 = Parameter('kf_{0}_{1}'.format(drug_monomer_name, substrate_monomer_name), klist[0])
        params_created = ComponentSet([k1])
        k2 = Parameter('kr_{0}_{1}'.format(drug_monomer_name, substrate_monomer_name), klist[1])
        params_created.add(k2)
    else:
        raise ValueError("klist must contain Parameters, Expressions, or numbers.")
    # Gate both rates on the time observable: (t > t_action) evaluates to 0
    # before t_action and 1 afterwards, switching the reaction on at t_action.
    kf_expr = Expression('kf_expr_{0}_{1}'.format(drug_monomer_name,
        substrate_monomer_name), (time_obs > t_action) * k1)
    kr_expr = Expression('kr_expr_{0}_{1}'.format(drug_monomer_name,
        substrate_monomer_name), (time_obs > t_action) * k2)
    bind_kpars = [kf_expr, kr_expr]
    # Collect the clock components plus the gating expressions so they are
    # returned after the binding rule (matches the doctest ordering above).
    components_added_macro = components_time_obs
    components_added_macro |= bind_kpars
    components = _macro_rule('bind', drug_free + substrate_free | ds_complex, bind_kpars, ['kf', 'kr'])
    components |= params_created
    components |= components_added_macro
    return components
# Catalysis
# =========
def catalyze(enzyme, e_site, substrate, s_site, product, klist):
    """
    Generate the two-step catalytic reaction E + S | E:S >> E + P.
    Parameters
    ----------
    enzyme, substrate, product : Monomer or MonomerPattern
        E, S and P in the above reaction.
    e_site, s_site : string
        The names of the sites on `enzyme` and `substrate` (respectively) where
        they bind each other to form the E:S complex.
    klist : list of 3 Parameters or list of 3 numbers
        Forward, reverse and catalytic rate constants (in that order). If
        Parameters are passed, they will be used directly in the generated
        Rules. If numbers are passed, Parameters will be created with
        automatically generated names based on the names and states of enzyme,
        substrate and product and these parameters will be included at the end
        of the returned component list.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains two Rules (bidirectional complex
        formation and unidirectional product dissociation), and optionally three
        Parameters if klist was given as plain numbers.
    Notes
    -----
    When passing a MonomerPattern for `enzyme` or `substrate`, do not include
    `e_site` or `s_site` in the respective patterns. The macro will handle this.
    Examples
    --------
    Using distinct Monomers for substrate and product::
        Model()
        Monomer('E', ['b'])
        Monomer('S', ['b'])
        Monomer('P')
        catalyze(E(), 'b', S(), 'b', P(), (1e-4, 1e-1, 1))
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('E', ['b'])
        Monomer('E', ['b'])
        >>> Monomer('S', ['b'])
        Monomer('S', ['b'])
        >>> Monomer('P')
        Monomer('P')
        >>> catalyze(E(), 'b', S(), 'b', P(), (1e-4, 1e-1, 1)) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_E_S_to_ES', E(b=None) + S(b=None) | E(b=1) % S(b=1),
             bind_E_S_to_ES_kf, bind_E_S_to_ES_kr),
         Parameter('bind_E_S_to_ES_kf', 0.0001),
         Parameter('bind_E_S_to_ES_kr', 0.1),
         Rule('catalyze_ES_to_E_P', E(b=1) % S(b=1) >> E(b=None) + P(),
             catalyze_ES_to_E_P_kc),
         Parameter('catalyze_ES_to_E_P_kc', 1.0),
         ])
    Using a single Monomer for substrate and product with a state change::
        Monomer('Kinase', ['b'])
        Monomer('Substrate', ['b', 'y'], {'y': ('U', 'P')})
        catalyze(Kinase(), 'b', Substrate(y='U'), 'b', Substrate(y='P'),
                 (1e-4, 1e-1, 1))
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Kinase', ['b'])
        Monomer('Kinase', ['b'])
        >>> Monomer('Substrate', ['b', 'y'], {'y': ('U', 'P')})
        Monomer('Substrate', ['b', 'y'], {'y': ('U', 'P')})
        >>> catalyze(Kinase(), 'b', Substrate(y='U'), 'b', Substrate(y='P'), (1e-4, 1e-1, 1)) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_Kinase_SubstrateU_to_KinaseSubstrateU',
             Kinase(b=None) + Substrate(b=None, y='U') | Kinase(b=1) % Substrate(b=1, y='U'),
             bind_Kinase_SubstrateU_to_KinaseSubstrateU_kf,
             bind_Kinase_SubstrateU_to_KinaseSubstrateU_kr),
         Parameter('bind_Kinase_SubstrateU_to_KinaseSubstrateU_kf', 0.0001),
         Parameter('bind_Kinase_SubstrateU_to_KinaseSubstrateU_kr', 0.1),
         Rule('catalyze_KinaseSubstrateU_to_Kinase_SubstrateP',
             Kinase(b=1) % Substrate(b=1, y='U') >> Kinase(b=None) + Substrate(b=None, y='P'),
             catalyze_KinaseSubstrateU_to_Kinase_SubstrateP_kc),
         Parameter('catalyze_KinaseSubstrateU_to_Kinase_SubstrateP_kc', 1.0),
         ])
    """
    _verify_sites(enzyme, e_site)
    _verify_sites(substrate, s_site)
    # Pattern aliases used in both rule expressions below.
    free_enzyme = enzyme({e_site: None})
    # Keep any binding state the caller already specified for s_site on the
    # free substrate; otherwise mark the site explicitly unbound.
    if s_site in substrate.site_conditions:
        free_substrate = substrate()
        bound_state = (substrate.site_conditions[s_site], 1)
    else:
        free_substrate = substrate({s_site: None})
        bound_state = 1
    es_complex = enzyme({e_site: 1}) % substrate({s_site: bound_state})
    # If product is actually a variant of substrate, explicitly mark it as no
    # longer bound to the enzyme, unless the caller already constrained s_site.
    if product().monomer is substrate().monomer \
            and s_site not in product.site_conditions:
        product = product({s_site: None})
    # Reversible complex formation followed by irreversible catalysis.
    components = _macro_rule('bind',
                             free_enzyme + free_substrate | es_complex,
                             klist[0:2], ['kf', 'kr'])
    components |= _macro_rule('catalyze',
                              es_complex >> free_enzyme + product,
                              [klist[2]], ['kc'])
    return components
def catalyze_complex(enzyme, e_site, substrate, s_site, product, klist, m1=None, m2=None):
    """ Generate the two-step catalytic reaction E + S | E:S >> E + P, while allowing complexes to serve as enzyme, substrate and/or product.
    E:S1 + S:S2 | E:S1:S:S2 >> E:S1 + P:S2
    Parameters
    ----------
    enzyme, substrate, product : Monomer, MonomerPattern, or ComplexPattern
        Monomers or complexes participating in the binding reaction.
    e_site, s_site : string
        The names of the sites on `enzyme` and `substrate` (respectively) where
        they bind each other to form the E:S complex.
    klist : list of 3 Parameters or list of 3 numbers
        Forward, reverse and catalytic rate constants (in that order). If
        Parameters are passed, they will be used directly in the generated
        Rules. If numbers are passed, Parameters will be created with
        automatically generated names based on the names and states of enzyme,
        substrate and product and these parameters will be included at the end
        of the returned component list.
    m1, m2 : Monomer or MonomerPattern
        If enzyme or substrate binding site is present in multiple monomers
        within a complex, the specific monomer desired for binding must be specified.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains the bidirectional binding Rule
        and optionally three Parameters if klist was given as numbers.
    """
    # Normalize bare Monomers to MonomerPatterns for uniform handling below.
    if isinstance(m1, Monomer):
        m1 = m1()
    if isinstance(m2, Monomer):
        m2 = m2()
    # Helper: build the bound/unbound ComplexPatterns for a complex species.
    # Returns (bound, unbound) — note the reversed order vs. check_sites_comp_build.
    def build_complex(s1, site1, m1):
        _verify_sites_complex(s1, site1)
        #Retrieve a dictionary specifying the MonomerPattern within the complex that contains the given binding site.
        specsitesdict = _verify_sites_complex(s1, site1)
        s1complexpatub, s1complexpatb = check_sites_comp_build(s1, site1, m1, specsitesdict)
        return s1complexpatb, s1complexpatub
    # Helper: assemble the unbound (site=None) and bound (site=50) versions of
    # the complex pattern, resolving which monomer carries the binding site.
    def check_sites_comp_build(s1, site1, m1, specsitesdict):
        #Return error if binding site exists on multiple monomers and a monomer for binding (m1) hasn't been specified.
        if len(specsitesdict) > 1 and m1==None:
            raise ValueError("Binding site '%s' present in more than one monomer in complex '%s'. Specify variable m1, the monomer used for binding within the complex." % (site1, s1))
        if not s1.is_concrete:
            raise ValueError("Complex '%s' must be concrete." % (s1))
        #If the given binding site is only present in one monomer in the complex:
        if m1==None:
            #Build up ComplexPattern for use in rule (with state of given binding site specified).
            # Bond number 50 is used as a (presumably) collision-free integer
            # for the new enzyme-substrate bond — TODO confirm against bind_complex.
            s1complexpatub = list(specsitesdict.keys())[0]({site1:None})
            s1complexpatb = list(specsitesdict.keys())[0]({site1:50})
            for monomer in s1.monomer_patterns:
                if monomer not in specsitesdict.keys():
                    s1complexpatub %= monomer
                    s1complexpatb %= monomer
        #If the binding site is present on more than one monomer in the complex, the monomer must be specified by the user. Use specified m1 to build ComplexPattern.
        else:
            #Make sure binding states of MonomerPattern m1 match those of the monomer within the ComplexPattern s1 (ComplexPattern monomer takes precedence if not).
            i = 0
            identical_monomers = []
            for mon in s1.monomer_patterns:
                #Only change the binding site for the first monomer that matches. Keep any others unchanged to add to final complex that is returned.
                if mon.monomer.name == m1.monomer.name:
                    i += 1
                    if i == 1:
                        s1complexpatub = m1({site1:None})
                        s1complexpatb = m1({site1:50})
                    else:
                        identical_monomers.append(mon)
            #Build up ComplexPattern for use in rule (with state of given binding site on m1 specified).
            for mon in s1.monomer_patterns:
                if mon.monomer.name != m1.monomer.name:
                    s1complexpatub %= mon
                    s1complexpatb %= mon
            if identical_monomers:
                for i in range(len(identical_monomers)):
                    s1complexpatub %= identical_monomers[i]
                    s1complexpatb %= identical_monomers[i]
        return s1complexpatub, s1complexpatb
    #If no complexes exist in the reaction, revert to catalyze().
    if (isinstance(enzyme, MonomerPattern) or isinstance(enzyme, Monomer)) and (isinstance(substrate, MonomerPattern) or isinstance(substrate, Monomer)):
        _verify_sites(enzyme, e_site)
        _verify_sites(substrate, s_site)
        return catalyze(enzyme, e_site, substrate, s_site, product, klist,)
    # Build E:S
    if isinstance(enzyme, ComplexPattern):
        enzymepatb, enzyme_free = build_complex(enzyme, e_site, m1)
    else:
        # NOTE(review): a plain-monomer enzyme gets bond number 1 here while
        # build_complex and the plain-monomer substrate branch use 50 — confirm
        # the bond integers in es_complex pair up in the enzyme-only-monomer case.
        enzymepatb, enzyme_free = enzyme({e_site: 1}), enzyme({e_site: None})
    if isinstance(substrate, ComplexPattern):
        substratepatb, substratepatub = build_complex(substrate, s_site, m2)
    else:
        substratepatb = substrate({s_site: 50})
    """if s_site in substrate.site_conditions:
            substrate_free = substrate()
            s_state = (substrate.site_conditions[s_site], 1)
    else:
            substrate_free = substrate({s_site: None})
            s_state = 1
    substratepatb = substrate({s_site: s_state})
    """
    # Joined enzyme-substrate complex used in the catalysis rule.
    es_complex = enzymepatb % substratepatb
    # Use bind complex to binding rule.
    components = bind_complex(enzyme, e_site, substrate, s_site, klist[0:2], m1, m2)
    components |= _macro_rule('catalyze',
                              es_complex >> enzyme_free + product,
                              [klist[2]], ['kc'])
    return components
def catalyze_state(enzyme, e_site, substrate, s_site, mod_site,
                   state1, state2, klist):
    """
    Generate the two-step catalytic reaction E + S | E:S >> E + P. A wrapper
    around catalyze() with a signature specifying the state change of the
    substrate resulting from catalysis.
    Parameters
    ----------
    enzyme : Monomer or MonomerPattern
        E in the above reaction.
    substrate : Monomer or MonomerPattern
        S and P in the above reaction. The product species is assumed to be
        identical to the substrate species in all respects except the state
        of the modification site. The state of the modification site should
        not be specified in the MonomerPattern for the substrate.
    e_site, s_site : string
        The names of the sites on `enzyme` and `substrate` (respectively) where
        they bind each other to form the E:S complex.
    mod_site : string
        The name of the site on the substrate that is modified by catalysis.
    state1, state2 : strings
        The states of the modification site (mod_site) on the substrate before
        (state1) and after (state2) catalysis.
    klist : list of 3 Parameters or list of 3 numbers
        Forward, reverse and catalytic rate constants (in that order). If
        Parameters are passed, they will be used directly in the generated
        Rules. If numbers are passed, Parameters will be created with
        automatically generated names based on the names and states of enzyme,
        substrate and product and these parameters will be included at the end
        of the returned component list.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains two Rules (bidirectional complex
        formation and unidirectional product dissociation), and optionally three
        Parameters if klist was given as plain numbers.
    Notes
    -----
    When passing a MonomerPattern for `enzyme` or `substrate`, do not include
    `e_site` or `s_site` in the respective patterns. In addition, do not
    include the state of the modification site on the substrate. The macro
    will handle this.
    Examples
    --------
    Using a single Monomer for substrate and product with a state change::
        Monomer('Kinase', ['b'])
        Monomer('Substrate', ['b', 'y'], {'y': ('U', 'P')})
        catalyze_state(Kinase, 'b', Substrate, 'b', 'y', 'U', 'P',
                 (1e-4, 1e-1, 1))
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Kinase', ['b'])
        Monomer('Kinase', ['b'])
        >>> Monomer('Substrate', ['b', 'y'], {'y': ('U', 'P')})
        Monomer('Substrate', ['b', 'y'], {'y': ('U', 'P')})
        >>> catalyze_state(Kinase, 'b', Substrate, 'b', 'y', 'U', 'P', (1e-4, 1e-1, 1)) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('bind_Kinase_SubstrateU_to_KinaseSubstrateU',
             Kinase(b=None) + Substrate(b=None, y='U') | Kinase(b=1) % Substrate(b=1, y='U'),
             bind_Kinase_SubstrateU_to_KinaseSubstrateU_kf,
             bind_Kinase_SubstrateU_to_KinaseSubstrateU_kr),
         Parameter('bind_Kinase_SubstrateU_to_KinaseSubstrateU_kf', 0.0001),
         Parameter('bind_Kinase_SubstrateU_to_KinaseSubstrateU_kr', 0.1),
         Rule('catalyze_KinaseSubstrateU_to_Kinase_SubstrateP',
             Kinase(b=1) % Substrate(b=1, y='U') >> Kinase(b=None) + Substrate(b=None, y='P'),
             catalyze_KinaseSubstrateU_to_Kinase_SubstrateP_kc),
         Parameter('catalyze_KinaseSubstrateU_to_Kinase_SubstrateP_kc', 1.0),
         ])
    """
    # Express S and P as the same monomer in its pre- and post-catalysis
    # modification states, then delegate to catalyze().
    substrate_before = substrate({mod_site: state1})
    product_after = substrate({mod_site: state2})
    return catalyze(enzyme, e_site, substrate_before, s_site, product_after,
                    klist)
def catalyze_one_step(enzyme, substrate, product, kf):
    """
    Generate the one-step catalytic reaction E + S >> E + P.
    Parameters
    ----------
    enzyme, substrate, product : Monomer or MonomerPattern
        E, S and P in the above reaction.
    kf :  a Parameter or a number
        Forward rate constant for the reaction. If a
        Parameter is passed, it will be used directly in the generated
        Rules. If a number is passed, a Parameter will be created with an
        automatically generated name based on the names and states of the
        enzyme, substrate and product and this parameter will be included
        at the end of the returned component list.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains the unidirectional reaction Rule
        and optionally the forward rate Parameter if klist was given as a
        number.
    Notes
    -----
    In this macro, there is no direct binding between enzyme and substrate,
    so binding sites do not have to be specified. This represents an
    approximation for the case when the enzyme is operating in its linear
    range. However, if catalysis is nevertheless contingent on the enzyme or
    substrate being unbound on some site, then that information must be encoded
    in the MonomerPattern for the enzyme or substrate. See the examples, below.
    Examples
    --------
    Convert S to P by E::
        Model()
        Monomer('E', ['b'])
        Monomer('S', ['b'])
        Monomer('P')
        catalyze_one_step(E, S, P, 1e-4)
    If the ability of the enzyme E to catalyze this reaction is dependent
    on the site 'b' of E being unbound, then this macro must be called as
        catalyze_one_step(E(b=None), S, P, 1e-4)
    and similarly if the substrate or product must be unbound.
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('E', ['b'])
        Monomer('E', ['b'])
        >>> Monomer('S', ['b'])
        Monomer('S', ['b'])
        >>> Monomer('P')
        Monomer('P')
        >>> catalyze_one_step(E, S, P, 1e-4) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('one_step_E_S_to_E_P', E() + S() >> E() + P(), one_step_E_S_to_E_P_kf),
         Parameter('one_step_E_S_to_E_P_kf', 0.0001),
         ])
    """
    # Promote any bare Monomers to MonomerPatterns so the rule expression
    # below composes uniformly.
    enzyme, substrate, product = (
        species() if isinstance(species, Monomer) else species
        for species in (enzyme, substrate, product))
    return _macro_rule('one_step',
                       enzyme + substrate >> enzyme + product,
                       [kf], ['kf'])
def catalyze_one_step_reversible(enzyme, substrate, product, klist):
    """
    Create fwd and reverse rules for catalysis of the form::
        E + S -> E + P
            P -> S
    Parameters
    ----------
    enzyme, substrate, product : Monomer or MonomerPattern
        E, S and P in the above reactions.
    klist : list of 2 Parameters or list of 2 numbers
        A list containing the rate constant for catalysis and the rate constant
        for the conversion of product back to substrate (in that order). If
        Parameters are passed, they will be used directly in the generated
        Rules. If numbers are passed, Parameters will be created with
        automatically generated names based on the names and states of S1 and
        S2 and these parameters will be included at the end of the returned
        component list.
    Returns
    -------
    components : ComponentSet
        The generated components. Contains two rules (the single-step catalysis
        rule and the product reversion rule) and optionally the two generated
        Parameter objects if klist was given as numbers.
    Notes
    -----
    Calls the macro catalyze_one_step to generate the catalysis rule.
    Examples
    --------
    One-step, pseudo-first order conversion of S to P by E::
        Model()
        Monomer('E', ['b'])
        Monomer('S', ['b'])
        Monomer('P')
        catalyze_one_step_reversible(E, S, P, [1e-1, 1e-4])
    Execution::
        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('E', ['b'])
        Monomer('E', ['b'])
        >>> Monomer('S', ['b'])
        Monomer('S', ['b'])
        >>> Monomer('P')
        Monomer('P')
        >>> catalyze_one_step_reversible(E, S, P, [1e-1, 1e-4]) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('one_step_E_S_to_E_P', E() + S() >> E() + P(), one_step_E_S_to_E_P_kf),
         Parameter('one_step_E_S_to_E_P_kf', 0.1),
         Rule('reverse_P_to_S', P() >> S(), reverse_P_to_S_kr),
         Parameter('reverse_P_to_S_kr', 0.0001),
         ])
    """
    # Promote any bare Monomers to MonomerPatterns before building rules.
    enzyme, substrate, product = (
        species() if isinstance(species, Monomer) else species
        for species in (enzyme, substrate, product))
    # Forward catalysis plus a first-order reversion of product to substrate.
    components = catalyze_one_step(enzyme, substrate, product, klist[0])
    components |= _macro_rule('reverse', product >> substrate,
                              [klist[1]], ['kr'])
    return components
# Synthesis and degradation
# =========================
def synthesize(species, ksynth):
    """
    Generate a reaction which synthesizes a species.

    Note that `species` must be "concrete": every site of every monomer it
    contains must have its state specified, with no site left unmentioned.

    Parameters
    ----------
    species : Monomer, MonomerPattern or ComplexPattern
        The species to synthesize. A bare Monomer is taken with all sites
        unbound and in their default states; a pattern must be concrete.
    ksynth : Parameter or number
        Synthesis rate. A Parameter is used directly in the generated Rule.
        A number triggers creation of a Parameter whose name is derived from
        the components and site states of `species`; that Parameter is
        appended to the returned component list.

    Returns
    -------
    components : ComponentSet
        The generated components: the unidirectional synthesis Rule, plus
        the auto-created Parameter when ksynth was given as a number.

    Examples
    --------
    Synthesize A with site x unbound and site y in state 'e'::

        Model()
        Monomer('A', ['x', 'y'], {'y': ['e', 'f']})
        synthesize(A(x=None, y='e'), 1e-4)

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('A', ['x', 'y'], {'y': ['e', 'f']})
        Monomer('A', ['x', 'y'], {'y': ['e', 'f']})
        >>> synthesize(A(x=None, y='e'), 1e-4) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('synthesize_Ae', None >> A(x=None, y='e'), synthesize_Ae_k),
         Parameter('synthesize_Ae_k', 0.0001),
         ])

    """
    # Derive the rule name from the complexes on the product side.
    def _name_from_products(rule_expression):
        product_cps = rule_expression.product_pattern.complex_patterns
        return '_'.join(_complex_pattern_label(cp) for cp in product_cps)

    pattern = species() if isinstance(species, Monomer) else species
    pattern = as_complex_pattern(pattern)
    if not pattern.is_concrete():
        raise ValueError("species must be concrete")
    return _macro_rule('synthesize', None >> pattern, [ksynth], ['k'],
                       name_func=_name_from_products)
def degrade(species, kdeg):
    """
    Generate a reaction which degrades a species.

    Note that `species` is not required to be "concrete"; a partial pattern
    degrades every matching species.

    Parameters
    ----------
    species : Monomer, MonomerPattern or ComplexPattern
        The species to degrade. A bare Monomer is taken with all sites
        unbound and in their default states.
    kdeg : Parameter or number
        Degradation rate. A Parameter is used directly in the generated
        Rule. A number triggers creation of a Parameter whose name is
        derived from the components and site states of `species`; that
        Parameter is appended to the returned component list.

    Returns
    -------
    components : ComponentSet
        The generated components: the unidirectional degradation Rule, plus
        the auto-created Parameter when kdeg was given as a number.

    Examples
    --------
    Degrade all B, even bound species::

        Model()
        Monomer('B', ['x'])
        degrade(B(), 1e-6)

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('B', ['x'])
        Monomer('B', ['x'])
        >>> degrade(B(), 1e-6) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('degrade_B', B() >> None, degrade_B_k),
         Parameter('degrade_B_k', 1e-06),
         ])

    """
    # Derive the rule name from the complexes on the reactant side.
    def _name_from_reactants(rule_expression):
        reactant_cps = rule_expression.reactant_pattern.complex_patterns
        return '_'.join(_complex_pattern_label(cp) for cp in reactant_cps)

    pattern = species() if isinstance(species, Monomer) else species
    pattern = as_complex_pattern(pattern)
    return _macro_rule('degrade', pattern >> None, [kdeg], ['k'],
                       name_func=_name_from_reactants)
def synthesize_degrade_table(table):
    """
    Generate a table of synthesis and degradation reactions.

    Given a list of species, calls the `synthesize` and `degrade` macros on
    each one. The species and the parameter values are passed as a list of
    lists (i.e. a table), each inner list holding a species followed by its
    synthesis and degradation rates (in that order).

    Each rate may be a Parameter or a number. Parameters are used directly
    in the generated Rules; numbers cause Parameters to be created with
    automatically generated names based on the relevant species, and those
    Parameters are included in the returned component list. Pass None in
    place of a rate to omit the corresponding reaction.

    Note that any `species` with a non-None synthesis rate must be
    "concrete".

    Parameters
    ----------
    table : list of lists
        Table of species and rates, as described above.

    Returns
    -------
    components : ComponentSet
        The generated components: the unidirectional synthesis and
        degradation Rules, plus Parameters for any rates given as numbers.

    Examples
    --------
    Specify synthesis and degradation reactions for A and B in a table::

        Model()
        Monomer('A', ['x', 'y'], {'y': ['e', 'f']})
        Monomer('B', ['x'])
        synthesize_degrade_table([[A(x=None, y='e'), 1e-4, 1e-6],
                                  [B(), None, 1e-7]])

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('A', ['x', 'y'], {'y': ['e', 'f']})
        Monomer('A', ['x', 'y'], {'y': ['e', 'f']})
        >>> Monomer('B', ['x'])
        Monomer('B', ['x'])
        >>> synthesize_degrade_table([[A(x=None, y='e'), 1e-4, 1e-6],
        ...                           [B(), None, 1e-7]]) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('synthesize_Ae', None >> A(x=None, y='e'), synthesize_Ae_k),
         Parameter('synthesize_Ae_k', 0.0001),
         Rule('degrade_Ae', A(x=None, y='e') >> None, degrade_Ae_k),
         Parameter('degrade_Ae_k', 1e-06),
         Rule('degrade_B', B() >> None, degrade_B_k),
         Parameter('degrade_B_k', 1e-07),
         ])

    """
    all_components = ComponentSet()
    for species, ksynth, kdeg in table:
        # A None rate means "skip this reaction" for the given species.
        if ksynth is not None:
            all_components |= synthesize(species, ksynth)
        if kdeg is not None:
            all_components |= degrade(species, kdeg)
    return all_components
# Polymer assembly (pores/rings and chains)
# =========================================
def polymer_species(subunit, site1, site2, size, closed=False):
    """
    Return a ComplexPattern representing a linear or closed circular polymer.

    Parameters
    ----------
    subunit : Monomer or MonomerPattern
        The subunit of which the polymer is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next.
    size : integer
        The number of subunits in the polymer.
    closed : boolean
        If False (default), the polymer is linear, with unbound sites at
        each end. If True, the polymer is a closed circle, like a ring or
        pore.

    Returns
    -------
    A ComplexPattern corresponding to the polymer.

    Notes
    -----
    Used by both chain_species and pore_species. At sizes 1 and 2 the
    result is identical whether `closed` is True or False.
    """
    _verify_sites(subunit, site1, site2)
    if size <= 0:
        raise ValueError("size must be an integer greater than 0")
    if size == 1:
        return subunit({site1: None, site2: None})
    if size == 2:
        return subunit({site1: None, site2: 1}) % \
            subunit({site1: 1, site2: None})
    # Three or more subunits. For a closed circle the first and last
    # subunits share bond number `size` (the "seam"); for a linear chain
    # their outward-facing sites stay unbound.
    seam = size if closed else None
    polymer = subunit({site1: seam, site2: 1})
    for bond in range(1, size - 1):
        polymer %= subunit({site1: bond, site2: bond + 1})
    polymer %= subunit({site1: size - 1, site2: seam})
    # The pattern is symmetric and would otherwise match the same species
    # multiple times, so restrict it to a single match.
    polymer.match_once = True
    return polymer
def assemble_polymer_sequential(subunit, site1, site2, max_size, ktable,
                                closed=False):
    """Generate rules to assemble a polymer by sequential subunit addition.

    The polymer species are created by sequential addition of `subunit`
    monomers, i.e. larger oligomeric species never fuse together. The
    polymer structure is defined by the `polymer_species` macro.

    Parameters
    ----------
    subunit : Monomer or MonomerPattern
        The subunit of which the polymer is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next.
    max_size : integer
        The maximum number of subunits in the polymer.
    ktable : list of lists of Parameters or numbers
        Table of forward and reverse rate constants for the assembly steps.
        The outer list must be of length `max_size` - 1 and each inner list
        of length 2. The first inner list corresponds to the dimerization
        step and the last to the addition of the `max_size`th subunit. Each
        pair holds the forward and reverse rate constants (in that order)
        and must be either all Parameters or all numbers. Parameters are
        used directly in the generated Rules; numbers cause Parameters with
        auto-generated names (from `subunit`, `site1`, `site2` and the
        polymer sizes) to be created and appended to the returned
        components.
    closed : boolean
        If False (default), assembles a linear (non-circular) polymer. If
        True, assembles a circular ring/pore polymer.

    Notes
    -----
    See documentation for :py:func:`assemble_chain_sequential` and
    :py:func:`assemble_pore_sequential` for examples.
    """
    if len(ktable) != max_size - 1:
        raise ValueError("len(ktable) must be equal to max_size - 1")

    # Rule names carry the subunit monomer's name and the product size.
    def _rule_name(rule_expression, size):
        reactants = rule_expression.reactant_pattern
        first = reactants.complex_patterns[0].monomer_patterns[0].monomer
        return '%s_%d' % (first.name, size)

    # Invariant pieces: the naming prefix and the free monomeric subunit.
    base_name = 'assemble_%s_sequential' % ('pore' if closed else 'chain')
    monomer_unit = polymer_species(subunit, site1, site2, 1, closed=closed)
    components = ComponentSet()
    for step, klist in enumerate(ktable):
        size = step + 2
        smaller = polymer_species(subunit, site1, site2, size - 1,
                                  closed=closed)
        larger = polymer_species(subunit, site1, site2, size, closed=closed)
        components |= _macro_rule(base_name,
                                  monomer_unit + smaller | larger,
                                  klist, ['kf', 'kr'],
                                  name_func=functools.partial(_rule_name,
                                                              size=size))
    return components
# Pore assembly
# =============
def pore_species(subunit, site1, site2, size):
    """
    Return a ComplexPattern representing a circular homomeric pore.

    Parameters
    ----------
    subunit : Monomer or MonomerPattern
        The subunit of which the pore is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next.
    size : integer
        The number of subunits in the pore.

    Returns
    -------
    A ComplexPattern corresponding to the pore.

    Notes
    -----
    At sizes 1 and 2 the ring is not closed, i.e. there is one site1 and one
    site2 which remain unbound. At size 3 and up the ring is closed and all
    site1 sites are bound to a site2.

    Examples
    --------
    Get the ComplexPattern object representing a pore of size 4::

        Model()
        Monomer('Unit', ['p1', 'p2'])
        pore_tetramer = pore_species(Unit, 'p1', 'p2', 4)

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Unit', ['p1', 'p2'])
        Monomer('Unit', ['p1', 'p2'])
        >>> pore_species(Unit, 'p1', 'p2', 4)
        MatchOnce(Unit(p1=4, p2=1) % Unit(p1=1, p2=2) % Unit(p1=2, p2=3) % Unit(p1=3, p2=4))

    """
    # A pore is simply the closed-circle form of the generic polymer.
    return polymer_species(subunit, site1, site2, size, closed=True)
def assemble_pore_sequential(subunit, site1, site2, max_size, ktable):
    """Generate rules to assemble a circular homomeric pore sequentially.

    The pore species are created by sequential addition of `subunit`
    monomers, i.e. larger oligomeric species never fuse together. The pore
    structure is defined by the `pore_species` macro.

    Parameters
    ----------
    subunit : Monomer or MonomerPattern
        The subunit of which the pore is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next.
    max_size : integer
        The maximum number of subunits in the pore.
    ktable : list of lists of Parameters or numbers
        Table of forward and reverse rate constants for the assembly steps.
        The outer list must be of length `max_size` - 1 and each inner list
        of length 2. The first inner list corresponds to the dimerization
        step and the last to the addition of the `max_size`th subunit. Each
        pair holds the forward and reverse rate constants (in that order)
        and must be either all Parameters or all numbers. Parameters are
        used directly in the generated Rules; numbers cause Parameters with
        auto-generated names (from `subunit`, `site1`, `site2` and the pore
        sizes) to be created and appended to the returned components.

    Examples
    --------
    Assemble a three-membered pore by sequential addition of monomers,
    with the same forward/reverse rates for monomer-monomer and
    monomer-dimer interactions::

        Model()
        Monomer('Unit', ['p1', 'p2'])
        assemble_pore_sequential(Unit, 'p1', 'p2', 3, [[1e-4, 1e-1]] * 2)

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Unit', ['p1', 'p2'])
        Monomer('Unit', ['p1', 'p2'])
        >>> assemble_pore_sequential(Unit, 'p1', 'p2', 3, [[1e-4, 1e-1]] * 2) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('assemble_pore_sequential_Unit_2',
             Unit(p1=None, p2=None) + Unit(p1=None, p2=None) |
             Unit(p1=None, p2=1) % Unit(p1=1, p2=None),
             assemble_pore_sequential_Unit_2_kf,
             assemble_pore_sequential_Unit_2_kr),
         Parameter('assemble_pore_sequential_Unit_2_kf', 0.0001),
         Parameter('assemble_pore_sequential_Unit_2_kr', 0.1),
         Rule('assemble_pore_sequential_Unit_3',
             Unit(p1=None, p2=None) + Unit(p1=None, p2=1) % Unit(p1=1, p2=None) |
             MatchOnce(Unit(p1=3, p2=1) % Unit(p1=1, p2=2) % Unit(p1=2, p2=3)),
             assemble_pore_sequential_Unit_3_kf,
             assemble_pore_sequential_Unit_3_kr),
         Parameter('assemble_pore_sequential_Unit_3_kf', 0.0001),
         Parameter('assemble_pore_sequential_Unit_3_kr', 0.1),
         ])

    """
    # Delegate to the generic polymer assembler in closed-ring mode.
    return assemble_polymer_sequential(subunit, site1, site2, max_size,
                                       ktable, closed=True)
def pore_transport(subunit, sp_site1, sp_site2, sc_site, min_size, max_size,
                   csource, c_site, cdest, ktable):
    """
    Generate rules to transport cargo through a circular homomeric pore.

    The pore structure is defined by the `pore_species` macro -- `subunit`
    monomers bind to each other from `sp_site1` to `sp_site2` to form a closed
    ring. The transport reaction is modeled as a catalytic process of the form
    pore + csource | pore:csource >> pore + cdest

    Parameters
    ----------
    subunit : Monomer or MonomerPattern
        Subunit of which the pore is composed.
    sp_site1, sp_site2 : string
        Names of the sites where one copy of `subunit` binds to the next.
    sc_site : string
        Name of the site on `subunit` where it binds to the cargo `csource`.
    min_size, max_size : integer
        Minimum and maximum number of subunits in the pore at which transport
        will occur.
    csource : Monomer or MonomerPattern
        Cargo "source", i.e. the entity to be transported.
    c_site : string
        Name of the site on `csource` where it binds to `subunit`.
    cdest : Monomer or MonomerPattern
        Cargo "destination", i.e. the resulting state after the transport event.
    ktable : list of lists of Parameters or numbers
        Table of forward, reverse and catalytic rate constants for the transport
        reactions. The outer list must be of length `max_size` - `min_size` + 1,
        and the inner lists must all be of length 3. In the outer list, the
        first element corresponds to the transport through the pore of size
        `min_size` and the last element to that of size `max_size`. Each inner
        list contains the forward, reverse and catalytic rate constants (in that
        order) for the corresponding transport reaction, and each of these pairs
        must comprise solely Parameter objects or solely numbers (never some of
        each). If Parameters are passed, they will be used directly in the
        generated Rules. If numbers are passed, Parameters will be created with
        automatically generated names based on the subunit, the pore size and
        the cargo, and these parameters will be included at the end of the
        returned component list.

    Examples
    --------
    Specify that a three-membered pore is capable of
    transporting cargo from the mitochondria to the cytoplasm::

        Model()
        Monomer('Unit', ['p1', 'p2', 'sc_site'])
        Monomer('Cargo', ['c_site', 'loc'], {'loc':['mito', 'cyto']})
        pore_transport(Unit, 'p1', 'p2', 'sc_site', 3, 3,
                       Cargo(loc='mito'), 'c_site', Cargo(loc='cyto'),
                       [[1e-4, 1e-1, 1]])

    Generates two rules--one (reversible) binding rule and one transport
    rule--and the three associated parameters.

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Unit', ['p1', 'p2', 'sc_site'])
        Monomer('Unit', ['p1', 'p2', 'sc_site'])
        >>> Monomer('Cargo', ['c_site', 'loc'], {'loc':['mito', 'cyto']})
        Monomer('Cargo', ['c_site', 'loc'], {'loc': ['mito', 'cyto']})
        >>> pore_transport(Unit, 'p1', 'p2', 'sc_site', 3, 3,
        ...                Cargo(loc='mito'), 'c_site', Cargo(loc='cyto'),
        ...                [[1e-4, 1e-1, 1]]) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('pore_transport_complex_Unit_3_Cargomito',
             MatchOnce(Unit(p1=3, p2=1, sc_site=None) %
                 Unit(p1=1, p2=2, sc_site=None) %
                 Unit(p1=2, p2=3, sc_site=None)) +
             Cargo(c_site=None, loc='mito') |
             MatchOnce(Unit(p1=3, p2=1, sc_site=4) %
                 Unit(p1=1, p2=2, sc_site=None) %
                 Unit(p1=2, p2=3, sc_site=None) %
                 Cargo(c_site=4, loc='mito')),
             pore_transport_complex_Unit_3_Cargomito_kf,
             pore_transport_complex_Unit_3_Cargomito_kr),
         Parameter('pore_transport_complex_Unit_3_Cargomito_kf', 0.0001),
         Parameter('pore_transport_complex_Unit_3_Cargomito_kr', 0.1),
         Rule('pore_transport_dissociate_Unit_3_Cargocyto',
             MatchOnce(Unit(p1=3, p2=1, sc_site=4) %
                 Unit(p1=1, p2=2, sc_site=None) %
                 Unit(p1=2, p2=3, sc_site=None) %
                 Cargo(c_site=4, loc='mito')) >>
             MatchOnce(Unit(p1=3, p2=1, sc_site=None) %
                 Unit(p1=1, p2=2, sc_site=None) %
                 Unit(p1=2, p2=3, sc_site=None)) +
             Cargo(c_site=None, loc='cyto'),
             pore_transport_dissociate_Unit_3_Cargocyto_kc),
         Parameter('pore_transport_dissociate_Unit_3_Cargocyto_kc', 1.0),
         ])

    """
    _verify_sites(subunit, sc_site)
    _verify_sites(csource, c_site)
    if len(ktable) != max_size - min_size + 1:
        raise ValueError("len(ktable) must be equal to max_size - min_size + 1")
    # Rule names look like '<subunit-label>_<size>_<cargo-label>'.
    def pore_transport_rule_name(rule_expression, size):
        # Get ReactionPatterns
        react_p = rule_expression.reactant_pattern
        prod_p = rule_expression.product_pattern
        # Build the label components
        # Pore is always first complex of LHS due to how we build the rules
        subunit = react_p.complex_patterns[0].monomer_patterns[0]
        if len(react_p.complex_patterns) == 2:
            # This is the complexation reaction
            cargo = react_p.complex_patterns[1].monomer_patterns[0]
        else:
            # This is the dissociation reaction
            cargo = prod_p.complex_patterns[1].monomer_patterns[0]
        return '%s_%d_%s' % (_monomer_pattern_label(subunit), size,
                             _monomer_pattern_label(cargo))
    components = ComponentSet()
    # Set up some aliases that are invariant with pore size
    subunit_free = subunit({sc_site: None})
    csource_free = csource({c_site: None})
    # If cdest is actually a variant of csource, we need to explicitly say that
    # it is no longer bound to the pore
    if cdest().monomer is csource().monomer:
        cdest = cdest({c_site: None})
    # One binding rule + one dissociation/transport rule per pore size.
    for size, klist in zip(range(min_size, max_size + 1), ktable):
        # More aliases which do depend on pore size
        pore_free = pore_species(subunit_free, sp_site1, sp_site2, size)
        # This one is a bit tricky. The pore:csource complex must only introduce
        # one additional bond even though there are multiple subunits in the
        # pore. We create partial patterns for bound pore and csource, using a
        # bond number that is high enough not to conflict with the bonds within
        # the pore ring itself.
        # Start by copying pore_free, which has all cargo binding sites empty
        pore_bound = pore_free.copy()
        # Get the next bond number not yet used in the pore structure itself
        # (the ring of `size` subunits uses bond numbers 1..size).
        cargo_bond_num = size + 1
        # Assign that bond to the first subunit in the pore
        pore_bound.monomer_patterns[0].site_conditions[sc_site] = cargo_bond_num
        # Create a cargo source pattern with that same bond
        csource_bound = csource({c_site: cargo_bond_num})
        # Finally we can define the complex trivially; the bond numbers are
        # already present in the patterns
        pc_complex = pore_bound % csource_bound
        # Create the rules (just like catalyze)
        name_func = functools.partial(pore_transport_rule_name, size=size)
        components |= _macro_rule('pore_transport_complex',
                                  pore_free + csource_free | pc_complex,
                                  klist[0:2], ['kf', 'kr'],
                                  name_func=name_func)
        components |= _macro_rule('pore_transport_dissociate',
                                  pc_complex >> pore_free + cdest,
                                  [klist[2]], ['kc'],
                                  name_func=name_func)
    return components
def pore_bind(subunit, sp_site1, sp_site2, sc_site, size, cargo, c_site,
              klist):
    """
    Generate rules to bind a monomer to a circular homomeric pore.

    The pore structure is defined by the `pore_species` macro -- `subunit`
    monomers bind to each other from `sp_site1` to `sp_site2` to form a closed
    ring. The binding reaction takes the form pore + cargo | pore:cargo.

    Parameters
    ----------
    subunit : Monomer or MonomerPattern
        Subunit of which the pore is composed.
    sp_site1, sp_site2 : string
        Names of the sites where one copy of `subunit` binds to the next.
    sc_site : string
        Name of the site on `subunit` where it binds to the cargo `cargo`.
    size : integer
        Number of subunits in the pore at which binding will occur.
    cargo : Monomer or MonomerPattern
        Cargo that binds to the pore complex.
    c_site : string
        Name of the site on `cargo` where it binds to `subunit`.
    klist : list of Parameters or numbers
        List containing forward and reverse rate constants for the binding
        reaction (in that order). Rate constants should either be both Parameter
        objects or both numbers. If Parameters are passed, they will be used
        directly in the generated Rules. If numbers are passed, Parameters
        will be created with automatically generated names based on the
        subunit, the pore size and the cargo, and these parameters will be
        included at the end of the returned component list.

    Examples
    --------
    Specify that a cargo molecule can bind reversibly to a 3-membered
    pore::

        Model()
        Monomer('Unit', ['p1', 'p2', 'sc_site'])
        Monomer('Cargo', ['c_site'])
        pore_bind(Unit, 'p1', 'p2', 'sc_site', 3,
                  Cargo(), 'c_site', [1e-4, 1e-1, 1])

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Unit', ['p1', 'p2', 'sc_site'])
        Monomer('Unit', ['p1', 'p2', 'sc_site'])
        >>> Monomer('Cargo', ['c_site'])
        Monomer('Cargo', ['c_site'])
        >>> pore_bind(Unit, 'p1', 'p2', 'sc_site', 3,
        ...           Cargo(), 'c_site', [1e-4, 1e-1, 1]) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('pore_bind_Unit_3_Cargo',
             MatchOnce(Unit(p1=3, p2=1, sc_site=None) %
                 Unit(p1=1, p2=2, sc_site=None) %
                 Unit(p1=2, p2=3, sc_site=None)) +
             Cargo(c_site=None) |
             MatchOnce(Unit(p1=3, p2=1, sc_site=4) %
                 Unit(p1=1, p2=2, sc_site=None) %
                 Unit(p1=2, p2=3, sc_site=None) %
                 Cargo(c_site=4)),
             pore_bind_Unit_3_Cargo_kf, pore_bind_Unit_3_Cargo_kr),
         Parameter('pore_bind_Unit_3_Cargo_kf', 0.0001),
         Parameter('pore_bind_Unit_3_Cargo_kr', 0.1),
         ])

    """
    _verify_sites(subunit, sc_site)
    _verify_sites(cargo, c_site)
    # Rule names look like '<subunit-name>_<size>_<cargo-label>'.
    def pore_bind_rule_name(rule_expression, size):
        # Get ReactionPatterns
        react_p = rule_expression.reactant_pattern
        prod_p = rule_expression.product_pattern
        # Build the label components
        # Pore is always first complex of LHS due to how we build the rules
        subunit = react_p.complex_patterns[0].monomer_patterns[0].monomer
        if len(react_p.complex_patterns) == 2:
            # This is the complexation reaction
            cargo = react_p.complex_patterns[1].monomer_patterns[0]
        else:
            # This is the dissociation reaction
            cargo = prod_p.complex_patterns[1].monomer_patterns[0]
        return '%s_%d_%s' % (subunit.name, size,
                             _monomer_pattern_label(cargo))
    components = ComponentSet()
    # Set up some aliases that are invariant with pore size
    subunit_free = subunit({sc_site: None})
    cargo_free = cargo({c_site: None})
    # Leftover from the pore_transport-style loop this was derived from;
    # here only a single pore size is handled.
    #for size, klist in zip(range(min_size, max_size + 1), ktable):
    # More aliases which do depend on pore size
    pore_free = pore_species(subunit_free, sp_site1, sp_site2, size)
    # This one is a bit tricky. The pore:cargo complex must only introduce
    # one additional bond even though there are multiple subunits in the
    # pore. We create partial patterns for bound pore and cargo, using a
    # bond number that is high enough not to conflict with the bonds within
    # the pore ring itself.
    # Start by copying pore_free, which has all cargo binding sites empty
    pore_bound = pore_free.copy()
    # Get the next bond number not yet used in the pore structure itself
    # (the ring of `size` subunits uses bond numbers 1..size).
    cargo_bond_num = size + 1
    # Assign that bond to the first subunit in the pore
    pore_bound.monomer_patterns[0].site_conditions[sc_site] = cargo_bond_num
    # Create a cargo source pattern with that same bond
    cargo_bound = cargo({c_site: cargo_bond_num})
    # Finally we can define the complex trivially; the bond numbers are
    # already present in the patterns
    pc_complex = pore_bound % cargo_bound
    # Create the rules
    # NOTE(review): only klist[0:2] (kf, kr) is consumed, although the
    # docstring example passes a third value -- confirm the example.
    name_func = functools.partial(pore_bind_rule_name, size=size)
    components |= _macro_rule('pore_bind',
                              pore_free + cargo_free | pc_complex,
                              klist[0:2], ['kf', 'kr'],
                              name_func=name_func)
    return components
# Chain assembly
# =============
def chain_species(subunit, site1, site2, size):
    """
    Return a ComplexPattern representing a linear, chained polymer.

    Parameters
    ----------
    subunit : Monomer or MonomerPattern
        The subunit of which the chain is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next.
    size : integer
        The number of subunits in the chain.

    Returns
    -------
    A ComplexPattern corresponding to the chain.

    Notes
    -----
    Similar to pore_species, but never closes the chain.

    Examples
    --------
    Get the ComplexPattern object representing a chain of length 4::

        Model()
        Monomer('Unit', ['p1', 'p2'])
        chain_tetramer = chain_species(Unit, 'p1', 'p2', 4)

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Unit', ['p1', 'p2'])
        Monomer('Unit', ['p1', 'p2'])
        >>> chain_species(Unit, 'p1', 'p2', 4)
        MatchOnce(Unit(p1=None, p2=1) % Unit(p1=1, p2=2) % Unit(p1=2, p2=3) % Unit(p1=3, p2=None))

    """
    # A chain is simply the open (linear) form of the generic polymer.
    return polymer_species(subunit, site1, site2, size, closed=False)
def assemble_chain_sequential(subunit, site1, site2, max_size, ktable):
    """
    Generate rules to assemble a homomeric chain sequentially.

    The chain species are created by sequential addition of `subunit`
    monomers. The chain structure is defined by the `chain_species` macro.

    Parameters
    ----------
    subunit : Monomer or MonomerPattern
        The subunit of which the chain is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next.
    max_size : integer
        The maximum number of subunits in the chain.
    ktable : list of lists of Parameters or numbers
        Table of forward and reverse rate constants for the assembly steps.
        The outer list must be of length `max_size` - 1 and each inner list
        of length 2. The first inner list corresponds to the dimerization
        step and the last to the addition of the `max_size`th subunit. Each
        pair holds the forward and reverse rate constants (in that order)
        and must be either all Parameters or all numbers. Parameters are
        used directly in the generated Rules; numbers cause Parameters with
        auto-generated names (from `subunit`, `site1`, `site2` and the chain
        sizes) to be created and appended to the returned components.

    Examples
    --------
    Assemble a three-membered chain by sequential addition of monomers,
    with the same forward/reverse rates for monomer-monomer and
    monomer-dimer interactions::

        Model()
        Monomer('Unit', ['p1', 'p2'])
        assemble_chain_sequential(Unit, 'p1', 'p2', 3, [[1e-4, 1e-1]] * 2)

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Unit', ['p1', 'p2'])
        Monomer('Unit', ['p1', 'p2'])
        >>> assemble_chain_sequential(Unit, 'p1', 'p2', 3, [[1e-4, 1e-1]] * 2) # doctest:+NORMALIZE_WHITESPACE
        ComponentSet([
         Rule('assemble_chain_sequential_Unit_2', Unit(p1=None, p2=None) + Unit(p1=None, p2=None) | Unit(p1=None, p2=1) % Unit(p1=1, p2=None), assemble_chain_sequential_Unit_2_kf, assemble_chain_sequential_Unit_2_kr),
         Parameter('assemble_chain_sequential_Unit_2_kf', 0.0001),
         Parameter('assemble_chain_sequential_Unit_2_kr', 0.1),
         Rule('assemble_chain_sequential_Unit_3', Unit(p1=None, p2=None) + Unit(p1=None, p2=1) % Unit(p1=1, p2=None) | MatchOnce(Unit(p1=None, p2=1) % Unit(p1=1, p2=2) % Unit(p1=2, p2=None)), assemble_chain_sequential_Unit_3_kf, assemble_chain_sequential_Unit_3_kr),
         Parameter('assemble_chain_sequential_Unit_3_kf', 0.0001),
         Parameter('assemble_chain_sequential_Unit_3_kr', 0.1),
         ])

    """
    # Delegate to the generic polymer assembler in open-chain mode.
    return assemble_polymer_sequential(subunit, site1, site2, max_size,
                                       ktable, closed=False)
def chain_species_base(base, basesite, subunit, site1, site2, size, comp=1):
    """
    Return a ComplexPattern representing a chained species, chained to a base
    complex.

    Parameters
    ----------
    base : Monomer or MonomerPattern
        The base complex to which the growing chain will be attached.
    basesite : string
        Name of the site on complex where first subunit binds.
    subunit : Monomer or MonomerPattern
        The subunit of which the chain is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next.
    size : integer
        The number of subunits in the chain.
    comp : optional; a ComplexPattern to which the base molecule is attached.

    Returns
    -------
    A ComplexPattern corresponding to the chain.

    Notes
    -----
    Similar to pore_species, but never closes the chain.

    Examples
    --------
    Get the ComplexPattern object representing a chain of size 4 bound to a
    base, which is itself bound to a complex:

        Model()
        Monomer('Base', ['b1', 'b2'])
        Monomer('Unit', ['p1', 'p2'])
        Monomer('Complex1', ['s1'])
        Monomer('Complex2', ['s1', 's2'])
        chain_tetramer = chain_species_base(Base(b2=ANY), 'b1', Unit, 'p1', 'p2', 4, Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY))

    Execution::

        >>> Model() # doctest:+ELLIPSIS
        <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
        >>> Monomer('Unit', ['p1', 'p2'])
        Monomer('Unit', ['p1', 'p2'])
        >>> Monomer('Base', ['b1', 'b2'])
        Monomer('Base', ['b1', 'b2'])
        >>> Monomer('Complex1', ['s1'])
        Monomer('Complex1', ['s1'])
        >>> Monomer('Complex2', ['s1', 's2'])
        Monomer('Complex2', ['s1', 's2'])
        >>> chain_species_base(Base(b2=ANY), 'b1', Unit, 'p1', 'p2', 4, Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY))
        MatchOnce(Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY) % Base(b1=1, b2=ANY) % Unit(p1=1, p2=2) % Unit(p1=2, p2=3) % Unit(p1=3, p2=4) % Unit(p1=4, p2=None))

    """
    _verify_sites(base, basesite)
    _verify_sites(subunit, site1, site2)
    if size <= 0:
        raise ValueError("size must be an integer greater than 0")
    # Bond number 1 links `basesite` on the base to the first subunit's
    # `site1`. The sentinel value 1 for `comp` means "no enclosing complex".
    if comp == 1:
        compbase = base({basesite: 1})
    else:
        compbase = comp % base({basesite: 1})
    if size == 1:
        chainlink = compbase % subunit({site1: 1, site2: None})
    elif size == 2:
        chainlink = compbase % subunit({site1: 1, site2: 2}) % \
            subunit({site1: 2, site2: None})
    else:
        # build up a ComplexPattern, starting with a single subunit
        chainbase = compbase
        chainlink = chainbase % subunit({site1: 1, site2: 2})
        # Interior subunits use consecutive bond numbers 2..size-1.
        for i in range(2, size):
            chainlink %= subunit({site1: i, site2: i+1})
        # Terminal subunit: bound via bond `size`, free on the far end.
        chainlink %= subunit({site1: size, site2: None})
    # NOTE(review): applied for every size here, unlike polymer_species
    # which only sets match_once for size >= 3 -- confirm this is intended.
    chainlink.match_once = True
    return chainlink
def assemble_chain_sequential_base(base, basesite, subunit, site1, site2, max_size, ktable, comp=1):
    """
    Generate rules to assemble a homomeric chain sequentially onto a base
    complex (only the subunit creates the repeating chain, not the base).

    The chain species are created by sequential addition of `subunit`
    monomers.  The chain structure is defined by the `chain_species_base`
    macro.

    Parameters
    ----------
    base : Monomer or MonomerPattern
        The base complex to which the chain is attached.
    basesite : string
        The name of the site on the complex to which the chain attaches.
    subunit : Monomer or MonomerPattern
        The subunit of which the chain is composed.
    site1, site2 : string
        The names of the sites where one copy of `subunit` binds to the next;
        the first will also be the site where the first subunit binds the base.
    max_size : integer
        The maximum number of subunits in the chain.
    ktable : list of lists of Parameters or numbers
        Table of forward and reverse rate constants for the assembly steps.
        The outer list must be of length `max_size` - 1, and the inner lists
        must all be of length 2.  In the outer list, the first element
        corresponds to the step in which a second subunit binds the
        base-bound subunit to form a 2-subunit chain, and the last element
        corresponds to the final step in which the `max_size`th subunit is
        added.  Each inner list contains the forward and reverse rate
        constants (in that order) for the corresponding assembly reaction,
        and each of these pairs must comprise solely Parameter objects or
        solely numbers (never one of each).  If Parameters are passed, they
        will be used directly in the generated Rules.  If numbers are passed,
        Parameters will be created with automatically generated names based
        on `subunit`, `site1`, `site2` and the chain sizes and these
        parameters will be included at the end of the returned component
        list.
    comp : optional; a ComplexPattern to which the base molecule is attached.

    Returns
    -------
    components : ComponentSet
        The generated Rules (and auto-created Parameters, if numbers were
        passed in `ktable`).

    Examples
    --------
    Assemble a three-membered chain by sequential addition of monomers to a base, which is in turn attached to a complex,
    with the same forward/reverse rates for monomer-monomer and monomer-dimer
    interactions::

        Model()
        Monomer('Base', ['b1', 'b2'])
        Monomer('Unit', ['p1', 'p2'])
        Monomer('Complex1', ['s1'])
        Monomer('Complex2', ['s1', 's2'])
        assemble_chain_sequential_base(Base(b2=ANY), 'b1', Unit, 'p1', 'p2', 3, [[1e-4, 1e-1]] * 2, Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY))

    Execution::

    >>> Model() # doctest:+ELLIPSIS
    <Model '_interactive_' (monomers: 0, rules: 0, parameters: 0, expressions: 0, compartments: 0) at ...>
    >>> Monomer('Base', ['b1', 'b2'])
    Monomer('Base', ['b1', 'b2'])
    >>> Monomer('Unit', ['p1', 'p2'])
    Monomer('Unit', ['p1', 'p2'])
    >>> Monomer('Complex1', ['s1'])
    Monomer('Complex1', ['s1'])
    >>> Monomer('Complex2', ['s1', 's2'])
    Monomer('Complex2', ['s1', 's2'])
    >>> assemble_chain_sequential_base(Base(b2=ANY), 'b1', Unit, 'p1', 'p2', 3, [[1e-4, 1e-1]] * 2, Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY)) # doctest:+NORMALIZE_WHITESPACE
    ComponentSet([
     Rule('assemble_chain_sequential_base_Unit_2', Unit(p1=None, p2=None) + Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY) % Base(b1=1, b2=ANY) % Unit(p1=1, p2=None) | Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY) % Base(b1=1, b2=ANY) % Unit(p1=1, p2=2) % Unit(p1=2, p2=None), assemble_chain_sequential_base_Unit_2_kf, assemble_chain_sequential_base_Unit_2_kr),
     Parameter('assemble_chain_sequential_base_Unit_2_kf', 0.0001),
     Parameter('assemble_chain_sequential_base_Unit_2_kr', 0.1),
     Rule('assemble_chain_sequential_base_Unit_3', Unit(p1=None, p2=None) + Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY) % Base(b1=1, b2=ANY) % Unit(p1=1, p2=2) % Unit(p1=2, p2=None) | MatchOnce(Complex1(s1=ANY) % Complex2(s1=ANY, s2=ANY) % Base(b1=1, b2=ANY) % Unit(p1=1, p2=2) % Unit(p1=2, p2=3) % Unit(p1=3, p2=None)), assemble_chain_sequential_base_Unit_3_kf, assemble_chain_sequential_base_Unit_3_kr),
     Parameter('assemble_chain_sequential_base_Unit_3_kf', 0.0001),
     Parameter('assemble_chain_sequential_base_Unit_3_kr', 0.1),
     ])
    """
    # One rate pair per growth step (sizes 2 .. max_size), hence max_size - 1
    # entries.  (The docstring previously claimed max_size + 1, contradicting
    # this check and the example above.)
    if len(ktable) != max_size - 1:
        raise ValueError("len(ktable) must be equal to max_size-1")

    def chain_rule_name(rule_expression, size):
        # Name each rule after the subunit monomer and the chain size formed.
        react_p = rule_expression.reactant_pattern
        monomer = react_p.complex_patterns[0].monomer_patterns[0].monomer
        return '%s_%d' % (monomer.name, size)

    components = ComponentSet()
    s = subunit({site1: None, site2: None})
    # One reversible rule per growth step: a free subunit extends the chain
    # from size-1 to size.
    for size, klist in zip(range(2, max_size + 1), ktable):
        chain_prev = chain_species_base(base, basesite, subunit, site1, site2, size - 1, comp)
        chain_next = chain_species_base(base, basesite, subunit, site1, site2, size, comp)
        name_func = functools.partial(chain_rule_name, size=size)
        components |= _macro_rule('assemble_chain_sequential_base',
                                  s + chain_prev | chain_next,
                                  klist, ['kf', 'kr'],
                                  name_func=name_func)
    return components
if __name__ == "__main__":
    # Run the doctests embedded in the macro docstrings above.
    import doctest
    doctest.testmod()
| 43.03458 | 409 | 0.621722 |
401dd15bdec369395d029e314b0ea60f3f2deb09 | 3,503 | py | Python | validator/kube/resource.py | KubeOperator/kubeGrade | ca9cdeb438707135bb35a1a3d3e028367dd122df | [
"Apache-2.0"
] | 4 | 2020-03-20T00:28:06.000Z | 2020-04-17T18:33:21.000Z | validator/kube/resource.py | KubeOperator/kubeGrade | ca9cdeb438707135bb35a1a3d3e028367dd122df | [
"Apache-2.0"
] | null | null | null | validator/kube/resource.py | KubeOperator/kubeGrade | ca9cdeb438707135bb35a1a3d3e028367dd122df | [
"Apache-2.0"
] | 1 | 2021-06-09T02:09:17.000Z | 2021-06-09T02:09:17.000Z | from datetime import datetime
from kubernetes import client
from kubernetes.client import ApiClient
__all__ = ["NamespaceKubernetesResource", "KubernetesResourceProvider"]
class NamespaceKubernetesResource:
    """Snapshot container for the workload resources of one namespace.

    The per-kind list attributes start empty and are filled in later by
    ``KubernetesResourceProvider.fetch_namespaced_kubernetes_resource``.
    """

    # Resource kinds tracked per namespace; each becomes a list attribute.
    _RESOURCE_KINDS = (
        "deployments",
        "stateful_sets",
        "daemon_sets",
        "jobs",
        "cron_jobs",
        "pods",
    )

    def __init__(self, namespace):
        super().__init__()
        # NOTE(review): plural attribute name but holds a single namespace
        # value — confirm against callers before renaming.
        self.namespaces = namespace
        for kind in self._RESOURCE_KINDS:
            setattr(self, kind, [])
class KubernetesResourceProvider:
    """Thin wrapper over the Kubernetes client used to collect resources.

    Authenticates against ``https://<host>:6443`` with a bearer token and
    exposes ``fetch_*`` helpers returning raw kubernetes-client model objects.
    """

    def __init__(self, host, token):
        configuration = client.Configuration()
        configuration.api_key_prefix['authorization'] = 'Bearer'
        configuration.api_key['authorization'] = token
        configuration.host = "https://{}:6443".format(host)
        # NOTE(security): TLS verification is disabled — the API server
        # certificate is never checked.  Acceptable only for trusted
        # in-cluster endpoints; consider making this configurable.
        configuration.verify_ssl = False
        configuration.debug = False
        self.client = ApiClient(configuration)

    @staticmethod
    def _items(list_all, list_namespaced, ns):
        """Dispatch to the cluster-wide or namespaced list call, return .items."""
        if not ns:
            return list_all().items
        return list_namespaced(ns).items

    def fetch_namespaced_kubernetes_resource(self, namespace):
        """Collect all supported resource kinds for `namespace` into one object."""
        result = NamespaceKubernetesResource(namespace)
        result.version = self.fetch_kubernetes_version()
        result.fetch_time = datetime.now()
        result.deployments = self.fetch_deployments(namespace)
        result.stateful_sets = self.fetch_stateful_sets(namespace)
        result.daemon_sets = self.fetch_daemon_sets(namespace)
        result.jobs = self.fetch_jobs(namespace)
        result.cron_jobs = self.fetch_corn_jobs(namespace)
        result.pods = self.fetch_pods(namespace)
        return result

    def fetch_namespaces(self):
        """Return all Namespace objects in the cluster."""
        api = client.CoreV1Api(self.client)
        return api.list_namespace().items

    def fetch_nodes(self):
        """Return all Node objects in the cluster."""
        api = client.CoreV1Api(self.client)
        return api.list_node().items

    def fetch_kubernetes_version(self):
        """Return the server version as "<major>.<minor>"."""
        api = client.VersionApi(self.client)
        # Bug fix: call get_code() once instead of twice — the original made
        # two separate API requests just to format one version string.
        code = api.get_code()
        return "{}.{}".format(code.major, code.minor)

    def fetch_pods(self, ns=None):
        """Pods in namespace `ns`, or cluster-wide when `ns` is falsy."""
        api = client.CoreV1Api(self.client)
        return self._items(api.list_pod_for_all_namespaces,
                           api.list_namespaced_pod, ns)

    def fetch_corn_jobs(self, ns=None):
        """CronJobs in `ns`, or cluster-wide.  (Name keeps the historical
        'corn' typo for backward compatibility with existing callers.)"""
        api = client.BatchV1beta1Api(self.client)
        return self._items(api.list_cron_job_for_all_namespaces,
                           api.list_namespaced_cron_job, ns)

    def fetch_jobs(self, ns=None):
        """Jobs in `ns`, or cluster-wide."""
        api = client.BatchV1Api(self.client)
        return self._items(api.list_job_for_all_namespaces,
                           api.list_namespaced_job, ns)

    def fetch_daemon_sets(self, ns=None):
        """DaemonSets in `ns`, or cluster-wide."""
        api = client.AppsV1Api(self.client)
        return self._items(api.list_daemon_set_for_all_namespaces,
                           api.list_namespaced_daemon_set, ns)

    def fetch_stateful_sets(self, ns=None):
        """StatefulSets in `ns`, or cluster-wide."""
        api = client.AppsV1Api(self.client)
        return self._items(api.list_stateful_set_for_all_namespaces,
                           api.list_namespaced_stateful_set, ns)

    def fetch_deployments(self, ns=None):
        """Deployments in `ns`, or cluster-wide."""
        api = client.AppsV1Api(self.client)
        return self._items(api.list_deployment_for_all_namespaces,
                           api.list_namespaced_deployment, ns)
| 34.343137 | 76 | 0.653725 |
aaa02dfc59f8314d6787a1bcebb595d9a139faf7 | 2,885 | py | Python | TensorArtist/tartist/data/flow/remote.py | cosmic119/DiscoGAN | 5a86f36f45a3dafdc028fc2100eb477e54dc83cd | [
"MIT"
] | null | null | null | TensorArtist/tartist/data/flow/remote.py | cosmic119/DiscoGAN | 5a86f36f45a3dafdc028fc2100eb477e54dc83cd | [
"MIT"
] | null | null | null | TensorArtist/tartist/data/flow/remote.py | cosmic119/DiscoGAN | 5a86f36f45a3dafdc028fc2100eb477e54dc83cd | [
"MIT"
] | null | null | null | # -*- coding:utf8 -*-
# File : remote.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 3/10/17
#
# This file is part of TensorArtist.
from .base import SimpleDataFlowBase
from ..rflow import InputPipe, OutputPipe, control
from ..rflow import make_push_pair
from multiprocessing import Process
import time
__all__ = ['RemoteDataFlow', 'MPPrefetchDataFlow', 'MPCustomDataFlow', 'RemoteMonitorDataFlow']
class RemoteDataFlow(SimpleDataFlowBase):
    """Endless dataflow fed by a named remote input pipe."""

    def __init__(self, pipe_name, bufsize=100):
        self._pipe = InputPipe(pipe_name, bufsize=bufsize)

    def _gen(self):
        # Keep the pipe under flow control for the generator's lifetime and
        # forward every received item to the consumer.
        pipe = self._pipe
        with control([pipe]):
            while True:
                yield pipe.get()
class MPPrefetchDataFlow(SimpleDataFlowBase):
    """Prefetch a wrapped dataflow in `nr_workers` daemon processes.

    Each worker iterates the wrapped dataflow and sends items to its own
    push end of a push/pull pair; `_gen` yields whatever arrives on the
    shared pull end.
    """

    def __init__(self, dataflow, nr_workers=1, mode='tcp', send_qsize=10):
        self._dataflow = dataflow
        self._nr_workers = nr_workers
        self._mode = mode
        self._send_qsize = send_qsize
        # Populated lazily in _initialize().
        self._pull = None
        self._pushs = None
        self._procs = None

    def _mainloop_worker(self, wid):
        # Runs inside a child process: forward every item through this
        # worker's push pipe.
        with self._pushs[wid].activate():
            for data in self._dataflow:
                self._pushs[wid].send(data)

    def _initialize(self):
        super()._initialize()
        self._pull, self._pushs = make_push_pair(
            str(self), self._nr_workers, mode=self._mode,
            send_qsize=self._send_qsize)
        # Create all workers first, then start them.
        self._procs = [
            Process(target=self._mainloop_worker, args=(wid,), daemon=True)
            for wid in range(self._nr_workers)
        ]
        for proc in self._procs:
            proc.start()

    def _gen(self):
        with self._pull.activate():
            while True:
                yield self._pull.recv()
class MPCustomDataFlow(SimpleDataFlowBase):
    """Run a user-supplied producer in worker processes and pull its output.

    Parameters
    ----------
    target : callable(wid, pipe), optional
        Producer executed in each worker process.  It should activate the
        given push `pipe` and send data through it.  If omitted, subclasses
        must override :meth:`run` instead.
    nr_workers : int
        Number of daemon worker processes.
    mode : str
        Transport mode forwarded to `make_push_pair` (e.g. 'tcp').
    send_qsize : int
        Send queue size forwarded to `make_push_pair`.
    """

    def __init__(self, target=None, nr_workers=2, mode='tcp', send_qsize=10):
        # Bug fix: `target` was previously discarded, so run() crashed with
        # AttributeError on self.target.  Store it for dispatch in run().
        self.target = target
        self._nr_workers = nr_workers
        self._mode = mode
        self._send_qsize = send_qsize
        self._pull = None
        self._pushs = None
        self._procs = None  # initialized here for parity with MPPrefetchDataFlow

    def run(self, wid, pipe):
        """Worker entry point; dispatches to `target` or a subclass override."""
        if self.target is None:
            raise NotImplementedError(
                'provide a target callable or override run()')
        return self.target(wid, pipe)

    def _initialize(self):
        super()._initialize()
        self._pull, self._pushs = make_push_pair(
            str(self), self._nr_workers, mode=self._mode,
            send_qsize=self._send_qsize)
        self._procs = [Process(target=self.run, args=(i, self._pushs[i]), daemon=True)
                       for i in range(self._nr_workers)]
        for p in self._procs:
            p.start()

    def _gen(self):
        with self._pull.activate():
            while True:
                yield self._pull.recv()
class RemoteMonitorDataFlow(SimpleDataFlowBase):
    """Pass-through dataflow that mirrors items onto an output pipe.

    Yields every item of the wrapped dataflow unchanged while also sending
    a timestamped copy through `pipe_name`.
    """

    def __init__(self, df, pipe_name, bufsize=1):
        self._df = df
        self._pipe = OutputPipe(pipe_name, bufsize=bufsize)

    def _gen(self):
        with control([self._pipe]):
            for item in self._df:
                # Non-blocking put: monitoring must not stall the main stream.
                self._pipe.put_nowait({'data': item, 'time': time.time()})
                yield item
| 31.703297 | 123 | 0.634315 |
c2ff032f44788856e59b75b42cc8ddefc6cafe81 | 8,226 | py | Python | Chapter 4/B05034_ch4_SRC_V3/src_ch4_python2/wargame/docs/source/conf.py | kunal2494/Learning-Python-Application-Development | 241bec5d5340e95cd68aef813c9ce8b0fca98986 | [
"MIT"
] | 64 | 2016-12-15T20:38:45.000Z | 2022-03-31T06:38:39.000Z | Chapter 4/B05034_ch4_SRC_V3/src_ch4_python2/wargame/docs/source/conf.py | kunal2494/Learning-Python-Application-Development | 241bec5d5340e95cd68aef813c9ce8b0fca98986 | [
"MIT"
] | null | null | null | Chapter 4/B05034_ch4_SRC_V3/src_ch4_python2/wargame/docs/source/conf.py | kunal2494/Learning-Python-Application-Development | 241bec5d5340e95cd68aef813c9ce8b0fca98986 | [
"MIT"
] | 48 | 2016-09-14T20:01:31.000Z | 2021-11-25T09:03:37.000Z | # -*- coding: utf-8 -*-
#
# wargame_py2 documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 26 14:39:35 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# NOTE(review): only the docs source dir is added here; autodoc imports of the
# documented package presumably rely on it being installed — confirm.
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# autodoc extracts docstrings; todo renders ``.. todo::`` directives.
extensions = [
    'sphinx.ext.autodoc', 'sphinx.ext.todo',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'wargame_py2'
copyright = u'2015, Ninad Sathaye'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.0'
# The full version, including alpha/beta/rc tags.
release = '2.0.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'wargame_py2doc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'wargame_py2.tex', u'wargame\\_py2 Documentation',
     u'Ninad Sathaye', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'wargame_py2', u'wargame_py2 Documentation',
     [u'Ninad Sathaye'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'wargame_py2', u'wargame_py2 Documentation',
     u'Ninad Sathaye', 'wargame_py2', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| 31.517241 | 79 | 0.718332 |
f26a6390b45fe548a347544f344b604b8627391d | 8,218 | py | Python | myvenv/Lib/site-packages/suds/servicedefinition.py | Fa67/saleor-shop | 76110349162c54c8bfcae61983bb59ba8fb0f778 | [
"BSD-3-Clause"
] | null | null | null | myvenv/Lib/site-packages/suds/servicedefinition.py | Fa67/saleor-shop | 76110349162c54c8bfcae61983bb59ba8fb0f778 | [
"BSD-3-Clause"
] | 5 | 2020-03-24T16:37:25.000Z | 2021-06-10T21:24:54.000Z | upibo-venv/Lib/site-packages/suds/servicedefinition.py | smbpgroup/upibo | 625dcda9f9692c62aeb9fe8f7123a5d407c610ae | [
"BSD-3-Clause"
] | null | null | null | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{service definition} provides a textual representation of a service.
"""
from suds import *
import suds.metrics as metrics
from suds.sax import Namespace
from logging import getLogger
log = getLogger(__name__)
class ServiceDefinition(UnicodeMixin):
    """
    A service definition provides an object used to generate a textual description
    of a service.
    @ivar wsdl: A wsdl.
    @type wsdl: L{wsdl.Definitions}
    @ivar service: The service object.
    @type service: L{suds.wsdl.Service}
    @ivar ports: A list of port-tuple: (port, [(method-name, pdef)])
    @type ports: [port-tuple,..]
    @ivar prefixes: A list of remapped prefixes.
    @type prefixes: [(prefix,uri),..]
    @ivar types: A list of type definitions
    @type types: [I{Type},..]
    """

    def __init__(self, wsdl, service):
        """
        @param wsdl: A WSDL object
        @type wsdl: L{Definitions}
        @param service: A service B{name}.
        @type service: str
        """
        self.wsdl = wsdl
        self.service = service
        self.ports = []
        self.params = []
        self.types = []
        self.prefixes = []
        # Build the definition eagerly: collect ports/methods, parameter and
        # public types, then assign prefixes and push them into the WSDL.
        self.addports()
        self.paramtypes()
        self.publictypes()
        self.getprefixes()
        self.pushprefixes()

    def pushprefixes(self):
        """
        Add our prefixes to the WSDL so that when users invoke methods
        and reference the prefixes, they will resolve properly.
        """
        for ns in self.prefixes:
            self.wsdl.root.addPrefix(ns[0], ns[1])

    def addports(self):
        """
        Look through the list of service ports and construct a list of tuples
        where each tuple is used to describe a port and its list of methods as:
        (port, [method]). Each method is a tuple: (name, [pdef,..]) where each
        pdef is a tuple: (param-name, type).
        """
        timer = metrics.Timer()
        timer.start()
        for port in self.service.ports:
            p = self.findport(port)
            for op in list(port.binding.operations.values()):
                m = p[0].method(op.name)
                binding = m.binding.input
                method = (m.name, binding.param_defs(m))
                p[1].append(method)
                metrics.log.debug("method '%s' created: %s", m.name, timer)
            p[1].sort()
        timer.stop()

    def findport(self, port):
        """
        Find and return a port tuple for the specified port.
        Created and added when not found.
        @param port: A port.
        @type port: I{service.Port}
        @return: A port tuple.
        @rtype: (port, [method])
        """
        for p in self.ports:
            # Bug fix: compare the stored port to the requested port.  The
            # original compared p[0] to the tuple p itself, which never
            # matched and made every lookup append a duplicate entry.
            if p[0] == port:
                return p
        p = (port, [])
        self.ports.append(p)
        return p

    def getprefixes(self):
        """Add prefixes for each namespace referenced by parameter types."""
        namespaces = []
        for l in (self.params, self.types):
            for t, r in l:
                ns = r.namespace()
                if ns[1] is None: continue
                if ns[1] in namespaces: continue
                # Skip the XML Schema built-in namespaces; they already have
                # well-known prefixes.
                if Namespace.xs(ns) or Namespace.xsd(ns):
                    continue
                namespaces.append(ns[1])
                if t == r: continue
                ns = t.namespace()
                if ns[1] is None: continue
                if ns[1] in namespaces: continue
                namespaces.append(ns[1])
        namespaces.sort()
        for u in namespaces:
            p = self.nextprefix()
            ns = (p, u)
            self.prefixes.append(ns)

    def paramtypes(self):
        """Get all parameter types."""
        for m in [p[1] for p in self.ports]:
            for p in [p[1] for p in m]:
                for pd in p:
                    if pd[1] in self.params: continue
                    item = (pd[1], pd[1].resolve())
                    self.params.append(item)

    def publictypes(self):
        """Get all public types."""
        for t in list(self.wsdl.schema.types.values()):
            if t in self.params: continue
            if t in self.types: continue
            item = (t, t)
            self.types.append(item)
        self.types.sort(key=lambda x: x[0].name)

    def nextprefix(self):
        """
        Get the next available prefix. This means a prefix starting with 'ns' with
        a number appended as (ns0, ns1, ..) that is not already defined in the
        WSDL document.
        """
        used = [ns[0] for ns in self.prefixes]
        used += [ns[0] for ns in self.wsdl.root.nsprefixes.items()]
        for n in range(0, 1024):
            p = 'ns%d' % n
            if p not in used:
                return p
        raise Exception('prefixes exhausted')

    def getprefix(self, u):
        """
        Get the prefix for the specified namespace (URI)
        @param u: A namespace URI.
        @type u: str
        @return: The namspace.
        @rtype: (prefix, uri).
        """
        for ns in Namespace.all:
            if u == ns[1]: return ns[0]
        for ns in self.prefixes:
            if u == ns[1]: return ns[0]
        raise Exception('ns (%s) not mapped' % u)

    def xlate(self, type):
        """
        Get a (namespace) translated I{qualified} name for specified type.
        @param type: A schema type.
        @type type: I{suds.xsd.sxbasic.SchemaObject}
        @return: A translated I{qualified} name.
        @rtype: str
        """
        resolved = type.resolve()
        name = resolved.name
        if type.multi_occurrence():
            name += '[]'
        ns = resolved.namespace()
        # Types in the WSDL's target namespace need no prefix.
        if ns[1] == self.wsdl.tns[1]:
            return name
        prefix = self.getprefix(ns[1])
        return ':'.join((prefix, name))

    def description(self):
        """
        Get a textual description of the service for which this object represents.
        @return: A textual description.
        @rtype: str
        """
        s = []
        # indent(n) = newline plus 3*n spaces of indentation.
        indent = (lambda n: '\n%*s' % (n*3, ' '))
        s.append('Service ( %s ) tns="%s"' % (self.service.name, self.wsdl.tns[1]))
        s.append(indent(1))
        s.append('Prefixes (%d)' % len(self.prefixes))
        for p in self.prefixes:
            s.append(indent(2))
            s.append('%s = "%s"' % p)
        s.append(indent(1))
        s.append('Ports (%d):' % len(self.ports))
        for p in self.ports:
            s.append(indent(2))
            s.append('(%s)' % p[0].name)
            s.append(indent(3))
            s.append('Methods (%d):' % len(p[1]))
            for m in p[1]:
                sig = []
                s.append(indent(4))
                sig.append(m[0])
                sig.append('(')
                sig.append(', '.join("%s %s" % (self.xlate(p[1]), p[0]) for p
                    in m[1]))
                sig.append(')')
                try:
                    s.append(''.join(sig))
                except Exception:
                    # Best-effort: skip signatures that cannot be rendered
                    # (e.g. xlate raising on an unmapped namespace) and keep
                    # building the rest of the report.
                    pass
            s.append(indent(3))
            s.append('Types (%d):' % len(self.types))
            for t in self.types:
                s.append(indent(4))
                s.append(self.xlate(t[0]))
        s.append('\n\n')
        return ''.join(s)

    def __unicode__(self):
        try:
            return self.description()
        except Exception as e:
            log.exception(e)
            return tostr(e)
| 34.099585 | 83 | 0.536992 |
006f8d217768d3f738f862a872ecd05273ae455b | 683 | py | Python | app/core/migrations/0002_tag.py | vkathirvel/python-django-recipe-app-api | c0533c74aec9bb172cfe3b1a2d04cb81179a1790 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | vkathirvel/python-django-recipe-app-api | c0533c74aec9bb172cfe3b1a2d04cb81179a1790 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | vkathirvel/python-django-recipe-app-api | c0533c74aec9bb172cfe3b1a2d04cb81179a1790 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2020-10-03 08:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add the core ``Tag`` model: a user-owned name label."""

    # Builds on the initial core schema (which defines the custom user model).
    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                # Deleting the owning user cascades to their tags.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 28.458333 | 118 | 0.616398 |
9ae86d981bb5feee1b16b6cf63c6486663b5efa5 | 9,033 | py | Python | meiduo_mall/meiduo_mall/settings/dev.py | RuanJylf/MeiDuo_Mall | 06a570fb327e8d06934fc942266456c798a9aec2 | [
"MIT"
] | null | null | null | meiduo_mall/meiduo_mall/settings/dev.py | RuanJylf/MeiDuo_Mall | 06a570fb327e8d06934fc942266456c798a9aec2 | [
"MIT"
] | 4 | 2021-06-08T23:42:00.000Z | 2022-03-12T00:50:52.000Z | meiduo_mall/meiduo_mall/settings/dev.py | RuanJylf/MeiDuo_Mall | 06a570fb327e8d06934fc942266456c798a9aec2 | [
"MIT"
] | null | null | null | """
Django settings for meiduo_mall project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import datetime
import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# sys.path 保存python解释器的导包路径
sys.path.insert(0, os.path.join(BASE_DIR, 'apps'))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): secret key is hard-coded and checked into source control —
# load it from an environment variable or secrets manager before deploying.
SECRET_KEY = 't2_8r56*8kz-y3zwoy*kie66k@4n2*%t!j+8og_t7qywbhsvzi'
# SECURITY WARNING: don't run with debug turned on in production!
# DEBUG=True is acceptable here only because this is the dev settings module.
DEBUG = True
# Hosts/domains this site is allowed to serve (development hosts only).
ALLOWED_HOSTS = ['127.0.0.1', 'www.meiduo.site', 'api.meiduo.site', 'localhost']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'ckeditor', # 富文本编辑器
'ckeditor_uploader', # 富文本编辑器上传图片模板
'django_crontab', # 定时任务
'haystack', # 对接搜索引擎
# 注册安装子应用配置
'users.apps.UsersConfig',
'verifications.apps.VerificationsConfig',
'oauth.apps.OauthConfig',
'areas.apps.AreasConfig',
'contents.apps.ContentsConfig',
'goods.apps.GoodsConfig',
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware', # 解决跨域请求
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduo_mall.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meiduo_mall.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': '127.0.0.1', # 数据库主机
'PORT': 3306, # 数据库端口
'USER': 'meiduo', # 数据库用户名
'PASSWORD': 'meiduo', # 数据库用户密码
'NAME': 'meiduo_mall' # 数据库名字
}
}
# redis 配置
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/0",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
"session": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/1",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
},
},
"verify_codes": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/2",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
},
},
"history": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://127.0.0.1:6379/3",
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
},
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
SESSION_CACHE_ALIAS = "session"
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'zh-hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
# 日志配置
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(lineno)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(module)s %(lineno)d %(message)s'
},
},
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'file': {
'level': 'INFO',
'class': 'logging.handlers.RotatingFileHandler',
'filename': os.path.join(os.path.dirname(BASE_DIR), "logs/meiduo.log"), # 日志文件的位置
'maxBytes': 300 * 1024 * 1024,
'backupCount': 10,
'formatter': 'verbose'
},
},
'loggers': {
'django': { # 定义了一个名为django的日志器
'handlers': ['console', 'file'],
'propagate': True,
},
}
}
# 指明自定义的用户模型类, 第一次数据库迁移之前配置
AUTH_USER_MODEL = 'users.User'
# 认证方法, 用户名或手机号认证, 实现多账号登录
AUTHENTICATION_BACKENDS = [
'users.utils.UsernameMobileAuthBackend',
]
REST_FRAMEWORK = {
# 异常处理
'EXCEPTION_HANDLER': 'meiduo_mall.utils.exceptions.exception_handler',
# jwt 认证
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
# 分页
'DEFAULT_PAGINATION_CLASS': 'meiduo_mall.utils.pagination.StandardResultsSetPagination',
}
# JWT 签证
JWT_AUTH = {
# jwt 过期时间
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1), # 有效期
'JWT_RESPONSE_PAYLOAD_HANDLER': 'users.utils.jwt_response_payload_handler',
}
# CORS 跨域请求白名单
CORS_ORIGIN_WHITELIST = (
# 8080前端端口访问前端页
'127.0.0.1:8080',
'localhost:8080',
'www.meiduo.site:8080',
# 8000后端端口,不能访问前端页面, 但是可以运行
'127.0.0.1:8000',
'api.meiduo.site:8000',
)
CORS_ALLOW_CREDENTIALS = True # 允许携带cookie
# QQ OAuth login parameters
# NOTE(review): client secret is hard-coded in source control — move it to an
# environment variable before deploying.
QQ_CLIENT_ID = '101474184'
QQ_CLIENT_SECRET = 'c6ce949e04e12ecc909ae6a8b09b637c'
QQ_REDIRECT_URI = 'http://www.meiduo.site:8080/oauth_callback.html'
QQ_STATE = '/'
# Email: NetEase (163.com) SMTP configuration for outgoing mail
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.163.com'
EMAIL_PORT = 25
# Mailbox used as the sending account
EMAIL_HOST_USER = 'ruanjylf@163.com'
# Client authorization password configured in the mailbox settings
# NOTE(review): credential hard-coded in source control — load from the
# environment instead.
EMAIL_HOST_PASSWORD = 'R950413J'
# Sender string shown to recipients
EMAIL_FROM = '美多商城<ruanjylf@163.com>'
# DRF 扩展
REST_FRAMEWORK_EXTENSIONS = {
# 缓存时间
'DEFAULT_CACHE_RESPONSE_TIMEOUT': 60 * 60,
# 缓存存储
'DEFAULT_USE_CACHE': 'default',
}
# django文件存储
DEFAULT_FILE_STORAGE = 'meiduo_mall.utils.fastdfs.fdfs_storage.FastDFSStorage'
# FastDFS 分布式文件系统
FDFS_URL = 'http://image.meiduo.site:8888/'
FDFS_CLIENT_CONF = os.path.join(BASE_DIR, 'utils/fastdfs/client.conf')
# 富文本编辑器ckeditor配置
CKEDITOR_CONFIGS = {
'default': {
'toolbar': 'full', # 工具条功能
'height': 300, # 编辑器高度
# 'width': 300, # 编辑器宽
},
}
CKEDITOR_UPLOAD_PATH = '' # 上传图片保存路径,使用了FastDFS,所以此处设为''
# 页面静态化保存文件的目录
GENERATED_STATIC_HTML_FILES_DIR = os.path.join(os.path.dirname(os.path.dirname(BASE_DIR)), 'front_end_pc')
# 定时任务
CRONJOBS = [
# 每5分钟执行一次生成主页静态文件
# ('*/5 * * * *', 'contents.crons.generate_static_index_html', '>> /Users/delron/Desktop/meiduo_mall/logs/crontab.log')
('*/1 * * * *', 'contents.crons.generate_static_index_html', '>> '+ os.path.join(os.path.dirname(BASE_DIR), "logs/crontab.log"))
]
# 解决crontab中文问题
CRONTAB_COMMAND_PREFIX = 'LANG_ALL=zh_cn.UTF-8'
# */5 * * * *
# 1:16 1:21 1:26 :1:31
# 5 * * * *
# 2:05 3:05 4:05
# > 覆盖
# >> 追加
# Haystack
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': 'http://192.168.48.128:9200/', # 此处为elasticsearch运行的服务器ip地址,端口号固定为9200
'INDEX_NAME': 'meiduo', # 指定elasticsearch建立的索引库的名称
},
}
| 27.207831 | 132 | 0.647957 |
bef9c1a8488fedd534e226e87159d4a5497f46ce | 6,323 | py | Python | tests/test_forms.py | DummerDelfin/django-contact-form | ef7f7d2492b6bdf9bed6aae2fa5aadc3dd2f3d30 | [
"BSD-3-Clause"
] | null | null | null | tests/test_forms.py | DummerDelfin/django-contact-form | ef7f7d2492b6bdf9bed6aae2fa5aadc3dd2f3d30 | [
"BSD-3-Clause"
] | null | null | null | tests/test_forms.py | DummerDelfin/django-contact-form | ef7f7d2492b6bdf9bed6aae2fa5aadc3dd2f3d30 | [
"BSD-3-Clause"
] | null | null | null | import os
import unittest
from django.conf import settings
from django.core import mail
from django.test import RequestFactory, TestCase
from django.utils.six import text_type
import mock
from contact_form.forms import AkismetContactForm, ContactForm
class ContactFormTests(TestCase):
"""
Tests the base ContactForm.
"""
valid_data = {'name': 'Test',
'email': 'test@example.com',
'body': 'Test message'}
def request(self):
return RequestFactory().request()
def test_request_required(self):
"""
Can't instantiate without an HttpRequest.
"""
self.assertRaises(TypeError, ContactForm)
def test_valid_data_required(self):
"""
Can't try to build the message dict unless data is valid.
"""
data = {'name': 'Test',
'body': 'Test message'}
form = ContactForm(request=self.request(), data=data)
self.assertRaises(ValueError, form.get_message_dict)
self.assertRaises(ValueError, form.get_context)
def test_send(self):
"""
Valid form can and does in fact send email.
"""
form = ContactForm(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertTrue(self.valid_data['body'] in message.body)
self.assertEqual(settings.DEFAULT_FROM_EMAIL,
message.from_email)
self.assertEqual(form.recipient_list,
message.recipients())
def test_no_sites(self):
"""
Sites integration works with or without installed
contrib.sites.
"""
with self.modify_settings(
INSTALLED_APPS={
'remove': ['django.contrib.sites'],
}):
form = ContactForm(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
def test_recipient_list(self):
"""
Passing recipient_list when instantiating ContactForm properly
overrides the list of recipients.
"""
recipient_list = ['recipient_list@example.com']
form = ContactForm(request=self.request(),
data=self.valid_data,
recipient_list=recipient_list)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertEqual(recipient_list,
message.recipients())
def test_callable_template_name(self):
"""
When a template_name() method is defined, it is used and
preferred over a 'template_name' attribute.
"""
class CallableTemplateName(ContactForm):
def template_name(self):
return 'contact_form/test_callable_template_name.html'
form = CallableTemplateName(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
form.save()
self.assertEqual(1, len(mail.outbox))
message = mail.outbox[0]
self.assertTrue('Callable template_name used.' in
message.body)
def test_callable_message_parts(self):
"""
Message parts implemented as methods are called and preferred
over attributes.
"""
overridden_data = {
'from_email': 'override@example.com',
'message': 'Overridden message.',
'recipient_list': ['override_recpt@example.com'],
'subject': 'Overridden subject',
}
class CallableMessageParts(ContactForm):
def from_email(self):
return overridden_data['from_email']
def message(self):
return overridden_data['message']
def recipient_list(self):
return overridden_data['recipient_list']
def subject(self):
return overridden_data['subject']
form = CallableMessageParts(request=self.request(),
data=self.valid_data)
self.assertTrue(form.is_valid())
self.assertEqual(overridden_data,
form.get_message_dict())
@unittest.skipUnless(
    getattr(
        settings,
        'AKISMET_API_KEY',
        os.getenv('PYTHON_AKISMET_API_KEY')
    ) is not None,
    "AkismetContactForm requires Akismet configuration"
)
class AkismetContactFormTests(TestCase):
    """
    Tests the Akismet contact form.
    """
    def request(self):
        # Helper: a bare HttpRequest for constructing forms.
        return RequestFactory().request()
    def test_akismet_form_spam(self):
        """
        The Akismet contact form correctly rejects spam.
        """
        # 'viagra-test-123' is Akismet's documented always-spam test author.
        data = {'name': 'viagra-test-123',
                'email': 'email@example.com',
                'body': 'This is spam.'}
        with mock.patch('akismet.Akismet', autospec=True) as akismet_mock:
            instance = akismet_mock.return_value
            instance.verify_key.return_value = True
            # comment_check() returning True means "this is spam".
            instance.comment_check.return_value = True
            form = AkismetContactForm(
                request=self.request(),
                data=data
            )
            self.assertFalse(form.is_valid())
            self.assertTrue(
                text_type(form.SPAM_MESSAGE) in
                form.errors['body']
            )
    def test_akismet_form_ham(self):
        """
        The Akismet contact form correctly accepts non-spam.
        """
        data = {'name': 'Test',
                'email': 'email@example.com',
                'body': 'Test message.'}
        with mock.patch('akismet.Akismet', autospec=True) as akismet_mock:
            instance = akismet_mock.return_value
            instance.verify_key.return_value = True
            # comment_check() returning False means "not spam".
            instance.comment_check.return_value = False
            form = AkismetContactForm(
                request=self.request(),
                data=data
            )
            self.assertTrue(form.is_valid())
f2b6114bdd12ad7814093cfa8cc4c3e64893b871 | 1,761 | py | Python | tests/unit/dataactvalidator/test_fabsreq11_detached_award_financial_assistance.py | RonSherfey/data-act-broker-backend | d287abda2cac06dd479ecf0127e789cb8e59387d | [
"CC0-1.0"
] | null | null | null | tests/unit/dataactvalidator/test_fabsreq11_detached_award_financial_assistance.py | RonSherfey/data-act-broker-backend | d287abda2cac06dd479ecf0127e789cb8e59387d | [
"CC0-1.0"
] | 3 | 2021-08-22T11:47:45.000Z | 2022-03-29T22:06:49.000Z | tests/unit/dataactvalidator/test_fabsreq11_detached_award_financial_assistance.py | RonSherfey/data-act-broker-backend | d287abda2cac06dd479ecf0127e789cb8e59387d | [
"CC0-1.0"
] | null | null | null | from tests.unit.dataactcore.factories.staging import DetachedAwardFinancialAssistanceFactory
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
_FILE = 'fabsreq11_detached_award_financial_assistance'
def test_column_headers(database):
expected_subset = {'row_number', 'action_type', 'correction_delete_indicatr',
'uniqueid_AssistanceTransactionUniqueKey'}
actual = set(query_columns(_FILE, database))
assert expected_subset == actual
def test_success(database):
    """ Test ActionType is required for all submissions except delete records. """
    records = [
        # Non-delete records with an ActionType present pass the rule.
        DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='C', action_type='c'),
        DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='', action_type='A'),
        # Delete records are ignored regardless of ActionType.
        DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', action_type=None),
        DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='D', action_type=''),
        DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='d', action_type='Name'),
    ]
    assert number_of_errors(_FILE, database, models=records) == 0
def test_failure(database):
    """ Test fail ActionType is required for all submissions except delete records. """
    records = [
        # Non-delete records with a missing/blank ActionType must be flagged.
        DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr='c', action_type=None),
        DetachedAwardFinancialAssistanceFactory(correction_delete_indicatr=None, action_type=''),
    ]
    assert number_of_errors(_FILE, database, models=records) == 2
| 48.916667 | 118 | 0.794435 |
f9e81658d955af9cc508ac8e5e9906312b7d5475 | 22,396 | py | Python | recipes/AMI/Diarization/experiment.py | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 | [
"Apache-2.0"
] | 3,913 | 2021-03-14T13:54:52.000Z | 2022-03-30T05:09:55.000Z | recipes/AMI/Diarization/experiment.py | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 | [
"Apache-2.0"
] | 667 | 2021-03-14T20:11:17.000Z | 2022-03-31T04:07:17.000Z | recipes/AMI/Diarization/experiment.py | JasonSWFu/speechbrain | cb78ba2b33fceba273b055dc471535344c3053f0 | [
"Apache-2.0"
] | 785 | 2021-03-14T13:20:57.000Z | 2022-03-31T03:26:03.000Z | #!/usr/bin/python3
"""This recipe implements diarization system using deep embedding extraction followed by spectral clustering.
To run this recipe:
> python experiment.py hparams/<your_hyperparams_file.yaml>
e.g., python experiment.py hparams/ecapa_tdnn.yaml
Condition: Oracle VAD (speech regions taken from the groundtruth).
Note: There are multiple ways to write this recipe. We iterate over individual recordings.
This approach is less GPU memory demanding and also makes code easy to understand.
Citation: This recipe is based on the following paper,
N. Dawalatabad, M. Ravanelli, F. Grondin, J. Thienpondt, B. Desplanques, H. Na,
"ECAPA-TDNN Embeddings for Speaker Diarization," arXiv:2104.01466, 2021.
Authors
* Nauman Dawalatabad 2020
"""
import os
import sys
import torch
import logging
import pickle
import json
import glob
import shutil
import numpy as np
import speechbrain as sb
from tqdm.contrib import tqdm
from hyperpyyaml import load_hyperpyyaml
from speechbrain.utils.distributed import run_on_main
from speechbrain.processing.PLDA_LDA import StatObject_SB
from speechbrain.processing import diarization as diar
from speechbrain.utils.DER import DER
from speechbrain.dataio.dataio import read_audio
from speechbrain.dataio.dataio import read_audio_multichannel
# Seed NumPy so the clustering results are reproducible across runs.
np.random.seed(1234)

# Logger setup
logger = logging.getLogger(__name__)
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(current_dir))

# scikit-learn is an optional dependency (needed by the clustering backends);
# fail early with actionable install instructions when it is missing.
# Fix: the distributable package is named `scikit-learn` — the old `sklearn`
# PyPI name is deprecated and no longer installable.
try:
    import sklearn  # noqa F401
except ImportError:
    err_msg = (
        "Cannot import optional dependency `sklearn` used in this module.\n"
    )
    err_msg += "Please follow the below instructions\n"
    err_msg += "=============================\n"
    err_msg += "Using pip:\n"
    err_msg += "pip install scikit-learn\n"
    err_msg += "================================ \n"
    err_msg += "Using conda:\n"
    err_msg += "conda install scikit-learn"
    raise ImportError(err_msg)
def compute_embeddings(wavs, lens):
    """Compute speaker embeddings for a batch of waveforms.

    Runs feature extraction, feature normalization, the embedding model and
    embedding normalization, all in inference mode (no gradient tracking).

    Arguments
    ---------
    wavs : torch.Tensor
        Batch of input waveforms.
    lens : torch.Tensor
        Relative lengths of the waveforms in the batch.

    Returns
    -------
    torch.Tensor
        Normalized speaker embeddings.
    """
    device = params["device"]
    with torch.no_grad():
        signals = wavs.to(device)
        features = params["compute_features"](signals)
        features = params["mean_var_norm"](features, lens)
        embeddings = params["embedding_model"](features, lens)
        full_lens = torch.ones(embeddings.shape[0], device=device)
        embeddings = params["mean_var_norm_emb"](embeddings, full_lens)
    return embeddings
def embedding_computation_loop(split, set_loader, stat_file):
    """Extract embeddings for a dataset loader, caching the result on disk.

    Arguments
    ---------
    split : str
        Tag used only for logging/identification.
    set_loader : torch.utils.data.DataLoader
        Loader yielding batches with ``id`` and ``sig`` fields.
    stat_file : str
        Path of the pickle file where embedding statistics are cached. If it
        already exists, embeddings are loaded from it instead of recomputed.

    Returns
    -------
    speechbrain.processing.PLDA_LDA.StatObject_SB
        Embedding statistics object for the dataset.
    """
    # Note: We use speechbrain.processing.PLDA_LDA.StatObject_SB type to store embeddings.
    # Extract embeddings (skip if already done).
    if not os.path.isfile(stat_file):
        logger.debug("Extracting deep embeddings and diarizing")
        # Collect per-batch arrays and concatenate ONCE at the end; the
        # original per-batch np.concatenate grows the array quadratically.
        emb_chunks = []
        modelset = []
        segset = []
        # Different data may have different statistics.
        params["mean_var_norm_emb"].count = 0
        for batch in set_loader:
            ids = batch.id
            wavs, lens = batch.sig
            # Each sub-segment id acts both as model and segment identifier.
            modelset.extend(ids)
            segset.extend(ids)
            # Embedding computation.
            emb = (
                compute_embeddings(wavs, lens)
                .contiguous()
                .squeeze(1)
                .cpu()
                .numpy()
            )
            emb_chunks.append(emb)
        if emb_chunks:
            # astype preserves the original float64 accumulator dtype.
            embeddings = np.concatenate(emb_chunks, axis=0).astype(np.float64)
        else:
            embeddings = np.empty(
                shape=[0, params["emb_dim"]], dtype=np.float64
            )
        modelset = np.array(modelset, dtype="|O")
        segset = np.array(segset, dtype="|O")
        # Initialize variables for start, stop and stat0.
        s = np.array([None] * embeddings.shape[0])
        b = np.array([[1.0]] * embeddings.shape[0])
        stat_obj = StatObject_SB(
            modelset=modelset,
            segset=segset,
            start=s,
            stop=s,
            stat0=b,
            stat1=embeddings,
        )
        logger.debug("Saving Embeddings...")
        stat_obj.save_stat_object(stat_file)
    else:
        logger.debug("Skipping embedding extraction (as already present).")
        logger.debug("Loading previously saved embeddings.")
        with open(stat_file, "rb") as in_file:
            stat_obj = pickle.load(in_file)
    return stat_obj
def prepare_subset_json(full_meta_data, rec_id, out_meta_file):
    """Write the metadata entries belonging to one recording to a JSON file.

    Arguments
    ---------
    full_meta_data : json
        Full meta (json) containing all the recordings.
    rec_id : str
        The recording ID for which meta (json) has to be prepared.
    out_meta_file : str
        Path of the output meta (json) file.
    """
    # Keep only the keys whose name starts with the recording ID.
    subset = {
        key: value
        for key, value in full_meta_data.items()
        if str(key).startswith(rec_id)
    }
    with open(out_meta_file, mode="w") as json_f:
        json.dump(subset, json_f, indent=2)
def diarize_dataset(full_meta, split_type, n_lambdas, pval, n_neighbors=10):
    """This function diarizes all the recordings in a given dataset. It performs
    computation of embedding and clusters them using spectral clustering (or other backends).
    The output speaker boundary file is stored in the RTTM format.

    Arguments
    ---------
    full_meta : dict
        Metadata (json) covering all sub-segments of every recording.
    split_type : str
        Data split tag (e.g. "dev"); used in paths and logging.
    n_lambdas : int or None
        Number of speakers to use when not estimated (nn affinity without
        oracle speaker count); None otherwise.
    pval : float or None
        Affinity-matrix pruning p-value (interpreted as the merge threshold
        when the backend is AHC).
    n_neighbors : int
        Number of neighbors for the nn-based affinity matrix.

    Returns
    -------
    str
        Path of the concatenated system RTTM file for the whole split.
    """
    # Prepare `spkr_info` only once when Oracle num of speakers is selected.
    # spkr_info is essential to obtain number of speakers from groundtruth.
    if params["oracle_n_spkrs"] is True:
        full_ref_rttm_file = (
            params["ref_rttm_dir"] + "/fullref_ami_" + split_type + ".rttm"
        )
        rttm = diar.read_rttm(full_ref_rttm_file)
        spkr_info = list(  # noqa F841
            filter(lambda x: x.startswith("SPKR-INFO"), rttm)
        )
    # Get all the recording IDs in this dataset.
    # Keys look like "<rec_id>_<subsegment...>"; the prefix before the first
    # underscore is the recording ID.
    all_keys = full_meta.keys()
    A = [word.rstrip().split("_")[0] for word in all_keys]
    # NOTE(review): A[1:] drops the first key — presumably skipping a header
    # entry in the metadata; confirm against the json generated by prepare_ami.
    all_rec_ids = list(set(A[1:]))
    all_rec_ids.sort()
    split = "AMI_" + split_type
    i = 1
    # Setting eval modality.
    params["embedding_model"].eval()
    msg = "Diarizing " + split_type + " set"
    logger.info(msg)
    if len(all_rec_ids) <= 0:
        msg = "No recording IDs found! Please check if meta_data json file is properly generated."
        logger.error(msg)
        sys.exit()
    # Diarizing different recordings in a dataset.
    for rec_id in tqdm(all_rec_ids):
        # This tag will be displayed in the log.
        tag = (
            "["
            + str(split_type)
            + ": "
            + str(i)
            + "/"
            + str(len(all_rec_ids))
            + "]"
        )
        i = i + 1
        # Log message.
        msg = "Diarizing %s : %s " % (tag, rec_id)
        logger.debug(msg)
        # Embedding directory.
        if not os.path.exists(os.path.join(params["embedding_dir"], split)):
            os.makedirs(os.path.join(params["embedding_dir"], split))
        # File to store embeddings.
        emb_file_name = rec_id + "." + params["mic_type"] + ".emb_stat.pkl"
        diary_stat_emb_file = os.path.join(
            params["embedding_dir"], split, emb_file_name
        )
        # Prepare a metadata (json) for one recording. This is basically a subset of full_meta.
        # Lets keep this meta-info in embedding directory itself.
        json_file_name = rec_id + "." + params["mic_type"] + ".json"
        meta_per_rec_file = os.path.join(
            params["embedding_dir"], split, json_file_name
        )
        # Write subset (meta for one recording) json metadata.
        prepare_subset_json(full_meta, rec_id, meta_per_rec_file)
        # Prepare data loader.
        diary_set_loader = dataio_prep(params, meta_per_rec_file)
        # Putting modules on the device.
        params["compute_features"].to(params["device"])
        params["mean_var_norm"].to(params["device"])
        params["embedding_model"].to(params["device"])
        params["mean_var_norm_emb"].to(params["device"])
        # Compute Embeddings (cached on disk per recording).
        diary_obj = embedding_computation_loop(
            "diary", diary_set_loader, diary_stat_emb_file
        )
        # Adding tag for directory path.
        type_of_num_spkr = "oracle" if params["oracle_n_spkrs"] else "est"
        tag = (
            type_of_num_spkr
            + "_"
            + str(params["affinity"])
            + "_"
            + params["backend"]
        )
        out_rttm_dir = os.path.join(
            params["sys_rttm_dir"], params["mic_type"], split, tag
        )
        if not os.path.exists(out_rttm_dir):
            os.makedirs(out_rttm_dir)
        out_rttm_file = out_rttm_dir + "/" + rec_id + ".rttm"
        # Processing starts from here.
        if params["oracle_n_spkrs"] is True:
            # Oracle num of speakers.
            num_spkrs = diar.get_oracle_num_spkrs(rec_id, spkr_info)
        else:
            if params["affinity"] == "nn":
                # Num of speakers tunned on dev set (only for nn affinity).
                num_spkrs = n_lambdas
            else:
                # Num of speakers will be estimated using max eigen gap for cos based affinity.
                # So adding None here. Will use this None later-on.
                num_spkrs = None
        if params["backend"] == "kmeans":
            diar.do_kmeans_clustering(
                diary_obj, out_rttm_file, rec_id, num_spkrs, pval,
            )
        if params["backend"] == "SC":
            # Go for Spectral Clustering (SC).
            diar.do_spec_clustering(
                diary_obj,
                out_rttm_file,
                rec_id,
                num_spkrs,
                pval,
                params["affinity"],
                n_neighbors,
            )
        # Can used for AHC later. Likewise one can add different backends here.
        if params["backend"] == "AHC":
            # call AHC
            threshold = pval  # pval for AHC is nothing but threshold.
            diar.do_AHC(diary_obj, out_rttm_file, rec_id, num_spkrs, threshold)
    # Once all RTTM outputs are generated, concatenate individual RTTM files to obtain single RTTM file.
    # This is not needed but just staying with the standards.
    concate_rttm_file = out_rttm_dir + "/sys_output.rttm"
    logger.debug("Concatenating individual RTTM files...")
    with open(concate_rttm_file, "w") as cat_file:
        for f in glob.glob(out_rttm_dir + "/*.rttm"):
            if f == concate_rttm_file:
                continue
            with open(f, "r") as indi_rttm_file:
                shutil.copyfileobj(indi_rttm_file, cat_file)
    msg = "The system generated RTTM file for %s set : %s" % (
        split_type,
        concate_rttm_file,
    )
    logger.debug(msg)
    return concate_rttm_file
def dev_pval_tuner(full_meta, split_type):
    """Tune the pruning p-value for the affinity matrix on the dev set.

    Each candidate p-value keeps only p% of the entries in every row of the
    affinity matrix; the candidate achieving the lowest DER is returned.
    """
    candidate_pvals = np.arange(0.002, 0.015, 0.001)
    der_scores = []
    ref_rttm = os.path.join(params["ref_rttm_dir"], "fullref_ami_dev.rttm")
    for candidate in candidate_pvals:
        # Diarize the whole dev set with this candidate p-value
        # (n_lambdas=None acts as a "not used" flag downstream).
        sys_rttm = diarize_dataset(full_meta, split_type, None, candidate)
        _, _, _, der_value = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
        )
        der_scores.append(der_value)
        # With oracle speaker counts and the kmeans backend the p-value is
        # irrelevant, so a single iteration suffices. Note that SC needs the
        # p-value for both oracle and estimated speaker counts, and kmeans
        # needs it when oracle_n_spkrs is False.
        if params["oracle_n_spkrs"] is True and params["backend"] == "kmeans":
            break
    # Return the candidate that achieved the minimum DER on the dev set.
    return candidate_pvals[der_scores.index(min(der_scores))]
def dev_ahc_threshold_tuner(full_meta, split_type):
    """Tune the AHC merge threshold on the dev set.

    Called only when AHC is the clustering backend; the p-value argument of
    ``diarize_dataset`` is interpreted as the AHC threshold.
    """
    candidate_thresholds = np.arange(0.0, 1.0, 0.1)
    der_scores = []
    ref_rttm = os.path.join(params["ref_rttm_dir"], "fullref_ami_dev.rttm")
    for threshold in candidate_thresholds:
        # Diarize the whole dev set with this candidate threshold.
        sys_rttm = diarize_dataset(full_meta, split_type, None, threshold)
        _, _, _, der_value = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
        )
        der_scores.append(der_value)
        # With an oracle number of speakers there is nothing to search.
        if params["oracle_n_spkrs"] is True:
            break
    # Return the threshold that achieved the minimum DER on the dev set.
    return candidate_thresholds[der_scores.index(min(der_scores))]
def dev_nn_tuner(full_meta, split_type):
    """Tune the number of neighbors on the dev set.

    Used when the nn-based affinity matrix is selected; an oracle number of
    speakers is assumed while tuning.
    """
    results = []  # (n_neighbors, DER) pairs
    # Assume oracle number of speakers during the search.
    oracle_n_lambdas = 4
    for candidate_nn in range(5, 15):
        # Diarize the whole dev set with this neighbor count.
        sys_rttm = diarize_dataset(
            full_meta, split_type, oracle_n_lambdas, None, candidate_nn
        )
        ref_rttm = os.path.join(params["ref_rttm_dir"], "fullref_ami_dev.rttm")
        _, _, _, der_value = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
        )
        results.append((candidate_nn, der_value))
        if params["oracle_n_spkrs"] is True and params["backend"] == "kmeans":
            break
    # Keep the neighbor count with the lowest DER (first one on ties,
    # matching the original stable-sort behavior).
    best_nn, _ = min(results, key=lambda pair: pair[1])
    return best_nn
def dev_tuner(full_meta, split_type):
    """Tune the number of components (speakers) on the dev set.

    Used for the nn-based affinity matrix. Note: this is a very basic tuning
    scheme and is work in progress until a better approach is found.
    """
    der_scores = []
    for candidate_n in range(1, params["max_num_spkrs"] + 1):
        # Diarize the whole dev set assuming `candidate_n` speakers.
        sys_rttm = diarize_dataset(full_meta, split_type, candidate_n, None)
        ref_rttm = os.path.join(params["ref_rttm_dir"], "fullref_ami_dev.rttm")
        _, _, _, der_value = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
        )
        der_scores.append(der_value)
    # Candidates are 1-indexed: position of the minimum DER plus one.
    return der_scores.index(min(der_scores)) + 1
def dataio_prep(hparams, json_file):
    """Creates the datasets and their data processing pipelines.
    This is used for multi-mic processing.

    Arguments
    ---------
    hparams : dict
        Loaded hyperparameters; "data_folder" is read from here.
        NOTE(review): the mic type, beamformer and dataloader options are read
        from the module-level `params`, not from this argument — confirm the
        two are always the same object.
    json_file : str
        Path of the per-recording metadata (json) file.

    Returns
    -------
    torch.utils.data.DataLoader
        Dataloader yielding batches with "id" and "sig" keys.
    """
    # 1. Datasets
    data_folder = hparams["data_folder"]
    dataset = sb.dataio.dataset.DynamicItemDataset.from_json(
        json_path=json_file, replacements={"data_root": data_folder},
    )
    # 2. Define audio pipeline.
    if params["mic_type"] == "Array1":
        # Multi-mic (Microphone Array)
        @sb.utils.data_pipeline.takes("wav")
        @sb.utils.data_pipeline.provides("sig")
        def audio_pipeline(wav):
            # Beamform the multi-channel signal down to a single channel.
            mics_signals = read_audio_multichannel(wav).unsqueeze(0)
            sig = params["multimic_beamformer"](mics_signals)
            sig = sig.squeeze()
            return sig
    else:
        # Single microphone
        @sb.utils.data_pipeline.takes("wav")
        @sb.utils.data_pipeline.provides("sig")
        def audio_pipeline(wav):
            sig = read_audio(wav)
            return sig
    sb.dataio.dataset.add_dynamic_item([dataset], audio_pipeline)
    # 3. Set output:
    sb.dataio.dataset.set_output_keys([dataset], ["id", "sig"])
    # 4. Create dataloader:
    dataloader = sb.dataio.dataloader.make_dataloader(
        dataset, **params["dataloader_opts"]
    )
    return dataloader
# Begin experiment!
# Overall flow: parse hyperparams -> prepare AMI metadata -> create experiment
# dirs -> load the pretrained embedding model -> tune clustering hyperparams
# on the dev set -> run final diarization on dev/eval and report DERs.
if __name__ == "__main__": # noqa: C901
    # Load hyperparameters file with command-line overrides.
    params_file, run_opts, overrides = sb.core.parse_arguments(sys.argv[1:])
    with open(params_file) as fin:
        params = load_hyperpyyaml(fin, overrides)
    # Dataset prep (preparing metadata files)
    from ami_prepare import prepare_ami # noqa
    # run_on_main: executed only once (on the main process) in DDP setups.
    run_on_main(
        prepare_ami,
        kwargs={
            "data_folder": params["data_folder"],
            "save_folder": params["save_folder"],
            "ref_rttm_dir": params["ref_rttm_dir"],
            "meta_data_dir": params["meta_data_dir"],
            "manual_annot_folder": params["manual_annot_folder"],
            "split_type": params["split_type"],
            "skip_TNO": params["skip_TNO"],
            "mic_type": params["mic_type"],
            "vad_type": params["vad_type"],
            "max_subseg_dur": params["max_subseg_dur"],
            "overlap": params["overlap"],
        },
    )
    # Create experiment directory.
    sb.core.create_experiment_directory(
        experiment_directory=params["output_folder"],
        hyperparams_to_save=params_file,
        overrides=overrides,
    )
    # Few more experiment directories inside results/ (to maintain cleaner structure).
    exp_dirs = [
        params["embedding_dir"],
        params["sys_rttm_dir"],
        params["der_dir"],
    ]
    for dir_ in exp_dirs:
        if not os.path.exists(dir_):
            os.makedirs(dir_)
    # We download the pretrained Model from HuggingFace (or elsewhere depending on
    # the path given in the YAML file).
    run_on_main(params["pretrainer"].collect_files)
    params["pretrainer"].load_collected(device=(params["device"]))
    # Embeddings are extracted with a frozen model in eval mode.
    params["embedding_model"].eval()
    params["embedding_model"].to(params["device"])
    # AMI Dev Set: Tune hyperparams on dev set.
    # Read the meta-data file for dev set generated during data_prep
    dev_meta_file = os.path.join(
        params["meta_data_dir"],
        "ami_dev." + params["mic_type"] + ".subsegs.json",
    )
    with open(dev_meta_file, "r") as f:
        meta_dev = json.load(f)
    full_meta = meta_dev
    # Processing starts from here
    # Following few lines selects option for different backend and affinity matrices. Finds best values for hyperparameters using dev set.
    best_nn = None
    if params["affinity"] == "nn":
        logger.info("Tuning for nn (Multiple iterations over AMI Dev set)")
        best_nn = dev_nn_tuner(full_meta, "dev")
    n_lambdas = None
    best_pval = None
    if params["affinity"] == "cos" and (
        params["backend"] == "SC" or params["backend"] == "kmeans"
    ):
        # oracle num_spkrs or not, doesn't matter for kmeans and SC backends
        # cos: Tune for the best pval for SC /kmeans (for unknown num of spkrs)
        logger.info(
            "Tuning for p-value for SC (Multiple iterations over AMI Dev set)"
        )
        best_pval = dev_pval_tuner(full_meta, "dev")
    elif params["backend"] == "AHC":
        # For AHC the tuned quantity is a distance threshold, but it is
        # passed downstream through the same ``pval`` slot.
        logger.info("Tuning for threshold-value for AHC")
        best_threshold = dev_ahc_threshold_tuner(full_meta, "dev")
        best_pval = best_threshold
    else:
        # NN for unknown num of speakers (can be used in future)
        if params["oracle_n_spkrs"] is False:
            # nn: Tune num of number of components (to be updated later)
            logger.info(
                "Tuning for number of eigen components for NN (Multiple iterations over AMI Dev set)"
            )
            # dev_tuner used for tuning num of components in NN. Can be used in future.
            n_lambdas = dev_tuner(full_meta, "dev")
    # Load 'dev' and 'eval' metadata files.
    full_meta_dev = full_meta # current full_meta is for 'dev'
    eval_meta_file = os.path.join(
        params["meta_data_dir"],
        "ami_eval." + params["mic_type"] + ".subsegs.json",
    )
    with open(eval_meta_file, "r") as f:
        full_meta_eval = json.load(f)
    # Tag to be appended to final output DER files. Writing DER for individual files.
    type_of_num_spkr = "oracle" if params["oracle_n_spkrs"] else "est"
    tag = (
        type_of_num_spkr
        + "_"
        + str(params["affinity"])
        + "."
        + params["mic_type"]
    )
    # Perform final diarization on 'dev' and 'eval' with best hyperparams.
    final_DERs = {}
    for split_type in ["dev", "eval"]:
        if split_type == "dev":
            full_meta = full_meta_dev
        else:
            full_meta = full_meta_eval
        # Performing diarization.
        msg = "Diarizing using best hyperparams: " + split_type + " set"
        logger.info(msg)
        out_boundaries = diarize_dataset(
            full_meta,
            split_type,
            n_lambdas=n_lambdas,
            pval=best_pval,
            n_neighbors=best_nn,
        )
        # Computing DER.
        msg = "Computing DERs for " + split_type + " set"
        logger.info(msg)
        ref_rttm = os.path.join(
            params["ref_rttm_dir"], "fullref_ami_" + split_type + ".rttm"
        )
        sys_rttm = out_boundaries
        # DER returns [missed speech, false alarm, speaker error, DER values];
        # with individual_file_scores=True the last entry of DER_vals is the
        # overall DER — see its use below.
        [MS, FA, SER, DER_vals] = DER(
            ref_rttm,
            sys_rttm,
            params["ignore_overlap"],
            params["forgiveness_collar"],
            individual_file_scores=True,
        )
        # Writing DER values to a file. Append tag.
        der_file_name = split_type + "_DER_" + tag
        out_der_file = os.path.join(params["der_dir"], der_file_name)
        msg = "Writing DER file to: " + out_der_file
        logger.info(msg)
        diar.write_ders_file(ref_rttm, DER_vals, out_der_file)
        msg = (
            "AMI "
            + split_type
            + " set DER = %s %%\n" % (str(round(DER_vals[-1], 2)))
        )
        logger.info(msg)
        final_DERs[split_type] = round(DER_vals[-1], 2)
    # Final print DERs
    msg = (
        "Final Diarization Error Rate (%%) on AMI corpus: Dev = %s %% | Eval = %s %%\n"
        % (str(final_DERs["dev"]), str(final_DERs["eval"]))
    )
    logger.info(msg)
| 32.79063 | 135 | 0.612208 |
8d4457971221bec1117aadb51a009c263c20090a | 68,146 | py | Python | eve/app/algo.py | densechen/eve-mli | 73fdeff8cf080bf01040383a72e66cfc9219a803 | [
"MIT"
] | 9 | 2020-12-27T03:02:55.000Z | 2021-01-14T10:13:22.000Z | eve/app/algo.py | densechen/eve-mli | 73fdeff8cf080bf01040383a72e66cfc9219a803 | [
"MIT"
] | 1 | 2021-01-14T12:56:26.000Z | 2021-01-15T07:00:41.000Z | eve/app/algo.py | densechen/eve-mli | 73fdeff8cf080bf01040383a72e66cfc9219a803 | [
"MIT"
] | null | null | null | # _ _ _ _ _ _ _ _
# /\ \ /\ \ _ / /\ /\ \ /\_\/\_\ _ _\ \ /\ \
# / \ \ \ \ \ /_/ / / / \ \ / / / / //\_\/\__ \ \ \ \
# / /\ \ \ \ \ \ \___\/ / /\ \ \ /\ \/ \ \/ / / /_ \_\ /\ \_\
# / / /\ \_\/ / / \ \ \ / / /\ \_\ ____ / \____\__/ / / /\/_/ / /\/_/
# / /_/_ \/_/\ \ \ \_\ \/ /_/_ \/_/\____/\/ /\/________/ / / / / /
# / /____/\ \ \ \ / / / /____/\ \/____\/ / /\/_// / / / / / / /
# / /\____\/ \ \ \/ / / /\____\/ / / / / / / / / ____ / / /
# / / /______ \ \ \/ / / /______ / / / / / / /_/_/ ___/\___/ / /__
# / / /_______\ \ \ / / /_______\ \/_/ / / /_______/\__\/\__\/_/___\
# \/__________/ \_\/\/__________/ \/_/\_______\/ \/_________/
"""Abstract base classes for RL algorithms."""
import io
import os
import pathlib
import time
import warnings
from abc import ABC, abstractmethod
from collections import deque
from copy import deepcopy
from typing import (Any, Callable, Dict, Iterable, List, Optional, Tuple, Type,
Union)
import eve.app.logger as logger
import eve.app.space as space
import numpy as np
import torch as th
from eve.app.buffers import ReplayBuffer, RolloutBuffer, RolloutReturn
from eve.app.callbacks import (BaseCallback, CallbackList, ConvertCallback,
EvalCallback, MaybeCallback)
from eve.app.env import (DummyVecEnv, Monitor, ObsDictWrapper, VecEnv,
VecNormalize, check_for_correct_spaces, is_wrapped,
unwrap_vec_normalize)
from eve.app.policies import (ActorCriticPolicy, BasePolicy,
get_policy_from_name)
from eve.app.utils import (EveEnv, Schedule, get_device, get_latest_run_id,
get_schedule_fn, load_from_pkl, load_from_zip_file,
recursive_getattr, recursive_setattr, safe_mean,
save_to_pkl, save_to_zip_file, set_random_seed,
update_learning_rate)
# pylint:disable=no-member
r"""
.__ .__
_______ __ ____ _____ | | |__|
_/ __ \ \/ // __ \ ______ / \| | | |
\ ___/\ /\ ___/ /_____/ | Y Y \ |_| |
\___ >\_/ \___ > |__|_| /____/__|
\/ \/ \/
Action Noise
"""
class ActionNoise(ABC):
    """Abstract base class for action-noise generators.

    Subclasses implement ``__call__`` to produce one noise sample and may
    override ``reset`` when the process is stateful.
    """
    def __init__(self):
        super().__init__()

    def reset(self) -> None:
        """Reset the noise process at the end of an episode (no-op here)."""

    @abstractmethod
    def __call__(self) -> np.ndarray:
        raise NotImplementedError()
class NormalActionNoise(ActionNoise):
    """Gaussian (white) action noise.

    :param mean: the mean value of the noise
    :param sigma: the scale of the noise (std here)
    """
    def __init__(self, mean: np.ndarray, sigma: np.ndarray):
        super().__init__()
        self._mu = mean
        self._sigma = sigma

    def __call__(self) -> np.ndarray:
        # Fresh i.i.d. sample on every call.
        return np.random.normal(self._mu, self._sigma)

    def __repr__(self) -> str:
        return f"NormalActionNoise(mu={self._mu}, sigma={self._sigma})"
class OrnsteinUhlenbeckActionNoise(ActionNoise):
    """
    Ornstein-Uhlenbeck action noise: temporally correlated noise designed to
    approximate Brownian motion with friction.
    Based on http://math.stackexchange.com/questions/1287634/implementing-ornstein-uhlenbeck-in-matlab

    :param mean: the mean of the noise
    :param sigma: the scale of the noise
    :param theta: the rate of mean reversion
    :param dt: the timestep for the noise
    :param initial_noise: the initial value for the noise output, (if None: 0)
    """
    def __init__(
        self,
        mean: np.ndarray,
        sigma: np.ndarray,
        theta: float = 0.15,
        dt: float = 1e-2,
        initial_noise: Optional[np.ndarray] = None,
    ):
        self._theta = theta
        self._mu = mean
        self._sigma = sigma
        self._dt = dt
        self.initial_noise = initial_noise
        self.noise_prev = np.zeros_like(self._mu)
        self.reset()
        super().__init__()

    def __call__(self) -> np.ndarray:
        # One Euler-Maruyama step: drift toward the mean plus diffusion.
        drift = self._theta * (self._mu - self.noise_prev) * self._dt
        diffusion = (self._sigma * np.sqrt(self._dt) *
                     np.random.normal(size=self._mu.shape))
        noise = self.noise_prev + drift + diffusion
        self.noise_prev = noise
        return noise

    def reset(self) -> None:
        """
        reset the Ornstein Uhlenbeck noise, to the initial position
        """
        if self.initial_noise is not None:
            self.noise_prev = self.initial_noise
        else:
            self.noise_prev = np.zeros_like(self._mu)

    def __repr__(self) -> str:
        return f"OrnsteinUhlenbeckActionNoise(mu={self._mu}, sigma={self._sigma})"
class VectorizedActionNoise(ActionNoise):
    """
    Action noise for vectorized (parallel) environments: maintains one
    independent copy of ``base_noise`` per environment.

    :param base_noise: ActionNoise The noise generator to use
    :param n_envs: The number of parallel environments
    """
    def __init__(self, base_noise: ActionNoise, n_envs: int):
        try:
            self.n_envs = int(n_envs)
            assert self.n_envs > 0
        except (TypeError, AssertionError):
            raise ValueError(
                f"Expected n_envs={n_envs} to be positive integer greater than 0"
            )

        self.base_noise = base_noise
        self.noises = [deepcopy(self.base_noise) for _ in range(n_envs)]

    def reset(self, indices: Optional[Iterable[int]] = None) -> None:
        """
        Reset all the noise processes, or those listed in indices

        :param indices: Optional[Iterable[int]] The indices to reset. Default: None.
            If the parameter is None, then all processes are reset to their initial position.
        """
        targets = range(len(self.noises)) if indices is None else indices
        for idx in targets:
            self.noises[idx].reset()

    def __repr__(self) -> str:
        return f"VecNoise(BaseNoise={repr(self.base_noise)}), n_envs={len(self.noises)})"

    def __call__(self) -> np.ndarray:
        """
        Generate and stack the action noise from each noise object
        """
        samples = [noise() for noise in self.noises]
        return np.stack(samples)

    @property
    def base_noise(self) -> ActionNoise:
        return self._base_noise

    @base_noise.setter
    def base_noise(self, base_noise: ActionNoise) -> None:
        # Validate before storing: must be a non-None ActionNoise instance.
        if base_noise is None:
            raise ValueError(
                "Expected base_noise to be an instance of ActionNoise, not None",
                ActionNoise)
        if not isinstance(base_noise, ActionNoise):
            raise TypeError(
                "Expected base_noise to be an instance of type ActionNoise",
                ActionNoise)
        self._base_noise = base_noise

    @property
    def noises(self) -> List[ActionNoise]:
        return self._noises

    @noises.setter
    def noises(self, noises: List[ActionNoise]) -> None:
        noises = list(noises)  # raises TypeError if not iterable
        assert (
            len(noises) == self.n_envs
        ), f"Expected a list of {self.n_envs} ActionNoises, found {len(noises)}."

        # All entries must share the concrete type of base_noise.
        mismatched = [
            idx for idx, noise in enumerate(noises)
            if not isinstance(noise, type(self.base_noise))
        ]
        if len(mismatched):
            raise ValueError(
                f"Noise instances at indices {mismatched} don't match the type of base_noise",
                type(self.base_noise))

        self._noises = noises
        for noise in noises:
            noise.reset()
r"""
.__ .__
_______ __ ____ _____ | | |__|
_/ __ \ \/ // __ \ ______ / \| | | |
\ ___/\ /\ ___/ /_____/ | Y Y \ |_| |
\___ >\_/ \___ > |__|_| /____/__|
\/ \/ \/
Base Algorithm
"""
def configure_logger(verbose: int = 0,
                     tensorboard_log: Optional[str] = None,
                     tb_log_name: str = "",
                     reset_num_timesteps: bool = True) -> None:
    """
    Configure the logger's outputs.

    :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param tb_log_name: tensorboard log
    :param reset_num_timesteps: if False, continue logging into the latest
        existing run directory instead of creating a new one
    """
    if tensorboard_log is None:
        # No tensorboard directory: silence output entirely at verbose == 0,
        # otherwise keep the default logger outputs.
        if verbose == 0:
            logger.configure(format_strings=[""])
        return

    run_id = get_latest_run_id(tensorboard_log, tb_log_name)
    if not reset_num_timesteps:
        # Continue training in the same directory
        run_id -= 1
    save_path = os.path.join(tensorboard_log, f"{tb_log_name}_{run_id + 1}")
    if verbose >= 1:
        logger.configure(save_path, ["stdout", "tensorboard"])
    else:
        logger.configure(save_path, ["tensorboard"])
class BaseAlgorithm(ABC):
"""
The base of RL algorithms
:param policy: Policy object
:param env: The environment to learn from
:param policy_base: The base policy used by this method
:param learning_rate: learning rate for the optimizer,
it can be a function of the current progress remaining (from 1 to 0)
:param policy_kwargs: Additional arguments to be passed to the policy on creation
:param tensorboard_log: the log location for tensorboard (if None, no logging)
:param verbose: The verbosity level: 0 none, 1 training information, 2 debug
:param device: Device on which the code should run.
By default, it will try to use a Cuda compatible device and fallback to cpu
if it is not possible.
:param support_multi_env: Whether the algorithm supports training
with multiple environments (as in A2C)
:param create_eval_env: Whether to create a second environment that will be
used for evaluating the agent periodically. (Only available when passing string for the environment)
:param monitor_wrapper: When creating an environment, whether to wrap it
or not in a Monitor wrapper.
:param seed: Seed for the pseudo random generators
:param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
instead of action noise exploration (default: False)
:param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
Default: -1 (only sample at the beginning of the rollout)
:param supported_action_spaces: The action spaces supported by the algorithm.
"""
    def __init__(
        self,
        policy: Type[BasePolicy],
        env: Union[EveEnv, str, None],
        policy_base: Type[BasePolicy],
        learning_rate: Union[float, Schedule],
        policy_kwargs: Dict[str, Any] = None,
        tensorboard_log: Optional[str] = None,
        verbose: int = 0,
        device: Union[th.device, str] = "auto",
        support_multi_env: bool = False,
        create_eval_env: bool = False,
        monitor_wrapper: bool = True,
        seed: Optional[int] = None,
        use_sde: bool = False,
        sde_sample_freq: int = -1,
        supported_action_spaces: Optional[Tuple[space.EveSpace, ...]] = None,
        sample_episode: bool = False,
    ):
        """Initialize the algorithm's state; see the class docstring for parameters."""
        # A policy given by name (str) is resolved against policy_base;
        # otherwise the class object is used directly.
        if isinstance(policy, str) and policy_base is not None:
            self.policy_class = get_policy_from_name(policy_base, policy)
        else:
            self.policy_class = policy
        self.device = get_device(device)
        if verbose > 0:
            print(f"Using {self.device} device")
        self.env = None # type: Optional[EveEnv]
        # get VecNormalize object if needed
        self._vec_normalize_env = unwrap_vec_normalize(env)
        self.verbose = verbose
        self.policy_kwargs = {} if policy_kwargs is None else policy_kwargs
        self.observation_space = None # type: Optional[space.EveSpace]
        self.action_space = None # type: Optional[space.EveSpace]
        self.n_envs = None
        self.num_timesteps = 0
        # Used for updating schedules
        self._total_timesteps = 0
        self.eval_env = None
        self.seed = seed
        self.action_noise = None # type: Optional[ActionNoise]
        self.start_time = None
        self.policy = None
        self.learning_rate = learning_rate
        self.tensorboard_log = tensorboard_log
        self.lr_schedule = None # type: Optional[Schedule]
        # Last observation / done flags from the env, kept across learn() calls.
        self._last_obs = None # type: Optional[np.ndarray]
        self._last_dones = None # type: Optional[np.ndarray]
        # When using VecNormalize:
        self._last_original_obs = None # type: Optional[np.ndarray]
        self._episode_num = 0
        # Used for gSDE only
        self.use_sde = use_sde
        self.sde_sample_freq = sde_sample_freq
        # Track the training progress remaining (from 1 to 0)
        # this is used to update the learning rate
        self._current_progress_remaining = 1
        # Buffers for logging
        self.ep_info_buffer = None # type: Optional[deque]
        self.ep_success_buffer = None # type: Optional[deque]
        # For logging
        self._n_updates = 0 # type: int
        self.sample_episode = sample_episode
        # Create and wrap the env if needed
        if env is not None:
            if isinstance(env, str):
                # NOTE(review): a string env is only stored as eval_env here;
                # _wrap_env is still called on the string — presumably it (or
                # a subclass) resolves env ids. Confirm against _wrap_env usage.
                if create_eval_env:
                    self.eval_env = env
            env = self._wrap_env(env, self.verbose, monitor_wrapper)
            self.observation_space = env.observation_space
            self.action_space = env.action_space
            self.n_envs = env.num_envs
            self.env = env
            if supported_action_spaces is not None:
                assert isinstance(
                    self.action_space, supported_action_spaces
                ), (f"The algorithm only supports {supported_action_spaces} as action spaces "
                    f"but {self.action_space} was provided")
            if not support_multi_env and self.n_envs > 1:
                raise ValueError(
                    "Error: the model does not support multiple envs; it requires "
                    "a single vectorized environment.")
            # gSDE only works with continuous (Box) action spaces.
            if self.use_sde and not isinstance(self.action_space,
                                               space.EveBox):
                raise ValueError(
                    "generalized State-Dependent Exploration (gSDE) can only be used with continuous actions."
                )
            if self.sample_episode and self.n_envs > 1:
                raise ValueError("Sample episode only fit for n_env == 1")
@staticmethod
def _wrap_env(env: EveEnv,
verbose: int = 0,
monitor_wrapper: bool = True) -> VecEnv:
""" "
Wrap environment with the appropriate wrappers if needed.
For instance, to have a vectorized environment
or to re-order the image channels.
:param env:
:param verbose:
:param monitor_wrapper: Whether to wrap the env in a ``Monitor`` when possible.
:return: The wrapped environment.
"""
if not isinstance(env, VecEnv):
if not is_wrapped(env, Monitor) and monitor_wrapper:
if verbose >= 1:
print("Wrapping the env with a `Monitor` wrapper")
env = Monitor(env)
if verbose >= 1:
print("Wrapping the env in a DummyVecEnv.")
env = DummyVecEnv([lambda: env])
if isinstance(env.observation_space, space.EveDict):
env = ObsDictWrapper(env)
return env
    @abstractmethod
    def _setup_model(self) -> None:
        """Create networks, buffer and optimizers.

        Implemented by each concrete algorithm; called before training starts.
        """
def _get_eval_env(self, eval_env: Optional[EveEnv]) -> Optional[EveEnv]:
"""
Return the environment that will be used for evaluation.
:param eval_env:)
:return:
"""
if eval_env is None:
eval_env = self.eval_env
if eval_env is not None:
eval_env = self._wrap_env(eval_env, self.verbose)
assert eval_env.num_envs == 1
return eval_env
    def _setup_lr_schedule(self) -> None:
        """Transform ``self.learning_rate`` into a callable schedule if needed."""
        self.lr_schedule = get_schedule_fn(self.learning_rate)
def _update_current_progress_remaining(self, num_timesteps: int,
total_timesteps: int) -> None:
"""
Compute current progress remaining (starts from 1 and ends to 0)
:param num_timesteps: current number of timesteps
:param total_timesteps:
"""
self._current_progress_remaining = 1.0 - float(num_timesteps) / float(
total_timesteps)
def _update_learning_rate(
self, optimizers: Union[List[th.optim.Optimizer],
th.optim.Optimizer]) -> None:
"""
Update the optimizers learning rate using the current learning rate schedule
and the current progress remaining (from 1 to 0).
:param optimizers:
An optimizer or a list of optimizers.
"""
# Log the current learning rate
logger.record("train/learning_rate",
self.lr_schedule(self._current_progress_remaining))
if not isinstance(optimizers, list):
optimizers = [optimizers]
for optimizer in optimizers:
update_learning_rate(
optimizer, self.lr_schedule(self._current_progress_remaining))
    def _excluded_save_params(self) -> List[str]:
        """
        Returns the names of the parameters that should be excluded from being
        saved by pickling. E.g. replay buffers are skipped by default
        as they take up a lot of space. PyTorch variables should be excluded
        with this so they can be stored with ``th.save``.

        :return: List of parameters that should be excluded from being saved with pickle.
        """
        # Environments and buffers are large or unpicklable; policy/device are
        # handled via the torch-save path instead.
        return [
            "policy",
            "device",
            "env",
            "eval_env",
            "replay_buffer",
            "rollout_buffer",
            "_vec_normalize_env",
        ]
def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
"""
Get the name of the torch variables that will be saved with
PyTorch ``th.save``, ``th.load`` and ``state_dicts`` instead of the default
pickling strategy. This is to handle device placement correctly.
Names can point to specific variables under classes, e.g.
"policy.optimizer" would point to ``optimizer`` object of ``self.policy``
if this object.
:return:
List of Torch variables whose state dicts to save (e.g. th.nn.Modules),
and list of other Torch variables to store with ``th.save``.
"""
state_dicts = ["policy"]
return state_dicts, []
def _init_callback(
self,
callback: MaybeCallback,
eval_env: Optional[VecEnv] = None,
eval_freq: int = 10000,
n_eval_episodes: int = 5,
log_path: Optional[str] = None,
) -> BaseCallback:
"""
:param callback: Callback(s) called at every step with state of the algorithm.
:param eval_freq: How many steps between evaluations; if None, do not evaluate.
:param n_eval_episodes: Number of episodes to rollout during evaluation.
:param log_path: Path to a folder where the evaluations will be saved
:return: A hybrid callback calling `callback` and performing evaluation.
"""
# Convert a list of callbacks into a callback
if isinstance(callback, list):
callback = CallbackList(callback)
# Convert functional callback to object
if not isinstance(callback, BaseCallback):
callback = ConvertCallback(callback)
# Create eval callback in charge of the evaluation
if eval_env is not None:
eval_callback = EvalCallback(
eval_env,
best_model_save_path=log_path,
log_path=log_path,
eval_freq=eval_freq,
n_eval_episodes=n_eval_episodes,
)
callback = CallbackList([callback, eval_callback])
callback.init_callback(self)
return callback
def _setup_learn(
self,
total_timesteps: int,
eval_env: Optional[EveEnv],
callback: MaybeCallback = None,
eval_freq: int = 10000,
n_eval_episodes: int = 5,
log_path: Optional[str] = None,
reset_num_timesteps: bool = True,
tb_log_name: str = "run",
) -> Tuple[int, BaseCallback]:
"""
Initialize different variables needed for training.
:param total_timesteps: The total number of samples (env steps) to train on
:param eval_env: Environment to use for evaluation.
:param callback: Callback(s) called at every step with state of the algorithm.
:param eval_freq: How many steps between evaluations
:param n_eval_episodes: How many episodes to play per evaluation
:param log_path: Path to a folder where the evaluations will be saved
:param reset_num_timesteps: Whether to reset or not the ``num_timesteps`` attribute
:param tb_log_name: the name of the run for tensorboard log
:return:
"""
self.start_time = time.time()
if self.ep_info_buffer is None or reset_num_timesteps:
# Initialize buffers if they don't exist, or reinitialize if resetting counters
self.ep_info_buffer = deque(maxlen=100)
self.ep_success_buffer = deque(maxlen=100)
if self.action_noise is not None:
self.action_noise.reset()
if reset_num_timesteps:
self.num_timesteps = 0
self._episode_num = 0
else:
# Make sure training timesteps are ahead of the internal counter
total_timesteps += self.num_timesteps
self._total_timesteps = total_timesteps
# Avoid resetting the environment when calling ``.learn()`` consecutive times
if reset_num_timesteps or self._last_obs is None:
self._last_obs = self.env.reset()
self._last_dones = np.zeros((self.env.num_envs, ), dtype=np.bool)
# Retrieve unnormalized observation for saving into the buffer
if self._vec_normalize_env is not None:
self._last_original_obs = self._vec_normalize_env.get_original_obs(
)
if eval_env is not None and self.seed is not None:
eval_env.seed(self.seed)
eval_env = self._get_eval_env(eval_env)
# Configure logger's outputs
configure_logger(self.verbose, self.tensorboard_log, tb_log_name,
reset_num_timesteps)
# Create eval callback if needed
callback = self._init_callback(callback, eval_env, eval_freq,
n_eval_episodes, log_path)
return total_timesteps, callback
def _update_info_buffer(self,
infos: List[Dict[str, Any]],
dones: Optional[np.ndarray] = None) -> None:
"""
Retrieve reward and episode length and update the buffer
if using Monitor wrapper.
:param infos:
"""
if dones is None:
dones = np.array([False] * len(infos))
for idx, info in enumerate(infos):
maybe_ep_info = info.get("episode")
maybe_is_success = info.get("is_success")
if maybe_ep_info is not None:
self.ep_info_buffer.extend([maybe_ep_info])
if maybe_is_success is not None and dones[idx]:
self.ep_success_buffer.append(maybe_is_success)
    def get_env(self) -> Optional[VecEnv]:
        """
        Returns the current environment (can be None if not defined).

        :return: The current (vectorized) training environment
        """
        return self.env
    def get_vec_normalize_env(self) -> Optional[VecNormalize]:
        """
        Return the ``VecNormalize`` wrapper of the training env
        if it exists (captured at construction time).

        :return: The ``VecNormalize`` env, or None.
        """
        return self._vec_normalize_env
def set_env(self, env: EveEnv) -> None:
"""
Checks the validity of the environment, and if it is coherent,
set it as the current environment.
Furthermore wrap any non vectorized env into a vectorized checked parameters:
- observation_space
- action_space
:param env: The environment for learning a policy
"""
# if it is not a VecEnv, make it a VecEnv
# and do other transformations (dict obs, image transpose) if needed
env = self._wrap_env(env, self.verbose)
# Check that the observation spaces match
check_for_correct_spaces(env, self.observation_space,
self.action_space)
self.n_envs = env.num_envs
self.env = env
    @abstractmethod
    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 100,
        tb_log_name: str = "run",
        eval_env: Optional[EveEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> "BaseAlgorithm":
        """
        Return a trained model. Implemented by each concrete algorithm.

        :param total_timesteps: The total number of samples (env steps) to train on
        :param callback: callback(s) called at every step with state of the algorithm.
        :param log_interval: The number of timesteps before logging.
        :param tb_log_name: the name of the run for TensorBoard logging
        :param eval_env: Environment that will be used to evaluate the agent
        :param eval_freq: Evaluate the agent every ``eval_freq`` timesteps (this may vary a little)
        :param n_eval_episodes: Number of episode to evaluate the agent
        :param eval_log_path: Path to a folder where the evaluations will be saved
        :param reset_num_timesteps: whether or not to reset the current timestep number (used in logging)
        :return: the trained model
        """
    def predict(
        self,
        observation: np.ndarray,
        mask: Optional[np.ndarray] = None,
        deterministic: bool = False,
    ) -> Tuple[np.ndarray, Optional[np.ndarray]]:
        """
        Get the model's action(s) from an observation.

        Thin delegate to ``self.policy.predict``.

        :param observation: the input observation
        :param mask: The last masks (can be None, used in recurrent policies)
        :param deterministic: Whether or not to return deterministic actions.
        :return: the model's action and the next state
            (used in recurrent policies)
        """
        return self.policy.predict(observation, mask, deterministic)
def set_random_seed(self, seed: Optional[int] = None) -> None:
"""
Set the seed of the pseudo-random generators
(python, numpy, pytorch, action_space)
:param seed:
"""
if seed is None:
return
set_random_seed(seed,
using_cuda=self.device.type == th.device("cuda").type)
self.action_space.seed(seed)
if self.env is not None:
self.env.seed(seed)
if self.eval_env is not None:
self.eval_env.seed(seed)
    def set_parameters(
        self,
        load_path_or_dict: Union[str, Dict[str, Dict]],
        exact_match: bool = True,
        device: Union[th.device, str] = "auto",
    ) -> None:
        """
        Load parameters from a given zip-file or a nested dictionary containing parameters for
        different modules (see ``get_parameters``).

        :param load_path_or_dict: Location of the saved data (path or file-like, see ``save``), or a nested
            dictionary containing nn.Module parameters used by the policy. The dictionary maps
            object names to a state-dictionary returned by ``torch.nn.Module.state_dict()``.
        :param exact_match: If True, the given parameters should include parameters for each
            module and each of their parameters, otherwise raises an Exception. If set to False, this
            can be used to update only specific parameters.
        :param device: Device on which the code should run.
        """
        params = None
        if isinstance(load_path_or_dict, dict):
            params = load_path_or_dict
        else:
            _, params, _ = load_from_zip_file(load_path_or_dict, device=device)
        # Keep track which objects were updated.
        # `_get_torch_save_params` returns [params, other_pytorch_variables].
        # We are only interested in former here.
        objects_needing_update = set(self._get_torch_save_params()[0])
        updated_objects = set()
        for name in params:
            attr = None
            try:
                # Resolve dotted names like "policy.optimizer".
                attr = recursive_getattr(self, name)
            except Exception:
                # What errors recursive_getattr could throw? KeyError, but
                # possible something else too (e.g. if key is an int?).
                # Catch anything for now.
                raise ValueError(f"Key {name} is an invalid object name.")
            if isinstance(attr, th.optim.Optimizer):
                # Optimizers do not support "strict" keyword...
                # Seems like they will just replace the whole
                # optimizer state with the given one.
                # On top of this, optimizer state-dict
                # seems to change (e.g. first ``optim.step()``),
                # which makes comparing state dictionary keys
                # invalid (there is also a nesting of dictionaries
                # with lists with dictionaries with ...), adding to the
                # mess.
                #
                # TL;DR: We might not be able to reliably say
                # if given state-dict is missing keys.
                #
                # Solution: Just load the state-dict as is, and trust
                # the user has provided a sensible state dictionary.
                attr.load_state_dict(params[name])
            else:
                # Assume attr is th.nn.Module
                attr.load_state_dict(params[name], strict=exact_match)
            updated_objects.add(name)
        # With exact_match, every saved object must have been consumed.
        if exact_match and updated_objects != objects_needing_update:
            raise ValueError(
                "Names of parameters do not match agents' parameters: "
                f"expected {objects_needing_update}, got {updated_objects}")
    @classmethod
    def load(
        cls,
        path: Union[str, pathlib.Path, io.BufferedIOBase],
        env: Optional[EveEnv] = None,
        device: Union[th.device, str] = "auto",
        **kwargs,
    ) -> "BaseAlgorithm":
        """
        Load the model from a zip-file.

        :param path: path to the file (or a file-like) where to
            load the agent from
        :param env: the new environment to run the loaded model on
            (can be None if you only need prediction from a trained model)
            has priority over any saved environment
        :param device: Device on which the code should run.
        :param kwargs: extra arguments to change the model when loading
        :return: the loaded model instance
        :raises ValueError: when given policy kwargs conflict with the stored ones
        :raises KeyError: when the archive lacks observation/action spaces
        """
        data, params, pytorch_variables = load_from_zip_file(path,
                                                             device=device)
        # Remove stored device information and replace with ours
        if "policy_kwargs" in data:
            if "device" in data["policy_kwargs"]:
                del data["policy_kwargs"]["device"]
        # Overriding policy_kwargs at load time is not supported: they must
        # match what the model was saved with.
        if "policy_kwargs" in kwargs and kwargs["policy_kwargs"] != data[
                "policy_kwargs"]:
            raise ValueError(
                f"The specified policy kwargs do not equal the stored policy kwargs."
                f"Stored kwargs: {data['policy_kwargs']}, specified kwargs: {kwargs['policy_kwargs']}"
            )
        if "observation_space" not in data or "action_space" not in data:
            raise KeyError(
                "The observation_space and action_space were not given, can't verify new environments"
            )
        if env is not None:
            # Wrap first if needed
            env = cls._wrap_env(env, data["verbose"])
            # Check if given env is valid
            check_for_correct_spaces(env, data["observation_space"],
                                     data["action_space"])
        else:
            # Use stored env, if one exists. If not, continue as is (can be used for predict)
            if "env" in data:
                env = data["env"]
        # noinspection PyArgumentList
        # Build a bare instance first; _setup_model is called manually after
        # the stored attributes are restored.
        model = cls( # pylint:disable=unexpected-keyword-arg,no-value-for-parameter
            policy=data["policy_class"],
            env=env,
            device=device,
            _init_setup_model=False,
        )
        # load parameters
        model.__dict__.update(data)
        model.__dict__.update(kwargs)
        model._setup_model()
        # put state_dicts back in place
        model.set_parameters(params, exact_match=True, device=device)
        # put other pytorch variables back in place
        if pytorch_variables is not None:
            for name in pytorch_variables:
                recursive_setattr(model, name, pytorch_variables[name])
        # Sample gSDE exploration matrix, so it uses the right device
        # see issue #44
        if model.use_sde:
            model.policy.reset_noise()
        return model
def get_parameters(self) -> Dict[str, Dict]:
"""
Return the parameters of the agent. This includes parameters from different networks, e.g.
critics (value functions) and policies (pi functions).
:return: Mapping of from names of the objects to PyTorch state-dicts.
"""
state_dicts_names, _ = self._get_torch_save_params()
params = {}
for name in state_dicts_names:
attr = recursive_getattr(self, name)
# Retrieve state dict
params[name] = attr.state_dict()
return params
def save(
self,
path: Union[str, pathlib.Path, io.BufferedIOBase],
exclude: Optional[Iterable[str]] = None,
include: Optional[Iterable[str]] = None,
) -> None:
"""
Save all the attributes of the object and the model parameters in a zip-file.
:param path: path to the file where the rl agent should be saved
:param exclude: name of parameters that should be excluded in addition to the default ones
:param include: name of parameters that might be excluded but should be included anyway
"""
# Copy parameter list so we don't mutate the original dict
data = self.__dict__.copy()
# Exclude is union of specified parameters (if any) and standard exclusions
if exclude is None:
exclude = []
exclude = set(exclude).union(self._excluded_save_params())
# Do not exclude params if they are specifically included
if include is not None:
exclude = exclude.difference(include)
state_dicts_names, torch_variable_names = self._get_torch_save_params()
all_pytorch_variables = state_dicts_names + torch_variable_names
for torch_var in all_pytorch_variables:
# We need to get only the name of the top most module as we'll remove that
var_name = torch_var.split(".")[0]
# Any params that are in the save vars must not be saved by data
exclude.add(var_name)
# Remove parameter entries of parameters which are to be excluded
for param_name in exclude:
data.pop(param_name, None)
# Build dict of torch variables
pytorch_variables = None
if torch_variable_names is not None:
pytorch_variables = {}
for name in torch_variable_names:
attr = recursive_getattr(self, name)
pytorch_variables[name] = attr
# Build dict of state_dicts
params_to_save = self.get_parameters()
save_to_zip_file(path,
data=data,
params=params_to_save,
pytorch_variables=pytorch_variables)
class OffPolicyAlgorithm(BaseAlgorithm):
    """
    The base for Off-Policy algorithms (ex: SAC/TD3)
    :param policy: Policy object
    :param env: The environment to learn from
    :param policy_base: The base policy used by this method
    :param learning_rate: learning rate for the optimizer,
        it can be a function of the current progress remaining (from 1 to 0)
    :param buffer_size: size of the replay buffer
    :param learning_starts: how many steps of the model to collect transitions for before learning starts
    :param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1)
    :param gamma: the discount factor
    :param train_freq: Update the model every ``train_freq`` steps. Set to `-1` to disable.
    :param gradient_steps: How many gradient steps to do after each rollout
        (see ``train_freq`` and ``n_episodes_rollout``)
        Set to ``-1`` means to do as many gradient steps as steps done in the environment
        during the rollout.
    :param n_episodes_rollout: Update the model every ``n_episodes_rollout`` episodes.
        Note that this cannot be used at the same time as ``train_freq``. Set to `-1` to disable.
    :param action_noise: the action noise type (None by default), this can help
        for hard exploration problem. Cf common.noise for the different action noise type.
    :param policy_kwargs: Additional arguments to be passed to the policy on creation
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param verbose: The verbosity level: 0 none, 1 training information, 2 debug
    :param device: Device on which the code should run.
        By default, it will try to use a Cuda compatible device and fallback to cpu
        if it is not possible.
    :param support_multi_env: Whether the algorithm supports training
        with multiple environments (as in A2C)
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param monitor_wrapper: When creating an environment, whether to wrap it
        or not in a Monitor wrapper.
    :param seed: Seed for the pseudo random generators
    :param use_sde: Whether to use State Dependent Exploration (SDE)
        instead of action noise exploration (default: False)
    :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
        Default: -1 (only sample at the beginning of the rollout)
    :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling
        during the warm up phase (before learning starts)
    :param sde_support: Whether the model support gSDE or not
    :param remove_time_limit_termination: Remove terminations (dones) that are due to time limit.
        See https://github.com/hill-a/stable-baselines/issues/863
    :param supported_action_spaces: The action spaces supported by the algorithm.
    """

    def __init__(self,
                 policy: Type[BasePolicy],
                 env: Union[EveEnv, str],
                 policy_base: Type[BasePolicy],
                 learning_rate: Union[float, Schedule],
                 buffer_size: int = int(1e6),
                 learning_starts: int = 100,
                 batch_size: int = 256,
                 tau: float = 0.005,
                 gamma: float = 0.99,
                 train_freq: int = 1,
                 gradient_steps: int = 1,
                 n_episodes_rollout: int = -1,
                 action_noise: Optional[ActionNoise] = None,
                 policy_kwargs: Dict[str, Any] = None,
                 tensorboard_log: Optional[str] = None,
                 verbose: int = 0,
                 device: Union[th.device, str] = "auto",
                 support_multi_env: bool = False,
                 create_eval_env: bool = False,
                 monitor_wrapper: bool = True,
                 seed: Optional[int] = None,
                 use_sde: bool = False,
                 sde_sample_freq: int = -1,
                 use_sde_at_warmup: bool = False,
                 sde_support: bool = True,
                 remove_time_limit_termination: bool = False,
                 supported_action_spaces: Optional[Tuple[space.EveSpace,
                                                         ...]] = None,
                 sample_episode: bool = False):
        super(OffPolicyAlgorithm, self).__init__(
            policy=policy,
            env=env,
            policy_base=policy_base,
            learning_rate=learning_rate,
            policy_kwargs=policy_kwargs,
            tensorboard_log=tensorboard_log,
            verbose=verbose,
            device=device,
            support_multi_env=support_multi_env,
            create_eval_env=create_eval_env,
            monitor_wrapper=monitor_wrapper,
            seed=seed,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
            supported_action_spaces=supported_action_spaces,
            sample_episode=sample_episode,
        )
        self.buffer_size = buffer_size
        self.batch_size = batch_size
        self.learning_starts = learning_starts
        self.tau = tau
        self.gamma = gamma
        self.train_freq = train_freq
        self.gradient_steps = gradient_steps
        self.n_episodes_rollout = n_episodes_rollout
        self.action_noise = action_noise
        # Remove terminations (dones) that are due to time limit
        # see https://github.com/hill-a/stable-baselines/issues/863
        self.remove_time_limit_termination = remove_time_limit_termination
        # Using both step- and episode-based collection at once is unusual;
        # warn so the user can confirm it is intentional.
        if train_freq > 0 and n_episodes_rollout > 0:
            warnings.warn(
                "You passed a positive value for `train_freq` and `n_episodes_rollout`."
                "Please make sure this is intended. "
                "The agent will collect data by stepping in the environment "
                "until both conditions are true: "
                "`number of steps in the env` >= `train_freq` and "
                "`number of episodes` > `n_episodes_rollout`")
        self.actor = None  # type: Optional[th.nn.Module]
        self.replay_buffer = None  # type: Optional[ReplayBuffer]
        # Update policy keyword arguments
        if sde_support:
            self.policy_kwargs["use_sde"] = self.use_sde
        # For gSDE only
        self.use_sde_at_warmup = use_sde_at_warmup

    def _setup_model(self) -> None:
        """Create the replay buffer, instantiate the policy and move it to the device."""
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)
        self.replay_buffer = ReplayBuffer(
            self.buffer_size,
            self.observation_space,
            self.action_space,
            self.device,
            sample_episode=self.sample_episode,
        )
        self.policy = self.policy_class(
            self.observation_space,
            self.action_space,
            self.lr_schedule,
            **self.policy_kwargs  # pytype:disable=not-instantiable
        )
        self.policy = self.policy.to(self.device)

    def save_replay_buffer(
            self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None:
        """
        Save the replay buffer as a pickle file.
        :param path: Path to the file where the replay buffer should be saved.
            if path is a str or pathlib.Path, the path is automatically created if necessary.
        """
        assert self.replay_buffer is not None, "The replay buffer is not defined"
        save_to_pkl(path, self.replay_buffer, self.verbose)

    def load_replay_buffer(
            self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None:
        """
        Load a replay buffer from a pickle file.
        :param path: Path to the pickled replay buffer.
        """
        self.replay_buffer = load_from_pkl(path, self.verbose)
        assert isinstance(
            self.replay_buffer, ReplayBuffer
        ), "The replay buffer must inherit from ReplayBuffer class"

    def _setup_learn(
        self,
        total_timesteps: int,
        eval_env: Optional[EveEnv],
        callback: MaybeCallback = None,
        eval_freq: int = 10000,
        n_eval_episodes: int = 5,
        log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
        tb_log_name: str = "run",
    ) -> Tuple[int, BaseCallback]:
        """
        cf `BaseAlgorithm`.
        """
        # Prevent continuity issue by truncating trajectory
        # when using memory efficient replay buffer
        # see https://github.com/DLR-RM/stable-baselines3/issues/46
        truncate_last_traj = (reset_num_timesteps
                              and self.replay_buffer is not None
                              and (self.replay_buffer.full
                                   or self.replay_buffer.pos > 0))
        if truncate_last_traj:
            # Go to the previous index
            pos = (self.replay_buffer.pos - 1) % self.replay_buffer.buffer_size
            self.replay_buffer.dones[pos] = True
        return super()._setup_learn(total_timesteps, eval_env, callback,
                                    eval_freq, n_eval_episodes, log_path,
                                    reset_num_timesteps, tb_log_name)

    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 4,
        eval_env: Optional[EveEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "run",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> "OffPolicyAlgorithm":
        """Alternate between collecting rollouts and gradient updates until
        ``total_timesteps`` environment steps have been performed.

        :return: the trained model (``self``)
        """
        total_timesteps, callback = self._setup_learn(
            total_timesteps, eval_env, callback, eval_freq, n_eval_episodes,
            eval_log_path, reset_num_timesteps, tb_log_name)
        callback.on_training_start(locals(), globals())
        while self.num_timesteps < total_timesteps:
            rollout = self.collect_rollouts(
                self.env,
                n_episodes=self.n_episodes_rollout,
                n_steps=self.train_freq,
                action_noise=self.action_noise,
                callback=callback,
                learning_starts=self.learning_starts,
                replay_buffer=self.replay_buffer,
                log_interval=log_interval,
            )
            # A callback may request early termination of training.
            if rollout.continue_training is False:
                break
            # Only start gradient updates after the warm-up phase.
            if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:
                # If no `gradient_steps` is specified,
                # do as many gradients steps as steps performed during the rollout
                gradient_steps = self.gradient_steps if self.gradient_steps > 0 else rollout.episode_timesteps
                self.train(batch_size=self.batch_size,
                           gradient_steps=gradient_steps)
        callback.on_training_end()
        return self

    def train(self, gradient_steps: int, batch_size: int) -> None:
        """
        Sample the replay buffer and do the updates
        (gradient descent and update target networks)
        """
        raise NotImplementedError()

    def _sample_action(
        self,
        learning_starts: int,
        action_noise: Optional[ActionNoise] = None
    ) -> Tuple[np.ndarray, np.ndarray]:
        """
        Sample an action according to the exploration policy.
        This is either done by sampling the probability distribution of the policy,
        or sampling a random action (from a uniform distribution over the action space)
        or by adding noise to the deterministic output.
        :param action_noise: Action noise that will be used for exploration
            Required for deterministic policy (e.g. TD3). This can also be used
            in addition to the stochastic policy for SAC.
        :param learning_starts: Number of steps before learning for the warm-up phase.
        :return: action to take in the environment
            and scaled action that will be stored in the replay buffer.
            The two differs when the action space is not normalized (bounds are not [-1, 1]).
        """
        # Select action randomly or according to policy
        # if neuron wise, the action space is [n_envs, neurons, actions]
        # else, the action space is [n_envs, actions]
        if self.num_timesteps < learning_starts and not (
                self.use_sde and self.use_sde_at_warmup):
            # Warm-up: one uniform sample per neuron of the action space.
            unscaled_action = []
            for _ in range(self.action_space.max_neurons):
                unscaled_action.append(self.action_space.sample())
            unscaled_action = np.array([unscaled_action])
        else:
            # Note: when using continuous actions,
            # we assume that the policy uses tanh to scale the action
            # We use non-deterministic action in the case of SAC, for TD3, it does not matter
            # NOTE(review): the result of `predict` is used directly as the action
            # array here; upstream stable-baselines3 returns an (action, state)
            # tuple — confirm this fork's `predict` returns only the action.
            unscaled_action = self.predict(self._last_obs, deterministic=False)
        # Rescale the action from [low, high] to [-1, 1]
        if isinstance(self.action_space, space.EveBox):
            scaled_action = self.policy.scale_action(unscaled_action)
            # Add noise to the action (improve exploration)
            if action_noise is not None:
                scaled_action = np.clip(scaled_action + action_noise(), -1, 1)
            # We store the scaled action in the buffer
            buffer_action = scaled_action
            action = self.policy.unscale_action(scaled_action)
        else:
            # Discrete case, no need to normalize or clip
            buffer_action = unscaled_action
            action = buffer_action
        return action, buffer_action

    def _dump_logs(self) -> None:
        """
        Write log.
        """
        fps = int(self.num_timesteps / (time.time() - self.start_time))
        logger.record("time/episodes",
                      self._episode_num,
                      exclude="tensorboard")
        # Episode statistics are only available once the Monitor wrapper
        # has filled the info buffer.
        if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:
            logger.record(
                "rollout/ep_rew_mean",
                safe_mean([ep_info["r"] for ep_info in self.ep_info_buffer]))
            logger.record(
                "rollout/ep_len_mean",
                safe_mean([ep_info["l"] for ep_info in self.ep_info_buffer]))
        logger.record("time/fps", fps)
        logger.record("time/time_elapsed",
                      int(time.time() - self.start_time),
                      exclude="tensorboard")
        logger.record("time/total timesteps",
                      self.num_timesteps,
                      exclude="tensorboard")
        if self.use_sde:
            logger.record("train/std", (self.actor.get_std()).mean().item())
        if len(self.ep_success_buffer) > 0:
            logger.record("rollout/success rate",
                          safe_mean(self.ep_success_buffer))
        # Pass the number of timesteps for tensorboard
        logger.dump(step=self.num_timesteps)

    def _on_step(self) -> None:
        """
        Method called after each step in the environment.
        It is meant to trigger DQN target network update
        but can be used for other purposes
        """
        pass

    def collect_rollouts(
        self,
        env: VecEnv,
        callback: BaseCallback,
        n_episodes: int = 1,
        n_steps: int = -1,
        action_noise: Optional[ActionNoise] = None,
        learning_starts: int = 0,
        replay_buffer: Optional[ReplayBuffer] = None,
        log_interval: Optional[int] = None,
    ) -> RolloutReturn:
        """
        Collect experiences and store them into a ``ReplayBuffer``.
        :param env: The training environment
        :param callback: Callback that will be called at each step
            (and at the beginning and end of the rollout)
        :param n_episodes: Number of episodes to use to collect rollout data
            You can also specify a ``n_steps`` instead
        :param n_steps: Number of steps to use to collect rollout data
            You can also specify a ``n_episodes`` instead.
        :param action_noise: Action noise that will be used for exploration
            Required for deterministic policy (e.g. TD3). This can also be used
            in addition to the stochastic policy for SAC.
        :param learning_starts: Number of steps before learning for the warm-up phase.
        :param replay_buffer:
        :param log_interval: Log data every ``log_interval`` episodes
        :return:
        """
        episode_rewards, total_timesteps = [], []
        total_steps, total_episodes = 0, 0
        assert isinstance(env, VecEnv), "You must pass a VecEnv"
        assert env.num_envs == 1, "OffPolicyAlgorithm only support single environment"
        if self.use_sde:
            self.actor.reset_noise()
        callback.on_rollout_start()
        continue_training = True
        # Keep collecting until BOTH targets are met (a non-positive target
        # is satisfied immediately, so -1 disables that criterion).
        while total_steps < n_steps or total_episodes < n_episodes:
            done = False
            episode_reward, episode_timesteps = 0.0, 0
            while not done:
                if self.use_sde and self.sde_sample_freq > 0 and total_steps % self.sde_sample_freq == 0:
                    # Sample a new noise matrix
                    self.actor.reset_noise()
                # Select action randomly or according to policy
                action, buffer_action = self._sample_action(
                    learning_starts, action_noise)
                # Rescale and perform action
                new_obs, reward, done, infos = env.step(action)
                self.num_timesteps += 1
                episode_timesteps += 1
                total_steps += 1
                # Give access to local variables
                callback.update_locals(locals())
                # Only stop training if return value is False, not when it is None.
                if callback.on_step() is False:
                    return RolloutReturn(0.0,
                                         total_steps,
                                         total_episodes,
                                         continue_training=False)
                episode_reward += reward
                # Retrieve reward and episode length if using Monitor wrapper
                self._update_info_buffer(infos, done)
                # Store data in replay buffer
                if replay_buffer is not None:
                    # Store only the unnormalized version
                    if self._vec_normalize_env is not None:
                        new_obs_ = self._vec_normalize_env.get_original_obs()
                        reward_ = self._vec_normalize_env.get_original_reward()
                    else:
                        # Avoid changing the original ones
                        self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward
                    replay_buffer.add(self._last_original_obs, new_obs_,
                                      buffer_action, reward_, done)
                self._last_obs = new_obs
                # Save the unnormalized observation
                if self._vec_normalize_env is not None:
                    self._last_original_obs = new_obs_
                self._update_current_progress_remaining(
                    self.num_timesteps, self._total_timesteps)
                # For DQN, check if the target network should be updated
                # and update the exploration schedule
                # For SAC/TD3, the update is done as the same time as the gradient update
                # see https://github.com/hill-a/stable-baselines/issues/900
                self._on_step()
                # Step budget reached: stop even mid-episode.
                if 0 < n_steps <= total_steps:
                    break
            if done:
                total_episodes += 1
                self._episode_num += 1
                episode_rewards.append(episode_reward)
                total_timesteps.append(episode_timesteps)
                if action_noise is not None:
                    action_noise.reset()
                # Log training infos
                if log_interval is not None and log_interval > 0 and self._episode_num % log_interval == 0:
                    self._dump_logs()
        mean_reward = np.mean(episode_rewards) if total_episodes > 0 else 0.0
        callback.on_rollout_end()
        return RolloutReturn(mean_reward, total_steps, total_episodes,
                             continue_training)
class OnPolicyAlgorithm(BaseAlgorithm):
    """
    The base for On-Policy algorithms (ex: A2C/PPO).
    :param policy: The policy model to use (MlpPolicy, ...)
    :param env: The environment to learn from.
    :param learning_rate: The learning rate, it can be a function
        of the current progress remaining (from 1 to 0)
    :param n_steps: The number of steps to run for each environment per update
        (i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
    :param gamma: Discount factor
    :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator.
        Equivalent to classic advantage when set to 1.
    :param ent_coef: Entropy coefficient for the loss calculation
    :param vf_coef: Value function coefficient for the loss calculation
    :param max_grad_norm: The maximum value for the gradient clipping
    :param use_sde: Whether to use generalized State Dependent Exploration (gSDE)
        instead of action noise exploration (default: False)
    :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE
        Default: -1 (only sample at the beginning of the rollout)
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param create_eval_env: Whether to create a second environment that will be
        used for evaluating the agent periodically. (Only available when passing string for the environment)
    :param monitor_wrapper: When creating an environment, whether to wrap it
        or not in a Monitor wrapper.
    :param policy_kwargs: additional arguments to be passed to the policy on creation
    :param verbose: the verbosity level: 0 no output, 1 info, 2 debug
    :param seed: Seed for the pseudo random generators
    :param device: Device (cpu, cuda, ...) on which the code should be run.
        Setting it to auto, the code will be run on the GPU if possible.
    :param _init_setup_model: Whether or not to build the network at the creation of the instance
    :param supported_action_spaces: The action spaces supported by the algorithm.
    """

    def __init__(
        self,
        policy: Union[str, Type[ActorCriticPolicy]],
        env: Union[EveEnv, str],
        learning_rate: Union[float, Schedule],
        n_steps: int,
        gamma: float,
        gae_lambda: float,
        ent_coef: float,
        vf_coef: float,
        max_grad_norm: float,
        use_sde: bool,
        sde_sample_freq: int,
        tensorboard_log: Optional[str] = None,
        create_eval_env: bool = False,
        monitor_wrapper: bool = True,
        policy_kwargs: Optional[Dict[str, Any]] = None,
        verbose: int = 0,
        seed: Optional[int] = None,
        device: Union[th.device, str] = "auto",
        _init_setup_model: bool = True,
        supported_action_spaces: Optional[Tuple[space.EveSpace, ...]] = None,
        sample_episode: bool = False,
    ):
        super(OnPolicyAlgorithm, self).__init__(
            policy=policy,
            env=env,
            policy_base=ActorCriticPolicy,
            learning_rate=learning_rate,
            policy_kwargs=policy_kwargs,
            verbose=verbose,
            device=device,
            use_sde=use_sde,
            sde_sample_freq=sde_sample_freq,
            create_eval_env=create_eval_env,
            support_multi_env=True,
            seed=seed,
            tensorboard_log=tensorboard_log,
            supported_action_spaces=supported_action_spaces,
            sample_episode=sample_episode,
        )
        self.n_steps = n_steps
        self.gamma = gamma
        self.gae_lambda = gae_lambda
        self.ent_coef = ent_coef
        self.vf_coef = vf_coef
        self.max_grad_norm = max_grad_norm
        self.rollout_buffer = None
        if _init_setup_model:
            self._setup_model()

    def _setup_model(self) -> None:
        """Create the rollout buffer, instantiate the policy and move it to the device."""
        self._setup_lr_schedule()
        self.set_random_seed(self.seed)
        self.rollout_buffer = RolloutBuffer(
            self.n_steps,
            self.observation_space,
            self.action_space,
            self.device,
            gamma=self.gamma,
            gae_lambda=self.gae_lambda,
            n_envs=self.n_envs,
            sample_episode=self.sample_episode,
        )
        self.policy = self.policy_class(
            self.observation_space,
            self.action_space,
            self.lr_schedule,
            use_sde=self.use_sde,
            **self.policy_kwargs  # pytype:disable=not-instantiable
        )
        self.policy = self.policy.to(self.device)

    def collect_rollouts(self, env: VecEnv, callback: BaseCallback,
                         rollout_buffer: RolloutBuffer,
                         n_rollout_steps: int) -> bool:
        """
        Collect experiences using the current policy and fill a ``RolloutBuffer``.
        The term rollout here refers to the model-free notion and should not
        be used with the concept of rollout used in model-based RL or planning.
        :param env: The training environment
        :param callback: Callback that will be called at each step
            (and at the beginning and end of the rollout)
        :param rollout_buffer: Buffer to fill with rollouts
        :param n_steps: Number of experiences to collect per environment
        :return: True if function returned with at least `n_rollout_steps`
            collected, False if callback terminated rollout prematurely.
        """
        assert self._last_obs is not None, "No previous observation was provided"
        n_steps = 0
        rollout_buffer.reset()
        # Sample new weights for the state dependent exploration
        if self.use_sde:
            self.policy.reset_noise(env.num_envs)
        callback.on_rollout_start()
        while n_steps < n_rollout_steps:
            if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:
                # Sample a new noise matrix
                self.policy.reset_noise(env.num_envs)
            with th.no_grad():
                # Convert to pytorch tensor
                obs_tensor = th.as_tensor(self._last_obs).to(self.device)
                actions, values, log_probs = self.policy.forward(obs_tensor)
            actions = actions.cpu().numpy()
            # Rescale and perform action
            # Clip the actions to avoid out of bound error
            if isinstance(self.action_space, space.EveBox):
                clipped_actions = np.clip(actions, self.action_space.low,
                                          self.action_space.high)
            else:
                clipped_actions = actions
            new_obs, rewards, dones, infos = env.step(clipped_actions)
            self.num_timesteps += env.num_envs
            # Give access to local variables
            callback.update_locals(locals())
            # Only a return value of False (not None) aborts the rollout.
            if callback.on_step() is False:
                return False
            self._update_info_buffer(infos)
            n_steps += 1
            # if isinstance(self.action_space, space.EveDiscrete):
            #     # Reshape in case of discrete action
            #     # actions = actions.reshape(-1, 1)
            rollout_buffer.add(self._last_obs, actions, rewards,
                               self._last_dones, values, log_probs)
            self._last_obs = new_obs
            self._last_dones = dones
        with th.no_grad():
            # Compute value for the last timestep
            obs_tensor = th.as_tensor(new_obs).to(self.device)
            _, values, _ = self.policy.forward(obs_tensor)
        # Bootstrap from the last value estimate to compute GAE returns.
        rollout_buffer.compute_returns_and_advantage(last_values=values,
                                                     dones=dones)
        callback.on_rollout_end()
        return True

    def train(self) -> None:
        """
        Consume current rollout data and update policy parameters.
        Implemented by individual algorithms.
        """
        raise NotImplementedError

    def learn(
        self,
        total_timesteps: int,
        callback: MaybeCallback = None,
        log_interval: int = 1,
        eval_env: Optional[EveEnv] = None,
        eval_freq: int = -1,
        n_eval_episodes: int = 5,
        tb_log_name: str = "OnPolicyAlgorithm",
        eval_log_path: Optional[str] = None,
        reset_num_timesteps: bool = True,
    ) -> "OnPolicyAlgorithm":
        """Alternate between rollout collection and policy updates until
        ``total_timesteps`` environment steps have been performed.

        :return: the trained model (``self``)
        """
        iteration = 0
        total_timesteps, callback = self._setup_learn(
            total_timesteps, eval_env, callback, eval_freq, n_eval_episodes,
            eval_log_path, reset_num_timesteps, tb_log_name)
        callback.on_training_start(locals(), globals())
        while self.num_timesteps < total_timesteps:
            continue_training = self.collect_rollouts(
                self.env,
                callback,
                self.rollout_buffer,
                n_rollout_steps=self.n_steps)
            if continue_training is False:
                break
            iteration += 1
            self._update_current_progress_remaining(self.num_timesteps,
                                                    total_timesteps)
            # Display training infos
            if log_interval is not None and iteration % log_interval == 0:
                fps = int(self.num_timesteps / (time.time() - self.start_time))
                logger.record("time/iterations",
                              iteration,
                              exclude="tensorboard")
                if len(self.ep_info_buffer) > 0 and len(
                        self.ep_info_buffer[0]) > 0:
                    logger.record(
                        "rollout/ep_rew_mean",
                        safe_mean(
                            [ep_info["r"] for ep_info in self.ep_info_buffer]))
                    logger.record(
                        "rollout/ep_len_mean",
                        safe_mean(
                            [ep_info["l"] for ep_info in self.ep_info_buffer]))
                logger.record("time/fps", fps)
                logger.record("time/time_elapsed",
                              int(time.time() - self.start_time),
                              exclude="tensorboard")
                logger.record("time/total_timesteps",
                              self.num_timesteps,
                              exclude="tensorboard")
                logger.dump(step=self.num_timesteps)
            self.train()
        callback.on_training_end()
        return self

    def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:
        """Names of attributes holding torch state-dicts and (second) raw torch variables."""
        state_dicts = ["policy", "policy.optimizer"]
        return state_dicts, []
| 40.757177 | 110 | 0.5998 |
8f721f14f5570b36a60bbfa9379b2a706fc000a4 | 470 | py | Python | clock_time.py | aaronsaderholm/bad-arduino-clock | 5e46d9843b48b898a49de7d073e042c95c76cc5d | [
"MIT"
] | null | null | null | clock_time.py | aaronsaderholm/bad-arduino-clock | 5e46d9843b48b898a49de7d073e042c95c76cc5d | [
"MIT"
] | null | null | null | clock_time.py | aaronsaderholm/bad-arduino-clock | 5e46d9843b48b898a49de7d073e042c95c76cc5d | [
"MIT"
import time
import serial
import struct

# Configure the serial connection (the parameters differ on the device
# you are connecting to).
ser = serial.Serial(port='/dev/cu.usbserial-A900JEHX', baudrate=115200)
# Give the board time to reset after the port is opened.
time.sleep(2)

# Current Unix time in whole seconds. Use a separate name: the original
# `time = int(time.time())` shadowed the `time` module, so every later
# `time.sleep(...)` raised AttributeError.
now = int(time.time())
print(now, "\n")
# Send the timestamp as 4 little-endian bytes. `bytes(now)` would allocate
# `now` zero bytes (over a gigabyte) instead of encoding the value.
# NOTE(review): assumes the Arduino sketch reads a 32-bit little-endian
# integer — confirm against the sketch's parsing code.
ser.write(struct.pack('<I', now))
time.sleep(1)

while True:
    try:
        print(ser.readline())
        time.sleep(1)
    except serial.SerialException:
        # Exception classes live on the `serial` module, not on the port
        # instance (`ser.SerialTimeoutException` was an AttributeError).
        print('Data could not be read')
        time.sleep(1)
959570dd9111f4e560b212e2b9595bc24781a7d8 | 66 | py | Python | popcorn/image_processing/fiberExtractors.py | bertrand-faure/popcorn | 757ceee636bd5c4973373efde0f99943f94e4e19 | [
"MIT"
] | 11 | 2021-03-03T17:34:24.000Z | 2021-11-20T02:27:01.000Z | popcorn/image_processing/fiberExtractors.py | bertrand-faure/popcorn | 757ceee636bd5c4973373efde0f99943f94e4e19 | [
"MIT"
] | null | null | null | popcorn/image_processing/fiberExtractors.py | bertrand-faure/popcorn | 757ceee636bd5c4973373efde0f99943f94e4e19 | [
"MIT"
] | 4 | 2021-01-12T10:01:00.000Z | 2021-12-03T10:08:11.000Z | from skimage import morphology
import numpy as np
import math
| 8.25 | 30 | 0.787879 |
fa96baab15e0c00870bc043ac02e31c10b222759 | 65,256 | py | Python | pennylane/ops/qubit/non_parametric_ops.py | eddddddy/pennylane | bcd837b1ed1187895c327abfe62aea71fbeba02f | [
"Apache-2.0"
] | null | null | null | pennylane/ops/qubit/non_parametric_ops.py | eddddddy/pennylane | bcd837b1ed1187895c327abfe62aea71fbeba02f | [
"Apache-2.0"
] | null | null | null | pennylane/ops/qubit/non_parametric_ops.py | eddddddy/pennylane | bcd837b1ed1187895c327abfe62aea71fbeba02f | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This submodule contains the discrete-variable quantum operations that do
not depend on any parameters.
"""
# pylint:disable=abstract-method,arguments-differ,protected-access,invalid-overridden-method, no-member
import cmath
import warnings
import numpy as np
from scipy.linalg import block_diag
import pennylane as qml
from pennylane.operation import AnyWires, Observable, Operation
from pennylane.utils import pauli_eigs
from pennylane.wires import Wires
# Precomputed 1/sqrt(2); reused when building gate matrices (e.g. Hadamard).
INV_SQRT2 = 1 / qml.math.sqrt(2)
class Hadamard(Observable, Operation):
r"""Hadamard(wires)
The Hadamard operator
.. math:: H = \frac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1\\ 1 & -1\end{bmatrix}.
**Details:**
* Number of wires: 1
* Number of parameters: 0
Args:
wires (Sequence[int] or int): the wire the operation acts on
"""
num_wires = 1
"""int: Number of wires that the operator acts on."""
num_params = 0
"""int: Number of trainable parameters that the operator depends on."""
def label(self, decimals=None, base_label=None, cache=None):
return base_label or "H"
@staticmethod
def compute_matrix(): # pylint: disable=arguments-differ
r"""Representation of the operator as a canonical matrix in the computational basis (static method).
The canonical matrix is the textbook matrix representation that does not consider wires.
Implicitly, this assumes that the wires of the operator correspond to the global wire order.
.. seealso:: :meth:`~.Hadamard.matrix`
Returns:
ndarray: matrix
**Example**
>>> print(qml.Hadamard.compute_matrix())
[[ 0.70710678 0.70710678]
[ 0.70710678 -0.70710678]]
"""
return np.array([[INV_SQRT2, INV_SQRT2], [INV_SQRT2, -INV_SQRT2]])
@staticmethod
def compute_eigvals(): # pylint: disable=arguments-differ
r"""Eigenvalues of the operator in the computational basis (static method).
If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
the operator can be reconstructed as
.. math:: O = U \Sigma U^{\dagger},
where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
Otherwise, no particular order for the eigenvalues is guaranteed.
.. seealso:: :meth:`~.Hadamard.eigvals`
Returns:
array: eigenvalues
**Example**
>>> print(qml.Hadamard.compute_eigvals())
[ 1 -1]
"""
return pauli_eigs(1)
@staticmethod
def compute_diagonalizing_gates(wires):
r"""Sequence of gates that diagonalize the operator in the computational basis (static method).
Given the eigendecomposition :math:`O = U \Sigma U^{\dagger}` where
:math:`\Sigma` is a diagonal matrix containing the eigenvalues,
the sequence of diagonalizing gates implements the unitary :math:`U`.
The diagonalizing gates rotate the state into the eigenbasis
of the operator.
.. seealso:: :meth:`~.Hadamard.diagonalizing_gates`.
Args:
wires (Iterable[Any], Wires): wires that the operator acts on
Returns:
list[.Operator]: list of diagonalizing gates
**Example**
>>> print(qml.Hadamard.compute_diagonalizing_gates(wires=[0]))
[RY(-0.7853981633974483, wires=[0])]
"""
return [qml.RY(-np.pi / 4, wires=wires)]
@staticmethod
def compute_decomposition(wires):
r"""Representation of the operator as a product of other operators (static method).
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.Hadamard.decomposition`.
Args:
wires (Any, Wires): Wire that the operator acts on.
Returns:
list[Operator]: decomposition of the operator
**Example:**
>>> print(qml.Hadamard.compute_decomposition(0))
[PhaseShift(1.5707963267948966, wires=[0]),
RX(1.5707963267948966, wires=[0]),
PhaseShift(1.5707963267948966, wires=[0])]
"""
decomp_ops = [
qml.PhaseShift(np.pi / 2, wires=wires),
qml.RX(np.pi / 2, wires=wires),
qml.PhaseShift(np.pi / 2, wires=wires),
]
return decomp_ops
    def adjoint(self):
        # The Hadamard gate is self-adjoint: its adjoint is a fresh copy on
        # the same wires.
        return Hadamard(wires=self.wires)
    def single_qubit_rot_angles(self):
        """Return the ZYZ Euler angles ``[phi, theta, omega]`` for this gate."""
        # H = RZ(\pi) RY(\pi/2) RZ(0)
        return [np.pi, np.pi / 2, 0.0]
    def pow(self, z):
        # The gate squares to the identity, so only the exponent's value
        # modulo 2 matters; the base class handles the reduced power.
        return super().pow(z % 2)
class PauliX(Observable, Operation):
    r"""PauliX(wires)
    The Pauli X operator
    .. math:: \sigma_x = \begin{bmatrix} 0 & 1 \\ 1 & 0\end{bmatrix}.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 0
    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_wires = 1
    """int: Number of wires that the operator acts on."""
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    basis = "X"
    def label(self, decimals=None, base_label=None, cache=None):
        # Drawing label; a truthy ``base_label`` overrides the default "X".
        return base_label or "X"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.PauliX.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.PauliX.compute_matrix())
        [[0 1]
        [1 0]]
        """
        return np.array([[0, 1], [1, 0]])
    @staticmethod
    def compute_eigvals():  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.PauliX.eigvals`
        Returns:
            array: eigenvalues
        **Example**
        >>> print(qml.PauliX.compute_eigvals())
        [ 1 -1]
        """
        return pauli_eigs(1)
    @staticmethod
    def compute_diagonalizing_gates(wires):
        r"""Sequence of gates that diagonalize the operator in the computational basis (static method).
        Given the eigendecomposition :math:`O = U \Sigma U^{\dagger}` where
        :math:`\Sigma` is a diagonal matrix containing the eigenvalues,
        the sequence of diagonalizing gates implements the unitary :math:`U`.
        The diagonalizing gates rotate the state into the eigenbasis
        of the operator.
        .. seealso:: :meth:`~.PauliX.diagonalizing_gates`.
        Args:
            wires (Iterable[Any], Wires): wires that the operator acts on
        Returns:
            list[.Operator]: list of diagonalizing gates
        **Example**
        >>> print(qml.PauliX.compute_diagonalizing_gates(wires=[0]))
        [Hadamard(wires=[0])]
        """
        return [Hadamard(wires=wires)]
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.PauliX.decomposition`.
        Args:
            wires (Any, Wires): Wire that the operator acts on.
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.PauliX.compute_decomposition(0))
        [PhaseShift(1.5707963267948966, wires=[0]),
        RX(3.141592653589793, wires=[0]),
        PhaseShift(1.5707963267948966, wires=[0])]
        """
        decomp_ops = [
            qml.PhaseShift(np.pi / 2, wires=wires),
            qml.RX(np.pi, wires=wires),
            qml.PhaseShift(np.pi / 2, wires=wires),
        ]
        return decomp_ops
    def adjoint(self):
        # Pauli X is self-adjoint.
        return PauliX(wires=self.wires)
    def pow(self, z):
        z_mod2 = z % 2
        # X**0.5 (up to numerical tolerance) is the square-root-of-X gate SX;
        # every other power is delegated to the generic implementation.
        if abs(z_mod2 - 0.5) < 1e-6:
            return [SX(wires=self.wires)]
        return super().pow(z_mod2)
    def _controlled(self, wire):
        # Controlled-X is CNOT with ``wire`` prepended as the control.
        # NOTE(review): nothing is returned -- presumably the constructed
        # operator is queued as a side effect; confirm against the framework.
        CNOT(wires=Wires(wire) + self.wires)
    def single_qubit_rot_angles(self):
        # X = RZ(-\pi/2) RY(\pi) RZ(\pi/2)
        return [np.pi / 2, np.pi, -np.pi / 2]
class PauliY(Observable, Operation):
    r"""PauliY(wires)
    The Pauli Y operator
    .. math:: \sigma_y = \begin{bmatrix} 0 & -i \\ i & 0\end{bmatrix}.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 0
    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_wires = 1
    """int: Number of wires that the operator acts on."""
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    basis = "Y"
    def label(self, decimals=None, base_label=None, cache=None):
        # Drawing label; a truthy ``base_label`` overrides the default "Y".
        return base_label or "Y"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.PauliY.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.PauliY.compute_matrix())
        [[ 0.+0.j -0.-1.j]
        [ 0.+1.j  0.+0.j]]
        """
        return np.array([[0, -1j], [1j, 0]])
    @staticmethod
    def compute_eigvals():  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.PauliY.eigvals`
        Returns:
            array: eigenvalues
        **Example**
        >>> print(qml.PauliY.compute_eigvals())
        [ 1 -1]
        """
        return pauli_eigs(1)
    @staticmethod
    def compute_diagonalizing_gates(wires):
        r"""Sequence of gates that diagonalize the operator in the computational basis (static method).
        Given the eigendecomposition :math:`O = U \Sigma U^{\dagger}` where
        :math:`\Sigma` is a diagonal matrix containing the eigenvalues,
        the sequence of diagonalizing gates implements the unitary :math:`U`.
        The diagonalizing gates rotate the state into the eigenbasis
        of the operator.
        .. seealso:: :meth:`~.PauliY.diagonalizing_gates`.
        Args:
            wires (Iterable[Any], Wires): wires that the operator acts on
        Returns:
            list[.Operator]: list of diagonalizing gates
        **Example**
        >>> print(qml.PauliY.compute_diagonalizing_gates(wires=[0]))
        [PauliZ(wires=[0]), S(wires=[0]), Hadamard(wires=[0])]
        """
        return [
            PauliZ(wires=wires),
            S(wires=wires),
            Hadamard(wires=wires),
        ]
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.PauliY.decomposition`.
        Args:
            wires (Any, Wires): Single wire that the operator acts on.
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.PauliY.compute_decomposition(0))
        [PhaseShift(1.5707963267948966, wires=[0]),
        RY(3.141592653589793, wires=[0]),
        PhaseShift(1.5707963267948966, wires=[0])]
        """
        decomp_ops = [
            qml.PhaseShift(np.pi / 2, wires=wires),
            qml.RY(np.pi, wires=wires),
            qml.PhaseShift(np.pi / 2, wires=wires),
        ]
        return decomp_ops
    def adjoint(self):
        # Pauli Y is self-adjoint.
        return PauliY(wires=self.wires)
    def pow(self, z):
        # Squares to identity: only exponent parity matters.
        return super().pow(z % 2)
    def _controlled(self, wire):
        # Controlled-Y is CY with ``wire`` prepended as the control.
        # NOTE(review): relies on operator auto-queuing; no return value.
        CY(wires=Wires(wire) + self.wires)
    def single_qubit_rot_angles(self):
        # Y = RZ(0) RY(\pi) RZ(0)
        return [0.0, np.pi, 0.0]
class PauliZ(Observable, Operation):
    r"""PauliZ(wires)
    The Pauli Z operator
    .. math:: \sigma_z = \begin{bmatrix} 1 & 0 \\ 0 & -1\end{bmatrix}.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 0
    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_wires = 1
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    basis = "Z"
    def label(self, decimals=None, base_label=None, cache=None):
        # Drawing label; a truthy ``base_label`` overrides the default "Z".
        return base_label or "Z"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.PauliZ.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.PauliZ.compute_matrix())
        [[ 1  0]
        [ 0 -1]]
        """
        return np.array([[1, 0], [0, -1]])
    @staticmethod
    def compute_eigvals():  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.PauliZ.eigvals`
        Returns:
            array: eigenvalues
        **Example**
        >>> print(qml.PauliZ.compute_eigvals())
        [ 1 -1]
        """
        return pauli_eigs(1)
    @staticmethod
    def compute_diagonalizing_gates(wires):  # pylint: disable=unused-argument
        r"""Sequence of gates that diagonalize the operator in the computational basis (static method).
        Given the eigendecomposition :math:`O = U \Sigma U^{\dagger}` where
        :math:`\Sigma` is a diagonal matrix containing the eigenvalues,
        the sequence of diagonalizing gates implements the unitary :math:`U`.
        The diagonalizing gates rotate the state into the eigenbasis
        of the operator.
        .. seealso:: :meth:`~.PauliZ.diagonalizing_gates`.
        Args:
            wires (Iterable[Any] or Wires): wires that the operator acts on
        Returns:
            list[.Operator]: list of diagonalizing gates
        **Example**
        >>> print(qml.PauliZ.compute_diagonalizing_gates(wires=[0]))
        []
        """
        # Already diagonal in the computational basis: nothing to do.
        return []
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.PauliZ.decomposition`.
        Args:
            wires (Any, Wires): Single wire that the operator acts on.
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.PauliZ.compute_decomposition(0))
        [PhaseShift(3.141592653589793, wires=[0])]
        """
        return [qml.PhaseShift(np.pi, wires=wires)]
    def adjoint(self):
        # Pauli Z is self-adjoint.
        return PauliZ(wires=self.wires)
    def pow(self, z):
        # Powers reduce modulo 2.  Named special cases: Z**0 = I, Z**1 = Z,
        # Z**0.5 = S, Z**0.25 = T (half/quarter powers compared with a
        # tolerance to accept float exponents); anything else becomes an
        # equivalent PhaseShift.
        z_mod2 = z % 2
        if z_mod2 == 0:
            return []
        if z_mod2 == 1:
            return [self.__copy__()]
        if abs(z_mod2 - 0.5) < 1e-6:
            return [S(wires=self.wires)]
        if abs(z_mod2 - 0.25) < 1e-6:
            return [T(wires=self.wires)]
        return [qml.PhaseShift(np.pi * z_mod2, wires=self.wires)]
    def _controlled(self, wire):
        # Controlled-Z is CZ with ``wire`` prepended as the control.
        # NOTE(review): relies on operator auto-queuing; no return value.
        CZ(wires=Wires(wire) + self.wires)
    def single_qubit_rot_angles(self):
        # Z = RZ(\pi) RY(0) RZ(0)
        return [np.pi, 0.0, 0.0]
class S(Operation):
    r"""S(wires)
    The single-qubit phase gate
    .. math:: S = \begin{bmatrix}
            1 & 0 \\
            0 & i
        \end{bmatrix}.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 0
    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_wires = 1
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    basis = "Z"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.S.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.S.compute_matrix())
        [[1.+0.j 0.+0.j]
        [0.+0.j 0.+1.j]]
        """
        return np.array([[1, 0], [0, 1j]])
    @staticmethod
    def compute_eigvals():  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.S.eigvals`
        Returns:
            array: eigenvalues
        **Example**
        >>> print(qml.S.compute_eigvals())
        [1.+0.j 0.+1.j]
        """
        return np.array([1, 1j])
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.S.decomposition`.
        Args:
            wires (Any, Wires): Single wire that the operator acts on.
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.S.compute_decomposition(0))
        [PhaseShift(1.5707963267948966, wires=[0])]
        """
        return [qml.PhaseShift(np.pi / 2, wires=wires)]
    def adjoint(self):
        # S is not self-adjoint; instead of building S^dagger explicitly,
        # return a copy with the inverse flag toggled.
        op = S(wires=self.wires)
        op.inverse = not self.inverse
        return op
    def pow(self, z):
        # Powers reduce modulo 4.  Named special cases: S**0 = I,
        # S**0.5 = T, S**1 = S, S**2 = Z; any other power becomes an
        # equivalent PhaseShift of angle pi*z/2.
        z_mod4 = z % 4
        pow_map = {
            0: lambda op: [],
            0.5: lambda op: [T(wires=op.wires)],
            1: lambda op: [op.__copy__()],
            2: lambda op: [PauliZ(wires=op.wires)],
        }
        return pow_map.get(z_mod4, lambda op: [qml.PhaseShift(np.pi * z_mod4 / 2, wires=op.wires)])(
            self
        )
    def single_qubit_rot_angles(self):
        # S = RZ(\pi/2) RY(0) RZ(0)
        return [np.pi / 2, 0.0, 0.0]
class T(Operation):
    r"""T(wires)
    The single-qubit T gate
    .. math:: T = \begin{bmatrix}
            1 & 0 \\
            0 & e^{\frac{i\pi}{4}}
        \end{bmatrix}.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 0
    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_wires = 1
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    basis = "Z"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.T.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.T.compute_matrix())
        [[1.+0.j 0.        +0.j        ]
        [0.+0.j 0.70710678+0.70710678j]]
        """
        return np.array([[1, 0], [0, cmath.exp(1j * np.pi / 4)]])
    @staticmethod
    def compute_eigvals():  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.T.eigvals`
        Returns:
            array: eigenvalues
        **Example**
        >>> print(qml.T.compute_eigvals())
        [1.+0.j 0.70710678+0.70710678j]
        """
        return np.array([1, cmath.exp(1j * np.pi / 4)])
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.T.decomposition`.
        Args:
            wires (Any, Wires): Single wire that the operator acts on.
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.T.compute_decomposition(0))
        [PhaseShift(0.7853981633974483, wires=[0])]
        """
        return [qml.PhaseShift(np.pi / 4, wires=wires)]
    def pow(self, z):
        # Powers reduce modulo 8.  Named special cases: T**0 = I,
        # T**1 = T, T**2 = S, T**4 = Z; any other power becomes an
        # equivalent PhaseShift of angle pi*z/4.
        z_mod8 = z % 8
        pow_map = {
            0: lambda op: [],
            1: lambda op: [op.__copy__()],
            2: lambda op: [S(wires=op.wires)],
            4: lambda op: [PauliZ(wires=op.wires)],
        }
        return pow_map.get(z_mod8, lambda op: [qml.PhaseShift(np.pi * z_mod8 / 4, wires=op.wires)])(
            self
        )
    def adjoint(self):
        # T is not self-adjoint; return a copy with the inverse flag toggled.
        op = T(wires=self.wires)
        op.inverse = not self.inverse
        return op
    def single_qubit_rot_angles(self):
        # T = RZ(\pi/4) RY(0) RZ(0)
        return [np.pi / 4, 0.0, 0.0]
class SX(Operation):
    r"""SX(wires)
    The single-qubit Square-Root X operator.
    .. math:: SX = \sqrt{X} = \frac{1}{2} \begin{bmatrix}
            1+i &   1-i \\
            1-i &   1+i \\
        \end{bmatrix}.
    **Details:**
    * Number of wires: 1
    * Number of parameters: 0
    Args:
        wires (Sequence[int] or int): the wire the operation acts on
    """
    num_wires = 1
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    basis = "X"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.SX.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.SX.compute_matrix())
        [[0.5+0.5j 0.5-0.5j]
        [0.5-0.5j 0.5+0.5j]]
        """
        return 0.5 * np.array([[1 + 1j, 1 - 1j], [1 - 1j, 1 + 1j]])
    @staticmethod
    def compute_eigvals():  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.SX.eigvals`
        Returns:
            array: eigenvalues
        **Example**
        >>> print(qml.SX.compute_eigvals())
        [1.+0.j 0.+1.j]
        """
        return np.array([1, 1j])
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.SX.decomposition`.
        Args:
            wires (Any, Wires): Single wire that the operator acts on.
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.SX.compute_decomposition(0))
        [RZ(1.5707963267948966, wires=[0]),
        RY(1.5707963267948966, wires=[0]),
        RZ(-3.141592653589793, wires=[0]),
        PhaseShift(1.5707963267948966, wires=[0])]
        """
        decomp_ops = [
            qml.RZ(np.pi / 2, wires=wires),
            qml.RY(np.pi / 2, wires=wires),
            qml.RZ(-np.pi, wires=wires),
            qml.PhaseShift(np.pi / 2, wires=wires),
        ]
        return decomp_ops
    def pow(self, z):
        # SX**2 = X (exact integer check); all other powers reduce modulo 4
        # and are handled by the base implementation.
        z_mod4 = z % 4
        if z_mod4 == 2:
            return [PauliX(wires=self.wires)]
        return super().pow(z_mod4)
    def adjoint(self):
        # SX is not self-adjoint; return a copy with the inverse flag toggled.
        op = SX(wires=self.wires)
        op.inverse = not self.inverse
        return op
    def single_qubit_rot_angles(self):
        # SX = RZ(-\pi/2) RY(\pi/2) RZ(\pi/2)
        return [np.pi / 2, np.pi / 2, -np.pi / 2]
class CNOT(Operation):
    r"""CNOT(wires)
    The controlled-NOT operator
    .. math:: CNOT = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & 0 & 1\\
            0 & 0 & 1 & 0
        \end{bmatrix}.
    .. note:: The first wire provided corresponds to the **control qubit**.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 0
    Args:
        wires (Sequence[int]): the wires the operation acts on
    """
    num_wires = 2
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    basis = "X"
    def label(self, decimals=None, base_label=None, cache=None):
        # Only the target symbol is labelled; the control is drawn separately.
        return base_label or "X"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.CNOT.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.CNOT.compute_matrix())
        [[1 0 0 0]
        [0 1 0 0]
        [0 0 0 1]
        [0 0 1 0]]
        """
        return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]])
    def adjoint(self):
        # CNOT is self-adjoint.
        return CNOT(wires=self.wires)
    def pow(self, z):
        # Squares to identity: only exponent parity matters.
        return super().pow(z % 2)
    def _controlled(self, wire):
        # Adding a second control to CNOT yields the Toffoli gate.
        # NOTE(review): relies on operator auto-queuing; no return value.
        Toffoli(wires=Wires(wire) + self.wires)
    @property
    def control_wires(self):
        # Per the class docstring, the first wire is the control qubit.
        return Wires(self.wires[0])
class CZ(Operation):
    r"""CZ(wires)
    The controlled-Z operator
    .. math:: CZ = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & 1 & 0\\
            0 & 0 & 0 & -1
        \end{bmatrix}.
    .. note:: The first wire provided corresponds to the **control qubit**.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 0
    Args:
        wires (Sequence[int]): the wires the operation acts on
    """
    num_wires = 2
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    basis = "Z"
    def label(self, decimals=None, base_label=None, cache=None):
        # Only the target symbol is labelled; the control is drawn separately.
        return base_label or "Z"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.CZ.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.CZ.compute_matrix())
        [[ 1  0  0  0]
        [ 0  1  0  0]
        [ 0  0  1  0]
        [ 0  0  0 -1]]
        """
        return np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, -1]])
    @staticmethod
    def compute_eigvals():  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.CZ.eigvals`
        Returns:
            array: eigenvalues
        **Example**
        >>> print(qml.CZ.compute_eigvals())
        [1, 1, 1, -1]
        """
        return np.array([1, 1, 1, -1])
    def adjoint(self):
        # CZ is self-adjoint (real diagonal matrix).
        return CZ(wires=self.wires)
    def pow(self, z):
        # Squares to identity: only exponent parity matters.
        return super().pow(z % 2)
    @property
    def control_wires(self):
        # Per the class docstring, the first wire is the control qubit.
        return Wires(self.wires[0])
class CY(Operation):
    r"""CY(wires)
    The controlled-Y operator
    .. math:: CY = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0\\
            0 & 0 & 0 & -i\\
            0 & 0 & i & 0
        \end{bmatrix}.
    .. note:: The first wire provided corresponds to the **control qubit**.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 0
    Args:
        wires (Sequence[int]): the wires the operation acts on
    """
    num_wires = 2
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    basis = "Y"
    def label(self, decimals=None, base_label=None, cache=None):
        # Only the target symbol is labelled; the control is drawn separately.
        return base_label or "Y"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.CY.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.CY.compute_matrix())
        [[ 1.+0.j  0.+0.j  0.+0.j  0.+0.j]
        [ 0.+0.j  1.+0.j  0.+0.j  0.+0.j]
        [ 0.+0.j  0.+0.j  0.+0.j -0.-1.j]
        [ 0.+0.j  0.+0.j  0.+1.j  0.+0.j]]
        """
        return np.array(
            [
                [1, 0, 0, 0],
                [0, 1, 0, 0],
                [0, 0, 0, -1j],
                [0, 0, 1j, 0],
            ]
        )
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.CY.decomposition`.
        Args:
            wires (Iterable, Wires): wires that the operator acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.CY.compute_decomposition([0, 1]))
        [CRY(3.141592653589793, wires=[0, 1]), S(wires=[0])]
        """
        # CRY(pi) gives controlled-RY(pi); the extra S on the control wire
        # supplies the phase that turns RY(pi) into Y on the target.
        return [qml.CRY(np.pi, wires=wires), S(wires=wires[0])]
    def adjoint(self):
        # CY is self-adjoint.
        return CY(wires=self.wires)
    def pow(self, z):
        # Squares to identity: only exponent parity matters.
        return super().pow(z % 2)
    @property
    def control_wires(self):
        # Per the class docstring, the first wire is the control qubit.
        return Wires(self.wires[0])
class SWAP(Operation):
    r"""SWAP(wires)
    The swap operator
    .. math:: SWAP = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 0 & 1 & 0\\
            0 & 1 & 0 & 0\\
            0 & 0 & 0 & 1
        \end{bmatrix}.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 0
    Args:
        wires (Sequence[int]): the wires the operation acts on
    """
    num_wires = 2
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.SWAP.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.SWAP.compute_matrix())
        [[1 0 0 0]
        [0 0 1 0]
        [0 1 0 0]
        [0 0 0 1]]
        """
        return np.array([[1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]])
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.SWAP.decomposition`.
        Args:
            wires (Iterable, Wires): wires that the operator acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.SWAP.compute_decomposition((0,1)))
        [CNOT(wires=[0, 1]), CNOT(wires=[1, 0]), CNOT(wires=[0, 1])]
        """
        # Standard three-CNOT identity for SWAP.
        decomp_ops = [
            qml.CNOT(wires=[wires[0], wires[1]]),
            qml.CNOT(wires=[wires[1], wires[0]]),
            qml.CNOT(wires=[wires[0], wires[1]]),
        ]
        return decomp_ops
    def pow(self, z):
        # Squares to identity: only exponent parity matters.
        return super().pow(z % 2)
    def adjoint(self):
        # SWAP is self-adjoint.
        return SWAP(wires=self.wires)
    def _controlled(self, wire):
        # Adding a control to SWAP yields the Fredkin (CSWAP) gate.
        # Fix: wrap the control wire in ``Wires`` for consistency with the
        # other ``_controlled`` implementations (PauliX/PauliY/PauliZ/CNOT),
        # so plain wire labels are accepted as well as ``Wires`` instances.
        # NOTE(review): relies on operator auto-queuing; no return value.
        CSWAP(wires=Wires(wire) + self.wires)
class ISWAP(Operation):
    r"""ISWAP(wires)
    The i-swap operator
    .. math:: ISWAP = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 0 & i & 0\\
            0 & i & 0 & 0\\
            0 & 0 & 0 & 1
        \end{bmatrix}.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 0
    Args:
        wires (Sequence[int]): the wires the operation acts on
    """
    num_wires = 2
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.ISWAP.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.ISWAP.compute_matrix())
        [[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
        [0.+0.j 0.+0.j 0.+1.j 0.+0.j]
        [0.+0.j 0.+1.j 0.+0.j 0.+0.j]
        [0.+0.j 0.+0.j 0.+0.j 1.+0.j]]
        """
        return np.array([[1, 0, 0, 0], [0, 0, 1j, 0], [0, 1j, 0, 0], [0, 0, 0, 1]])
    @staticmethod
    def compute_eigvals():  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.ISWAP.eigvals`
        Returns:
            array: eigenvalues
        **Example**
        >>> print(qml.ISWAP.compute_eigvals())
        [1j, -1j, 1, 1]
        """
        return np.array([1j, -1j, 1, 1])
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.ISWAP.decomposition`.
        Args:
            wires (Iterable, Wires): wires that the operator acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.ISWAP.compute_decomposition((0,1)))
        [S(wires=[0]),
        S(wires=[1]),
        Hadamard(wires=[0]),
        CNOT(wires=[0, 1]),
        CNOT(wires=[1, 0]),
        Hadamard(wires=[1])]
        """
        decomp_ops = [
            S(wires=wires[0]),
            S(wires=wires[1]),
            Hadamard(wires=wires[0]),
            CNOT(wires=[wires[0], wires[1]]),
            CNOT(wires=[wires[1], wires[0]]),
            Hadamard(wires=wires[1]),
        ]
        return decomp_ops
    def adjoint(self):
        # ISWAP is not self-adjoint; return a copy with the inverse flag
        # toggled.
        op = ISWAP(wires=self.wires)
        op.inverse = not self.inverse
        return op
    def pow(self, z):
        # ISWAP**0.5 (up to numerical tolerance) is the named SISWAP gate;
        # every other power reduces modulo 2 in the base implementation.
        z_mod2 = z % 2
        if abs(z_mod2 - 0.5) < 1e-6:
            return [SISWAP(wires=self.wires)]
        return super().pow(z_mod2)
class SISWAP(Operation):
    r"""SISWAP(wires)
    The square root of i-swap operator. Can also be accessed as ``qml.SQISW``
    .. math:: SISWAP = \begin{bmatrix}
            1 & 0 & 0 & 0 \\
            0 & 1/ \sqrt{2} & i/\sqrt{2} & 0\\
            0 & i/ \sqrt{2} & 1/ \sqrt{2} & 0\\
            0 & 0 & 0 & 1
        \end{bmatrix}.
    **Details:**
    * Number of wires: 2
    * Number of parameters: 0
    Args:
        wires (Sequence[int]): the wires the operation acts on
    """
    num_wires = 2
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.SISWAP.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.SISWAP.compute_matrix())
        [[1.+0.j 0.+0.j 0.+0.j 0.+0.j]
        [0.+0.j 0.70710678+0.j 0.+0.70710678j 0.+0.j]
        [0.+0.j 0.+0.70710678j 0.70710678+0.j 0.+0.j]
        [0.+0.j 0.+0.j 0.+0.j 1.+0.j]]
        """
        return np.array(
            [
                [1, 0, 0, 0],
                [0, INV_SQRT2, INV_SQRT2 * 1j, 0],
                [0, INV_SQRT2 * 1j, INV_SQRT2, 0],
                [0, 0, 0, 1],
            ]
        )
    @staticmethod
    def compute_eigvals():  # pylint: disable=arguments-differ
        r"""Eigenvalues of the operator in the computational basis (static method).
        If :attr:`diagonalizing_gates` are specified and implement a unitary :math:`U`,
        the operator can be reconstructed as
        .. math:: O = U \Sigma U^{\dagger},
        where :math:`\Sigma` is the diagonal matrix containing the eigenvalues.
        Otherwise, no particular order for the eigenvalues is guaranteed.
        .. seealso:: :meth:`~.SISWAP.eigvals`
        Returns:
            array: eigenvalues
        **Example**
        >>> print(qml.SISWAP.compute_eigvals())
        [0.70710678+0.70710678j 0.70710678-0.70710678j 1.+0.j 1.+0.j]
        """
        return np.array([INV_SQRT2 * (1 + 1j), INV_SQRT2 * (1 - 1j), 1, 1])
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.SISWAP.decomposition`.
        Args:
            wires (Iterable, Wires): wires that the operator acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.SISWAP.compute_decomposition((0,1)))
        [SX(wires=[0]),
        RZ(1.5707963267948966, wires=[0]),
        CNOT(wires=[0, 1]),
        SX(wires=[0]),
        RZ(5.497787143782138, wires=[0]),
        SX(wires=[0]),
        RZ(1.5707963267948966, wires=[0]),
        SX(wires=[1]),
        RZ(5.497787143782138, wires=[1]),
        CNOT(wires=[0, 1]),
        SX(wires=[0]),
        SX(wires=[1])]
        """
        # 7*pi/4 below corresponds to the RZ(5.497787...) entries in the
        # docstring example.
        decomp_ops = [
            SX(wires=wires[0]),
            qml.RZ(np.pi / 2, wires=wires[0]),
            CNOT(wires=[wires[0], wires[1]]),
            SX(wires=wires[0]),
            qml.RZ(7 * np.pi / 4, wires=wires[0]),
            SX(wires=wires[0]),
            qml.RZ(np.pi / 2, wires=wires[0]),
            SX(wires=wires[1]),
            qml.RZ(7 * np.pi / 4, wires=wires[1]),
            CNOT(wires=[wires[0], wires[1]]),
            SX(wires=wires[0]),
            SX(wires=wires[1]),
        ]
        return decomp_ops
    def pow(self, z):
        # SISWAP**2 = ISWAP (exact integer check); other powers reduce
        # modulo 4 in the base implementation.
        z_mod4 = z % 4
        return [ISWAP(wires=self.wires)] if z_mod4 == 2 else super().pow(z_mod4)
    def adjoint(self):
        # SISWAP is not self-adjoint; return a copy with the inverse flag
        # toggled.
        op = SISWAP(wires=self.wires)
        op.inverse = not self.inverse
        return op
# Public alias: the square-root-of-iSWAP gate is also exposed as ``qml.SQISW``
# (see the SISWAP class docstring).
SQISW = SISWAP
class CSWAP(Operation):
    r"""CSWAP(wires)
    The controlled-swap operator
    .. math:: CSWAP = \begin{bmatrix}
            1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
            0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 \\
            0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 \\
            0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 \\
            0 & 0 & 0 & 0 & 1 & 0 & 0 & 0 \\
            0 & 0 & 0 & 0 & 0 & 0 & 1 & 0 \\
            0 & 0 & 0 & 0 & 0 & 1 & 0 & 0 \\
            0 & 0 & 0 & 0 & 0 & 0 & 0 & 1
        \end{bmatrix}.
    .. note:: The first wire provided corresponds to the **control qubit**.
    **Details:**
    * Number of wires: 3
    * Number of parameters: 0
    Args:
        wires (Sequence[int]): the wires the operation acts on
    """
    is_self_inverse = True
    num_wires = 3
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    def label(self, decimals=None, base_label=None, cache=None):
        # Drawn as a controlled SWAP; the control is rendered separately.
        return base_label or "SWAP"
    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).
        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.
        .. seealso:: :meth:`~.CSWAP.matrix`
        Returns:
            ndarray: matrix
        **Example**
        >>> print(qml.CSWAP.compute_matrix())
        [[1 0 0 0 0 0 0 0]
        [0 1 0 0 0 0 0 0]
        [0 0 1 0 0 0 0 0]
        [0 0 0 1 0 0 0 0]
        [0 0 0 0 1 0 0 0]
        [0 0 0 0 0 0 1 0]
        [0 0 0 0 0 1 0 0]
        [0 0 0 0 0 0 0 1]]
        """
        return np.array(
            [
                [1, 0, 0, 0, 0, 0, 0, 0],
                [0, 1, 0, 0, 0, 0, 0, 0],
                [0, 0, 1, 0, 0, 0, 0, 0],
                [0, 0, 0, 1, 0, 0, 0, 0],
                [0, 0, 0, 0, 1, 0, 0, 0],
                [0, 0, 0, 0, 0, 0, 1, 0],
                [0, 0, 0, 0, 0, 1, 0, 0],
                [0, 0, 0, 0, 0, 0, 0, 1],
            ]
        )
    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).
        .. math:: O = O_1 O_2 \dots O_n.
        .. seealso:: :meth:`~.CSWAP.decomposition`.
        Args:
            wires (Iterable, Wires): wires that the operator acts on
        Returns:
            list[Operator]: decomposition into lower level operations
        **Example:**
        >>> print(qml.CSWAP.compute_decomposition((0,1,2)))
        [Toffoli(wires=[0, 2, 1]), Toffoli(wires=[0, 1, 2]), Toffoli(wires=[0, 2, 1])]
        """
        # Controlled version of the three-CNOT SWAP identity.
        decomp_ops = [
            qml.Toffoli(wires=[wires[0], wires[2], wires[1]]),
            qml.Toffoli(wires=[wires[0], wires[1], wires[2]]),
            qml.Toffoli(wires=[wires[0], wires[2], wires[1]]),
        ]
        return decomp_ops
    def pow(self, z):
        # Squares to identity: only exponent parity matters.
        return super().pow(z % 2)
    def adjoint(self):
        # CSWAP is self-adjoint.
        return CSWAP(wires=self.wires)
    @property
    def control_wires(self):
        # Per the class docstring, the first wire is the control qubit.
        return Wires(self.wires[0])
class Toffoli(Operation):
    r"""Toffoli(wires)
    Toffoli (controlled-controlled-X) gate.

    .. math::

        Toffoli =
        \begin{pmatrix}
        1 & 0 & 0 & 0 & 0 & 0 & 0 & 0\\
        0 & 1 & 0 & 0 & 0 & 0 & 0 & 0\\
        0 & 0 & 1 & 0 & 0 & 0 & 0 & 0\\
        0 & 0 & 0 & 1 & 0 & 0 & 0 & 0\\
        0 & 0 & 0 & 0 & 1 & 0 & 0 & 0\\
        0 & 0 & 0 & 0 & 0 & 1 & 0 & 0\\
        0 & 0 & 0 & 0 & 0 & 0 & 0 & 1\\
        0 & 0 & 0 & 0 & 0 & 0 & 1 & 0
        \end{pmatrix}

    **Details:**

    * Number of wires: 3
    * Number of parameters: 0

    Args:
        wires (Sequence[int]): the subsystem the gate acts on
    """

    num_wires = 3
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""

    # The gate flips the target, i.e. it acts in the Pauli-X basis.
    basis = "X"

    def label(self, decimals=None, base_label=None, cache=None):
        """Drawing label for the target wire; controls are rendered as dots."""
        return base_label or "X"

    @staticmethod
    def compute_matrix():  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).

        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.

        .. seealso:: :meth:`~.Toffoli.matrix`

        Returns:
            ndarray: matrix

        **Example**

        >>> print(qml.Toffoli.compute_matrix())
        [[1 0 0 0 0 0 0 0]
         [0 1 0 0 0 0 0 0]
         [0 0 1 0 0 0 0 0]
         [0 0 0 1 0 0 0 0]
         [0 0 0 0 1 0 0 0]
         [0 0 0 0 0 1 0 0]
         [0 0 0 0 0 0 0 1]
         [0 0 0 0 0 0 1 0]]
        """
        # Identity except on the |11> control subspace, where the target is
        # flipped: exchange the last two computational basis states.
        matrix = np.eye(8, dtype=int)
        matrix[[6, 7]] = matrix[[7, 6]]
        return matrix

    @staticmethod
    def compute_decomposition(wires):
        r"""Representation of the operator as a product of other operators (static method).

        .. math:: O = O_1 O_2 \dots O_n.

        .. seealso:: :meth:`~.Toffoli.decomposition`.

        Args:
            wires (Iterable, Wires): wires that the operator acts on

        Returns:
            list[Operator]: decomposition into lower level operations

        **Example:**

        >>> print(qml.Toffoli.compute_decomposition((0,1,2)))
        [Hadamard(wires=[2]),
        CNOT(wires=[1, 2]),
        T.inv(wires=[2]),
        CNOT(wires=[0, 2]),
        T(wires=[2]),
        CNOT(wires=[1, 2]),
        T.inv(wires=[2]),
        CNOT(wires=[0, 2]),
        T(wires=[2]),
        T(wires=[1]),
        CNOT(wires=[0, 1]),
        Hadamard(wires=[2]),
        T(wires=[0]),
        T.inv(wires=[1]),
        CNOT(wires=[0, 1])]
        """
        # Standard 6-CNOT / 7-T decomposition of the Toffoli gate.
        # The gate order below is significant and must not be rearranged.
        ctrl0, ctrl1, target = wires[0], wires[1], wires[2]
        return [
            Hadamard(wires=target),
            CNOT(wires=[ctrl1, target]),
            T(wires=target).inv(),
            CNOT(wires=[ctrl0, target]),
            T(wires=target),
            CNOT(wires=[ctrl1, target]),
            T(wires=target).inv(),
            CNOT(wires=[ctrl0, target]),
            T(wires=target),
            T(wires=ctrl1),
            CNOT(wires=[ctrl0, ctrl1]),
            Hadamard(wires=target),
            T(wires=ctrl0),
            T(wires=ctrl1).inv(),
            CNOT(wires=[ctrl0, ctrl1]),
        ]

    def adjoint(self):
        """Toffoli is self-inverse, so the adjoint is a fresh copy."""
        return Toffoli(wires=self.wires)

    def pow(self, z):
        """Only the parity of the power matters for a self-inverse gate."""
        return super().pow(z % 2)

    @property
    def control_wires(self):
        """The first two wires are the control wires."""
        return Wires(self.wires[:2])
class MultiControlledX(Operation):
    r"""MultiControlledX(control_wires, wires, control_values)
    Apply a Pauli X gate controlled on an arbitrary computational basis state.

    **Details:**

    * Number of wires: Any (the operation can act on any number of wires)
    * Number of parameters: 0
    * Gradient recipe: None

    Args:
        control_wires (Union[Wires, Sequence[int], or int]): Deprecated way to indicate the control wires.
            Now users should use "wires" to indicate both the control wires and the target wire.
        wires (Union[Wires, Sequence[int], or int]): control wire(s) followed by a single target wire where
            the operation acts on
        control_values (str): a string of bits representing the state of the control
            wires to control on (default is the all 1s state)
        work_wires (Union[Wires, Sequence[int], or int]): optional work wires used to decompose
            the operation into a series of Toffoli gates

    .. note::

        If ``MultiControlledX`` is not supported on the targeted device, PennyLane will decompose
        the operation into :class:`~.Toffoli` and/or :class:`~.CNOT` gates. When controlling on
        three or more wires, the Toffoli-based decompositions described in Lemmas 7.2 and 7.3 of
        `Barenco et al. <https://arxiv.org/abs/quant-ph/9503016>`__ will be used. These methods
        require at least one work wire.

        The number of work wires provided determines the decomposition method used and the resulting
        number of Toffoli gates required. When ``MultiControlledX`` is controlling on :math:`n`
        wires:

        #. If at least :math:`n - 2` work wires are provided, the decomposition in Lemma 7.2 will be
           applied using the first :math:`n - 2` work wires.
        #. If fewer than :math:`n - 2` work wires are provided, a combination of Lemmas 7.3 and 7.2
           will be applied using only the first work wire.

        These methods present a tradeoff between qubit number and depth. The method in point 1
        requires fewer Toffoli gates but a greater number of qubits.

        Note that the state of the work wires before and after the decomposition takes place is
        unchanged.
    """
    # Controlled-X on any control pattern squares to the identity.
    is_self_inverse = True
    num_wires = AnyWires
    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    # No parameters, hence no gradient recipe.
    grad_method = None

    # pylint: disable=too-many-arguments
    def __init__(
        self,
        control_wires=None,
        wires=None,
        control_values=None,
        work_wires=None,
        do_queue=True,
    ):
        # Two calling conventions are supported:
        #   (a) new style: all wires in `wires`, last one is the target;
        #   (b) deprecated: controls in `control_wires`, target in `wires`.
        if wires is None:
            raise ValueError("Must specify the wires where the operation acts on")
        if control_wires is None:
            if len(wires) > 1:
                # New-style call: split `wires` into controls + single target.
                control_wires = Wires(wires[:-1])
                wires = Wires(wires[-1])
            else:
                raise ValueError(
                    "MultiControlledX: wrong number of wires. "
                    f"{len(wires)} wire(s) given. Need at least 2."
                )
        else:
            # Deprecated-style call: `wires` must be exactly the target wire.
            wires = Wires(wires)
            control_wires = Wires(control_wires)

            warnings.warn(
                "The control_wires keyword will be removed soon. "
                "Use wires = (control_wires, target_wire) instead. "
                "See the documentation for more information.",
                category=UserWarning,
            )

            if len(wires) != 1:
                raise ValueError("MultiControlledX accepts a single target wire.")

        work_wires = Wires([]) if work_wires is None else Wires(work_wires)
        total_wires = control_wires + wires

        # Work wires must be disjoint from the wires the gate acts on.
        if Wires.shared_wires([total_wires, work_wires]):
            raise ValueError("The work wires must be different from the control and target wires")

        # Default control pattern: all controls in state |1>.
        if not control_values:
            control_values = "1" * len(control_wires)

        self.hyperparameters["control_wires"] = control_wires
        self.hyperparameters["work_wires"] = work_wires
        self.hyperparameters["control_values"] = control_values

        super().__init__(wires=total_wires, do_queue=do_queue)

    def label(self, decimals=None, base_label=None, cache=None):
        """Drawing label for the target wire; controls are rendered as dots."""
        return base_label or "X"

    # pylint: disable=unused-argument
    @staticmethod
    def compute_matrix(
        control_wires, control_values=None, **kwargs
    ):  # pylint: disable=arguments-differ
        r"""Representation of the operator as a canonical matrix in the computational basis (static method).

        The canonical matrix is the textbook matrix representation that does not consider wires.
        Implicitly, this assumes that the wires of the operator correspond to the global wire order.

        .. seealso:: :meth:`~.MultiControlledX.matrix`

        Args:
            control_wires (Any or Iterable[Any]): wires to place controls on
            control_values (str): string of bits determining the controls

        Returns:
            tensor_like: matrix representation

        **Example**

        >>> print(qml.MultiControlledX.compute_matrix([0], '1'))
        [[1. 0. 0. 0.]
         [0. 1. 0. 0.]
         [0. 0. 0. 1.]
         [0. 0. 1. 0.]]
        >>> print(qml.MultiControlledX.compute_matrix([1], '0'))
        [[0. 1. 0. 0.]
         [1. 0. 0. 0.]
         [0. 0. 1. 0.]
         [0. 0. 0. 1.]]
        """
        if control_values is None:
            control_values = "1" * len(control_wires)

        if isinstance(control_values, str):
            if len(control_values) != len(control_wires):
                raise ValueError("Length of control bit string must equal number of control wires.")

            # Make sure all values are either 0 or 1
            if not set(control_values).issubset({"1", "0"}):
                raise ValueError("String of control values can contain only '0' or '1'.")

            # Interpret the bit string as the index of the controlled-on state.
            control_int = int(control_values, 2)
        else:
            raise ValueError("Control values must be passed as a string.")

        # The matrix is block-diagonal: identity everywhere except a single
        # 2x2 PauliX block at the position selected by the control pattern.
        # Each control-state index spans 2 rows (target qubit), hence * 2.
        padding_left = control_int * 2
        padding_right = 2 ** (len(control_wires) + 1) - 2 - padding_left
        cx = block_diag(np.eye(padding_left), PauliX.compute_matrix(), np.eye(padding_right))
        return cx

    @property
    def control_wires(self):
        # All wires except the last one (~0 == -1) are controls.
        return self.wires[:~0]

    def adjoint(self):
        """Self-inverse: the adjoint is an equivalent MultiControlledX."""
        return MultiControlledX(
            wires=self.wires,
            control_values=self.hyperparameters["control_values"],
        )

    def pow(self, z):
        # Self-inverse gate: only the parity of the power matters.
        return super().pow(z % 2)

    @staticmethod
    def compute_decomposition(wires=None, work_wires=None, control_values=None, **kwargs):
        r"""Representation of the operator as a product of other operators (static method).

        .. math:: O = O_1 O_2 \dots O_n.

        .. seealso:: :meth:`~.MultiControlledX.decomposition`.

        Args:
            wires (Iterable[Any] or Wires): wires that the operation acts on
            work_wires (Wires): optional work wires used to decompose
                the operation into a series of Toffoli gates.
            control_values (str): a string of bits representing the state of the control
                wires to control on (default is the all 1s state)

        Returns:
            list[Operator]: decomposition into lower level operations

        **Example:**

        >>> print(qml.MultiControlledX.compute_decomposition(wires=[0,1,2,3],control_values="111", work_wires=qml.wires.Wires("aux")))
        [Toffoli(wires=[2, 'aux', 3]),
        Toffoli(wires=[0, 1, 'aux']),
        Toffoli(wires=[2, 'aux', 3]),
        Toffoli(wires=[0, 1, 'aux'])]
        """
        # Last wire is the target; the rest are controls (~0 == -1).
        target_wire = wires[~0]
        control_wires = wires[:~0]

        if control_values is None:
            control_values = "1" * len(control_wires)

        # NOTE(review): if work_wires is None (its default) and there are more
        # than two controls, len(work_wires) raises TypeError rather than this
        # ValueError — presumably callers always pass work_wires; confirm.
        if len(control_wires) > 2 and len(work_wires) == 0:
            raise ValueError(
                "At least one work wire is required to decompose operation: MultiControlledX"
            )

        # PauliX "flips" convert 0-controls into 1-controls; applied before
        # and after the core decomposition to restore the control wires.
        flips1 = [
            qml.PauliX(control_wires[i]) for i, val in enumerate(control_values) if val == "0"
        ]

        if len(control_wires) == 1:
            decomp = [qml.CNOT(wires=[control_wires[0], target_wire])]
        elif len(control_wires) == 2:
            decomp = [qml.Toffoli(wires=[*control_wires, target_wire])]
        else:
            # Choose between Lemma 7.2 (many work wires, shallower) and
            # Lemma 7.3 (single work wire, deeper) of Barenco et al.
            num_work_wires_needed = len(control_wires) - 2

            if len(work_wires) >= num_work_wires_needed:
                decomp = MultiControlledX._decomposition_with_many_workers(
                    control_wires, target_wire, work_wires
                )
            else:
                work_wire = work_wires[0]
                decomp = MultiControlledX._decomposition_with_one_worker(
                    control_wires, target_wire, work_wire
                )

        flips2 = [
            qml.PauliX(control_wires[i]) for i, val in enumerate(control_values) if val == "0"
        ]

        return flips1 + decomp + flips2

    @staticmethod
    def _decomposition_with_many_workers(control_wires, target_wire, work_wires):
        """Decomposes the multi-controlled PauliX gate using the approach in Lemma 7.2 of
        https://arxiv.org/abs/quant-ph/9503016, which requires a suitably large register of
        work wires"""
        num_work_wires_needed = len(control_wires) - 2
        work_wires = work_wires[:num_work_wires_needed]

        work_wires_reversed = list(reversed(work_wires))
        control_wires_reversed = list(reversed(control_wires))

        gates = []

        # Gate sequence follows the V-shaped Toffoli ladder of Lemma 7.2;
        # the ordering of the four loops below is essential for correctness
        # (the second half uncomputes the work wires back to their input state).
        for i in range(len(work_wires)):
            ctrl1 = control_wires_reversed[i]
            ctrl2 = work_wires_reversed[i]
            t = target_wire if i == 0 else work_wires_reversed[i - 1]
            gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))

        gates.append(qml.Toffoli(wires=[*control_wires[:2], work_wires[0]]))

        for i in reversed(range(len(work_wires))):
            ctrl1 = control_wires_reversed[i]
            ctrl2 = work_wires_reversed[i]
            t = target_wire if i == 0 else work_wires_reversed[i - 1]
            gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))

        for i in range(len(work_wires) - 1):
            ctrl1 = control_wires_reversed[i + 1]
            ctrl2 = work_wires_reversed[i + 1]
            t = work_wires_reversed[i]
            gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))

        gates.append(qml.Toffoli(wires=[*control_wires[:2], work_wires[0]]))

        for i in reversed(range(len(work_wires) - 1)):
            ctrl1 = control_wires_reversed[i + 1]
            ctrl2 = work_wires_reversed[i + 1]
            t = work_wires_reversed[i]
            gates.append(qml.Toffoli(wires=[ctrl1, ctrl2, t]))

        return gates

    @staticmethod
    def _decomposition_with_one_worker(control_wires, target_wire, work_wire):
        """Decomposes the multi-controlled PauliX gate using the approach in Lemma 7.3 of
        https://arxiv.org/abs/quant-ph/9503016, which requires a single work wire"""
        # Split the controls into two halves; each recursive MultiControlledX
        # borrows the *other* half (plus target) as its work register.
        tot_wires = len(control_wires) + 2
        partition = int(np.ceil(tot_wires / 2))

        first_part = control_wires[:partition]
        second_part = control_wires[partition:]

        # The four-gate sandwich implements the full gate while restoring the
        # work wire; the alternating pattern must not be reordered.
        gates = [
            MultiControlledX(
                wires=first_part + work_wire,
                work_wires=second_part + target_wire,
            ),
            MultiControlledX(
                wires=second_part + work_wire + target_wire,
                work_wires=first_part,
            ),
            MultiControlledX(
                wires=first_part + work_wire,
                work_wires=second_part + target_wire,
            ),
            MultiControlledX(
                wires=second_part + work_wire + target_wire,
                work_wires=first_part,
            ),
        ]

        return gates
class Barrier(Operation):
    r"""Barrier(wires)
    The Barrier operator, used to separate the compilation process into blocks or as a visual tool.

    **Details:**

    * Number of wires: AnyWires
    * Number of parameters: 0

    Args:
        only_visual (bool): True if we do not want it to have an impact on the compilation process. Default is False.
        wires (Sequence[int] or int): the wires the operation acts on
    """

    num_params = 0
    """int: Number of trainable parameters that the operator depends on."""
    num_wires = AnyWires
    par_domain = None

    def __init__(self, only_visual=False, wires=Wires([]), do_queue=True, id=None):
        # Store the flag both as an attribute and as a hyperparameter so that
        # it survives operator copies and queuing.
        self.only_visual = only_visual
        self.hyperparameters["only_visual"] = only_visual
        super().__init__(wires=wires, do_queue=do_queue, id=id)

    @staticmethod
    def compute_decomposition(wires, only_visual=False):  # pylint: disable=unused-argument
        r"""Representation of the operator as a product of other operators (static method).

        .. math:: O = O_1 O_2 \dots O_n.

        .. seealso:: :meth:`~.Barrier.decomposition`.

        ``Barrier`` decomposes into an empty list for all arguments.

        Args:
            wires (Iterable, Wires): wires that the operator acts on
            only_visual (Bool): True if we do not want it to have an impact on the compilation process. Default is False.

        Returns:
            list: decomposition of the operator

        **Example:**

        >>> print(qml.Barrier.compute_decomposition(0))
        []
        """
        # A barrier carries no unitary action, so it decomposes to nothing.
        return []

    def label(self, decimals=None, base_label=None, cache=None):
        """Barriers are always drawn as ``||``."""
        return "||"

    def _controlled(self, _):
        # Controlling a barrier has no effect; return an equivalent barrier.
        return Barrier(wires=self.wires)

    def adjoint(self, do_queue=True):
        """A barrier is its own adjoint."""
        return Barrier(wires=self.wires, do_queue=do_queue)

    def pow(self, z):
        """Any power of a barrier is just the barrier itself."""
        return [self.__copy__()]
class WireCut(Operation):
    r"""WireCut(wires)
    The wire cut operation, used to manually mark locations for wire cuts.

    .. note::

        This operation is designed for use as part of the circuit cutting workflow.
        Check out the :func:`qml.cut_circuit() <pennylane.cut_circuit>` transform for more details.

    **Details:**

    * Number of wires: AnyWires
    * Number of parameters: 0

    Args:
        wires (Sequence[int] or int): the wires the operation acts on
    """

    num_params = 0
    num_wires = AnyWires
    # Pure placeholder, so there is nothing to differentiate.
    grad_method = None

    @staticmethod
    def compute_decomposition(wires):  # pylint: disable=unused-argument
        r"""Representation of the operator as a product of other operators (static method).

        Since this operator is a placeholder inside a circuit, it decomposes into an empty list.

        Args:
            wires (Any, Wires): Wire that the operator acts on.

        Returns:
            list[Operator]: decomposition of the operator

        **Example:**

        >>> print(qml.WireCut.compute_decomposition(0))
        []
        """
        # The marker has no unitary action of its own.
        return []

    def label(self, decimals=None, base_label=None, cache=None):
        """Wire cuts are always drawn as ``//``."""
        return "//"

    def adjoint(self):
        """The adjoint of a placeholder is an equivalent placeholder."""
        return WireCut(wires=self.wires)

    def pow(self, z):
        """Any power of the marker is just the marker itself."""
        return [self.__copy__()]
| 29.675307 | 134 | 0.573694 |
a0899e2cbcbfe8ce3e7889761e06a2f46c756665 | 266 | py | Python | simforest/__init__.py | megaduks/SimilarityForest | f85665830e29bc97c954742ef5952bc6336df52e | [
"MIT"
] | 1 | 2019-05-27T00:32:01.000Z | 2019-05-27T00:32:01.000Z | simforest/__init__.py | megaduks/SimilarityForest | f85665830e29bc97c954742ef5952bc6336df52e | [
"MIT"
] | null | null | null | simforest/__init__.py | megaduks/SimilarityForest | f85665830e29bc97c954742ef5952bc6336df52e | [
"MIT"
] | 1 | 2019-05-27T12:50:58.000Z | 2019-05-27T12:50:58.000Z | """
Similarity Forest implementation based on
'Similarity Forests', Saket Sathe and Charu C. Aggarwal, KDD 2017 Research Paper
"""
from ._simforest import SimilarityForest
from ._simforest import AxesSampler
__all__ = (
'SimilarityForest',
'AxesSampler'
)
| 20.461538 | 80 | 0.759398 |
ef5a644444309719ffdb7d8764526f2bfa2b8ef2 | 2,598 | py | Python | resto_client/responses/feature_collection_response.py | CNES/resto_client | 7048bd79c739e33882ebd664790dcf0528e81aa4 | [
"Apache-2.0"
] | 6 | 2019-12-20T09:12:30.000Z | 2021-07-08T11:44:55.000Z | resto_client/responses/feature_collection_response.py | CNES/resto_client | 7048bd79c739e33882ebd664790dcf0528e81aa4 | [
"Apache-2.0"
] | null | null | null | resto_client/responses/feature_collection_response.py | CNES/resto_client | 7048bd79c739e33882ebd664790dcf0528e81aa4 | [
"Apache-2.0"
] | 1 | 2019-12-17T20:16:39.000Z | 2019-12-17T20:16:39.000Z | # -*- coding: utf-8 -*-
"""
.. admonition:: License
Copyright 2019 CNES
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
in compliance with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied. See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List # @UnusedImport
from resto_client.base_exceptions import InconsistentResponse, InvalidResponse
from resto_client.entities.resto_feature_collection import RestoFeatureCollection
from .resto_json_response import RestoJsonResponseSimple
class FeatureCollectionResponse(RestoJsonResponseSimple):
    """
    Response received from SearchCollectionRequest.
    """
    needed_fields = ['type', 'features', 'properties']
    optional_fields: List[str] = []

    def identify_response(self) -> None:
        """
        Verify that the response is a valid FeatureCollection geojson object.

        :raises InconsistentResponse: if the dictionary does not contain a valid Resto response.
        :raises InvalidResponse: if fields are not as expected.
        """
        # Step 1: the parent class checks that all needed fields are present.
        super().identify_response()

        # Step 2: enforce the geojson constraints on those fields, which the
        # geojson package does not verify for us.
        response = self._original_response
        if response['type'] != 'FeatureCollection':
            msg = 'Waited a FeatureCollection geojson response. Received a {} response instead.'
            raise InconsistentResponse(msg.format(response['type']))
        if not isinstance(response['features'], list):
            msg = 'features field in a FeatureCollection must be a list. Found a {} instead.'
            raise InvalidResponse(msg.format(response['features']))
        if not isinstance(response['properties'], dict):
            msg = 'properties field in a FeatureCollection must be a dict. Found a {} instead.'
            raise InvalidResponse(msg.format(response['properties']))

    def as_resto_object(self) -> RestoFeatureCollection:
        """
        :returns: the response expressed as a Resto object
        """
        return RestoFeatureCollection(self._normalized_response)
| 43.3 | 100 | 0.718245 |
bcf241daf3b872300e2c3dacaf68a09648ad0184 | 107 | py | Python | 2483.py | barroslipe/urionlinejudge | a20d8199d9a92b30ea394a6c949967d2fc51aa34 | [
"MIT"
] | null | null | null | 2483.py | barroslipe/urionlinejudge | a20d8199d9a92b30ea394a6c949967d2fc51aa34 | [
"MIT"
] | null | null | null | 2483.py | barroslipe/urionlinejudge | a20d8199d9a92b30ea394a6c949967d2fc51aa34 | [
"MIT"
] | null | null | null | n = int(input())
print("Feliz nat", end = "")
for i in range(0, n):
print("a", end = "")
print("l!") | 13.375 | 28 | 0.495327 |
fc1d048ee9ee6103acbd646c67395f86ef46e312 | 7,050 | py | Python | talent/google/cloud/talent_v4beta1/gapic/transports/profile_service_grpc_transport.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
] | 2 | 2021-11-26T07:08:43.000Z | 2022-03-07T20:20:04.000Z | talent/google/cloud/talent_v4beta1/gapic/transports/profile_service_grpc_transport.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
] | 6 | 2019-05-27T22:05:58.000Z | 2019-08-05T16:46:16.000Z | talent/google/cloud/talent_v4beta1/gapic/transports/profile_service_grpc_transport.py | hugovk/google-cloud-python | b387134827dbc3be0e1b431201e0875798002fda | [
"Apache-2.0"
] | 1 | 2019-03-29T18:26:16.000Z | 2019-03-29T18:26:16.000Z | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import google.api_core.grpc_helpers
from google.cloud.talent_v4beta1.proto import profile_service_pb2_grpc
class ProfileServiceGrpcTransport(object):
    """gRPC transport class providing stubs for
    google.cloud.talent.v4beta1 ProfileService API.

    The transport provides access to the raw gRPC stubs,
    which can be used to take advantage of advanced
    features of gRPC.
    """

    # The scopes needed to make gRPC calls to all of the methods defined
    # in this service.
    _OAUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/jobs",
    )

    def __init__(
        self, channel=None, credentials=None, address="jobs.googleapis.com:443"
    ):
        """Instantiate the transport class.

        Args:
            channel (grpc.Channel): A ``Channel`` instance through
                which to make calls. This argument is mutually exclusive
                with ``credentials``; providing both will raise an exception.
            credentials (google.auth.credentials.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            address (str): The address where the service is hosted.
        """
        # If both `channel` and `credentials` are specified, raise an
        # exception (channels come with credentials baked in already).
        if channel is not None and credentials is not None:
            raise ValueError(
                "The `channel` and `credentials` arguments are mutually " "exclusive."
            )

        # Create the channel.
        if channel is None:
            channel = self.create_channel(
                address=address,
                credentials=credentials,
                # -1 removes gRPC's default 4 MB cap on message sizes.
                options={
                    "grpc.max_send_message_length": -1,
                    "grpc.max_receive_message_length": -1,
                }.items(),
            )

        self._channel = channel

        # gRPC uses objects called "stubs" that are bound to the
        # channel and provide a basic method for each RPC.
        self._stubs = {
            "profile_service_stub": profile_service_pb2_grpc.ProfileServiceStub(channel)
        }

    @classmethod
    def create_channel(
        cls, address="jobs.googleapis.com:443", credentials=None, **kwargs
    ):
        """Create and return a gRPC channel object.

        Args:
            address (str): The host for the channel to use.
            credentials (~.Credentials): The
                authorization credentials to attach to requests. These
                credentials identify this application to the service. If
                none are specified, the client will attempt to ascertain
                the credentials from the environment.
            kwargs (dict): Keyword arguments, which are passed to the
                channel creation.

        Returns:
            grpc.Channel: A gRPC channel object.
        """
        # Delegates to api_core so credentials and OAuth scopes are applied
        # uniformly across Google client libraries.
        return google.api_core.grpc_helpers.create_channel(
            address, credentials=credentials, scopes=cls._OAUTH_SCOPES, **kwargs
        )

    @property
    def channel(self):
        """The gRPC channel used by the transport.

        Returns:
            grpc.Channel: A gRPC channel object.
        """
        return self._channel

    @property
    def list_profiles(self):
        """Return the gRPC stub for :meth:`ProfileServiceClient.list_profiles`.

        Lists profiles by filter. The order is unspecified.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["profile_service_stub"].ListProfiles

    @property
    def create_profile(self):
        """Return the gRPC stub for :meth:`ProfileServiceClient.create_profile`.

        Creates and returns a new profile.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["profile_service_stub"].CreateProfile

    @property
    def get_profile(self):
        """Return the gRPC stub for :meth:`ProfileServiceClient.get_profile`.

        Gets the specified profile.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["profile_service_stub"].GetProfile

    @property
    def update_profile(self):
        """Return the gRPC stub for :meth:`ProfileServiceClient.update_profile`.

        Updates the specified profile and returns the updated result.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["profile_service_stub"].UpdateProfile

    @property
    def delete_profile(self):
        """Return the gRPC stub for :meth:`ProfileServiceClient.delete_profile`.

        Deletes the specified profile.

        Prerequisite: The profile has no associated applications or assignments
        associated.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["profile_service_stub"].DeleteProfile

    @property
    def search_profiles(self):
        """Return the gRPC stub for :meth:`ProfileServiceClient.search_profiles`.

        Searches for profiles within a tenant.

        For example, search by raw queries "software engineer in Mountain View"
        or search by structured filters (location filter, education filter,
        etc.).

        See ``SearchProfilesRequest`` for more information.

        Returns:
            Callable: A callable which accepts the appropriate
                deserialized request object and returns a
                deserialized response object.
        """
        return self._stubs["profile_service_stub"].SearchProfiles
| 35.606061 | 88 | 0.640993 |
df7c978c672b8a8c0091bf219a2a7d0cf481a230 | 7,922 | py | Python | parallel_wavegan/bin/preprocess.py | Escaton615/ParallelWaveGAN | e8217d43eebbc31277b713c1ff6a47558c7916fe | [
"MIT"
] | 4 | 2020-09-12T03:09:29.000Z | 2021-01-23T03:52:06.000Z | parallel_wavegan/bin/preprocess.py | arita37/ParallelWaveGAN | bb32b19f9ccb638de670f8b8d3a1dfed13ecf1c3 | [
"MIT"
] | null | null | null | parallel_wavegan/bin/preprocess.py | arita37/ParallelWaveGAN | bb32b19f9ccb638de670f8b8d3a1dfed13ecf1c3 | [
"MIT"
] | 2 | 2020-11-28T10:16:33.000Z | 2020-12-15T13:57:40.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Perform preprocessing and raw feature extraction."""
import argparse
import logging
import os
import librosa
import numpy as np
import soundfile as sf
import yaml
from tqdm import tqdm
from parallel_wavegan.datasets import AudioDataset
from parallel_wavegan.datasets import AudioSCPDataset
from parallel_wavegan.utils import write_hdf5
def logmelfilterbank(audio,
                     sampling_rate,
                     fft_size=1024,
                     hop_size=256,
                     win_length=None,
                     window="hann",
                     num_mels=80,
                     fmin=None,
                     fmax=None,
                     eps=1e-10,
                     ):
    """Compute log-Mel filterbank feature.

    Args:
        audio (ndarray): Audio signal (T,).
        sampling_rate (int): Sampling rate.
        fft_size (int): FFT size.
        hop_size (int): Hop size.
        win_length (int): Window length. If set to None, it will be the same as fft_size.
        window (str): Window function type.
        num_mels (int): Number of mel basis.
        fmin (int): Minimum frequency in mel basis calculation.
        fmax (int): Maximum frequency in mel basis calculation.
        eps (float): Epsilon value to avoid inf in log calculation.

    Returns:
        ndarray: Log Mel filterbank feature (#frames, num_mels).

    """
    # get amplitude spectrogram; transpose to (#frames, #bins)
    x_stft = librosa.stft(audio, n_fft=fft_size, hop_length=hop_size,
                          win_length=win_length, window=window, pad_mode="reflect")
    spc = np.abs(x_stft).T  # (#frames, #bins)

    # get mel basis; defaults cover the full [0, Nyquist] range
    fmin = 0 if fmin is None else fmin
    fmax = sampling_rate / 2 if fmax is None else fmax
    # NOTE: keyword arguments are required by librosa>=0.10 (where
    # librosa.filters.mel became keyword-only) and remain compatible
    # with older librosa versions.
    mel_basis = librosa.filters.mel(sr=sampling_rate, n_fft=fft_size,
                                    n_mels=num_mels, fmin=fmin, fmax=fmax)

    # clip at eps before the log to avoid -inf on silent frames
    return np.log10(np.maximum(eps, np.dot(spc, mel_basis.T)))
def main():
    """Run preprocessing process.

    Reads audio either from a directory of wav files (--rootdir) or from a
    kaldi-style wav.scp (--wav-scp, optionally with --segments), extracts
    log-Mel filterbank features, and dumps matched (waveform, feature) pairs
    to --dumpdir in hdf5 or npy format as configured by the yaml config.

    Raises:
        ValueError: if neither or both of --rootdir / --wav-scp are given,
            or if the configured dump format is unsupported.
    """
    parser = argparse.ArgumentParser(
        description="Preprocess audio and then extract features (See detail in parallel_wavegan/bin/preprocess.py).")
    parser.add_argument("--wav-scp", "--scp", default=None, type=str,
                        help="kaldi-style wav.scp file. you need to specify either scp or rootdir.")
    parser.add_argument("--segments", default=None, type=str,
                        help="kaldi-style segments file. if use, you must to specify both scp and segments.")
    parser.add_argument("--rootdir", default=None, type=str,
                        help="directory including wav files. you need to specify either scp or rootdir.")
    parser.add_argument("--dumpdir", type=str, required=True,
                        help="directory to dump feature files.")
    parser.add_argument("--config", type=str, required=True,
                        help="yaml format configuration file.")
    parser.add_argument("--verbose", type=int, default=1,
                        help="logging level. higher is more logging. (default=1)")
    args = parser.parse_args()

    # set logger
    if args.verbose > 1:
        logging.basicConfig(
            level=logging.DEBUG, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
    elif args.verbose > 0:
        logging.basicConfig(
            level=logging.INFO, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
    else:
        logging.basicConfig(
            level=logging.WARN, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
        logging.warning('Skip DEBUG/INFO messages')

    # load config; command line arguments take precedence over the yaml values
    with open(args.config) as f:
        config = yaml.load(f, Loader=yaml.Loader)
    config.update(vars(args))

    # check arguments: exactly one of --rootdir / --wav-scp must be given
    if (args.wav_scp is not None and args.rootdir is not None) or \
            (args.wav_scp is None and args.rootdir is None):
        raise ValueError("Please specify either --rootdir or --wav-scp.")

    # get dataset
    if args.rootdir is not None:
        dataset = AudioDataset(
            args.rootdir, "*.wav",
            audio_load_fn=sf.read,
            return_utt_id=True,
        )
    else:
        dataset = AudioSCPDataset(
            args.wav_scp,
            segments=args.segments,
            return_utt_id=True,
            return_sampling_rate=True,
        )

    # check directory existence (exist_ok=True makes a prior exists() check redundant)
    os.makedirs(args.dumpdir, exist_ok=True)

    # process each data
    for utt_id, (audio, fs) in tqdm(dataset):
        # sanity checks: mono, normalized amplitude, expected sampling rate
        assert len(audio.shape) == 1, \
            f"{utt_id} seems to be multi-channel signal."
        assert np.abs(audio).max() <= 1.0, \
            f"{utt_id} seems to be different from 16 bit PCM."
        assert fs == config["sampling_rate"], \
            f"{utt_id} seems to have a different sampling rate."

        # trim silence
        if config["trim_silence"]:
            audio, _ = librosa.effects.trim(audio,
                                            top_db=config["trim_threshold_in_db"],
                                            frame_length=config["trim_frame_size"],
                                            hop_length=config["trim_hop_size"])

        if "sampling_rate_for_feats" not in config:
            x = audio
            sampling_rate = config["sampling_rate"]
            hop_size = config["hop_size"]
        else:
            # NOTE(kan-bayashi): this procedure enables to train the model with different
            #   sampling rate for feature and audio, e.g., training with mel extracted
            #   using 16 kHz audio and 24 kHz audio as a target waveform
            x = librosa.resample(audio, fs, config["sampling_rate_for_feats"])
            sampling_rate = config["sampling_rate_for_feats"]
            assert config["hop_size"] * config["sampling_rate_for_feats"] % fs == 0, \
                "hop_size must be int value. please check sampling_rate_for_feats is correct."
            hop_size = config["hop_size"] * config["sampling_rate_for_feats"] // fs

        # extract feature
        mel = logmelfilterbank(x,
                               sampling_rate=sampling_rate,
                               hop_size=hop_size,
                               fft_size=config["fft_size"],
                               win_length=config["win_length"],
                               window=config["window"],
                               num_mels=config["num_mels"],
                               fmin=config["fmin"],
                               fmax=config["fmax"])

        # make sure the audio length and feature length are matched
        audio = np.pad(audio, (0, config["fft_size"]), mode="reflect")
        audio = audio[:len(mel) * config["hop_size"]]
        assert len(mel) * config["hop_size"] == len(audio)

        # apply global gain
        if config["global_gain_scale"] > 0.0:
            audio *= config["global_gain_scale"]
        if np.abs(audio).max() >= 1.0:
            # logging.warn is a deprecated alias of logging.warning
            logging.warning(f"{utt_id} causes clipping. "
                            f"it is better to re-consider global gain scale.")
            continue

        # save
        if config["format"] == "hdf5":
            write_hdf5(os.path.join(args.dumpdir, f"{utt_id}.h5"), "wave", audio.astype(np.float32))
            write_hdf5(os.path.join(args.dumpdir, f"{utt_id}.h5"), "feats", mel.astype(np.float32))
        elif config["format"] == "npy":
            np.save(os.path.join(args.dumpdir, f"{utt_id}-wave.npy"),
                    audio.astype(np.float32), allow_pickle=False)
            np.save(os.path.join(args.dumpdir, f"{utt_id}-feats.npy"),
                    mel.astype(np.float32), allow_pickle=False)
        else:
            raise ValueError("support only hdf5 or npy format.")
if __name__ == "__main__":
main()
| 40.418367 | 117 | 0.581293 |
0e9935c81b33d3fa5ac2e3a8eb3a8f21923a5633 | 1,437 | py | Python | load_loss.py | liminghao0914/involution_GAN | f049f76a49fc99d56efac4519f0b87931c2dcdcd | [
"MIT"
] | 3 | 2021-05-18T05:46:10.000Z | 2021-05-26T07:37:23.000Z | load_loss.py | liminghao0914/involution_GAN | f049f76a49fc99d56efac4519f0b87931c2dcdcd | [
"MIT"
] | null | null | null | load_loss.py | liminghao0914/involution_GAN | f049f76a49fc99d56efac4519f0b87931c2dcdcd | [
"MIT"
] | 1 | 2021-05-17T14:31:03.000Z | 2021-05-17T14:31:03.000Z | import os
import json
import matplotlib.pyplot as plt
import numpy as np
# Plot the discriminator's EMD loss curve (without the gradient-penalty term)
# from a single-line JSON training log, on a log-scaled y axis.
subpath = 'celeb_igan_3.0'
file = os.path.join("..", "Baseline GAN", "logs", subpath, 'lossv1.log')
with open(file, 'r') as f:
    # the whole log is serialized as one JSON object on the first line
    s = f.readline()
s = s.strip()
log = json.loads(s)
# print(len(log['lossG']))
# print(len(log['lossD']))
# x axes: losses were logged every 10 training steps; keep the first 5000 records
x_g = np.arange(1, len(log['lossg'][:5000]) * 10, 10)
x_d = np.arange(1, len(log['lossd'][:5000]) * 10, 10)
x_gp = np.arange(1, len(log['gp'][:5000]) * 10, 10)
# lossg, = plt.plot(x_g, np.array(log['lossg']), color="red")
# lossd, = plt.plot(x_d, np.array(log['lossd']), color="blue")
# gp, = plt.plot(x_gp, np.array(log['gp']), color="green")
plt.axes(yscale="log")
# plotted quantity: -(lossd) - gp, i.e. the critic loss with the gradient
# penalty removed (presumably the Wasserstein/EMD estimate -- confirm with
# the training code that produced the log)
loss = plt.plot(x_d, -np.array(log['lossd'][:5000]) - np.array(log['gp'][:5000]))
# lossg,=plt.plot(np.array(log['lossg']), color="red")
# lossd,=plt.plot(np.array(log['lossd']), color="blue")
# gp,=plt.plot(np.array(log['gp']), color="yellow")
# for i in range(30, 1000):
#     if log['lossD'][i] > -5:
#         log['lossD'][i] = 0.6*log['lossD'][i]+0.4*log['lossD'][i-1]
plt.xlabel('Training Steps')
plt.ylabel('EMD Loss')
# plt.legend(['loss G, loss D, penalty'])
# plt.legend(labels=['lossg', 'lossd', 'gp'])
# plt.ylim([-200000, -10])
# plt.plot(np.array(log['lossD'])[150:], color='r')
# plt.plot(log['lossG'], color='g')
# plt.yscale('symlog')
pic = os.path.join("..", "Baseline GAN", "logs", subpath, subpath + '.png')
plt.savefig(pic)
plt.show()
| 33.418605 | 81 | 0.607516 |
07f289f83e328437223c1d883900e2dae6167e46 | 6,650 | py | Python | continual_rl/policies/progress_and_compress/progress_and_compress_monobeast.py | AGI-Labs/continual_rl | bcf17d879e8a983340be233ff8f740c424d0f303 | [
"MIT"
] | 19 | 2021-07-27T05:20:09.000Z | 2022-02-27T07:12:05.000Z | continual_rl/policies/progress_and_compress/progress_and_compress_monobeast.py | AGI-Labs/continual_rl | bcf17d879e8a983340be233ff8f740c424d0f303 | [
"MIT"
] | 2 | 2021-11-05T07:36:50.000Z | 2022-03-11T00:21:50.000Z | continual_rl/policies/progress_and_compress/progress_and_compress_monobeast.py | AGI-Labs/continual_rl | bcf17d879e8a983340be233ff8f740c424d0f303 | [
"MIT"
] | 3 | 2021-10-20T06:04:35.000Z | 2022-03-06T22:59:36.000Z | import torch
import threading
import os
import json
from torch.nn import functional as F
from continual_rl.policies.ewc.ewc_monobeast import EWCMonobeast
class ProgressAndCompressMonobeast(EWCMonobeast):
    """
    Progress and Compress leverages Online EWC (implemented in EWCMonobeast). We just modify it such that
    the knowledge base is what is updated using the EWC loss.

    Training alternates between two phases per task:
      - "progress": the active column is trained with the normal IMPALA loss;
      - "compress": the knowledge base (KB) is trained with EWC plus a KL
        distillation term towards the active column.
    """
    def __init__(self, model_flags, observation_space, action_spaces, policy_class):
        super().__init__(model_flags, observation_space, action_spaces, policy_class)
        # Steps since the last detected task boundary; decides progress vs compress.
        self._train_steps_since_boundary = 0
        self._previous_pnc_task_id = None  # Distinct from ewc's _prev_task_id
        # Guards the two counters above (compute_loss may run on learner threads).
        self._step_count_lock = threading.Lock()

    def save(self, output_path):
        """Persist base state plus the P&C phase counters as JSON metadata."""
        super().save(output_path)
        pnc_metadata_path = os.path.join(output_path, "pnc_metadata.json")
        metadata = {"prev_pnc_task_id": self._previous_pnc_task_id,
                    "train_steps_since_boundary": self._train_steps_since_boundary}
        with open(pnc_metadata_path, "w+") as pnc_file:
            json.dump(metadata, pnc_file)

    def load(self, output_path):
        """Restore base state and, if present, the P&C phase counters."""
        super().load(output_path)
        pnc_metadata_path = os.path.join(output_path, "pnc_metadata.json")
        if os.path.exists(pnc_metadata_path):
            self.logger.info(f"Loading pnc metdata from {pnc_metadata_path}")
            with open(pnc_metadata_path, "r") as pnc_file:
                metadata = json.load(pnc_file)
            self._previous_pnc_task_id = metadata["prev_pnc_task_id"]
            self._train_steps_since_boundary = metadata["train_steps_since_boundary"]

    def _compute_kl_div_loss(self, input, target):
        """Return summed KL(target || input) over policy logits.

        NOTE(review): the local names look swapped relative to their contents:
        ``old_policy`` holds log-probs of *input* (the KB) and
        ``curr_log_policy`` actually holds *probabilities* of the target --
        behavior matches KLDivLoss's (log-probs, probs) contract, only the
        names are misleading.
        """
        # KLDiv requires inputs to be log-probs, and targets to be probs
        old_policy = F.log_softmax(input, dim=-1)
        curr_log_policy = F.softmax(target, dim=-1)
        kl_loss = torch.nn.KLDivLoss(reduction='sum')(old_policy, curr_log_policy.detach())
        return kl_loss

    def knowledge_base_loss(self, task_flags, model, initial_agent_state):
        """EWC loss on the KB plus a scaled KL distillation term from the
        active column, computed on a replay-buffer sample of the current task.
        Only the KB receives gradients (active-column outputs are detached)."""
        ewc_loss, ewc_stats = super().custom_loss(task_flags, model.knowledge_base, initial_agent_state)

        # Additionally, minimize KL divergence between KB and active column (only updating KB)
        replay_buffer_subset = self._sample_from_task_replay_buffer(task_flags.task_id, self._model_flags.batch_size)

        with torch.no_grad():
            targets, _ = model(replay_buffer_subset, task_flags.action_space_id)

        knowledge_base_outputs, _ = model.knowledge_base(replay_buffer_subset, task_flags.action_space_id)
        kl_div_loss = self._compute_kl_div_loss(input=knowledge_base_outputs['policy_logits'],
                                                target=targets['policy_logits'].detach())

        total_loss = ewc_loss + self._model_flags.kl_div_scale * kl_div_loss
        ewc_stats.update({"kl_div_loss": kl_div_loss.item()})
        return total_loss, ewc_stats

    def compute_loss(self, model_flags, task_flags, learner_model, batch, initial_agent_state, with_custom_loss=True):
        """
        Sometimes we want to turn off normal loss computation entirely, so controlling that here.
        During the "wake" part of the cycle, we use the normal compute_loss to update the active column (AC).
        During "sleep" we use EWC+KL to update the knowledge base (KB).

        The P&C paper does not report on cadence/length of the phases, but after discussion with an author,
        we're assuming sleep starts after num_train_steps_of_progress number of training steps, and lasts the rest of
        the task. (In the paper num_train_steps_of_progress is apparently half of the total steps of the task, and
        only the compress datapoints are plotted in Fig 4.)
        We are assuming it is happening alongside continued data collection because the paper references "rewards
        collected during the compress phase".
        """
        # Because we're not going through the normal EWC path
        # self._prev_task_id doesn't get initialized early enough, so force it here
        if self._prev_task_id is None:
            super().custom_loss(task_flags, learner_model.knowledge_base, initial_agent_state)

        # Only kick off KB training after we switch to a new task, not including the first one. This is
        # being used as boundary detection.
        with self._step_count_lock:
            current_task_id = task_flags.task_id
            self.logger.info(f"Prev id: {self._previous_pnc_task_id}, current id: {current_task_id}, steps since boundary: {self._train_steps_since_boundary}")
            if self._previous_pnc_task_id is not None and current_task_id != self._previous_pnc_task_id:
                self.logger.info("Boundary detected, resetting active column and starting Progress.")
                self._train_steps_since_boundary = 0

                # We have entered a new task. Since the model passed in is the learner model, just reset it.
                # The active column will be updated after this.
                learner_model.reset_active_column()

            self._previous_pnc_task_id = current_task_id

        if self._train_steps_since_boundary <= self._model_flags.num_train_steps_of_progress:
            # "Progress" phase: train the active column with the normal loss.
            if self._model_flags.use_collection_pause:
                super().set_pause_collection_state(False)  # Active column training should result in EWC data collection

            # This is the "active column" training setting. The custom loss here would be EWC, so don't include it
            loss, stats, pg_loss, baseline_loss = super().compute_loss(model_flags, task_flags, learner_model, batch, initial_agent_state, with_custom_loss=False)
        else:
            # "Compress" phase: distill the active column into the knowledge base.
            self.logger.info("Compressing...")
            if self._model_flags.use_collection_pause:
                super().set_pause_collection_state(True)  # Don't collect data while compressing

            # This is the "knowledge base" training setting
            loss, stats = self.knowledge_base_loss(task_flags, learner_model, initial_agent_state)
            pg_loss = 0  # No policy gradient when updating the knowledge base
            baseline_loss = 0

            # Monobeast expects these keys. Since we're bypassing the normal loss, add them into stats just as fakes (0)
            extra_keys = ["pg_loss", "baseline_loss", "entropy_loss"]
            for key in extra_keys:
                assert key not in stats
                stats[key] = 0

        with self._step_count_lock:
            self._train_steps_since_boundary += 1

        return loss, stats, pg_loss, baseline_loss
27099dca1297f24c5540128df592b4ff07555b99 | 679 | py | Python | python-module/numpy_test_cached.py | pospielov/Linear-Algebra-in-Python-and-Java-Research | 833f22b510c65ec5059543e093e140b033173bda | [
"Apache-2.0"
] | 1 | 2020-09-08T07:45:07.000Z | 2020-09-08T07:45:07.000Z | python-module/numpy_test_cached.py | pospielov/Linear-Algebra-in-Python-and-Java-Research | 833f22b510c65ec5059543e093e140b033173bda | [
"Apache-2.0"
] | null | null | null | python-module/numpy_test_cached.py | pospielov/Linear-Algebra-in-Python-and-Java-Research | 833f22b510c65ec5059543e093e140b033173bda | [
"Apache-2.0"
] | null | null | null | import numpy as np
import timeit
embeddings = np.genfromtxt("embeddings.txt", delimiter=',')
embeddings2 = embeddings[1:10001]
embeddings2 = embeddings2/np.linalg.norm(embeddings2, axis=1, keepdims=True)
def test():
embeddings1 = embeddings[0:1]
embeddings1 = embeddings1/np.linalg.norm(embeddings1, axis=1, keepdims=True)
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
# print(np.argmax(dist));
return np.max(dist)
# print(test())
print("started")
print(timeit.repeat("test()", setup="from __main__ import test; gc.enable();", number=1000, repeat=3))
# [16.546649905999402, 16.559010640999986, 16.606338937000146]
| 28.291667 | 103 | 0.717231 |
6a46041ac89892aa4979c423675f204c584f4bfe | 1,167 | py | Python | ansiblemetrics/playbook/avg_play_size.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
] | 1 | 2020-04-24T16:09:14.000Z | 2020-04-24T16:09:14.000Z | ansiblemetrics/playbook/avg_play_size.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
] | null | null | null | ansiblemetrics/playbook/avg_play_size.py | radon-h2020/AnsibleMetrics | 8a8e27d9b54fc1578d00526c8663184a2e686cb2 | [
"Apache-2.0"
] | null | null | null | import yaml
from ansiblemetrics.ansible_metric import AnsibleMetric
from ansiblemetrics.general.lines_code import LinesCode
from ansiblemetrics.playbook.num_plays import NumPlays
class AvgPlaySize(AnsibleMetric):
    """This class measures a play's average number of lines of code."""

    def count(self):
        """Return the average play size, rounded to the nearest unit.

        Returns 0 when the playbook contains no plays.

        Example
        -------
        .. highlight:: python
        .. code-block:: python

            from ansiblemetrics.general.avg_play_size import AvgPlaySize

            playbook = '''
            ---
            # 1st play
            - hosts: all
              roles:
              - common

            # 2nd play
            - hosts: monitoring
              roles:
              - base-apache
              - nagios
            '''

            AvgPlaySize(playbook).count()

            >> 4

        Returns
        -------
        int
            Average play size, rounded to the nearest unit
        """
        serialized = yaml.dump(self.playbook)
        total_lines = LinesCode(serialized).count()
        total_plays = NumPlays(serialized).count()
        if total_plays > 0:
            return round(total_lines / total_plays)
        return 0
9e7d4dc0d8220552449e489669451f9947bcfff6 | 3,828 | py | Python | example/image-classification/symbols/alexnet_fp16.py | axbaretto/mxnet | 5f593885356ff6d14f5519fa18e79b944beb51cd | [
"Apache-2.0"
] | 9 | 2017-07-13T03:12:24.000Z | 2021-11-10T16:15:27.000Z | example/image-classification/symbols/alexnet_fp16.py | yanghaojin/BMXNet | 102f8d0ed59529bbd162c37bf07ae58ad6c4caa1 | [
"Apache-2.0"
] | 3 | 2017-07-10T21:49:18.000Z | 2017-07-12T22:40:06.000Z | example/image-classification/symbols/alexnet_fp16.py | yanghaojin/BMXNet | 102f8d0ed59529bbd162c37bf07ae58ad6c4caa1 | [
"Apache-2.0"
] | 11 | 2018-02-27T15:32:09.000Z | 2021-04-21T08:48:17.000Z | """
Reference:
Krizhevsky, Alex, Ilya Sutskever, and Geoffrey E. Hinton. "Imagenet classification with deep convolutional neural networks." Advances in neural information processing systems. 2012.
"""
import mxnet as mx
import numpy as np
def get_symbol(num_classes, **kwargs):
    """Build the AlexNet symbol computing in float16.

    The input and label are cast to float16, and every convolution /
    fully-connected weight and bias Variable is explicitly declared with
    dtype float16 so the whole graph runs in half precision.

    :param num_classes: number of output units of the final classifier layer
    :param kwargs: unused; accepted for signature compatibility with the
        other symbol factories in this directory
    :return: the softmax output symbol
    """
    input_data = mx.symbol.Variable(name="data")
    # cast input to fp16 so all downstream operators run in half precision
    input_data = mx.symbol.Cast(data=input_data, dtype=np.float16)
    # stage 1: 11x11/4 conv -> relu -> local response norm -> 3x3/2 max pool
    weight = mx.symbol.Variable(name='conv1_weight', dtype=np.float16)
    bias = mx.symbol.Variable(name='conv1_bias', dtype=np.float16)
    conv1 = mx.symbol.Convolution(name='conv1',
                                  data=input_data, weight=weight, bias=bias, kernel=(11, 11), stride=(4, 4), num_filter=96)
    relu1 = mx.symbol.Activation(data=conv1, act_type="relu")
    lrn1 = mx.symbol.LRN(data=relu1, alpha=0.0001, beta=0.75, knorm=2, nsize=5)
    pool1 = mx.symbol.Pooling(
        data=lrn1, pool_type="max", kernel=(3, 3), stride=(2,2))
    # stage 2: 5x5 conv (pad 2) -> relu -> LRN -> 3x3/2 max pool
    weight = mx.symbol.Variable(name='conv2_weight', dtype=np.float16)
    bias = mx.symbol.Variable(name='conv2_bias', dtype=np.float16)
    conv2 = mx.symbol.Convolution(name='conv2',
                                  data=pool1, weight=weight, bias=bias, kernel=(5, 5), pad=(2, 2), num_filter=256)
    relu2 = mx.symbol.Activation(data=conv2, act_type="relu")
    lrn2 = mx.symbol.LRN(data=relu2, alpha=0.0001, beta=0.75, knorm=2, nsize=5)
    pool2 = mx.symbol.Pooling(data=lrn2, kernel=(3, 3), stride=(2, 2), pool_type="max")
    # stage 3: three stacked 3x3 convs (384, 384, 256) -> 3x3/2 max pool
    weight = mx.symbol.Variable(name='conv3_weight', dtype=np.float16)
    bias = mx.symbol.Variable(name='conv3_bias', dtype=np.float16)
    conv3 = mx.symbol.Convolution(name='conv3',
                                  data=pool2, weight=weight, bias=bias, kernel=(3, 3), pad=(1, 1), num_filter=384)
    relu3 = mx.symbol.Activation(data=conv3, act_type="relu")
    weight = mx.symbol.Variable(name='conv4_weight', dtype=np.float16)
    bias = mx.symbol.Variable(name='conv4_bias', dtype=np.float16)
    conv4 = mx.symbol.Convolution(name='conv4',
                                  data=relu3, weight=weight, bias=bias, kernel=(3, 3), pad=(1, 1), num_filter=384)
    relu4 = mx.symbol.Activation(data=conv4, act_type="relu")
    weight = mx.symbol.Variable(name='conv5_weight', dtype=np.float16)
    bias = mx.symbol.Variable(name='conv5_bias', dtype=np.float16)
    conv5 = mx.symbol.Convolution(name='conv5',
                                  data=relu4, weight=weight, bias=bias, kernel=(3, 3), pad=(1, 1), num_filter=256)
    relu5 = mx.symbol.Activation(data=conv5, act_type="relu")
    pool3 = mx.symbol.Pooling(data=relu5, kernel=(3, 3), stride=(2, 2), pool_type="max")
    # stage 4: flatten -> FC-4096 -> relu -> dropout(0.5)
    flatten = mx.symbol.Flatten(data=pool3)
    weight = mx.symbol.Variable(name='fc1_weight', dtype=np.float16)
    bias = mx.symbol.Variable(name='fc1_bias', dtype=np.float16)
    fc1 = mx.symbol.FullyConnected(name='fc1', data=flatten, weight=weight, bias=bias,
                                   num_hidden=4096)
    relu6 = mx.symbol.Activation(data=fc1, act_type="relu")
    dropout1 = mx.symbol.Dropout(data=relu6, p=0.5)
    # stage 5: second FC-4096 -> relu -> dropout(0.5)
    weight = mx.symbol.Variable(name='fc2_weight', dtype=np.float16)
    bias = mx.symbol.Variable(name='fc2_bias', dtype=np.float16)
    fc2 = mx.symbol.FullyConnected(name='fc2', data=dropout1, weight=weight, bias=bias,
                                   num_hidden=4096)
    relu7 = mx.symbol.Activation(data=fc2, act_type="relu")
    dropout2 = mx.symbol.Dropout(data=relu7, p=0.5)
    # stage 6: classifier FC -> softmax output
    weight = mx.symbol.Variable(name='fc3_weight', dtype=np.float16)
    bias = mx.symbol.Variable(name='fc3_bias', dtype=np.float16)
    fc3 = mx.symbol.FullyConnected(name='fc3', data=dropout2, weight=weight, bias=bias,
                                   num_hidden=num_classes)
    label = mx.symbol.Variable(name='softmax_label')
    # label must also be fp16 to match the fp16 output of fc3
    label = mx.symbol.Cast(data=label, dtype=np.float16)
    softmax = mx.symbol.SoftmaxOutput(data=fc3, name='softmax', label=label)
    return softmax
| 54.685714 | 181 | 0.690439 |
11387a0d4b2042a952f8e7b3a8f77fbd5f99e49b | 3,096 | py | Python | src/robot/running/handlerstore.py | Kompakti/robotframework | 3ac75d5212f544018ef1cc99a8b68c222715df5f | [
"ECL-2.0",
"Apache-2.0"
] | 11 | 2017-09-30T05:47:28.000Z | 2019-04-15T11:58:40.000Z | src/robot/running/handlerstore.py | Kompakti/robotframework | 3ac75d5212f544018ef1cc99a8b68c222715df5f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/robot/running/handlerstore.py | Kompakti/robotframework | 3ac75d5212f544018ef1cc99a8b68c222715df5f | [
"ECL-2.0",
"Apache-2.0"
] | 7 | 2018-02-13T10:22:39.000Z | 2019-07-04T07:39:28.000Z | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from operator import attrgetter
from robot.errors import DataError
from robot.utils import NormalizedDict
from .usererrorhandler import UserErrorHandler
class HandlerStore(object):
    """Stores the keyword handlers of one source (library/test file/resource).

    Normal keywords live in a name-indexed dict (lookups ignore underscores);
    embedded-argument keywords live in a list and are resolved by matching a
    used name against each template.
    """
    TEST_LIBRARY_TYPE = 'Test library'
    TEST_CASE_FILE_TYPE = 'Test case file'
    RESOURCE_FILE_TYPE = 'Resource file'

    def __init__(self, source, source_type):
        self.source = source
        self.source_type = source_type
        self._normal = NormalizedDict(ignore='_')
        self._embedded = []

    def add(self, handler, embedded=False):
        """Register a handler; raise DataError on a duplicate normal name.

        On a duplicate the stored handler is replaced by a UserErrorHandler
        first, so a later attempt to run the keyword reports the duplication
        error instead of silently running one of the definitions.
        """
        if embedded:
            self._embedded.append(handler)
        elif handler.name not in self._normal:
            self._normal[handler.name] = handler
        else:
            error = 'Keyword with same name defined multiple times.'
            self._normal[handler.name] = UserErrorHandler(handler.name, error,
                                                          handler.libname)
            raise DataError(error)

    def __iter__(self):
        # Iterate all handlers (normal + embedded) sorted by name.
        handlers = list(self._normal.values()) + self._embedded
        return iter(sorted(handlers, key=attrgetter('name')))

    def __len__(self):
        return len(self._normal) + len(self._embedded)

    def __contains__(self, name):
        """True if `name` matches a normal keyword or any embedded template."""
        if name in self._normal:
            return True
        return any(template.matches(name) for template in self._embedded)

    def create_runner(self, name):
        return self[name].create_runner(name)

    def __getitem__(self, name):
        # Normal keywords take precedence; fall back to embedded matching.
        try:
            return self._normal[name]
        except KeyError:
            return self._find_embedded(name)

    def _find_embedded(self, name):
        """Return the single embedded handler matching `name`, or raise."""
        embedded = [template for template in self._embedded
                    if template.matches(name)]
        if len(embedded) == 1:
            return embedded[0]
        self._raise_no_single_match(name, embedded)

    def _raise_no_single_match(self, name, found):
        """Raise DataError describing zero or multiple embedded matches."""
        if self.source_type == self.TEST_CASE_FILE_TYPE:
            source = self.source_type
        else:
            source = "%s '%s'" % (self.source_type, self.source)
        if not found:
            raise DataError("%s contains no keywords matching name '%s'."
                            % (source, name))
        error = ["%s contains multiple keywords matching name '%s':"
                 % (source, name)]
        names = sorted(handler.name for handler in found)
        raise DataError('\n    '.join(error + names))
1e97d79f284b33e22c02aa1aec4412a46c8c5e96 | 7,058 | py | Python | bot.py | bcbwilla/bcbbot | 4dbfd011aed270464391c33129a46f4c362999d8 | [
"MIT"
] | 1 | 2016-03-03T17:37:27.000Z | 2016-03-03T17:37:27.000Z | bot.py | bcbwilla/bcbbot | 4dbfd011aed270464391c33129a46f4c362999d8 | [
"MIT"
] | null | null | null | bot.py | bcbwilla/bcbbot | 4dbfd011aed270464391c33129a46f4c362999d8 | [
"MIT"
] | null | null | null | import sys
import time
import socket
import inspect
import logging
import itertools
import yaml
import requests
import commands
class IRC(object):
    """Thin wrapper around a TCP socket exposing the handful of IRC commands
    the bot needs (PASS/NICK/USER/JOIN/PONG/PRIVMSG)."""

    def __init__(self):
        # Plain IPv4 TCP socket; the connection is opened later via connect().
        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def connect(self, host, port):
        """Open the TCP connection to the IRC server."""
        self._socket.connect((host, port))

    def password(self, password):
        """Send the server password (Twitch OAuth token)."""
        self._socket.send('PASS %s\n' % password)

    def nick(self, nick):
        """Send the desired nickname."""
        self._socket.send('NICK %s\n' % nick)

    def user(self, un):
        """Register the user; IRC expects USER <user> <mode> <unused> :<realname>."""
        self._socket.send('USER {0} {0} {0} :{0}\n'.format(un))

    def join(self, channel):
        """Join a channel (the '#' prefix is added here)."""
        self._socket.send('JOIN #%s\n' % channel)

    def pong(self, msg):
        """Reply to a server PING to keep the connection alive."""
        self._socket.send('PONG %s\r\n' % msg)

    def recv(self, recv_size=1024):
        """Read up to recv_size bytes from the server."""
        return self._socket.recv(recv_size)

    def privmsg(self, target, message):
        """Send a chat message to a channel."""
        self._socket.send('PRIVMSG #' + target + ' :' + message + '\n')

    def close(self):
        """Close the underlying socket."""
        self._socket.close()
class BcbBot(object):
    """ BcbBot: a Twitch IRC chat bot that listens to one channel, dispatches
    '!'-prefixed chat commands to pluggable command objects, and periodically
    refreshes the channel's viewer list from the Twitch chatters endpoint. """
    def __init__(self, username, password, channel, port=6667, host='irc.twitch.tv'):
        """
        :param username: bot username
        :param password: twitch api password
        :param channel: channel the bot listens to
        :param port: irc port
        :param host: irc address
        """
        self.username = username
        self.password = password
        self.channel = channel
        self.host = host
        self.port = port

        # recent commands: list of (command_name, timestamp) used for rate limiting
        self.command_history = []
        # command objects keyed by lower-cased class name
        self.commands = {}
        # direct irc connection to chat
        self.irc = IRC()
        self.load_commands()
        # users in chat (filled by update_chatters)
        self.chatters = None
        self.chatter_count = None
        self.update_chatters()
        self.last_poll_time = None

    def run(self):
        """ main run method: connect, listen until interrupted, then clean up """
        logging.info('Starting.')
        self.connect()
        try:
            self.listen()
        except KeyboardInterrupt:
            logging.info('Terminating bot.')
        finally:
            self.disconnect()

    def connect(self):
        """ handles logging into Twitch irc chat """
        logging.info('Connecting to %s' % self.channel)
        self.irc.connect(self.host, self.port)
        self.irc.password(self.password)
        self.irc.nick(self.username)
        self.irc.user(self.username)
        self.irc.join(self.channel)
        logging.info('Connected to %s' % self.channel)

    def disconnect(self):
        """ disconnects from Twitch irc chat """
        self.irc.close()

    def listen(self):
        """ listen to Twitch chat
        based on http://archive.oreilly.com/pub/h/1968
        """
        logging.info('Listening to %s.' % self.channel)
        readbuffer = ''
        while True:
            readbuffer += self.irc.recv()
            # split on newlines; the last element may be a partial line, so it
            # is pushed back into the buffer for the next recv()
            temp = readbuffer.split("\n")
            readbuffer = temp.pop()
            for line in temp:
                line = line.rstrip().split()
                # NOTE(review): an empty line would make line[0] raise
                # IndexError here -- confirm the server never sends one.
                if line[0] == "PING":
                    self.irc.pong(line[1])
                else:
                    self.process_data(line)
            self.poll()

    def process_data(self, line):
        """ process data line and execute command if needed
        :param line: irc chat line, already split on whitespace
        """
        # expects at least ':nick!user@host PRIVMSG #channel :text...'
        if len(line) <= 3:
            return
        # ':nick!user@host' -> 'nick'
        sender = line[0].split('!')[0][1:]
        # rejoin the message body and strip the leading ':'
        text = ' '.join(line[3:])[1:]
        # handle command
        if text.startswith('!'):
            logging.info('<%s> %s' % (sender, text))
            s = text.split()
            command = s[0][1:]
            arg = ' '.join(s[1:])
            self.process_command(sender, command, arg)

    def process_command(self, sender, command, argument):
        """ process command
        :param sender: username of command sender
        :param command: command name (without the '!' prefix)
        :param argument: command arguments
        """
        logging.info('Trying to handle command %s with %s sent by %s' % (command, argument, sender))
        if self.can_command():
            try:
                self.commands[command.lower()].execute(sender, argument)
                self.command_history.append((command, time.time()))
            except Exception as e:
                logging.warning('Unable to handle command %s: %s' % (command, str(e)))
        else:
            logging.warning('Too many commands, unable to process %s.' % command)

        # clean up command history to keep only commands in last 30 seconds
        # (history is append-ordered, so dropwhile trims the old entries)
        self.command_history = list(itertools.dropwhile(lambda x: time.time()-x[1] > 30, self.command_history))

    def chat(self, msg):
        """ send message to channel
        :param msg: message to send to chat
        """
        self.irc.privmsg(self.channel, msg)

    def poll(self):
        """ update self data and execute periodic commands """
        if not self.last_poll_time:
            self.last_poll_time = time.time()

        # update viewer list at most every 10 seconds
        elapsed_time = time.time() - self.last_poll_time
        if elapsed_time > 10:
            self.update_chatters()
            self.last_poll_time = time.time()

        # execute periodic commands (those flagged with a truthy .periodic)
        for command in self.commands.values():
            if hasattr(command, 'periodic') and command.periodic:
                command.execute(None, None)

    def can_command(self):
        """ make sure bot doesn't do too many things too quickly:
        allow a command only if the average rate over the retained history
        is below 0.66 commands/second """
        if len(self.command_history) > 0:
            now = time.time()
            return (len(self.command_history)/float((now-self.command_history[0][1]))) < 0.66
        else:
            return True

    def update_chatters(self):
        """ get users in channel via the (unauthenticated) tmi chatters endpoint """
        r = requests.get('http://tmi.twitch.tv/group/user/%s/chatters' % self.channel)
        if r.ok:
            j = r.json()
            self.chatter_count = j['chatter_count']
            self.chatters = j['chatters']

    def load_commands(self):
        """ instantiate a command object for each command in the commands directory """
        # load commands from command classes in commands directory
        class_members = inspect.getmembers(sys.modules['commands'], inspect.isclass)
        for m in class_members:
            # NOTE(review): issubclass also matches CommandBase itself, so the
            # abstract base is instantiated and registered too -- confirm this
            # is intended (a `m[1] is not commands.CommandBase` guard may be
            # missing).
            if issubclass(m[1], commands.CommandBase):
                name = m[0].lower()
                self.commands[name] = m[1](self)
        logging.info('Loaded commands %s' % str(self.commands.keys()))
if __name__ == '__main__':
    # Entry point: read the YAML config named on the command line and run the bot.
    if len(sys.argv) < 2:
        print('Please provide a configuration file name!')
    else:
        try:
            with open(sys.argv[1]) as fh:
                # safe_load avoids arbitrary-object construction from the config
                # file and is required on PyYAML >= 6, where yaml.load() without
                # an explicit Loader raises a TypeError.
                config = yaml.safe_load(fh)
        except IOError:
            print('Config file not found!')
            sys.exit(0)

        logging.basicConfig(filename='bcbbot.log', level=logging.DEBUG)
        d = BcbBot(config['username'], config['password'], config['channel'])
        d.run()
| 30.820961 | 111 | 0.566591 |
f9fe33162aac3ca7bbce505d2fb044dceb549f0a | 2,584 | py | Python | Packs/CommonWidgets/Scripts/RSSWidget/RSSWidget_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 799 | 2016-08-02T06:43:14.000Z | 2022-03-31T11:10:11.000Z | Packs/CommonWidgets/Scripts/RSSWidget/RSSWidget_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 9,317 | 2016-08-07T19:00:51.000Z | 2022-03-31T21:56:04.000Z | Packs/CommonWidgets/Scripts/RSSWidget/RSSWidget_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | [
"MIT"
] | 1,297 | 2016-08-04T13:59:00.000Z | 2022-03-31T23:43:06.000Z | import sys
import pytest
from test_data.test_variables import NO_ARTICLE, NO_ARTICLE_RES, ONE_ARTICLE, ONE_ARTICLE_RES, ONE_ARTICLE_STRING, \
ONE_ARTICLE_STRING_FORMATTED, TWO_ARTICLES, TWO_ARTICLES_RES, TWO_ARTICLES_STRING,\
ONE_ARTICLE_NOT_PUBLISHED, ONE_ARTICLE_NOT_PUBLISHED_RES, ONE_ARTICLE_HTML, ONE_ARTICLE_HTML_RES
from RSSWidget import collect_entries_data_from_response, create_widget_content, main
import demistomock as demisto
@pytest.mark.parametrize('parsed_response, limit, expected_result', [
    (NO_ARTICLE, sys.maxsize, NO_ARTICLE_RES),
    # NOTE(review): limit=True relies on bool being an int (True == 1);
    # presumably the literal 1 was intended -- confirm.
    (ONE_ARTICLE_HTML, True, ONE_ARTICLE_HTML_RES),
    (ONE_ARTICLE, sys.maxsize, ONE_ARTICLE_RES),
    (TWO_ARTICLES, sys.maxsize, TWO_ARTICLES_RES),
    (ONE_ARTICLE_NOT_PUBLISHED, sys.maxsize, ONE_ARTICLE_NOT_PUBLISHED_RES),
    (TWO_ARTICLES, 1, ONE_ARTICLE_RES),
])
def test_collect_entries_data_from_response(parsed_response, limit, expected_result):
    """
    Given: Parsed response from feed.
    When: Collecting relevant data from entries.
    Then: Verify the collected data.
    """
    result = collect_entries_data_from_response(parsed_response, limit=limit)
    # order-insensitive comparison: same size and every expected entry present
    assert len(result) == len(expected_result)
    for entry in expected_result:
        assert entry in result
@pytest.mark.parametrize('data, is_version_ge_65, text_output', [
    (NO_ARTICLE_RES, False, '## No entries were found.'),
    (ONE_ARTICLE_RES, False, ONE_ARTICLE_STRING),
    (ONE_ARTICLE_RES, True, ONE_ARTICLE_STRING_FORMATTED),
    (TWO_ARTICLES_RES, False, TWO_ARTICLES_STRING)
])
def test_create_widget_content(mocker, data, is_version_ge_65, text_output):
    """
    Given: Data about entries to show.
    When: Creating the markdown output for the widget.
    Then: Verify the markdown string.
    """
    import RSSWidget as rssw
    # is_demisto_version_ge controls the 6.5+ markdown formatting variant
    mocker.patch.object(rssw, 'is_demisto_version_ge', return_value=is_version_ge_65)
    res = create_widget_content(data)
    assert res == text_output
# Renamed 'exepcted_result' -> 'expected_result' (typo), matching the naming
# used by the other tests in this file.
@pytest.mark.parametrize('limit, expected_result', [
    ('', TWO_ARTICLES_STRING),
    ('1', ONE_ARTICLE_STRING),
]
                         )
def test_full_flow(mocker, requests_mock, limit, expected_result):
    """
    Given: A feed URL and an optional 'limit' script argument.
    When: Running main() end to end with feed parsing and demisto mocked.
    Then: Verify the widget result format and contents.
    """
    import RSSWidget as rssw
    requests_mock.get('https://test.com')
    mocker.patch.object(rssw, 'parse_feed_data', return_value=TWO_ARTICLES)
    mocker.patch.object(demisto, 'results')
    mocker.patch.object(demisto, 'args', return_value={'url': 'https://test.com', 'limit': limit})

    main()

    res = demisto.results.call_args[0][0]
    assert res['ContentsFormat'] == 'markdown'
    assert res['Contents'] == expected_result
| 35.888889 | 116 | 0.752709 |
180f44189af5b0bf43aacf4507d963b7d9072451 | 5,232 | py | Python | data_processing/obs/calc_durack_ocean_maps.py | DamienIrving/ocean-analysis | 23a6dbf616fb84e6e158e32534ffd394e0df2e3e | [
"MIT"
] | 7 | 2017-06-06T20:20:58.000Z | 2020-02-05T23:28:41.000Z | data_processing/obs/calc_durack_ocean_maps.py | DamienIrving/ocean-analysis | 23a6dbf616fb84e6e158e32534ffd394e0df2e3e | [
"MIT"
] | 17 | 2017-04-06T04:46:37.000Z | 2021-07-01T00:47:50.000Z | data_processing/obs/calc_durack_ocean_maps.py | DamienIrving/ocean-analysis | 23a6dbf616fb84e6e158e32534ffd394e0df2e3e | [
"MIT"
] | 4 | 2021-01-19T01:31:40.000Z | 2022-03-15T00:50:11.000Z | """
Filename: calc_durack_ocean_maps.py
Author: Damien Irving, irving.damien@gmail.com
Description: Calculate the zonal and vertical mean ocean anomaly fields
from the Durack and Wijffels (2010) data files
"""
# Import general Python modules
import sys, os, pdb
import argparse, math
import numpy
import iris
iris.FUTURE.netcdf_no_unlimited = True
# Import my modules
# Walk up the current working directory until the 'ocean-analysis' repo root
# is found, so the repo's modules/ directory can be imported regardless of
# where inside the repo the script is launched from.
cwd = os.getcwd()
repo_dir = '/'
for directory in cwd.split('/')[1:]:
    repo_dir = os.path.join(repo_dir, directory)
    if directory == 'ocean-analysis':
        break
modules_dir = os.path.join(repo_dir, 'modules')
sys.path.append(modules_dir)
try:
    import general_io as gio
    import convenient_universal as uconv
    import calc_ocean_maps
except ImportError:
    raise ImportError('Must run this script from anywhere within the ocean-analysis git repo')
def fix_cube(cube, data_type, trend_period=50.):
    """Fixes for initial loading of cube.

    Squeezes singleton dimensions and relabels the pressure coordinate as
    depth. For trend data the change-over-period field is converted to a
    per-year rate.

    Args:
        cube: iris cube loaded from the Durack and Wijffels data file.
        data_type (str): 'trend' or 'climatology'.
        trend_period (float): number of years the change was computed over
            (default 50 — presumably the 1950-2000 analysis period; confirm).
            Only used when data_type == 'trend'.
    """
    cube = iris.util.squeeze(cube)
    pressure_coord = cube.coord('sea_water_pressure')
    pressure_coord.units = 'dbar'
    pressure_coord.standard_name = 'depth'
    assert data_type in ('trend', 'climatology')
    if data_type == 'trend':
        # Convert total change over the period into an annual rate.
        # NOTE(review): 'K/yr' is applied to the salinity trend too — confirm
        # whether salinity should get its own units string.
        cube.data = cube.data / trend_period
        cube.units = 'K/yr'
    return cube
def main(inargs):
    """Run the program.

    Reads the trend and climatology cubes for each variable, then writes
    vertical-mean, zonal-vertical-mean and zonal-mean maps to the output
    files named on the command line.
    """
    variables = ['potential_temperature', 'practical_salinity']

    # CF standard name and short (CMOR) variable name for each input variable.
    cmor_names = {
        'potential_temperature': ('sea_water_potential_temperature', 'thetao'),
        'practical_salinity': ('sea_water_salinity', 'so'),
    }

    # Read data
    change_cube = {}
    climatology_cube = {}
    for variable in variables:
        change_cube[variable] = iris.load_cube(inargs.infile, 'change_over_time_in_sea_water_' + variable)
        change_cube[variable] = fix_cube(change_cube[variable], 'trend')
        climatology_cube[variable] = iris.load_cube(inargs.infile, 'sea_water_' + variable)
        climatology_cube[variable] = fix_cube(climatology_cube[variable], 'climatology')
        # NOTE: overwritten on each pass — both variables appear to share the
        # same grid, so only the last variable's values are kept (original
        # behaviour preserved).
        basin_array_default = calc_ocean_maps.create_basin_array(change_cube[variable])
        coord_names = [coord.name() for coord in change_cube[variable].dim_coords]
        atts = change_cube[variable].attributes
        atts['history'] = gio.write_metadata(file_info={inargs.infile: atts['history']})
        atts['model_id'] = 'Durack and Wijffels'

    # Calculate maps
    for variable in variables:
        standard_name, var_name = cmor_names[variable]
        change_cube_list = iris.cube.CubeList([])
        climatology_cube_list = iris.cube.CubeList([])
        for layer in calc_ocean_maps.vertical_layers.keys():
            change_cube_vm = calc_ocean_maps.calc_vertical_mean(change_cube[variable].copy(), layer, coord_names, atts, standard_name, var_name)
            change_cube_list.append(change_cube_vm)
            climatology_cube_vm = calc_ocean_maps.calc_vertical_mean(climatology_cube[variable].copy(), layer, coord_names, atts, standard_name, var_name)
            climatology_cube_list.append(climatology_cube_vm)
            if layer in ['surface', 'argo']:
                # Per-basin zonal-vertical means for the shallow layers only.
                for basin in calc_ocean_maps.basins.keys():
                    basin_array = calc_ocean_maps.create_basin_array(change_cube_vm)
                    depth_cube = None
                    change_cube_list.append(calc_ocean_maps.calc_zonal_vertical_mean(change_cube_vm.copy(), depth_cube, basin_array, basin, layer, atts, standard_name, var_name))
        for basin in calc_ocean_maps.basins.keys():
            change_cube_zm = calc_ocean_maps.calc_zonal_mean(change_cube[variable].copy(), basin_array_default, basin, atts, standard_name, var_name)
            change_cube_list.append(change_cube_zm)
            climatology_cube_zm = calc_ocean_maps.calc_zonal_mean(climatology_cube[variable].copy(), basin_array_default, basin, atts, standard_name, var_name)
            climatology_cube_list.append(climatology_cube_zm)
        # getattr replaces the original eval('inargs....') — same lookup,
        # without executing constructed source strings.
        iris.save(change_cube_list, getattr(inargs, 'change_outfile_' + var_name))
        iris.save(climatology_cube_list, getattr(inargs, 'climatology_outfile_' + var_name))
if __name__ == '__main__':

    # Epilog text shown below the argument help (RawDescriptionHelpFormatter
    # keeps its line breaks intact).
    extra_info ="""
author:
Damien Irving, irving.damien@gmail.com
"""

    description='Calculate the zonal and vertical mean ocean anomaly fields from Durack and Wijffels (2010) data files'
    parser = argparse.ArgumentParser(description=description,
                                     epilog=extra_info,
                                     argument_default=argparse.SUPPRESS,
                                     formatter_class=argparse.RawDescriptionHelpFormatter)

    # Positional arguments: one input file, then per-variable output files.
    parser.add_argument("infile", type=str, help="Input data file")
    parser.add_argument("change_outfile_thetao", type=str, help="Output file name for potential temperature change data")
    parser.add_argument("climatology_outfile_thetao", type=str, help="Output file name for potential temperature climatology data")
    parser.add_argument("change_outfile_so", type=str, help="Output file name for salinity change data")
    parser.add_argument("climatology_outfile_so", type=str, help="Output file name for salinity climatology data")

    args = parser.parse_args()
    main(args)
| 39.938931 | 178 | 0.698203 |
5b06eaf37c7ad32f1ab0421c931feed803f6e93e | 8,766 | py | Python | src/python/pants/jvm/goals/coursier_integration_test.py | Eric-Arellano/pants | aaa9756bc4f2cc97bb97851a4295a0de85f374b1 | [
"Apache-2.0"
] | null | null | null | src/python/pants/jvm/goals/coursier_integration_test.py | Eric-Arellano/pants | aaa9756bc4f2cc97bb97851a4295a0de85f374b1 | [
"Apache-2.0"
] | 12 | 2022-01-06T23:20:22.000Z | 2022-03-17T05:06:37.000Z | src/python/pants/jvm/goals/coursier_integration_test.py | Eric-Arellano/pants | aaa9756bc4f2cc97bb97851a4295a0de85f374b1 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
from pathlib import Path
from textwrap import dedent
import pytest
from pants.core.target_types import ResourcesGeneratorTarget
from pants.core.target_types import rules as core_rules
from pants.core.util_rules import config_files, source_files
from pants.core.util_rules.external_tool import rules as external_tool_rules
from pants.engine.fs import FileDigest
from pants.jvm.goals.coursier import CoursierResolve
from pants.jvm.goals.coursier import rules as coursier_goal_rules
from pants.jvm.resolve.coursier_fetch import (
Coordinate,
Coordinates,
CoursierLockfileEntry,
CoursierResolvedLockfile,
)
from pants.jvm.resolve.coursier_fetch import rules as coursier_fetch_rules
from pants.jvm.resolve.coursier_setup import rules as coursier_setup_rules
from pants.jvm.target_types import JvmArtifact, JvmDependencyLockfile
from pants.jvm.testutil import maybe_skip_jdk_test
from pants.jvm.util_rules import rules as util_rules
from pants.testutil.rule_runner import RuleRunner
# Maven coordinate used throughout the tests below; resolves to the
# hamcrest-core 1.3 jar whose digest the tests assert against.
HAMCREST_COORD = Coordinate(
    group="org.hamcrest",
    artifact="hamcrest-core",
    version="1.3",
)
@pytest.fixture
def rule_runner() -> RuleRunner:
    """Build a RuleRunner wired with every rule set the coursier-resolve goal needs."""
    return RuleRunner(
        # NOTE(review): keeps temp dirs on disk after each run (debugging aid) —
        # confirm this is intentional rather than a leftover.
        preserve_tmpdirs=True,
        rules=[
            *core_rules(),
            *config_files.rules(),
            *coursier_fetch_rules(),
            *coursier_goal_rules(),
            *coursier_setup_rules(),
            *external_tool_rules(),
            *source_files.rules(),
            *util_rules(),
        ],
        target_types=[JvmDependencyLockfile, JvmArtifact, ResourcesGeneratorTarget],
    )
@maybe_skip_jdk_test
def test_coursier_resolve_creates_missing_lockfile(rule_runner: RuleRunner) -> None:
    """Running `coursier-resolve` with no lockfile on disk should generate one."""
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                resources(
                    name = 'here_to_provide_dependencies',
                    dependencies = [
                        ':example-lockfile',
                        ':org.hamcrest_hamcrest-core',
                    ],
                    sources = ["*.txt"],
                )
                jvm_artifact(
                    name = 'org.hamcrest_hamcrest-core',
                    group = 'org.hamcrest',
                    artifact = 'hamcrest-core',
                    version = "1.3",
                )
                coursier_lockfile(
                    name = 'example-lockfile',
                )
                """
            ),
        }
    )
    result = rule_runner.run_goal_rule(CoursierResolve, args=["::"])
    assert result.exit_code == 0
    assert result.stderr == "Updated lockfile at: coursier_resolve.lockfile\n"
    # The resolve should pin hamcrest-core 1.3 with its known jar digest.
    expected_lockfile = CoursierResolvedLockfile(
        entries=(
            CoursierLockfileEntry(
                coord=HAMCREST_COORD,
                file_name="hamcrest-core-1.3.jar",
                direct_dependencies=Coordinates([]),
                dependencies=Coordinates([]),
                file_digest=FileDigest(
                    fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
                    serialized_bytes_length=45024,
                ),
            ),
        )
    )
    assert (
        Path(rule_runner.build_root, "coursier_resolve.lockfile").read_bytes()
        == expected_lockfile.to_json()
    )
@maybe_skip_jdk_test
def test_coursier_resolve_noop_does_not_touch_lockfile(rule_runner: RuleRunner) -> None:
    """If the lockfile already matches the resolve, nothing is written or reported."""
    expected_lockfile = CoursierResolvedLockfile(
        entries=(
            CoursierLockfileEntry(
                coord=HAMCREST_COORD,
                file_name="hamcrest-core-1.3.jar",
                direct_dependencies=Coordinates([]),
                dependencies=Coordinates([]),
                file_digest=FileDigest(
                    fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
                    serialized_bytes_length=45024,
                ),
            ),
        )
    )
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                resources(
                    name = 'here_to_provide_dependencies',
                    dependencies = [
                        ':example-lockfile',
                        ':org.hamcrest_hamcrest-core',
                    ],
                    sources = ["*.txt"],
                )
                jvm_artifact(
                    name = 'org.hamcrest_hamcrest-core',
                    group = 'org.hamcrest',
                    artifact = 'hamcrest-core',
                    version = "1.3",
                )
                coursier_lockfile(
                    name='example-lockfile',
                    source="coursier_resolve.lockfile",
                )
                """
            ),
            # Pre-populate the lockfile with exactly what the resolve produces.
            "coursier_resolve.lockfile": expected_lockfile.to_json().decode("utf-8"),
        }
    )
    result = rule_runner.run_goal_rule(CoursierResolve, args=["::"])
    assert result.exit_code == 0
    assert result.stderr == ""
@maybe_skip_jdk_test
def test_coursier_resolve_updates_lockfile(rule_runner: RuleRunner) -> None:
    """A stale (empty) lockfile should be rewritten with the current resolve."""
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                resources(
                    name = 'here_to_provide_dependencies',
                    dependencies = [
                        ':example-lockfile',
                        ':org.hamcrest_hamcrest-core',
                    ],
                    sources = ["*.txt"],
                )
                jvm_artifact(
                    name = 'org.hamcrest_hamcrest-core',
                    group = 'org.hamcrest',
                    artifact = 'hamcrest-core',
                    version = "1.3",
                )
                coursier_lockfile(
                    name = 'example-lockfile',
                )
                """
            ),
            # Valid JSON, but stale: no entries.
            "coursier_resolve.lockfile": "[]",
        }
    )
    result = rule_runner.run_goal_rule(CoursierResolve, args=["::"])
    assert result.exit_code == 0
    assert result.stderr == "Updated lockfile at: coursier_resolve.lockfile\n"
    expected_lockfile = CoursierResolvedLockfile(
        entries=(
            CoursierLockfileEntry(
                coord=HAMCREST_COORD,
                file_name="hamcrest-core-1.3.jar",
                direct_dependencies=Coordinates([]),
                dependencies=Coordinates([]),
                file_digest=FileDigest(
                    fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
                    serialized_bytes_length=45024,
                ),
            ),
        )
    )
    assert (
        Path(rule_runner.build_root, "coursier_resolve.lockfile").read_bytes()
        == expected_lockfile.to_json()
    )
@maybe_skip_jdk_test
def test_coursier_resolve_updates_bogus_lockfile(rule_runner: RuleRunner) -> None:
    """An unparseable lockfile should be replaced rather than crashing the goal."""
    rule_runner.write_files(
        {
            "BUILD": dedent(
                """\
                resources(
                    name = 'here_to_provide_dependencies',
                    dependencies = [
                        ':example-lockfile',
                        ':org.hamcrest_hamcrest-core',
                    ],
                    sources = ["*.txt"],
                )
                jvm_artifact(
                    name = 'org.hamcrest_hamcrest-core',
                    group = 'org.hamcrest',
                    artifact = 'hamcrest-core',
                    version = "1.3",
                )
                coursier_lockfile(
                    name = 'example-lockfile',
                )
                """
            ),
            # Deliberately malformed JSON.
            "coursier_resolve.lockfile": "]bad json[",
        }
    )
    result = rule_runner.run_goal_rule(CoursierResolve, args=["::"])
    assert result.exit_code == 0
    assert result.stderr == "Updated lockfile at: coursier_resolve.lockfile\n"
    expected_lockfile = CoursierResolvedLockfile(
        entries=(
            CoursierLockfileEntry(
                coord=HAMCREST_COORD,
                file_name="hamcrest-core-1.3.jar",
                direct_dependencies=Coordinates([]),
                dependencies=Coordinates([]),
                file_digest=FileDigest(
                    fingerprint="66fdef91e9739348df7a096aa384a5685f4e875584cce89386a7a47251c4d8e9",
                    serialized_bytes_length=45024,
                ),
            ),
        )
    )
    assert (
        Path(rule_runner.build_root, "coursier_resolve.lockfile").read_bytes()
        == expected_lockfile.to_json()
    )
| 33.976744 | 99 | 0.544832 |
252c938087b1ddbcd243e20cd4981916cf9d8f64 | 2,787 | py | Python | src/tests/dao_test/nicknames_dao_test.py | Veloxization/likahbot | 24e22711f514fc0878cf6fb9e516ad44425ea6a7 | [
"MIT"
] | null | null | null | src/tests/dao_test/nicknames_dao_test.py | Veloxization/likahbot | 24e22711f514fc0878cf6fb9e516ad44425ea6a7 | [
"MIT"
] | null | null | null | src/tests/dao_test/nicknames_dao_test.py | Veloxization/likahbot | 24e22711f514fc0878cf6fb9e516ad44425ea6a7 | [
"MIT"
] | null | null | null | import unittest
import os
from dao.nicknames_dao import NicknamesDAO
class TestNicknamesDAO(unittest.TestCase):
    """Integration tests for NicknamesDAO against a throwaway SQLite database."""

    def setUp(self):
        """Create a fresh test database from the schema and a DAO pointing at it."""
        import subprocess  # local import: only needed for test setup

        self.db_addr = "database/test_db.db"
        # The original `os.popen("sqlite3 ... < schema.sql")` returned without
        # waiting, so the schema load could race the first query. subprocess.run
        # blocks until sqlite3 finishes and check=True surfaces failures.
        with open("database/schema.sql", "rb") as schema_file:
            subprocess.run(["sqlite3", self.db_addr], stdin=schema_file, check=True)
        self.nicknames_dao = NicknamesDAO(self.db_addr)

    def tearDown(self):
        # Wipe all rows so each test starts from an empty table.
        self.nicknames_dao.clear_nicknames_table()

    def test_all_instances_of_nickname_are_found(self):
        """find_nickname matches every user/guild pair that used the name."""
        self.nicknames_dao.add_nickname("Test", 1234, 9876)
        self.nicknames_dao.add_nickname("Test", 2345, 9876)
        self.nicknames_dao.add_nickname("Test2", 1234, 9876)
        nicknames = self.nicknames_dao.find_nickname("Test")
        self.assertEqual(len(nicknames), 2)

    def test_all_instances_of_users_nicknames_are_found(self):
        """find_user_nicknames returns only the given user's nicknames."""
        self.nicknames_dao.add_nickname("Test", 1234, 9876)
        self.nicknames_dao.add_nickname("Test", 2345, 9876)
        self.nicknames_dao.add_nickname("Test2", 1234, 9876)
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 9876)
        self.assertEqual(len(nicknames), 2)

    def test_nickname_is_added_correctly(self):
        """Adding a nickname makes it retrievable with its text intact."""
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 9876)
        self.assertEqual(len(nicknames), 0)
        self.nicknames_dao.add_nickname("Test", 1234, 9876)
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 9876)
        self.assertEqual(nicknames[0]["nickname"], "Test")

    def test_added_nicknames_do_not_exceed_set_limit(self):
        """Passing a limit trims the stored history to the newest entries."""
        self.nicknames_dao.add_nickname("Test1", 1234, 9876)
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 9876)
        self.assertEqual(len(nicknames), 1)
        self.nicknames_dao.add_nickname("Test2", 1234, 9876)
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 9876)
        self.assertEqual(len(nicknames), 2)
        # Limit of 1: only the most recent nickname survives.
        self.nicknames_dao.add_nickname("Test3", 1234, 9876, 1)
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 9876)
        self.assertEqual(len(nicknames), 1)
        self.assertEqual(nicknames[0]["nickname"], "Test3")

    def test_user_nicknames_are_cleared_correctly(self):
        """delete_user_nicknames only affects the given user/guild pair."""
        self.nicknames_dao.add_nickname("Test1", 1234, 9876)
        self.nicknames_dao.add_nickname("Test2", 1234, 8765)
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 9876)
        self.assertEqual(len(nicknames), 1)
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 8765)
        self.assertEqual(len(nicknames), 1)
        self.nicknames_dao.delete_user_nicknames(1234, 9876)
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 9876)
        self.assertEqual(len(nicknames), 0)
        nicknames = self.nicknames_dao.find_user_nicknames(1234, 8765)
        self.assertEqual(len(nicknames), 1)
cf4db466c5885d920eef7129902bc0299dbd2ca7 | 2,627 | py | Python | python/PHN.py | danijel3/ASRDemos | 33b82d56eba5b508f53f0120193b15f72859173b | [
"Apache-2.0"
] | 57 | 2016-02-10T02:11:50.000Z | 2021-06-23T15:39:00.000Z | python/PHN.py | danijel3/ASRDemos | 33b82d56eba5b508f53f0120193b15f72859173b | [
"Apache-2.0"
] | null | null | null | python/PHN.py | danijel3/ASRDemos | 33b82d56eba5b508f53f0120193b15f72859173b | [
"Apache-2.0"
] | 24 | 2016-04-05T19:31:44.000Z | 2022-01-07T12:17:10.000Z | # -*- coding: utf-8 -*-
import re
import numpy
class Segment:
def __init__(self, xmin=0, xmax=0, text=''):
self.__dict__.update(locals())
def __str__(self):
return str('"' + self.text + '" : ' + str(self.xmin) + ' -- ' + str(self.xmax))
def __repr__(self):
return self.__str__()
class PHN:
def __init__(self):
self.segments = []
def __str__(self):
return str(self.segments)
def __repr__(self):
return self.__str__()
def fromSequence(self, seq, score, seqdict, timestep):
self.segments = []
on = seq[0]
ot = 0
for i, n in enumerate(seq):
if(n != on):
p = numpy.mean(score[ot:i, on])
text = '{} <{:.2%}>'.format(seqdict[on], p)
self.segments.append(Segment(ot * timestep, i * timestep, text))
on = n
ot = i
i = seq.size
p = numpy.mean(score[ot:i, on])
text = '{} <{:.2%}>'.format(seqdict[on], p)
self.segments.append(Segment(ot * timestep, i * timestep, text))
def fromSequence(self, seq, timestep):
self.segments = []
on = seq[0]
ot = 0
for i, n in enumerate(seq):
if(n != on):
text = str(on)
self.segments.append(Segment(ot * timestep, i * timestep, text))
on = n
ot = i
i = seq.size
text = str(on)
self.segments.append(Segment(ot * timestep, i * timestep, text))
def getCode(self, beg, end, nul_val='!'):
for s in self.segments:
if(end < s.xmin):
continue
if(beg > s.xmax):
continue
if(end <= s.xmax and beg >= s.xmin):
return s.text
if(beg <= s.xmin):
if(end - s.xmin >= s.xmin - beg):
return s.text
if(end >= s.xmax):
if(s.xmax - beg >= end - s.xmax):
return s.text
return nul_val
def toSequence(self, nSamples, win_shift, win_size, code_mapping=None, nul_val=-1):
codes = []
for s in range(nSamples):
beg = s * win_shift
end = beg + win_size
c = self.getCode(beg, end).strip()
if code_mapping != None:
if c in code_mapping:
codes.append(code_mapping[c])
else:
codes.append(nul_val)
else:
codes.append(c)
return codes
re_line = re.compile("^([0-9]+) ([0-9]+) (.+)$")
def parseLine(self, line):
m = self.re_line.match(line)
assert m is not None
return int(m.group(1)),int(m.group(2)),m.group(3)
def load(self, filename):
with open(filename, 'r') as f:
for line in f:
xmin,xmax,text=self.parseLine(line)
self.segments.append(Segment(xmin,xmax,text))
def save(self, filename):
with open(filename, 'w') as f:
for seg in self.segments:
f.write('{} {} {}\n'.format(seg.xmin,seg.xmax,seg.text)) | 21.532787 | 84 | 0.583555 |
ed8e404ec9aa9b318e020672e4b10567635875da | 2,429 | py | Python | code/python/User_CF.py | gitsoftsun/GraduationThesis | b632266c4071395b4bc020939e63cace54ab3d94 | [
"Apache-2.0"
] | null | null | null | code/python/User_CF.py | gitsoftsun/GraduationThesis | b632266c4071395b4bc020939e63cace54ab3d94 | [
"Apache-2.0"
] | null | null | null | code/python/User_CF.py | gitsoftsun/GraduationThesis | b632266c4071395b4bc020939e63cace54ab3d94 | [
"Apache-2.0"
class User_CF:
    """User-based collaborative filtering recommender.

    Reads tab-separated rating files (user, item, score, extra) and
    recommends unseen items drawn from the most similar users' histories.
    """

    def __init__(self, train_file, test_file):
        self.train_file = train_file
        self.test_file = test_file
        self.readData()

    def readData(self):
        """Read the train/test files into user -> {item: score} tables."""
        self.train = dict()  # user-item rating table
        with open(self.train_file) as fh:  # was an unclosed open() iterator
            for line in fh:
                user, item, score, _ = line.strip().split("\t")
                self.train.setdefault(user, {})
                self.train[user][item] = int(score)
        self.test = dict()  # held-out test ratings
        with open(self.test_file) as fh:
            for line in fh:
                user, item, score, _ = line.strip().split("\t")
                self.test.setdefault(user, {})
                self.test[user][item] = int(score)

    def UserSimilarity(self):
        """Compute and return the user-user cosine similarity matrix W."""
        import math  # FIX: `math` was used below but never imported anywhere

        # Inverted index: item -> set of users who rated it.
        self.item_users = dict()
        for user, items in self.train.items():
            for i in items.keys():
                if i not in self.item_users:
                    self.item_users[i] = set()
                self.item_users[i].add(user)
        # Co-occurrence counts.
        C = dict()  # C[u][v]: number of items rated by both u and v
        N = dict()  # N[u]: number of items rated by u
        for i, users in self.item_users.items():
            for u in users:
                N.setdefault(u, 0)
                N[u] += 1
                C.setdefault(u, {})
                for v in users:
                    if u == v:
                        continue
                    C[u].setdefault(v, 0)
                    C[u][v] += 1
        # Cosine similarity between users.
        self.W = dict()
        for u, related_users in C.items():
            self.W.setdefault(u, {})
            for v, cuv in related_users.items():
                self.W[u][v] = cuv / math.sqrt(N[u] * N[v])
        return self.W

    def Recommend(self, user, K=3, N=10):
        """Recommend up to N unseen items for `user`, scored through the K
        most similar users. Requires UserSimilarity() to have been called."""
        rank = dict()
        action_item = self.train[user].keys()  # items `user` already rated
        # Walk the K most similar users, accumulating similarity-weighted scores.
        for v, wuv in sorted(self.W[user].items(), key=lambda x: x[1], reverse=True)[0:K]:
            for i, rvi in self.train[v].items():
                if i in action_item:
                    continue
                rank.setdefault(i, 0)
                rank[i] += wuv * rvi
        # Keep the N highest-scoring recommendations.
        return dict(sorted(rank.items(), key=lambda x: x[1], reverse=True)[0:N])
| 37.953125 | 98 | 0.478798 |
702ae821078a279199ec8a26cb86b056cff90f24 | 1,489 | py | Python | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/access_information_contract.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/access_information_contract.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/access_information_contract.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AccessInformationContract(Model):
    """Tenant access information contract of the API Management service.

    :param id: Identifier.
    :type id: str
    :param primary_key: Primary access key.
    :type primary_key: str
    :param secondary_key: Secondary access key.
    :type secondary_key: str
    :param enabled: Determines whether direct access is enabled.
    :type enabled: bool
    """

    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'primary_key': {'key': 'primaryKey', 'type': 'str'},
        'secondary_key': {'key': 'secondaryKey', 'type': 'str'},
        'enabled': {'key': 'enabled', 'type': 'bool'},
    }

    def __init__(self, **kwargs):
        super(AccessInformationContract, self).__init__(**kwargs)
        # Pull each known attribute out of kwargs, defaulting to None.
        for attr_name in ('id', 'primary_key', 'secondary_key', 'enabled'):
            setattr(self, attr_name, kwargs.get(attr_name))
| 36.317073 | 76 | 0.597717 |
fe0efe354e563308af649e06f79d03507614dd74 | 13,507 | py | Python | invenio_records_lom/fixtures/demo.py | tu-graz-library/invenio-records-lom | c811506e51a1ed15d11cf10d6e6ef83a4ecc202b | [
"MIT"
] | null | null | null | invenio_records_lom/fixtures/demo.py | tu-graz-library/invenio-records-lom | c811506e51a1ed15d11cf10d6e6ef83a4ecc202b | [
"MIT"
] | 18 | 2020-10-21T07:58:14.000Z | 2022-03-29T12:10:25.000Z | invenio_records_lom/fixtures/demo.py | tu-graz-library/invenio-records-lom | c811506e51a1ed15d11cf10d6e6ef83a4ecc202b | [
"MIT"
] | 7 | 2020-10-06T08:46:40.000Z | 2021-07-06T13:21:29.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 Graz University of Technology.
#
# invenio-records-lom is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Fake LOM demo records."""
import json
from faker import Faker
from flask_principal import Identity
from invenio_access import any_user
from ..proxies import current_records_lom
# ----- functions for LOM datatypes -----
def langstringify(fake: Faker, string: str) -> dict:
    """Wraps `string` in a dict, emulating LOMv1.0-standard LangString-objects."""
    return {
        "language": create_fake_language(fake),
        "string": string,
    }


def vocabularify(fake: Faker, choices: list) -> dict:
    """Randomly draw a choice from `choices`, then wrap that choice in a dict, emulating LOMv1.0-standard Vocabulary-objects."""
    return {
        "source": "LOMv1.0",
        "value": fake.random.choice(choices),
    }


def create_fake_datetime(fake: Faker) -> dict:
    """Create a fake datetime dict, as per LOMv1.0-standard Datetime-object-specification."""
    # Pick one of the date/time precisions allowed by the standard
    # (year only, date, datetime, datetime with timezone designator).
    pattern = fake.random.choice(["YMDhmsTZD", "YMDhms", "YMD", "Y"])
    if pattern == "Y":
        datetime = fake.year()
    elif pattern == "YMD":
        datetime = fake.date()
    elif pattern == "YMDhms":
        datetime = fake.date_time().isoformat()
    elif pattern == "YMDhmsTZD":
        time_zone_designator = fake.pytimezone()
        datetime = fake.date_time(tzinfo=time_zone_designator).isoformat()
    return {"dateTime": datetime, "description": langstringify(fake, fake.sentence())}


def create_fake_duration(fake: Faker) -> dict:
    """Create a fake duration dict, as per LOMv1.0-standard Duration-object-specification."""
    random = fake.random
    # ISO-8601-style duration strings of varying granularity.
    pattern = random.choice(["all", "Y", "D", "HM", "S"])
    duration = {
        "all": "P1Y2M4DT10H35M12.5S",
        "Y": f"P{random.randint(1,5)}Y",
        "D": f"P{random.randint(1,60)}D",
        "HM": f"PT{random.randint(1,3)}H{random.randint(1,59)}M",
        "S": f"PT{random.uniform(0.1, 12.5)}S",
    }
    return {
        "duration": duration[pattern],
        "description": langstringify(fake, fake.sentence()),
    }


def create_fake_vcard(fake: Faker) -> str:
    """Returns a placeholder-string for a vCard-object."""
    return "placeholder for vcard"
# ----- functions for elements that are part of more than one category -----
def create_fake_language(fake: Faker) -> str:
    """Create a fake language-code, as required for "language"-keys by LOMv1.0-standard."""
    # Samples cover the casing/precision variants the standard tolerates.
    language_codes = [
        "EN",
        "en-us",
        "en-US-philadelphia",
        "eng",
        "eng-US",
        "ENG-us-philadelphia",
    ]
    return fake.random.choice(language_codes)


def create_fake_identifier(fake: Faker) -> dict:
    """Create a fake "identifier"-element, compatible with LOMv1.0-standard."""
    catalog = fake.random.choice(["URI", "ISBN"])
    if catalog == "URI":
        entry = fake.uri()
    else:
        entry = fake.isbn13()
    return {
        "catalog": catalog,
        "entry": entry,
    }


def create_fake_contribute(fake: Faker, roles: list) -> dict:
    """Create a fake "contribute"-element, compatible with LOMv1.0-standard."""
    return {
        "role": vocabularify(fake, roles),
        "entity": [create_fake_vcard(fake) for __ in range(2)],
        "date": create_fake_datetime(fake),
    }
# ----- functions for categories or used by only one category -----
def create_fake_general(fake: Faker) -> dict:
    """Create a fake "general"-element, compatible with LOMv1.0-standard."""
    structures = ["atomic", "collection", "networked", "hierarchical", "linear"]
    aggregationLevels = ["1", "2", "3", "4"]
    return {
        "identifier": [create_fake_identifier(fake) for __ in range(2)],
        "title": langstringify(fake, " ".join(fake.words())),
        # "none" is an allowed value alongside real language codes.
        "language": [fake.random.choice([create_fake_language(fake), "none"])],
        "description": [langstringify(fake, fake.paragraph()) for __ in range(2)],
        "keyword": [langstringify(fake, fake.word()) for __ in range(2)],
        "coverage": [langstringify(fake, fake.paragraph()) for __ in range(2)],
        "structure": vocabularify(fake, structures),
        "aggregationLevel": vocabularify(fake, aggregationLevels),
    }


def create_fake_lifecycle(fake: Faker) -> dict:
    """Create a fake "lifeCycle"-element, compatible with LOMv1.0-standard."""
    # Contributor roles defined by the LOMv1.0 vocabulary.
    roles = [
        "author",
        "publisher",
        "unknown",
        "initiator",
        "terminator",
        "validator",
        "editor",
        "graphical designer",
        "technical implementer",
        "content provider",
        "technical validator",
        "educational validator",
        "script writer",
        "instructional designer",
        "subject matter expert",
    ]
    statuses = ["draft", "final", "revised", "unavailable"]
    random_int = fake.random.randint
    return {
        "version": langstringify(fake, f"{random_int(0,9)}.{random_int(0,9)}"),
        "status": vocabularify(fake, statuses),
        "contribute": [create_fake_contribute(fake, roles) for __ in range(2)],
    }


def create_fake_metametadata(fake: Faker) -> dict:
    """Create a fake "metaMetadata"-element, compatible with LOMv1.0-standard."""
    roles = ["creator", "validator"]
    return {
        "identifier": [create_fake_identifier(fake) for __ in range(2)],
        "contribute": [create_fake_contribute(fake, roles) for __ in range(2)],
        "metadataSchemas": ["LOMv1.0"],
        "language": create_fake_language(fake),
    }
def create_fake_technical(fake: Faker) -> dict:
    """Create a fake "technical"-element, compatible with LOMv1.0-standard."""
    return {
        # MIME type or the literal "non-digital", as allowed by the standard.
        "format": [fake.random.choice([fake.mime_type(), "non-digital"])],
        "size": str(fake.random.randint(1, 2 ** 32)),
        "location": [
            fake.uri(),
        ],
        "requirement": [create_fake_requirement(fake) for __ in range(2)],
        "installationRemarks": langstringify(fake, fake.paragraph()),
        "otherPlatformRequirements": langstringify(fake, fake.paragraph()),
    }


def create_fake_requirement(fake: Faker) -> dict:
    """Create a fake "requirement"-element, compatible with LOMv1.0-standard."""
    return {
        "orComposite": [create_fake_orcomposite(fake) for __ in range(2)],
    }


def create_fake_orcomposite(fake: Faker) -> dict:
    """Create a fake "orComposite"-element, compatible with LOMv1.0-standard."""
    # The allowed requirement names depend on the requirement type.
    type_ = fake.random.choice(["operating system", "browser"])
    if type_ == "operating system":
        requirement_names = [
            "pc-dos",
            "ms-windows",
            "macos",
            "unix",
            "multi-os",
            "none",
        ]
    else:
        requirement_names = [
            "any",
            "netscape communicator",
            "ms-internet explorer",
            "opera",
            "amaya",
        ]
    return {
        "type": vocabularify(fake, [type_]),
        "name": vocabularify(fake, requirement_names),
        "minimumVersion": str(fake.random.randint(1, 4)),
        "maximumVersion": str(fake.random.randint(5, 8)),
    }
def create_fake_educational(fake: Faker) -> dict:
    """Create a fake "educational"-element, compatible with LOMv1.0-standard."""
    interactivity_types = ["active", "expositive", "mixed"]
    learning_resource_types = [
        "exercise",
        "simulation",
        "questionnaire",
        "diagram",
        "figure",
        "graph",
        "index",
        "slide",
        "table",
        "narrative text",
        "exam",
        "experiment",
        "problem statement",
        "self assessment",
        "lecture",
    ]
    # Shared five-point scale for interactivity level and semantic density.
    levels = ["very low", "low", "medium", "high", "very high"]
    difficulties = ["very easy", "easy", "medium", "difficult", "very difficult"]
    end_user_roles = ["teacher", "author", "learner", "manager"]
    contexts = ["school", "higher education", "training", "other"]
    random_int = fake.random.randint
    return {
        "interactivityType": vocabularify(fake, interactivity_types),
        "learningResourceType": vocabularify(fake, learning_resource_types),
        "interactivityLevel": vocabularify(fake, levels),
        "semanticDensity": vocabularify(fake, levels),
        "intendedEndUserRole": vocabularify(fake, end_user_roles),
        "context": vocabularify(fake, contexts),
        "typicalAgeRange": langstringify(fake, f"{random_int(1,4)}-{random_int(5,9)}"),
        "difficulty": vocabularify(fake, difficulties),
        "typicalLearningTime": create_fake_duration(fake),
        "description": langstringify(fake, fake.paragraph()),
        "language": [create_fake_language(fake) for __ in range(2)],
    }


def create_fake_rights(fake: Faker) -> dict:
    """Create a fake "rights"-element, compatible with LOMv1.0-standard."""
    return {
        "cost": vocabularify(fake, ["yes", "no"]),
        "copyrightAndOtherRestrictions": vocabularify(fake, ["yes", "no"]),
        "description": langstringify(fake, fake.paragraph()),
    }
def create_fake_relation(fake: Faker) -> dict:
"""Create a fake "relation"-element, compatible with LOMv1.0-standard."""
kinds = [
"ispartof",
"haspart",
"isversionof",
"hasversion",
"isformatof",
"hasformat",
"references",
"isreferencedby",
"isbasedon",
"isbasisfor",
"requires",
"isrequiredby",
]
return {
"kind": vocabularify(fake, kinds),
"resource": {
"identifier": [create_fake_identifier(fake) for __ in range(2)],
"description": [langstringify(fake, fake.paragraph()) for __ in range(2)],
},
}
def create_fake_annotation(fake: Faker) -> dict:
"""Create a fake "annotation"-element, compatible with LOMv1.0-standard."""
return {
"entity": create_fake_vcard(fake),
"date": create_fake_datetime(fake),
"description": langstringify(fake, fake.paragraph()),
}
def create_fake_classification(fake: Faker) -> dict:
"""Create a fake "classification"-element, compatible with LOMv1.0-standard."""
purposes = [
"discipline",
"idea",
"prerequisite",
"educational objective",
"accessability restrictions",
"educational level",
"skill level",
"security level",
"competency",
]
return {
"purpose": vocabularify(fake, purposes),
"taxonPath": [create_fake_taxonpath(fake) for __ in range(2)],
"description": langstringify(fake, fake.paragraph()),
"keyword": langstringify(fake, fake.word()),
}
def create_fake_taxonpath(fake: Faker) -> dict:
    """Create a fake "taxonPath"-element, compatible with LOMv1.0-standard."""
    # Source first, then the taxa — same random-draw order as before.
    source = langstringify(fake, fake.word())
    taxa = [create_fake_taxon(fake) for _ in range(2)]
    return {"source": source, "taxon": taxa}
def create_fake_taxon(fake: Faker) -> dict:
    """Create a fake "taxon"-element, compatible with LOMv1.0-standard."""
    # Random 5-character id drawn from capital letters, '.', and digits.
    taxon_id = fake.lexify(
        "?????",
        letters="ABCDEFGHIJKLMNOPQRSTUVWXYZ.0123456789",
    )
    entry = langstringify(fake, fake.word())
    return {"id": taxon_id, "entry": entry}
# ----- functions for creating LOMv1.0-fakes -----
def create_fake_metadata(fake: Faker) -> dict:
    """Create a fake json-representation of a "lom"-element, compatible with LOMv1.0-standard."""
    # Build the categories one by one, in the same order as before, so a
    # seeded Faker produces identical output.
    lom = {}
    lom["general"] = create_fake_general(fake)
    lom["lifeCycle"] = create_fake_lifecycle(fake)
    lom["metaMetadata"] = create_fake_metametadata(fake)
    lom["technical"] = create_fake_technical(fake)
    lom["educational"] = [create_fake_educational(fake) for _ in range(2)]
    lom["rights"] = create_fake_rights(fake)
    lom["relation"] = [create_fake_relation(fake) for _ in range(2)]
    lom["annotation"] = [create_fake_annotation(fake) for _ in range(2)]
    lom["classification"] = [create_fake_classification(fake) for _ in range(2)]
    # Round-trip through json to guarantee the result contains only plain,
    # JSON-serializable types.
    return json.loads(json.dumps(lom))
def create_fake_record(fake: Faker):
    """Create and publish one fake LOM record via the records-service.

    Builds fake access-settings (public/restricted, optionally embargoed)
    and fake LOMv1.0 metadata, creates a draft, and publishes it.

    :param fake: seeded Faker instance driving all random choices.
    :return: the result of ``service.publish`` for the new record.
        (Fixed: previously this implicitly returned ``None``, so
        ``create_fake_records`` collected a list of ``None`` despite its
        docstring promising jsons/records.)
    """
    # invenio user identities have integers as `id`s, use a string to avoid collisions
    fake_identity = Identity(id="lom_demo")
    fake_identity.provides.add(any_user)
    fake_access_type = fake.random.choice(["public", "restricted"])
    has_embargo = fake.boolean()
    if has_embargo:
        fake_embargo = {
            "until": fake.future_date(end_date="+365d").isoformat(),
            "reason": "Fake embargo for fake record.",
            "active": True,
        }
    else:
        fake_embargo = {}
    fake_access = {
        "files": fake_access_type,
        "record": fake_access_type,
        "embargo": fake_embargo,
    }
    data = {
        # these values get processed by service.config.components
        "access": fake_access,
        "metadata": create_fake_metadata(fake),
    }
    service = current_records_lom.records_service
    draft = service.create(data=data, identity=fake_identity)
    # Return the publish-result so callers actually get the record back.
    return service.publish(id_=draft.id, identity=fake_identity)
def create_fake_records(number: int, seed: int = 42) -> list:
    """Create `number` jsons adhering to LOMv1.0-standard, using `seed` as RNG-seed."""
    # Instantiate first, then seed the (class-level) RNG — same order as before.
    faker = Faker()
    Faker.seed(seed)
    results = []
    for _ in range(number):
        results.append(create_fake_record(faker))
    return results
| 33.105392 | 128 | 0.617532 |
37209cda6ffc6ac4e8568fe62cee8558e1e4e517 | 289 | py | Python | submissions_viewer/tests_results/utils.py | ClassroomSuite/SubmissionsViewer | 1f8dd931e1a6c68b289d23deb1c25a2c438137e6 | [
"MIT"
] | null | null | null | submissions_viewer/tests_results/utils.py | ClassroomSuite/SubmissionsViewer | 1f8dd931e1a6c68b289d23deb1c25a2c438137e6 | [
"MIT"
] | 1 | 2020-08-06T14:51:57.000Z | 2020-08-06T14:51:57.000Z | submissions_viewer/tests_results/utils.py | ClassroomSuite/SubmissionsViewer | 1f8dd931e1a6c68b289d23deb1c25a2c438137e6 | [
"MIT"
] | null | null | null | import time
import ipywidgets as widgets
from IPython import display
def _display(out: widgets.Output, content, clear_output=True):
    """Render `content` inside the given Output widget, then pause one second."""
    # NOTE(review): indentation reconstructed from a whitespace-mangled dump —
    # the decorator captures the inner renderer's output into `out`.
    @out.capture(clear_output=clear_output, wait=True)
    def _render():
        display.display(content)
        time.sleep(1)

    _render()
| 20.642857 | 62 | 0.723183 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.