Dataset schema (one row per source file; ⌀ marks nullable columns in the original dump):

| column | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 4–721 |
| content_id | string | length 40 |
| detected_licenses | list | length 0–57 |
| license_type | string | 2 classes |
| repo_name | string | length 5–91 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 321 classes |
| visit_date | timestamp[ns] | 2016-08-12 09:31:09 – 2023-09-06 10:45:07 |
| revision_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| committer_date | timestamp[ns] | 2010-09-28 14:01:40 – 2023-09-06 06:22:19 |
| github_id | int64 | 426 – 681M |
| star_events_count | int64 | 101 – 243k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | string | 23 classes |
| gha_event_created_at | timestamp[ns] | 2012-06-28 18:51:49 – 2023-09-14 21:59:16 (⌀) |
| gha_created_at | timestamp[ns] | 2008-02-11 22:55:26 – 2023-08-10 11:14:58 (⌀) |
| gha_language | string | 147 classes |
| src_encoding | string | 26 classes |
| language | string | 2 classes |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 6 – 10.2M |
| extension | string | 115 classes |
| filename | string | length 3–113 |
| content | string | length 6 – 10.2M |
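A minimal sketch of how a dataset with this schema could be streamed and filtered with the Hugging Face `datasets` library. The dataset id and split name are placeholders, not facts from this dump:

```python
# Sketch only: "org/dataset-name" is a hypothetical repository id.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)

# Keep permissively licensed, non-vendored, non-generated Python files,
# mirroring the license_type / is_vendor / is_generated / language columns.
filtered = ds.filter(
    lambda row: row["license_type"] == "permissive"
    and not row["is_vendor"]
    and not row["is_generated"]
    and row["language"] == "Python"
)

for row in filtered.take(3):
    print(row["repo_name"], row["path"], row["length_bytes"])
```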
- blob_id: bc5d495c1e22bef9ea9fefd9230f2288007a0c7c
- directory_id: dcd772f567ef8a8a1173a9f437cd68f211fb9362
- path: /developer_tools/createSQAtracebilityMatrix.py
- content_id: 36d16040ede6d52ed2c18f3f8dc4b7ba1154d5ae
- detected_licenses: ["Apache-2.0", "LicenseRef-scancode-warranty-disclaimer", "BSD-2-Clause", "BSD-3-Clause"]
- license_type: permissive
- repo_name: idaholab/raven
- snapshot_id: 39cdce98ad916c638399232cdc01a9be00e200a2
- revision_id: 2b16e7aa3325fe84cab2477947a951414c635381
- branch_name: refs/heads/devel
- visit_date: 2023-08-31T08:40:16.653099
- revision_date: 2023-08-29T16:21:51
- committer_date: 2023-08-29T16:21:51
- github_id: 85,989,537
- star_events_count: 201
- fork_events_count: 126
- gha_license_id: Apache-2.0
- gha_event_created_at: 2023-09-13T21:55:43
- gha_created_at: 2017-03-23T19:29:27
- gha_language: C++
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 4,543
- extension: py
- filename: createSQAtracebilityMatrix.py
- content:

```python
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on October 20, 2018
@author: alfoa
"""
import os
from glob import glob
import inspect
import xml.etree.ElementTree as ET
import copy
import sys
from collections import OrderedDict, defaultdict
# import utility functions
import readRequirementsAndCreateLatex as readRequirements
from createRegressionTestDocumentation import testDescription as regressionTests
def constructRequirementMapWithTests(requirementDict):
"""
Method to construct the link between requirements' ids and tests
@ In, requirementDict, dict, the requirement dictionary
@ Out, reqDictionary, dict, the requirement mapping ({'req id':[test1,test2,etc]})
"""
reqDictionary = defaultdict(list)
for testName,req in requirementDict.items():
for reqId in req.find("requirements").text.split():
reqDictionary[reqId.strip()].append(testName)
return reqDictionary
def createLatexFile(reqDictionary,reqDocument,outputLatex):
"""
Method to write a section containing the requirement matrix in LateX
@ In, reqDictionary, dict, the requirement mapping ({'req id':[test1,test2,etc]})
@ In, reqDocument, tuple, (app name, the requirement dictionary)
@ In, outputLatex, string, the output latex file
@ Out, None
"""
app, allGroups = reqDocument
fileObject = open(outputLatex,"w+")
fileObject.write(" \\section{"+app.strip().upper()+":SYSTEM REQUIREMENTS} \n")
fileObject.write(" \\subsection{Requirements Traceability Matrix} \n")
fileObject.write(" This section contains all of the requirements, requirements' description, and \n")
fileObject.write(" requirement test cases. The requirement tests are automatically tested for each \n")
fileObject.write(" CR (Change Request) by the CIS (Continuous Integration System). \n")
fileObject.write(" \\newcolumntype{b}{X} \n")
fileObject.write(" \\newcolumntype{s}{>{\\hsize=.5\\hsize}X} \n")
for group, groupDict in allGroups.items():
fileObject.write(" \\subsubsection{"+group.strip()+"} \n")
for reqSetName,reqSet in groupDict.items():
# create table here
fileObject.write("\\begin{tabularx}{\\textwidth}{|s|s|b|} \n")
fileObject.write("\\hline \n")
fileObject.write("\\textbf{Requirement ID} & \\textbf{Requirement Description} & \\textbf{Test(s)} \\\ \hline \n")
fileObject.write("\\hline \n")
ravenPath = os.path.realpath(os.path.join(os.path.realpath(__file__) ,"..","..",".."))
for reqName,req in reqSet.items():
requirementTests = reqDictionary.get(reqName)
if requirementTests is None:
source = req.get("source")
if source is not None:
requirementTests = source
requirementTests = [] if requirementTests is None else requirementTests
for i in range(len(requirementTests)):
requirementTests[i] = str(i+1) + ")" + requirementTests[i].replace(ravenPath,"").replace("\\","/").replace("_","\\_").strip()
fileObject.write(" \\hspace{0pt}"+reqName.strip()+" & \\hspace{0pt}"+req['description']+" & \\hspace{0pt}"+ ' '.join(requirementTests)+" \\\\ \\hline \n")
fileObject.write("\\hline \n")
fileObject.write("\\caption*{"+reqSetName.strip()+"}\n")
fileObject.write("\\end{tabularx} \n")
fileObject.write("\\end{document}")
fileObject.close()
if __name__ == '__main__':
try:
index = sys.argv.index("-i")
requirementFile = sys.argv[index+1].strip()
except ValueError:
raise ValueError("Not found command line argument -i")
try:
index = sys.argv.index("-o")
outputLatex = sys.argv[index+1].strip()
except ValueError:
raise ValueError("Not found command line argument -o")
reqDocument = readRequirements.readRequirementsXml(requirementFile)
descriptionClass = regressionTests()
_, _, requirementDict = descriptionClass.splitTestDescription()
reqDictionary = constructRequirementMapWithTests(requirementDict)
createLatexFile(reqDictionary,reqDocument,outputLatex)
```
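The script above is driven entirely by the `-i`/`-o` flags it scans out of `sys.argv`; a typical invocation would be `python createSQAtracebilityMatrix.py -i requirements.xml -o matrix.tex` (file names illustrative). The same parsing could be expressed with `argparse`, shown here as an illustrative alternative rather than part of the original script:

```python
# Sketch: argparse equivalent of the manual sys.argv scan above.
import argparse

parser = argparse.ArgumentParser(description="Build an SQA traceability matrix.")
parser.add_argument("-i", dest="requirementFile", required=True,
                    help="input requirements XML file")
parser.add_argument("-o", dest="outputLatex", required=True,
                    help="output LaTeX file to write")
args = parser.parse_args()
```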
- blob_id: fb5d2260b6e11e5aa8d02b0a631283ec1ea6cb06
- directory_id: 951c578186220f2499a7aecf99a314e46778fa75
- path: /tests/numpy/columns/test_bool.py
- content_id: 0c7dbefdefc2389b1bb92036319419a98207d369
- detected_licenses: ["MIT", "Python-2.0"]
- license_type: permissive
- repo_name: mymarilyn/clickhouse-driver
- snapshot_id: 0d06fb1d3b28f61b267307fb6cea1a33d7997df4
- revision_id: ce712b5bc7a7900e844c7c8f99a1e3426aa326f7
- branch_name: refs/heads/master
- visit_date: 2023-07-20T08:41:27.193499
- revision_date: 2023-06-30T08:29:06
- committer_date: 2023-06-30T08:29:31
- github_id: 90,912,724
- star_events_count: 1,108
- fork_events_count: 229
- gha_license_id: NOASSERTION
- gha_event_created_at: 2023-05-24T02:54:41
- gha_created_at: 2017-05-10T22:13:04
- gha_language: Python
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 2,376
- extension: py
- filename: test_bool.py
- content:

```python
try:
import numpy as np
except ImportError:
np = None
from tests.numpy.testcase import NumpyBaseTestCase
class BoolTestCase(NumpyBaseTestCase):
n = 10
# https://clickhouse.com/docs/en/whats-new/changelog/2021
required_server_version = (21, 12, 0)
def check_result(self, rv, col_type):
data = (np.array(range(self.n)) % 2).astype(bool)
self.assertArraysEqual(rv[0], data)
self.assertEqual(rv[0].dtype, col_type)
def get_query(self, ch_type):
with self.create_table('a {}'.format(ch_type)):
data = [(np.array(range(self.n)) % 2).astype(bool)]
self.client.execute(
'INSERT INTO test (a) VALUES', data, columnar=True
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted, '\n'.join(str(x).lower() for x in data[0]) + '\n'
)
return self.client.execute(query, columnar=True)
def test_bool(self):
rv = self.get_query('Bool')
self.check_result(rv, np.bool_)
def test_insert_nan_into_non_nullable(self):
with self.create_table('a Bool'):
data = [
np.array([True, np.nan], dtype=object)
]
self.client.execute(
'INSERT INTO test (a) VALUES', data, columnar=True
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(
inserted,
'true\nfalse\n'
)
inserted = self.client.execute(query, columnar=True)
self.assertArraysEqual(inserted[0], np.array([True, 0]))
self.assertEqual(inserted[0].dtype, np.bool_)
def test_nullable(self):
with self.create_table('a Nullable(Bool)'):
data = [np.array([False, None, True, None, False])]
self.client.execute(
'INSERT INTO test (a) VALUES', data, columnar=True
)
query = 'SELECT * FROM test'
inserted = self.emit_cli(query)
self.assertEqual(inserted, 'false\n\\N\ntrue\n\\N\nfalse\n')
inserted = self.client.execute(query, columnar=True)
self.assertArraysEqual(inserted[0], data[0])
self.assertEqual(inserted[0].dtype, object)
```
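In isolation, the pattern these tests exercise looks like the following sketch: a columnar insert of a NumPy bool array through `clickhouse-driver`, assuming a reachable ClickHouse server (21.12+ for the `Bool` type) and an existing table named `test`:

```python
import numpy as np
from clickhouse_driver import Client

# use_numpy enables the NumPy-backed columnar interface the tests rely on.
client = Client("localhost", settings={"use_numpy": True})

data = [(np.arange(10) % 2).astype(bool)]  # one column of alternating booleans
client.execute("INSERT INTO test (a) VALUES", data, columnar=True)

columns = client.execute("SELECT * FROM test", columnar=True)
assert columns[0].dtype == np.bool_
```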
- blob_id: 18f8e65842177b58932ba049761bfc95e0b5655e
- directory_id: 92432bcbe06e86d66d37fe527f5711a32f2f995a
- path: /darknet/parser.py
- content_id: 65495d4d3f9290409df43a3371600b9ba5025843
- detected_licenses: ["MIT"]
- license_type: permissive
- repo_name: s045pd/DarkNet_ChineseTrading
- snapshot_id: de1e0ed83ec9d0a1555bb4f1b53b50ca402c58de
- revision_id: 2911be68ca25b54ad4e84c9e470166c45e0860e4
- branch_name: refs/heads/new_site_2019
- visit_date: 2023-08-18T06:25:11.347382
- revision_date: 2022-10-11T02:18:16
- committer_date: 2022-10-11T02:18:16
- github_id: 162,710,415
- star_events_count: 404
- fork_events_count: 83
- gha_license_id: MIT
- gha_event_created_at: 2021-08-25T00:45:24
- gha_created_at: 2018-12-21T12:14:26
- gha_language: Python
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 3,148
- extension: py
- filename: parser.py
- content:

```python
import platform
import re
from io import BytesIO
from imgcat import imgcat
from PIL import Image
import ddddocr
from darknet.common import convert_num, error_log
from darknet.log import *
ocr = ddddocr.DdddOcr()
class Parser:
@staticmethod
@error_log()
def predict_captcha(func, path="/entrance/code76.php"):
"""
验证码识别在这里
"""
raw = func(path).content
if platform.system().upper() == "DARWIN":
imgcat(raw)
else:
Image.open(BytesIO(raw))
code = "".join(
re.findall("[0-9a-zA-Z]", ocr.classification(BytesIO(raw).read()))
).upper()
warning(f"predict: {code}")
return code
@staticmethod
@error_log()
def parse_summary(resp):
try:
trs = resp.html.find("table.u_ea_a > tr")[3:-1][::2]
for tr in trs:
href = tr.pq("td:nth-child(6) a").attr["href"]
yield debug(
(
href,
{
"sid": href.split("=")[-1],
"uptime": tr.pq("td:nth-child(2)").text(),
"user": tr.pq("td:nth-child(3)").text(),
"title": tr.pq("td:nth-child(4)").text(),
"priceBTC": convert_num(
tr.pq("td:nth-child(5)").text(), float
),
},
)
)
except Exception as e:
raise e
return []
@staticmethod
@error_log()
def parse_details(resp, img_func, types):
T = resp.html.pq(".v_table_1")
try:
return (
{
"priceUSDT": convert_num(
T("tr:nth-child(3) > td:nth-child(4) > span").text(),
float,
),
"status": resp.html.search("<td>交易状态:</td><td>{}</td>")[0],
"sold": resp.html.search("<td>本单成交:</td><td>{}</td>")[0],
"area": types,
"link": resp.url,
"lasttime": T(
".v_table_1 > tr:nth-child(5) > td:nth-child(6)"
).text(),
"text": resp.html.pq(".div_masterbox > t").text(),
},
{
"img": [
img_func(img.get("src"))
for img in resp.html.pq(".attachbox > img")
]
},
)
except Exception as e:
# breakpoint()
raise e
@staticmethod
@error_log()
def parse_max_page(resp, just_update=True):
page = 1
try:
page = (
convert_num(resp.html.pq(".button_page")[-1].text.strip(), int)
if not just_update
else 1
)
except Exception as e:
pass
finally:
info(f"max {page=}")
return page
```
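The captcha step can be sketched on its own: `ddddocr` classifies the raw image bytes, and the parser keeps only alphanumeric characters, uppercased. The image file name below is hypothetical:

```python
import re
import ddddocr

ocr = ddddocr.DdddOcr()
with open("captcha.png", "rb") as fh:  # hypothetical saved captcha image
    raw = fh.read()

code = "".join(re.findall("[0-9a-zA-Z]", ocr.classification(raw))).upper()
print(f"predict: {code}")
```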
- blob_id: 3fa962e325ed9b4df80fe107383e5bc75aed890e
- directory_id: 8de1480d6511ac81c43ebb1fa50875adb1505c3b
- path: /awx/main/migrations/0162_alter_unifiedjob_dependent_jobs.py
- content_id: 94e91be43f4b30cc2cb8b7688b9e31c3ecefa713
- detected_licenses: ["Apache-2.0"]
- license_type: permissive
- repo_name: ansible/awx
- snapshot_id: bbbb0f3f43835a37fbb3eb3dcd7cfe98116fbbba
- revision_id: 5e105c2cbd3fe828160540b3043cf6f605ed26be
- branch_name: refs/heads/devel
- visit_date: 2023-08-31T11:45:01.446444
- revision_date: 2023-08-31T04:58:57
- committer_date: 2023-08-31T04:58:57
- github_id: 91,594,105
- star_events_count: 13,353
- fork_events_count: 4,186
- gha_license_id: NOASSERTION
- gha_event_created_at: 2023-09-14T20:20:07
- gha_created_at: 2017-05-17T15:50:14
- gha_language: Python
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 475
- extension: py
- filename: 0162_alter_unifiedjob_dependent_jobs.py
- content:

```python
# Generated by Django 3.2.13 on 2022-05-02 21:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0161_unifiedjob_host_status_counts'),
]
operations = [
migrations.AlterField(
model_name='unifiedjob',
name='dependent_jobs',
field=models.ManyToManyField(editable=False, related_name='unifiedjob_blocked_jobs', to='main.UnifiedJob'),
),
]
```
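For context, this is roughly the model-side declaration the `AlterField` above corresponds to, reconstructed from the migration itself rather than copied from the awx source:

```python
from django.db import models

class UnifiedJob(models.Model):
    # Jobs this job must wait on; reverse accessor: unifiedjob_blocked_jobs.
    dependent_jobs = models.ManyToManyField(
        "main.UnifiedJob",
        related_name="unifiedjob_blocked_jobs",
        editable=False,
    )

    class Meta:
        app_label = "main"
```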
- blob_id: de1b6fbc4e6cdf3bf4ce0aed17bcdf8b53107a38
- directory_id: f82728b6f868a2275e63848f936d2c22354dc024
- path: /Basic/Convert Decimal To Binary/SolutionByVaibhavTripathi.py
- content_id: 6b76278cd9e03fec621bf954e9491e4fff85c6db
- detected_licenses: ["MIT"]
- license_type: permissive
- repo_name: shoaibrayeen/Programmers-Community
- snapshot_id: 0b0fa4ba1427fa472126e640cda52dff3416b939
- revision_id: cd0855e604cfd54101aaa9118f01a3e3b4c48c1c
- branch_name: refs/heads/master
- visit_date: 2023-03-16T03:13:48.980900
- revision_date: 2023-03-12T00:25:37
- committer_date: 2023-03-12T00:25:37
- github_id: 209,997,598
- star_events_count: 285
- fork_events_count: 424
- gha_license_id: MIT
- gha_event_created_at: 2023-03-12T00:25:38
- gha_created_at: 2019-09-21T14:17:24
- gha_language: C++
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 187
- extension: py
- filename: SolutionByVaibhavTripathi.py
- content:

```python
num = int(input('Enter a number: '))
def decimalToBinary(n):
if(n > 1):
decimalToBinary(n//2)
print(n%2, end='')
decimalToBinary(num)
```
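The recursion prints the most significant bit first because it recurses before printing. Python's built-in `bin()` yields the same digit string:

```python
num = 13
print(bin(num)[2:])  # '1101' -- bin() prefixes '0b', so slice it off
```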
- blob_id: fa7d056776ac078612a33250c5a76798d79eeb6b
- directory_id: 67e76c827181fb3be7290ae13d58ef261e543924
- path: /_old/URLChangeV2/urlchangev2.py
- content_id: d1fca57194f69ecd6e2e12ea7d2a87d38d24dbab
- detected_licenses: ["BSD-3-Clause"]
- license_type: permissive
- repo_name: voussoir/reddit
- snapshot_id: d2a6dd53663cb89e02e35e5a75f5f4b2f5d83216
- revision_id: d928ca56a4ab54c6aff5250a228bf063c2a27818
- branch_name: refs/heads/master
- visit_date: 2023-08-23T17:09:45.100744
- revision_date: 2022-10-01T21:33:46
- committer_date: 2022-10-01T21:33:46
- github_id: 20,117,004
- star_events_count: 477
- fork_events_count: 205
- gha_license_id: null
- gha_event_created_at: 2017-04-15T22:58:01
- gha_created_at: 2014-05-23T23:19:46
- gha_language: HTML
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 5,402
- extension: py
- filename: urlchangev2.py
- content:

```python
#/u/GoldenSights
import praw # simple interface to the reddit API, also handles rate limiting of requests
import time
import sqlite3
'''USER CONFIGURATION'''
APP_ID = ""
APP_SECRET = ""
APP_URI = ""
APP_REFRESH = ""
# https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
SUBREDDIT = "GoldTesting"
#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..."
HEADER = "You have linked to a .gif file on GFYCat. Here are the webm links:\n\n"
#This will be at the top of the comment above the results
PARENTSTRING = ["http://giant.gfycat.com/", "http://fat.gfycat.com/", "http://zippy.gfycat.com/"]
#These are the words you are looking for
REPLACESTRING = "http://gfycat.com/"
#This is what parentstring gets replaced with.
MAXPOSTS = 100
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 10
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
IGNORESELF = False
#Can the bot account reply to itself?
'''All done!'''
PLEN = len(PARENTSTRING)
WAITS = str(WAIT)
try:
import bot
USERAGENT = bot.getaG()
except ImportError:
pass
sql = sqlite3.connect('sql.db')
print('Loaded SQL Database')
cur = sql.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT)')
cur.execute('CREATE INDEX IF NOT EXISTS oldpost_index ON oldposts(id)')
print('Loaded Completed table')
sql.commit()
r = praw.Reddit(USERAGENT)
r.set_oauth_app_info(APP_ID, APP_SECRET, APP_URI)
r.refresh_access_information(APP_REFRESH)
def scanPosts():
print('Searching '+ SUBREDDIT + ' submissions.')
subreddit = r.get_subreddit(SUBREDDIT)
posts = subreddit.get_new(limit=MAXPOSTS)
for post in posts:
result = []
pid = post.id
purl = post.url
if any (key.lower() in purl.lower() for key in PARENTSTRING):
cur.execute('SELECT * FROM oldposts WHERE ID=?', [pid])
if not cur.fetchone():
print(pid)
try:
pauthor = post.author.name
if pauthor.lower() != r.user.name.lower() or IGNORESELF is False:
for key in PARENTSTRING:
if key in purl:
result.append(purl.replace(key, REPLACESTRING)[:-4])
break
else:
print('Will not reply to self.')
except ValueError:
print('Not a valid url')
except AttributeError:
print('Comment author does not exist')
except Exception:
print('Error.')
if len(result) > 0:
final = HEADER + '\n\n'.join(result)
print('\tCreating comment')
post.add_comment(final)
cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
sql.commit()
def scanComs():
print('Searching '+ SUBREDDIT + ' comments.')
subreddit = r.get_subreddit(SUBREDDIT)
posts = subreddit.get_comments(limit=MAXPOSTS)
for post in posts:
result = []
pid = post.id
pbody = post.body
if any (key.lower() in pbody.lower() for key in PARENTSTRING):
cur.execute('SELECT * FROM oldposts WHERE ID=?', [pid])
if not cur.fetchone():
pbodysplit = pbody.split()
print(pid)
for sent in pbodysplit:
if any(key.lower() in sent.lower() for key in PARENTSTRING):
try:
pauthor = post.author.name
if pauthor.lower() != r.user.name.lower() or IGNORESELF is False:
for key in PARENTSTRING:
if key in sent:
url = sent.replace(key, REPLACESTRING)
url = url.replace('.gif', '')
if '(' in url:
url = url[url.index('(')+1:]
url = url.replace(')', '')
result.append(url)
break
else:
print('Will not reply to self')
except ValueError:
print('Not a valid url')
except AttributeError:
print('Comment author does not exist')
except Exception:
print('Error.')
if len(result) > 0:
final = HEADER + '\n\n'.join(result)
print('\tCreating comment')
post.reply(final)
cur.execute('INSERT INTO oldposts VALUES(?)', [pid])
sql.commit()
while True:
try:
scanPosts()
scanComs()
except Exception as e:
print('An error has occurred:', str(e))
print('Running again in ' + WAITS + ' seconds \n')
sql.commit()
time.sleep(WAIT)
```
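Both scanners share one core transformation: map a direct-gif gfycat host back to the page URL and drop the `.gif` suffix. Factored out as a sketch (the original inlines this, and `scanPosts` trims the last four characters instead of calling `replace`):

```python
PARENTSTRING = ["http://giant.gfycat.com/", "http://fat.gfycat.com/", "http://zippy.gfycat.com/"]
REPLACESTRING = "http://gfycat.com/"

def rewrite(url):
    """Return the gfycat page URL for a direct .gif link, or None."""
    for key in PARENTSTRING:
        if key in url:
            return url.replace(key, REPLACESTRING).replace(".gif", "")
    return None

assert rewrite("http://giant.gfycat.com/Example.gif") == "http://gfycat.com/Example"
```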
- blob_id: 14261f5a1d596f720c67f24f1401fb5c253db72d
- directory_id: e3b2af12e7ca91895e42401b2be32f9b731f226a
- path: /openff/toolkit/_tests/test_parameters.py
- content_id: 10ca8f4fbeb38d4f7a9f924d84d6cd58438f03d0
- detected_licenses: ["MIT"]
- license_type: permissive
- repo_name: openforcefield/openff-toolkit
- snapshot_id: c2f4b654f6b47ea01c52fad9ea88134c3d381fa3
- revision_id: 7fd1a2123b9895d1631d252aeb2708aedf5bfc4c
- branch_name: refs/heads/main
- visit_date: 2023-09-01T14:04:01.282978
- revision_date: 2023-08-30T19:34:52
- committer_date: 2023-08-30T19:34:52
- github_id: 86,884,600
- star_events_count: 129
- fork_events_count: 49
- gha_license_id: MIT
- gha_event_created_at: 2023-09-14T00:40:57
- gha_created_at: 2017-04-01T04:48:05
- gha_language: Python
- src_encoding: UTF-8
- language: Python
- is_vendor: false
- is_generated: false
- length_bytes: 102,308
- extension: py
- filename: test_parameters.py
- content:

```python
"""
Test classes and function in module openff.toolkit.typing.engines.smirnoff.parameters.
"""
from collections import defaultdict
from inspect import isabstract, isclass
from typing import Dict, List, Tuple
import numpy
import pytest
from numpy.testing import assert_almost_equal
from openff.units import unit
from packaging.version import Version
import openff.toolkit.typing.engines.smirnoff.parameters
from openff.toolkit._tests.mocking import VirtualSiteMocking
from openff.toolkit._tests.utils import does_not_raise
from openff.toolkit.topology import Molecule, Topology
from openff.toolkit.typing.engines.smirnoff import ForceField
from openff.toolkit.typing.engines.smirnoff.parameters import (
BondHandler,
ChargeIncrementModelHandler,
ElectrostaticsHandler,
GBSAHandler,
ImproperTorsionHandler,
IndexedParameterAttribute,
LibraryChargeHandler,
ParameterAttribute,
ParameterHandler,
ParameterList,
ParameterType,
ProperTorsionHandler,
VirtualSiteHandler,
_cal_mol_a2,
_linear_inter_or_extrapolate,
_ParameterAttributeHandler,
vdWHandler,
)
from openff.toolkit.utils.collections import ValidatedList
from openff.toolkit.utils.exceptions import (
DuplicateParameterError,
IncompatibleParameterError,
IncompatibleUnitError,
MissingIndexedAttributeError,
NotEnoughPointsForInterpolationError,
ParameterLookupError,
SMIRNOFFSpecError,
SMIRNOFFSpecUnimplementedError,
SMIRNOFFVersionError,
)
class TestParameterAttribute:
"""Test cases for the descriptor ParameterAttribute."""
def test_default_value(self):
"""Default values are assigned correctly on initialization."""
class MyParameter:
attr_optional = ParameterAttribute(default=2)
my_par = MyParameter()
assert my_par.attr_optional == 2
def test_none_default_value(self):
"""None is a valid default value for ParameterAttribute."""
class MyParameter:
attr_optional = ParameterAttribute(default=None)
my_par = MyParameter()
assert my_par.attr_optional is None
def test_required_value(self):
"""AttributeError is raised if a required attribute is accessed before being initialized."""
class MyParameter:
attr_required = ParameterAttribute()
my_par = MyParameter()
with pytest.raises(AttributeError):
my_par.attr_required
def test_unit_validation(self):
"""ParameterAttributes attached to a unit are validated correctly."""
class MyParameter:
attr_unit = ParameterAttribute(
unit=unit.kilocalorie / unit.mole / unit.angstrom**2
)
my_par = MyParameter()
# IncompatibleUnitError is raised when setting a unit-less value.
with pytest.raises(IncompatibleUnitError, match="should have units of"):
my_par.attr_unit = 3.0
# IncompatibleUnitError is raised when setting a value with incorrect units.
with pytest.raises(IncompatibleUnitError, match="should have units of"):
my_par.attr_unit = 3.0 * unit.kilocalorie / unit.mole
# Otherwise the attribute is assigned correctly.
value = 3.0 * unit.kilocalorie / unit.mole / unit.angstrom**2
my_par.attr_unit = value
assert my_par.attr_unit == value
assert my_par.attr_unit.units == value.units
def test_quantity_string_parsing(self):
"""ParameterAttributes attached to units convert strings into Quantity objects."""
class MyParameter:
attr_unit = ParameterAttribute(unit=unit.meter / unit.second**2)
my_par = MyParameter()
my_par.attr_unit = "3.0*meter/second**2"
assert my_par.attr_unit == 3.0 * unit.meter / unit.second**2
assert my_par.attr_unit.units == unit.meter / unit.second**2
# Assigning incorrect units still raises an error.
with pytest.raises(IncompatibleUnitError, match="should have units of"):
my_par.attr_unit = "3.0"
with pytest.raises(IncompatibleUnitError, match="should have units of"):
my_par.attr_unit = "3.0*meter/second"
def test_custom_converter(self):
"""Custom converters of ParameterAttributes are executed correctly."""
class MyParameter:
attr_all_to_float = ParameterAttribute(converter=float)
attr_int_to_float = ParameterAttribute()
@attr_int_to_float.converter
def attr_int_to_float(self, attr, value):
"""Convert only integers to float"""
if isinstance(value, int):
return float(value)
elif not isinstance(value, float):
raise TypeError()
return value
my_par = MyParameter()
# Both strings and integers are converted to floats when cast with float().
my_par.attr_all_to_float = "1.0"
assert (
isinstance(my_par.attr_all_to_float, float)
and my_par.attr_all_to_float == 1.0
)
my_par.attr_all_to_float = 2
assert (
isinstance(my_par.attr_all_to_float, float)
and my_par.attr_all_to_float == 2.0
)
# Only integers are converted with the custom converter function.
with pytest.raises(TypeError):
my_par.attr_int_to_float = "1.0"
my_par.attr_int_to_float = 2
assert (
isinstance(my_par.attr_int_to_float, float)
and my_par.attr_int_to_float == 2.0
)
def test_default_pass_validation(self):
"""The default value of ParameterAttribute is always allowed regardless of the validator/converter."""
class MyParameter:
attr = ParameterAttribute(
default=None, unit=unit.angstrom, converter=unit.Quantity
)
my_par = MyParameter()
my_par.attr = 3.0 * unit.nanometer
my_par.attr = None
assert my_par.attr is None
def test_get_descriptor_object(self):
"""When the descriptor is called from the class, the ParameterAttribute descriptor is returned."""
class MyParameter:
attr = ParameterAttribute()
assert isinstance(MyParameter.attr, ParameterAttribute)
class TestIndexedParameterAttribute:
"""Tests for the IndexedParameterAttribute descriptor."""
def test_tuple_conversion(self):
"""IndexedParameterAttribute converts internally sequences to ValidatedList."""
class MyParameter:
attr_indexed = IndexedParameterAttribute()
my_par = MyParameter()
my_par.attr_indexed = [1, 2, 3]
assert isinstance(my_par.attr_indexed, ValidatedList)
def test_indexed_default(self):
"""IndexedParameterAttribute handles default values correctly."""
class MyParameter:
attr_indexed_optional = IndexedParameterAttribute(default=None)
my_par = MyParameter()
assert my_par.attr_indexed_optional is None
# Assigning the default is allowed.
my_par.attr_indexed_optional = None
assert my_par.attr_indexed_optional is None
def test_units_on_all_elements(self):
"""IndexedParameterAttribute validates every single element of the sequence."""
class MyParameter:
attr_indexed_unit = IndexedParameterAttribute(unit=unit.gram)
my_par = MyParameter()
# Strings are correctly converted.
my_par.attr_indexed_unit = ["1.0*gram", 2 * unit.gram]
assert my_par.attr_indexed_unit == [1.0 * unit.gram, 2 * unit.gram]
# Incompatible units on a single elements are correctly caught.
with pytest.raises(IncompatibleUnitError, match="should have units of"):
my_par.attr_indexed_unit = [3.0, 2 * unit.gram]
with pytest.raises(IncompatibleUnitError, match="should have units of"):
my_par.attr_indexed_unit = [2 * unit.gram, 4.0 * unit.meter]
def test_converter_on_all_elements(self):
"""IndexedParameterAttribute calls custom converters on every single element of the sequence."""
class MyParameter:
attr_indexed_converter = IndexedParameterAttribute(converter=float)
my_par = MyParameter()
my_par.attr_indexed_converter = [1, "2.0", "1e-3", 4.0]
assert my_par.attr_indexed_converter == [1.0, 2.0, 1e-3, 4.0]
def test_validate_new_elements(self):
"""New elements set in the list are correctly validated."""
class MyParameter:
attr_indexed = IndexedParameterAttribute(converter=int)
my_par = MyParameter()
my_par.attr_indexed = (1, 2, 3)
# Modifying one or more elements of the list should validate them.
my_par.attr_indexed[2] = "4"
assert my_par.attr_indexed[2] == 4
my_par.attr_indexed[0:3] = ["2", "3", 4]
assert my_par.attr_indexed == [2, 3, 4]
# Same for append().
my_par.attr_indexed.append("5")
assert my_par.attr_indexed[3] == 5
# And extend.
my_par.attr_indexed.extend([6, "7"])
assert my_par.attr_indexed[5] == 7
my_par.attr_indexed += ["8", 9]
assert my_par.attr_indexed[6] == 8
# And insert.
my_par.attr_indexed.insert(5, "10")
assert my_par.attr_indexed[5] == 10
class TestInterpolation:
"""Test method(s) that are used for functionality like fractional bond order-dependent parameter interpolation"""
@pytest.mark.parametrize(
("fractional_bond_order", "k_interpolated"),
[(1.6, 1.48), (0.7, 0.76), (2.3, 2.04)],
)
def test_linear_inter_or_extrapolate(self, fractional_bond_order, k_interpolated):
"""Test that linear interpolation works as expected"""
k_bondorder = {
1: 1 * unit.kilocalorie / unit.mole,
2: 1.8 * unit.kilocalorie / unit.mole,
}
k = _linear_inter_or_extrapolate(k_bondorder, fractional_bond_order)
assert_almost_equal(k.m, k_interpolated)
def test_linear_inter_or_extrapolate_one_point(self):
"""Test that linear interpolation raises an error if attempted with just one point"""
k_bondorder = {
2: 1.8 * unit.kilocalorie / unit.mole,
}
with pytest.raises(NotEnoughPointsForInterpolationError):
_linear_inter_or_extrapolate(k_bondorder, 1)
@pytest.mark.parametrize(
("fractional_bond_order", "k_interpolated"),
[(1.6, 1.48), (0.7, 0.76), (2.3, 2.01), (3.1, 2.57)],
)
def test_linear_inter_or_extrapolate_3_terms(
self, fractional_bond_order, k_interpolated
):
"""Test that linear interpolation works as expected for three terms"""
k_bondorder = {
1: 1 * unit.kilocalorie / unit.mole,
2: 1.8 * unit.kilocalorie / unit.mole,
3: 2.5 * unit.kilocalorie / unit.mole,
}
k = _linear_inter_or_extrapolate(k_bondorder, fractional_bond_order)
assert_almost_equal(k.m, k_interpolated)
def test_linear_inter_or_extrapolate_below_zero(self):
"""Test that linear interpolation does not error if resulting k less than 0"""
k_bondorder = {
1: 1 * unit.kilocalorie / unit.mole,
2: 2.3 * unit.kilocalorie / unit.mole,
}
fractional_bond_order = 0.2
k = _linear_inter_or_extrapolate(k_bondorder, fractional_bond_order)
assert k.m < 0
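# Worked numbers behind the two-point parametrize cases in
# test_linear_inter_or_extrapolate above (annotation added for clarity, not
# part of the original test file): with k(1) = 1 and k(2) = 1.8 kcal/mol,
# linear inter-/extrapolation gives
#   k(1.6) = 1 + 0.6 * (1.8 - 1) = 1.48
#   k(0.7) = 1 + (0.7 - 1) * (1.8 - 1) = 0.76
#   k(2.3) = 1.8 + (2.3 - 2) * (1.8 - 1) = 2.04
# matching the expected (fractional_bond_order, k_interpolated) pairs.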
class TestParameterAttributeHandler:
"""Test suite for the base class _ParameterAttributeHandler."""
def test_access_get_set_single_indexed_attribute_legacy(self):
"""Single indexed attributes such as k1 can be accessed through normal attribute syntax."""
class MyParameterType(_ParameterAttributeHandler):
k = IndexedParameterAttribute()
my_parameter = MyParameterType(k=[1, 2, 3])
# Getting the attribute works.
assert my_parameter.k1 == 1
assert my_parameter.k2 == 2
assert my_parameter.k3 == 3
# So does setting it.
my_parameter.k2 = 5
assert my_parameter.k2 == 5
assert my_parameter.k == [1, 5, 3]
# Accessing k4 raises an index error.
with pytest.raises(
IndexError, match="'k4' is out of bounds for indexed attribute 'k'"
):
my_parameter.k4
with pytest.raises(
IndexError, match="'k4' is out of bounds for indexed attribute 'k'"
):
my_parameter.k4 = 2
# For other attributes, the behavior is normal.
with pytest.raises(AttributeError, match="has no attribute 'x'"):
my_parameter.x
# Monkey-patching.
my_parameter.x = 3
def test_access_get_set_single_indexed_attribute(self):
"""Single indexed attributes such as k1 can be accessed through normal attribute syntax."""
class MyParameterType(_ParameterAttributeHandler):
k = IndexedParameterAttribute()
my_parameter = MyParameterType(k=[1, 2, 3])
# Getting the attribute works.
assert my_parameter.k1 == 1
assert my_parameter.k2 == 2
assert my_parameter.k3 == 3
# So does setting it.
my_parameter.k2 = 5
assert my_parameter.k2 == 5
assert my_parameter.k == [1, 5, 3]
# Accessing k4 raises an index error.
with pytest.raises(
MissingIndexedAttributeError,
match="'k4' is out of bounds for indexed attribute 'k'",
):
my_parameter.k4
with pytest.raises(
MissingIndexedAttributeError,
match="'k4' is out of bounds for indexed attribute 'k'",
):
my_parameter.k4 = 2
# For other attributes, the behavior is normal.
with pytest.raises(AttributeError, match="has no attribute 'x'"):
my_parameter.x
# Monkey-patching.
my_parameter.x = 3
def test_hasattr(self):
"""Single indexed attributes such as k1 can be accessed through normal attribute syntax."""
class MyParameterType(_ParameterAttributeHandler):
k = IndexedParameterAttribute()
my_parameter = MyParameterType(k=[1, 2, 3])
assert hasattr(my_parameter, "k3")
assert not hasattr(my_parameter, "k4")
def test_mro_access_get_set_single_indexed_attribute(self):
"""Attribute access is forwarded correctly to the next MRO classes."""
class MixIn:
"""Utility class to keep track of whether __get/setattr__ are called."""
data = {}
def __getattr__(self, item):
self.getattr_flag = True
try:
return self.data[item]
except KeyError:
raise AttributeError()
def __setattr__(self, key, value):
self.data[key] = value
super().__setattr__("setattr_flag", True)
def assert_getattr(self):
assert self.getattr_flag is True
self.getattr_flag = False
def assert_setattr(self):
assert self.setattr_flag is True
super().__setattr__("setattr_flag", False)
class MyParameterType(_ParameterAttributeHandler, MixIn):
k = IndexedParameterAttribute()
my_parameter = MyParameterType(k=[1, 2, 3])
# Non-existing parameters.
my_parameter.a = 2
my_parameter.assert_setattr()
my_parameter.a1 = 4
my_parameter.assert_setattr()
my_parameter.a
my_parameter.assert_getattr()
my_parameter.a1
my_parameter.assert_getattr()
class TestParameterHandler:
length = 1 * unit.angstrom
k = 10 * unit.kilocalorie / unit.mole / unit.angstrom**2
def test_tagname(self):
"""Test the TAGNAME getter and default behavior"""
ph = ParameterHandler(skip_version_check=True)
assert ph.TAGNAME is None
bh = BondHandler(skip_version_check=True)
assert bh.TAGNAME == "Bonds"
def test_add_parameter(self):
"""Test the behavior of add_parameter"""
bh = BondHandler(skip_version_check=True)
param1 = {
"smirks": "[*:1]-[*:2]",
"length": self.length,
"k": self.k,
"id": "b1",
}
param2 = {
"smirks": "[*:1]=[*:2]",
"length": self.length,
"k": self.k,
"id": "b2",
}
param3 = {
"smirks": "[*:1]#[*:2]",
"length": self.length,
"k": self.k,
"id": "b3",
}
bh.add_parameter(param1)
bh.add_parameter(param2)
bh.add_parameter(param3)
assert [p.id for p in bh._parameters] == ["b1", "b2", "b3"]
param_duplicate_smirks = {
"smirks": param2["smirks"],
"length": 2 * self.length,
"k": 2 * self.k,
}
# Ensure a duplicate parameter cannot be added
with pytest.raises(DuplicateParameterError):
bh.add_parameter(param_duplicate_smirks)
dict_to_add_by_smirks = {
"smirks": "[#1:1]-[#6:2]",
"length": self.length,
"k": self.k,
"id": "d1",
}
dict_to_add_by_index = {
"smirks": "[#1:1]-[#8:2]",
"length": self.length,
"k": self.k,
"id": "d2",
}
param_to_add_by_smirks = BondHandler.BondType(
**{
"smirks": "[#6:1]-[#6:2]",
"length": self.length,
"k": self.k,
"id": "p1",
}
)
param_to_add_by_index = BondHandler.BondType(
**{
"smirks": "[#6:1]=[#8:2]",
"length": self.length,
"k": self.k,
"id": "p2",
}
)
param_several_apart = {
"smirks": "[#1:1]-[#7:2]",
"length": self.length,
"k": self.k,
"id": "s0",
}
# The `before` parameter should come after the `after` parameter
# in the parameter list; i.e. in this list of ['-', '=', '#'], it is
# impossible to add a new parameter after '=' *and* before '-'
with pytest.raises(ValueError):
# Test invalid parameter order by SMIRKS
bh.add_parameter(
dict_to_add_by_smirks, after="[*:1]=[*:2]", before="[*:1]-[*:2]"
)
with pytest.raises(ValueError):
# Test invalid parameter order by index
bh.add_parameter(dict_to_add_by_index, after=1, before=0)
# Add d1 before param b2
bh.add_parameter(dict_to_add_by_smirks, before="[*:1]=[*:2]")
assert [p.id for p in bh._parameters] == ["b1", "d1", "b2", "b3"]
# Add d2 after index 2 (which is also param b2)
bh.add_parameter(dict_to_add_by_index, after=2)
assert [p.id for p in bh._parameters] == ["b1", "d1", "b2", "d2", "b3"]
# Add p1 before param b3
bh.add_parameter(parameter=param_to_add_by_smirks, before="[*:1]=[*:2]")
assert [p.id for p in bh._parameters] == ["b1", "d1", "p1", "b2", "d2", "b3"]
# Add p2 after index 2 (which is param p1)
bh.add_parameter(parameter=param_to_add_by_index, after=2)
assert [p.id for p in bh._parameters] == [
"b1",
"d1",
"p1",
"p2",
"b2",
"d2",
"b3",
]
# Add s0 between params that are several positions apart
bh.add_parameter(param_several_apart, after=1, before=6)
assert [p.id for p in bh._parameters] == [
"b1",
"d1",
"s0",
"p1",
"p2",
"b2",
"d2",
"b3",
]
def test_different_units_to_dict(self):
"""Test ParameterHandler.to_dict() function when some parameters are in
different units (proper behavior is to convert all quantities to the last-
read unit)
"""
bh = BondHandler(skip_version_check=True)
bh.add_parameter(
{
"smirks": "[*:1]-[*:2]",
"length": 1 * unit.angstrom,
"k": 10 * unit.kilocalorie / unit.mole / unit.angstrom**2,
}
)
bh.add_parameter(
{
"smirks": "[*:1]=[*:2]",
"length": 0.2 * unit.nanometer,
"k": 0.4 * unit.kilojoule / unit.mole / unit.nanometer**2,
}
)
bh_dict = bh.to_dict()
assert bh_dict["Bond"][0]["length"] == unit.Quantity(
value=1, units=unit.angstrom
)
assert bh_dict["Bond"][1]["length"] == unit.Quantity(
value=2, units=unit.angstrom
)
def test_to_dict_maintain_units(self):
"""Test ParameterHandler.to_dict() function when parameters were provided in different units"""
bh = BondHandler(skip_version_check=True)
bh.add_parameter(
{
"smirks": "[*:1]-[*:2]",
"length": 1 * unit.angstrom,
"k": 10 * unit.kilocalorie / unit.mole / unit.angstrom**2,
}
)
bh.add_parameter(
{
"smirks": "[*:1]=[*:2]",
"length": 0.2 * unit.nanometer,
"k": 0.4 * unit.kilojoule / unit.mole / unit.nanometer**2,
}
)
bh_dict = bh.to_dict()
assert bh_dict["Bond"][0]["length"] == unit.Quantity(1.0, unit.angstrom)
assert bh_dict["Bond"][0]["length"].units == unit.angstrom
assert bh_dict["Bond"][1]["length"] == unit.Quantity(0.2, unit.nanometer)
assert bh_dict["Bond"][1]["length"].units == unit.nanometer
def test_missing_section_version(self):
"""Test that exceptions are raised if invalid or improper section versions are provided during intialization"""
# Generate a SMIRNOFFSpecError by not providing a section version
with pytest.raises(
SMIRNOFFSpecError, match="Missing version while trying to construct"
):
ParameterHandler()
# Successfully create ParameterHandler by skipping version check
ParameterHandler(skip_version_check=True)
# Successfully create ParameterHandler by providing max supported version
ParameterHandler(version=ParameterHandler._MAX_SUPPORTED_SECTION_VERSION)
# Successfully create ParameterHandler by providing min supported version
ParameterHandler(version=ParameterHandler._MIN_SUPPORTED_SECTION_VERSION)
# Generate a SMIRNOFFSpecError by providing a value higher than the max supported
with pytest.raises(
SMIRNOFFVersionError,
match="SMIRNOFF offxml file was written with version 1000.0, "
"but this version of ForceField only supports version",
):
ParameterHandler(version="1000.0")
# Generate a SMIRNOFFSpecError by providing a value lower than the min supported
with pytest.raises(
SMIRNOFFVersionError,
match="SMIRNOFF offxml file was written with version 0.1, "
"but this version of ForceField only supports version",
):
ParameterHandler(version="0.1")
def test_supported_version_range(self):
"""
Ensure that version values in various formats can be correctly parsed and validated
"""
class MyPHSubclass(ParameterHandler):
_MIN_SUPPORTED_SECTION_VERSION = Version("0.3")
_MAX_SUPPORTED_SECTION_VERSION = Version("2")
with pytest.raises(SMIRNOFFVersionError):
MyPHSubclass(version=0.1)
with pytest.raises(Exception, match="Could not convert .*list"):
MyPHSubclass(version=[0])
MyPHSubclass(version=0.3)
MyPHSubclass(version=1)
MyPHSubclass(version="1.9")
MyPHSubclass(version=2.0)
with pytest.raises(SMIRNOFFVersionError):
MyPHSubclass(version=2.1)
def test_write_same_version_as_was_set(self):
"""Ensure that a ParameterHandler remembers the version that was set when it was initialized."""
class MyPHSubclass(ParameterHandler):
_MIN_SUPPORTED_SECTION_VERSION = Version("0.3")
_MAX_SUPPORTED_SECTION_VERSION = Version("2")
my_ph = MyPHSubclass(version=1.234)
assert my_ph.to_dict()["version"] == str(Version("1.234"))
def test_add_delete_cosmetic_attributes(self):
"""Test ParameterHandler.to_dict() function when some parameters are in
different units (proper behavior is to convert all quantities to the last-
read unit)
"""
bh = BondHandler(skip_version_check=True)
bh.add_parameter(
{
"smirks": "[*:1]-[*:2]",
"length": 1 * unit.angstrom,
"k": 10 * unit.kilocalorie / unit.mole / unit.angstrom**2,
}
)
bh.add_parameter(
{
"smirks": "[*:1]=[*:2]",
"length": 0.2 * unit.nanometer,
"k": 0.4 * unit.kilojoule / unit.mole / unit.nanometer**2,
}
)
assert not (bh.attribute_is_cosmetic("pilot"))
# Ensure the cosmetic attribute is present by default during output
bh.add_cosmetic_attribute("pilot", "alice")
param_dict = bh.to_dict()
assert ("pilot", "alice") in param_dict.items()
assert bh.attribute_is_cosmetic("pilot")
# Ensure the cosmetic attribute isn't present if we request that it be discarded
param_dict = bh.to_dict(discard_cosmetic_attributes=True)
assert "pilot" not in param_dict
# Manually delete the cosmetic attribute and ensure it doesn't get written out
bh.delete_cosmetic_attribute("pilot")
param_dict = bh.to_dict()
assert "pilot" not in param_dict
assert not (bh.attribute_is_cosmetic("pilot"))
def test_get_parameter(self):
"""Test that ParameterHandler.get_parameter can lookup function"""
bh = BondHandler(skip_version_check=True, allow_cosmetic_attributes=True)
bh.add_parameter(
{
"smirks": "[*:1]-[*:2]",
"length": 1 * unit.angstrom,
"k": 10 * unit.kilocalorie / unit.mole / unit.angstrom**2,
"id": "b0",
}
)
bh.parameters[0].add_cosmetic_attribute("foo", "bar")
# Check base behavior
params = bh.get_parameter({"smirks": "[*:1]-[*:2]"})
assert params[0].length == unit.Quantity(1.0, unit.angstrom)
assert params[0].k == unit.Quantity(
10.0, unit.kilocalorie / unit.mole / unit.angstrom**2
)
# Ensure a query with no matches returns an empty list
assert not bh.get_parameter({"smirks": "xyz"})
# Ensure searching for a nonexistent attr does not raise an exception
assert not bh.get_parameter({"bAdAttR": "0"})
# Check for optional and cosmetic attrs
optional_params = bh.get_parameter({"id": "b0"})
cosmetic_params = bh.get_parameter({"foo": "bar"})
assert optional_params[0].id == "b0"
assert cosmetic_params[0]._foo == "bar"
# Ensure selection behaves a "OR" not "AND"
bh.add_parameter(
{
"smirks": "[#1:1]-[#6:2]",
"length": 1 * unit.angstrom,
"k": 10 * unit.kilocalorie / unit.mole / unit.angstrom**2,
"id": "b1",
}
)
params = bh.get_parameter({"id": "b0", "smirks": "[#1:1]-[#6:2]"})
assert "b0" in [param.id for param in params]
assert "[*:1]-[*:2]" in [param.smirks for param in params]
# Ensure selection does not return duplicates if multiple matches
params = bh.get_parameter({"id": "b1", "smirks": "[#1:1]-[#6:2]"})
assert len(params) == 1
def test_create_force(self):
class MyParameterHandler(ParameterHandler):
pass
handler = MyParameterHandler(version=0.3)
with pytest.raises(
NotImplementedError, match="no longer create OpenMM forces."
):
handler.create_force()
class TestParameterList:
"""Test capabilities of ParameterList for accessing and manipulating SMIRNOFF parameter definitions."""
def test_create(self):
"""Test creation of a parameter list."""
p1 = ParameterType(smirks="[*:1]")
p2 = ParameterType(smirks="[#1:1]")
ParameterList([p1, p2])
def test_index(self):
"""
Tests the ParameterList.index() function by attempting lookups by SMIRKS and by ParameterType equivalence.
"""
p1 = ParameterType(smirks="[*:1]")
p2 = ParameterType(smirks="[#1:1]")
p3 = ParameterType(smirks="[#7:1]")
parameters = ParameterList([p1, p2, p3])
assert parameters.index(p1) == 0
assert parameters.index(p2) == 1
assert parameters.index(p3) == 2
assert parameters.index("[*:1]") == 0
assert parameters.index("[#1:1]") == 1
assert parameters.index("[#7:1]") == 2
with pytest.raises(
ParameterLookupError, match=r"SMIRKS \[#2:1\] not found in ParameterList"
):
parameters.index("[#2:1]")
p4 = ParameterType(smirks="[#2:1]")
with pytest.raises(ValueError, match="is not in list"):
parameters.index(p4)
def test_contains(self):
"""Test ParameterList __contains__ overloading."""
p1 = ParameterType(smirks="[*:1]")
p2 = ParameterType(smirks="[#1:1]")
p3 = ParameterType(smirks="[#7:1]")
parameters = ParameterList([p1, p2])
assert p1 in parameters
assert p2 in parameters
assert p3 not in parameters
assert p1.smirks in parameters
assert p2.smirks in parameters
assert p3.smirks not in parameters
def test_del(self):
"""
Test ParameterList __del__ overloading.
"""
p1 = ParameterType(smirks="[*:1]")
p2 = ParameterType(smirks="[#1:1]")
p3 = ParameterType(smirks="[#7:1]")
parameters = ParameterList([p1, p2, p3])
with pytest.raises(IndexError, match="list assignment index out of range"):
del parameters[4]
with pytest.raises(
ParameterLookupError,
match=r"SMIRKS \[#6:1\] not found in ParameterList",
):
del parameters["[#6:1]"]
# Test that original list deletion behavior is preserved.
del parameters[2]
assert len(parameters) == 2
assert p1 in parameters
assert p2 in parameters
assert p3 not in parameters
# Test that we can delete elements by their smirks.
del parameters["[#1:1]"]
assert len(parameters) == 1
assert p1 in parameters
assert p2 not in parameters
def test_append(self):
"""
Test ParameterList.append, ensuring that the new parameter was added to the bottom of the list
and that it is properly recorded as the most recently-added.
"""
p1 = ParameterType(smirks="[*:1]-[*:2]")
p2 = ParameterType(smirks="[*:1]=[*:2]")
param_list = ParameterList()
param_list.append(p1)
assert len(param_list) == 1
assert "[*:1]-[*:2]" in param_list
param_list.append(p2)
assert len(param_list) == 2
assert "[*:1]=[*:2]" in param_list
assert param_list[-1] == p2
def test_insert(self):
"""
Test ParameterList.insert, ensuring that the new parameter was added to the proper spot in
the list and that it is properly recorded as the most recently added.
"""
p1 = ParameterType(smirks="[*:1]-[*:2]")
p2 = ParameterType(smirks="[*:1]=[*:2]")
p3 = ParameterType(smirks="[*:1]#[*:2]")
param_list = ParameterList([p1, p2])
param_list.insert(1, p3)
assert param_list[1] == p3
def test_extend(self):
"""
Test ParameterList.extend, ensuring that the new parameter was added to the proper spot in
the list and that it is properly recorded as the most recently added.
"""
p1 = ParameterType(smirks="[*:1]-[*:2]")
p2 = ParameterType(smirks="[*:1]=[*:2]")
param_list1 = ParameterList()
param_list2 = ParameterList([p1, p2])
param_list1.extend(param_list2)
assert len(param_list1) == 2
assert "[*:1]-[*:2]" in param_list1
assert "[*:1]=[*:2]" in param_list1
assert param_list1[-1] == p2
def test_to_list(self):
"""Test basic ParameterList.to_list() function, ensuring units are preserved"""
p1 = BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=1.01 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
p2 = BondHandler.BondType(
smirks="[*:1]=[*:2]",
length=1.02 * unit.angstrom,
k=6 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
p3 = BondHandler.BondType(
smirks="[*:1]#[*:3]",
length=1.03 * unit.angstrom,
k=7 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
parameter_list = ParameterList([p1, p2, p3])
ser_param_list = parameter_list.to_list()
assert len(ser_param_list) == 3
assert ser_param_list[0]["length"] == 1.01 * unit.angstrom
def test_round_trip(self):
"""Test basic ParameterList.to_list() function and constructor"""
p1 = BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=1.01 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
p2 = BondHandler.BondType(
smirks="[*:1]=[*:2]",
length=1.02 * unit.angstrom,
k=6 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
p3 = BondHandler.BondType(
smirks="[*:1]#[*:3]",
length=1.03 * unit.angstrom,
k=7 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
parameter_list = ParameterList([p1, p2, p3])
param_dict_list = parameter_list.to_list()
parameter_list_2 = ParameterList()
for param_dict in param_dict_list:
new_parameter = BondHandler.BondType(**param_dict)
parameter_list_2.append(new_parameter)
assert parameter_list.to_list() == parameter_list_2.to_list()
class TestParameterType:
def test_find_all_parameter_attrs(self):
"""ParameterType find all ParameterAttributes in the declared order."""
class MyParameter(ParameterType):
attr = ParameterAttribute()
indexed = IndexedParameterAttribute()
parameter_attributes = MyParameter._get_parameter_attributes()
# The function should also find the parent's attributes, in the correct order.
expected_attributes = ["smirks", "id", "parent_id", "attr", "indexed"]
assert list(parameter_attributes.keys()) == expected_attributes
# The keys map to the descriptor instances.
assert type(parameter_attributes["attr"]) is ParameterAttribute
assert type(parameter_attributes["indexed"]) is IndexedParameterAttribute
def test_find_all_indexed_parameter_attrs(self):
"""ParameterType find all IndexedParameterAttributes."""
class MyParameter(ParameterType):
attr = ParameterAttribute()
indexed = IndexedParameterAttribute()
attr2 = ParameterAttribute()
indexed2 = IndexedParameterAttribute(default=None)
expected_names = ["indexed", "indexed2"]
parameter_attributes = MyParameter._get_indexed_parameter_attributes()
assert list(parameter_attributes.keys()) == expected_names
assert all(
isinstance(parameter_attributes[name], IndexedParameterAttribute)
for name in expected_names
)
def test_find_all_required_and_optional_parameter_attrs(self):
"""ParameterType distinguish between required and optional ParameterAttributes."""
class MyParameter(ParameterType):
required = ParameterAttribute()
optional = ParameterAttribute(default=1)
required_indexed = IndexedParameterAttribute()
optional_indexed2 = IndexedParameterAttribute(default=None)
expected_names = ["smirks", "required", "required_indexed"]
parameter_attributes = MyParameter._get_required_parameter_attributes()
assert list(parameter_attributes.keys()) == expected_names
expected_names = ["id", "parent_id", "optional", "optional_indexed2"]
parameter_attributes = MyParameter._get_optional_parameter_attributes()
assert list(parameter_attributes.keys()) == expected_names
def test_required_attribute_on_init(self):
"""ParameterType raises TypeError if a required attribute is not specified on construction."""
class MyParameter(ParameterType):
required = ParameterAttribute()
optional = ParameterAttribute(default=None)
with pytest.raises(
SMIRNOFFSpecError, match="require the following missing parameters"
):
MyParameter(smirks="[*:1]", optional=1)
def test_add_delete_cosmetic_attributes(self):
"""
Test ParameterType.add_cosmetic_attribute, delete_cosmetic_attribute,
attribute_is_cosmetic, and to_dict() functions for proper behavior
"""
class MyParameter(ParameterType):
required = ParameterAttribute()
my_par = MyParameter(smirks="[*:1]", required="aaa")
assert not (my_par.attribute_is_cosmetic("pilot"))
# Ensure the cosmetic attribute is present by default during output
my_par.add_cosmetic_attribute("pilot", "alice")
param_dict = my_par.to_dict()
assert ("pilot", "alice") in param_dict.items()
assert my_par.attribute_is_cosmetic("pilot")
# Ensure the cosmetic attribute isn't present if we request that it be discarded
param_dict = my_par.to_dict(discard_cosmetic_attributes=True)
assert "pilot" not in param_dict
# Manually delete the cosmetic attribute and ensure it doesn't get written out
my_par.delete_cosmetic_attribute("pilot")
param_dict = my_par.to_dict()
assert "pilot" not in param_dict
assert not (my_par.attribute_is_cosmetic("pilot"))
def test_indexed_attrs(self):
"""ParameterType handles indexed attributes correctly."""
class MyParameter(ParameterType):
a = IndexedParameterAttribute()
b = IndexedParameterAttribute()
my_par = MyParameter(smirks="[*:1]", a1=1, a3=3, a2=2, b1=4, b2=5, b3=6)
assert my_par.a == [1, 2, 3]
assert my_par.b == [4, 5, 6]
def test_sequence_init_indexed_attr(self):
"""ParameterType handle indexed attributes initialized with sequences correctly."""
class MyParameter(ParameterType):
a = IndexedParameterAttribute()
my_par = MyParameter(smirks="[*:1]", a=(1, 2))
assert my_par.a == [1, 2]
def test_same_length_indexed_attrs(self):
"""ParameterType raises TypeError if indexed attributes of different lengths are given."""
class MyParameter(ParameterType):
a = IndexedParameterAttribute()
b = IndexedParameterAttribute()
with pytest.raises(
TypeError, match="indexed attributes have different lengths"
):
MyParameter(smirks="[*:1]", a1=1, a2=2, a3=3, b1=1, b2=2)
def test_error_single_value_plus_index(self):
"""ParameterType raises an error if an indexed attribute is specified with and without index."""
class MyParameter(ParameterType):
a = IndexedParameterAttribute()
with pytest.raises(
TypeError, match="'a' has been specified with and without index"
):
MyParameter(smirks="[*:1]", a=[1], a1=2)
def test_find_all_defined_parameter_attrs(self):
"""ParameterType._get_defined_attributes() discards None default-value attributes."""
class MyParameter(ParameterType):
required1 = ParameterAttribute()
optional1 = ParameterAttribute(default=None)
optional2 = IndexedParameterAttribute(default=None)
optional3 = ParameterAttribute(default=5)
required2 = IndexedParameterAttribute()
optional4 = ParameterAttribute(default=2)
my_par = MyParameter(smirks="[*:1]", required1=0, optional1=10, required2=[0])
# _get_defined_parameter_attributes discards only the attributes
# that are set to None as a default value.
expected_names = [
"smirks",
"required1",
"required2",
"optional1",
"optional3",
"optional4",
]
parameter_attributes = my_par._get_defined_parameter_attributes()
assert list(parameter_attributes.keys()) == expected_names
def test_base_parametertype_to_dict(self):
"""
Test ParameterType to_dict.
"""
p1 = ParameterType(smirks="[*:1]")
param_dict = p1.to_dict()
assert param_dict["smirks"] == "[*:1]"
assert len(param_dict.keys()) == 1
def test_repr(self):
class NamedType(ParameterType):
pass
assert NamedType(smirks="[*:1]").__repr__().startswith("<NamedType")
class TestBondType:
"""Tests for the BondType class."""
def test_bondtype_to_dict(self):
"""
Test BondType to_dict.
"""
p1 = BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=1.02 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
param_dict = p1.to_dict()
assert param_dict == {
"smirks": "[*:1]-[*:2]",
"length": unit.Quantity(1.02, unit.angstrom),
"k": unit.Quantity(5, unit.kilocalorie_per_mole / unit.angstrom**2),
}
def test_bondtype_partial_bondorders(self):
"""
Test the parsing of a BondType with k_bondorder1/2/3 definitions
"""
length = 1.4 * unit.angstrom
k1 = 101 * unit.kilocalorie / unit.mole / unit.angstrom**2
k2 = 202 * unit.kilocalorie / unit.mole / unit.angstrom**2
k3 = 303 * unit.kilocalorie / unit.mole / unit.angstrom**2
param = BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=length,
k_bondorder1=k1,
k_bondorder2=k2,
k_bondorder3=k3,
)
assert param.k_bondorder == {1: k1, 2: k2, 3: k3}
def test_bondtype_bad_params(self):
"""
Test the over/underspecification of k/k_bondorderN are caught
"""
length = 1.4 * unit.angstrom
length1 = 1.5 * unit.angstrom
length2 = 1.3 * unit.angstrom
k = 50 * unit.kilocalorie / unit.mole / unit.angstrom**2
k1 = 101 * unit.kilocalorie / unit.mole / unit.angstrom**2
k2 = 202 * unit.kilocalorie / unit.mole / unit.angstrom**2
with pytest.raises(SMIRNOFFSpecError, match="Either k or k_bondorder"):
BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=length,
)
with pytest.raises(SMIRNOFFSpecError, match="BOTH k and k_bondorder"):
BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=length,
k=k,
k_bondorder1=k1,
k_bondorder2=k2,
)
with pytest.raises(
SMIRNOFFSpecError, match="Either length or length_bondorder"
):
BondHandler.BondType(
smirks="[*:1]-[*:2]",
k=k,
)
with pytest.raises(SMIRNOFFSpecError, match="BOTH length and length_bondorder"):
BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=length,
k=k,
length_bondorder1=length1,
length_bondorder2=length2,
)
def test_bondtype_to_dict_custom_output_units(self):
"""
Test BondType to_dict with custom output units.
"""
p1 = BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=1.02 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
param_dict = p1.to_dict()
assert abs(param_dict["length"].m_as(unit.nanometer) - 0.102) < 1e-10
def test_read_write_optional_parameter_attribute(self):
"""
Test ParameterTypes' ability to store and write out optional attributes passed to __init__()
"""
p1 = BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=1.02 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
id="b1",
)
param_dict = p1.to_dict()
assert ("id", "b1") in param_dict.items()
def test_read_write_cosmetic_parameter_attribute(self):
"""
Test ParameterTypes' ability to store and write out cosmetic attributes passed to __init__()
"""
p1 = BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=1.02 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
pilot="alice",
allow_cosmetic_attributes=True,
)
param_dict = p1.to_dict(discard_cosmetic_attributes=False)
assert ("pilot", "alice") in param_dict.items()
def test_read_but_dont_write_cosmetic_parameter_attribute(self):
"""
Test ParameterTypes' ability to ignore cosmetic attributes passed to __init__() if instructed
"""
p1 = BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=1.02 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
pilot="alice",
allow_cosmetic_attributes=True,
)
param_dict = p1.to_dict(discard_cosmetic_attributes=True)
assert ("pilot", "alice") not in param_dict.items()
def test_error_cosmetic_parameter_attribute(self):
"""
Test that ParameterTypes raise an error on receiving unexpected attributes passed to __init__()
"""
with pytest.raises(SMIRNOFFSpecError, match="Unexpected kwarg (pilot: alice)*"):
BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=1.02 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
pilot="alice",
allow_cosmetic_attributes=False,
)
def test_add_delete_cosmetic_attrib(self):
"""
Test adding and deleting cosmetic attributes for already-initialized ParameterType objects
"""
p1 = BondHandler.BondType(
smirks="[*:1]-[*:2]",
length=1.02 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
# Ensure the cosmetic attribute is present by default during output
p1.add_cosmetic_attribute("pilot", "alice")
param_dict = p1.to_dict()
assert ("pilot", "alice") in param_dict.items()
# Ensure the cosmetic attribute isn't present if we request that it be discarded
param_dict = p1.to_dict(discard_cosmetic_attributes=True)
assert ("pilot", "alice") not in param_dict.items()
# Manually delete the cosmetic attribute and ensure it doesn't get written out
p1.delete_cosmetic_attribute("pilot")
param_dict = p1.to_dict()
assert ("pilot", "alice") not in param_dict.items()
class TestBondHandler:
@pytest.mark.parametrize(
("fractional_bond_order", "k_interpolated", "length_interpolated"),
[
(1.0, 101, 1.4),
(1.5, 112.0, 1.35),
(1.99, 122.78, 1.301),
(2.1, 125.2, 1.29),
],
)
def test_linear_interpolate(
self, fractional_bond_order, k_interpolated, length_interpolated
):
"""Test that linear interpolation works as expected"""
k_bondorder = {
1: 101 * unit.kilocalorie / unit.mole / unit.angstrom**2,
2: 123 * unit.kilocalorie / unit.mole / unit.angstrom**2,
}
length_bondorder = {
1: 1.4 * unit.angstrom,
2: 1.3 * unit.angstrom,
}
k = _linear_inter_or_extrapolate(k_bondorder, fractional_bond_order)
length = _linear_inter_or_extrapolate(length_bondorder, fractional_bond_order)
assert_almost_equal(k.m, k_interpolated, 1)
assert_almost_equal(length.m, length_interpolated, 2)
def test_different_defaults_03_04(self):
"""Ensure that the 0.3 and 0.4 versions' defaults are correctly set"""
bh = BondHandler(version=0.3)
assert bh.fractional_bondorder_method == "none"
assert bh.potential == "harmonic"
bh2 = BondHandler(version=0.4)
assert bh2.fractional_bondorder_method == "AM1-Wiberg"
assert bh2.potential == "(k/2)*(r-length)^2"
bh3 = BondHandler(version=0.3, fractional_bondorder_method="AM1-Wiberg")
assert bh3.fractional_bondorder_method == "AM1-Wiberg"
assert bh3.fractional_bondorder_interpolation == "linear"
assert bh3.potential == "harmonic"
def test_harmonic_potentials_are_compatible(self):
"""
        Ensure that handlers with `potential="harmonic"` evaluate as compatible with handlers with
        `potential="(k/2)*(r-length)^2"`
"""
bh1 = BondHandler(skip_version_check=True)
bh2 = BondHandler(skip_version_check=True)
bh1.potential = "harmonic"
bh2.potential = "(k/2)*(r-length)^2"
# This comparison should pass, since the potentials defined above are compatible
bh1.check_handler_compatibility(bh2)
def test_am1_wiberg_combine_error(self):
"""Reproduce issue #719 and a suggested fix."""
v3 = BondHandler(version=0.3)
v4 = BondHandler(version=0.4)
v3.add_parameter(
parameter=BondHandler.BondType(
smirks="[#6:1]-[*:2]",
length=1.0 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
)
v4.add_parameter(
parameter=BondHandler.BondType(
smirks="[#8:1]-[*:2]",
length=2.0 * unit.angstrom,
k=5 * unit.kilocalorie / unit.mole / unit.angstrom**2,
)
)
ff_v3 = ForceField()
ff_v4 = ForceField()
ff_v3.register_parameter_handler(v3)
ff_v4.register_parameter_handler(v4)
with pytest.raises(
IncompatibleParameterError,
match="This likely results from mixing bond handlers",
):
ff_v3._load_smirnoff_data(ff_v4._to_smirnoff_data())
# Simulate a user following the recommendation to switch a
# version 0.3 BondHandler to use 'AM1-Wiberg'
ff_v3["Bonds"].fractional_bondorder_method = "AM1-Wiberg"
ff_v3._load_smirnoff_data(ff_v4._to_smirnoff_data())
class TestProperTorsionType:
"""Tests for the ProperTorsionType class."""
def test_single_term_proper_torsion(self):
"""
Test creation and serialization of a single-term proper torsion
"""
p1 = ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1=5 * unit.kilocalorie / unit.mole,
)
param_dict = p1.to_dict()
assert ("k1", 5 * unit.kilocalorie / unit.mole) in param_dict.items()
assert ("phase1", 30 * unit.degree) in param_dict.items()
assert ("periodicity1", 2) in param_dict.items()
assert "idivf" not in param_dict
def test_single_term_proper_torsion_w_idivf(self):
"""
        Test creation and serialization of a single-term proper torsion with an explicit idivf1
"""
p1 = ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1=5 * unit.kilocalorie / unit.mole,
idivf1=4,
)
param_dict = p1.to_dict()
assert ("k1", 5 * unit.kilocalorie / unit.mole) in param_dict.items()
assert ("phase1", 30 * unit.degree) in param_dict.items()
assert ("periodicity1", 2) in param_dict.items()
assert ("idivf1", 4) in param_dict.items()
def test_multi_term_proper_torsion(self):
"""
Test creation and serialization of a multi-term proper torsion
"""
p1 = ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1=5 * unit.kilocalorie / unit.mole,
phase2=31 * unit.degree,
periodicity2=3,
k2=6 * unit.kilocalorie / unit.mole,
)
param_dict = p1.to_dict()
assert param_dict["k1"] == 5 * unit.kilocalorie / unit.mole
assert param_dict["phase1"] == 30 * unit.degree
assert param_dict["periodicity1"] == 2
assert param_dict["k2"] == 6 * unit.kilocalorie / unit.mole
assert param_dict["phase2"] == 31 * unit.degree
assert param_dict["periodicity2"] == 3
def test_multi_term_proper_torsion_skip_index(self):
"""
        Test creation and serialization of a multi-term proper torsion where
        the indices are not consecutive, so a SMIRNOFFSpecError is raised
"""
with pytest.raises(
SMIRNOFFSpecError, match=r"Unexpected kwarg \(phase3: 31 deg\)*."
):
ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1=5 * unit.kilocalorie / unit.mole,
phase3=31 * unit.degree,
periodicity3=3,
k3=6 * unit.kilocalorie / unit.mole,
)
def test_multi_term_proper_torsion_bad_units(self):
"""
Test creation and serialization of a multi-term proper torsion where
one of the terms has incorrect units
"""
with pytest.raises(IncompatibleUnitError, match="should have units of"):
ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1=5 * unit.kilocalorie / unit.mole,
phase2=31 * unit.angstrom, # This should be caught
periodicity2=3,
k2=6 * unit.kilocalorie / unit.mole,
)
def test_single_term_proper_torsion_bo(self):
"""
Test creation and serialization of a single-term proper torsion with bond order interpolation.
"""
p1 = ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]~[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1_bondorder1=1 * unit.kilocalorie / unit.mole,
k1_bondorder2=1.8 * unit.kilocalorie / unit.mole,
)
param_dict = p1.to_dict()
assert ("k1_bondorder1", 1 * unit.kilocalorie / unit.mole) in param_dict.items()
assert (
"k1_bondorder2",
1.8 * unit.kilocalorie / unit.mole,
) in param_dict.items()
assert ("phase1", 30 * unit.degree) in param_dict.items()
assert ("periodicity1", 2) in param_dict.items()
assert "idivf" not in param_dict
assert len(p1.k_bondorder) == 1
assert len(p1.k_bondorder[0]) == 2
assert {1, 2} == set(p1.k_bondorder[0].keys())
def test_single_term_proper_torsion_bo_w_idivf(self):
"""
Test creation and serialization of a single-term proper torsion with bond order interpolation.
With `idivf1` specified.
"""
p1 = ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1_bondorder1=1 * unit.kilocalorie / unit.mole,
k1_bondorder2=1.8 * unit.kilocalorie / unit.mole,
idivf1=4,
)
param_dict = p1.to_dict()
assert ("k1_bondorder1", 1 * unit.kilocalorie / unit.mole) in param_dict.items()
assert (
"k1_bondorder2",
1.8 * unit.kilocalorie / unit.mole,
) in param_dict.items()
assert ("phase1", 30 * unit.degree) in param_dict.items()
assert ("periodicity1", 2) in param_dict.items()
assert ("idivf1", 4) in param_dict.items()
def test_multi_term_proper_torsion_bo(self):
"""
Test creation and serialization of a multi-term proper torsion with bond order interpolation.
"""
p1 = ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1_bondorder1=1 * unit.kilocalorie / unit.mole,
k1_bondorder2=1.8 * unit.kilocalorie / unit.mole,
phase2=31 * unit.degree,
periodicity2=3,
k2_bondorder1=1.2 * unit.kilocalorie / unit.mole,
k2_bondorder2=1.9 * unit.kilocalorie / unit.mole,
)
param_dict = p1.to_dict()
assert param_dict["k1_bondorder1"] == 1 * unit.kilocalorie / unit.mole
assert param_dict["k1_bondorder2"] == 1.8 * unit.kilocalorie / unit.mole
assert param_dict["phase1"] == 30 * unit.degree
assert param_dict["periodicity1"] == 2
assert param_dict["k2_bondorder1"] == 1.2 * unit.kilocalorie / unit.mole
assert param_dict["k2_bondorder2"] == 1.9 * unit.kilocalorie / unit.mole
assert param_dict["phase2"] == 31 * unit.degree
assert param_dict["periodicity2"] == 3
def test_multi_term_proper_torsion_bo_getters_setters(self):
"""
Test getters and setters of a multi-term proper torsion with bond order interpolation.
"""
p1 = ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1_bondorder1=1 * unit.kilocalorie / unit.mole,
k1_bondorder2=1.8 * unit.kilocalorie / unit.mole,
phase2=31 * unit.degree,
periodicity2=3,
k2_bondorder1=1.2 * unit.kilocalorie / unit.mole,
k2_bondorder2=1.9 * unit.kilocalorie / unit.mole,
)
assert p1.k1_bondorder1 == 1.0 * unit.kilocalorie / unit.mole
p1.k1_bondorder1 = 2.0 * unit.kilocalorie / unit.mole
assert p1.k1_bondorder1 == 2.0 * unit.kilocalorie / unit.mole
assert p1.k2_bondorder2 == 1.9 * unit.kilocalorie / unit.mole
p1.k2_bondorder2 = 2.9 * unit.kilocalorie / unit.mole
assert p1.k2_bondorder2 == 2.9 * unit.kilocalorie / unit.mole
def test_multi_term_proper_torsion_bo_skip_index(self):
"""
        Test creation and serialization of a multi-term proper torsion with
        bond order interpolation where the indices are not consecutive, so a
        SMIRNOFFSpecError is raised
"""
with pytest.raises(
SMIRNOFFSpecError, match=r"Unexpected kwarg \(k3_bondorder1*."
):
ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1_bondorder1=1 * unit.kilocalorie / unit.mole,
k1_bondorder2=1.8 * unit.kilocalorie / unit.mole,
phase3=31 * unit.degree,
periodicity3=3,
k3_bondorder1=1.2 * unit.kilocalorie / unit.mole,
k3_bondorder2=1.9 * unit.kilocalorie / unit.mole,
)
def test_single_term_single_bo_exception(self):
"""Test behavior where a single bond order term is specified for a single k"""
# raises no error, as checks are handled at parameterization
# we may add a `validate` method later that is called manually by user when they want it
ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]~[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1_bondorder1=1 * unit.kilocalorie / unit.mole,
)
def test_multi_term_single_bo_exception(self):
"""Test behavior where a single bond order term is specified for each of multiple k"""
# TODO : currently raises no error, as checks are handled at parameterization
# is this a spec thing that we should be checking?
# if so, it will be painful to implement
ProperTorsionHandler.ProperTorsionType(
smirks="[*:1]-[*:2]-[*:3]-[*:4]",
phase1=30 * unit.degree,
periodicity1=2,
k1_bondorder1=1 * unit.kilocalorie / unit.mole,
phase2=31 * unit.degree,
periodicity2=3,
k2_bondorder1=1.2 * unit.kilocalorie / unit.mole,
)
class TestProperTorsionHandler:
def test_torsion_handler_charmm_potential(self):
"""
        Test creation of TorsionHandlers with the deprecated 0.2 potential value "charmm" instead of the
        currently supported potential value "k*(1+cos(periodicity*theta-phase))".
"""
import re
# Test creating ProperTorsionHandlers
err_msg = re.escape(
"Attempted to set ProperTorsionHandler.potential to charmm. Currently, "
"only the following values are supported: ['k*(1+cos(periodicity*theta-phase))']."
)
with pytest.raises(SMIRNOFFSpecError, match=err_msg):
ProperTorsionHandler(potential="charmm", skip_version_check=True)
ProperTorsionHandler(
potential="k*(1+cos(periodicity*theta-phase))", skip_version_check=True
)
# Same test, but with ImproperTorsionHandler
err_msg = re.escape(
"Attempted to set ImproperTorsionHandler.potential to charmm. Currently, "
"only the following values are supported: ['k*(1+cos(periodicity*theta-phase))']."
)
with pytest.raises(SMIRNOFFSpecError, match=err_msg):
ImproperTorsionHandler(potential="charmm", skip_version_check=True)
ImproperTorsionHandler(
potential="k*(1+cos(periodicity*theta-phase))", skip_version_check=True
)
class TestvdWHandler:
def test_add_param_str(self):
"""
Ensure that string input is supported, given the added complication that the
sigma/rmin_half setters silently set each other's value.
See https://github.com/openforcefield/openff-toolkit/issues/788
"""
vdw_handler = vdWHandler(version=0.4)
param1 = {
"epsilon": "0.5 * kilocalorie/mole",
"rmin_half": "1.2 * angstrom",
"smirks": "[*:1]",
"id": "n99",
}
param2 = {
"epsilon": "0.1 * kilocalorie/mole",
"sigma": "0.8 * angstrom",
"smirks": "[#1:1]",
"id": "n00",
}
vdw_handler.add_parameter(param1)
vdw_handler.add_parameter(param2)
assert vdw_handler.get_parameter({"smirks": "[*:1]"})[0].id == "n99"
assert vdw_handler.get_parameter({"smirks": "[#1:1]"})[0].id == "n00"
def test_set_invalid_scale_factor(self):
handler = vdWHandler(version=0.4)
with pytest.raises(SMIRNOFFSpecError, match="unable to handle scale12"):
handler.scale12 = 0.1
with pytest.raises(SMIRNOFFSpecError, match="unable to handle scale13"):
handler.scale13 = 0.1
with pytest.raises(SMIRNOFFSpecError, match="unable to handle scale15"):
handler.scale15 = 0.1
class TestvdWHandlerUpConversion:
"""
Test the implementation of OFF-EP-0008:
https://openforcefield.github.io/standards/enhancement-proposals/off-ep-0008/
"""
def test_upconversion(self):
converted = vdWHandler(version=0.3, method="cutoff")
new = vdWHandler(version=0.4)
assert converted.version == new.version == Version("0.4")
        # Up-conversion from the default (and only) value of .method in 0.3
        # happens to match the default values of the new attributes in version 0.4
assert converted.periodic_method == new.periodic_method == "cutoff"
assert converted.nonperiodic_method == new.nonperiodic_method == "no-cutoff"
try:
assert not hasattr(converted, "method")
except AttributeError:
# https://github.com/openforcefield/openff-toolkit/issues/1680
pytest.skip("ParameterAttribute.__delete__ not implemented")
def test_issue_1668(self):
"""Reproduce https://github.com/openforcefield/openff-toolkit/issues/1688"""
handler1 = vdWHandler(version=0.3)
handler2 = vdWHandler(version=0.3, method="cutoff")
assert handler1.version == handler2.version
assert handler1.periodic_method == handler2.periodic_method
assert handler1.nonperiodic_method == handler2.nonperiodic_method
def test_upconversion_unknown_kwarg(self):
with pytest.raises(
NotImplementedError,
match=r"Did not know.*`method=\"no-cutoff",
):
vdWHandler(
version=0.3,
method="no-cutoff",
)
def test_invalid_0_4_kwargs(self):
with pytest.raises(
SMIRNOFFSpecError,
match="removed in version 0.4 of the vdW",
):
vdWHandler(version=0.4, method="cutoff")
class TestvdWType:
"""
Test the behavior of vdWType
"""
def test_sigma_rmin_half(self):
"""Test the setter/getter behavior or sigma and rmin_half"""
from openff.toolkit.typing.engines.smirnoff.parameters import vdWHandler
data = {
"smirks": "[*:1]",
"rmin_half": 0.5 * unit.angstrom,
"epsilon": 0.5 * unit.kilocalorie / unit.mole,
}
param = vdWHandler.vdWType(**data)
assert param.sigma is not None
assert param.rmin_half is not None
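        # For a 12-6 Lennard-Jones potential the minimum sits at
        # r_min = 2 * rmin_half = 2**(1/6) * sigma, so the two attributes are
        # interconvertible via sigma = 2 * rmin_half / 2**(1/6).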
assert numpy.isclose(
param.sigma.m_as(unit.angstrom),
(2.0 * param.rmin_half / 2 ** (1 / 6)).m_as(unit.angstrom),
)
assert "sigma" not in param.to_dict()
assert "rmin_half" in param.to_dict()
param.sigma = param.sigma
assert numpy.isclose(param.rmin_half.m_as(unit.angstrom), 0.5)
assert "sigma" in param.to_dict()
assert "rmin_half" not in param.to_dict()
param.rmin_half = param.rmin_half
assert numpy.isclose(
param.sigma.m_as(unit.angstrom),
(2.0 * param.rmin_half / 2 ** (1 / 6)).m_as(unit.angstrom),
)
assert "sigma" not in param.to_dict()
assert "rmin_half" in param.to_dict()
class TestElectrostaticsHandler:
def test_solvent_dielectric(self):
with pytest.raises(
SMIRNOFFSpecUnimplementedError,
match="make use of `solvent_d",
):
ElectrostaticsHandler(version=0.3, method="PME", solvent_dielectric=4)
handler = ElectrostaticsHandler(version=0.4)
assert handler.solvent_dielectric is None
handler.solvent_dielectric = None
with pytest.raises(
SMIRNOFFSpecUnimplementedError,
match="make use of `solvent_d",
):
handler.solvent_dielectric = 78.3
def test_unknown_periodic_potential(self):
handler = ElectrostaticsHandler(version=0.4)
with pytest.raises(
NotImplementedError,
match="unexpected periodic potential",
):
handler.periodic_potential = "PPPM"
def test_set_invalid_scale_factor(self):
handler = ElectrostaticsHandler(version=0.4)
with pytest.raises(SMIRNOFFSpecError, match="unable to handle scale12"):
handler.scale12 = 0.1
with pytest.raises(SMIRNOFFSpecError, match="unable to handle scale13"):
handler.scale13 = 0.1
with pytest.raises(SMIRNOFFSpecError, match="unable to handle scale15"):
handler.scale15 = 0.1
class TestElectrostaticsHandlerUpconversion:
"""
Test the implementation of OFF-EP-0005:
https://openforcefield.github.io/standards/enhancement-proposals/off-ep-0005/
"""
default_reaction_field_expression = (
"charge1*charge2/(4*pi*epsilon0)*(1/r + k_rf*r^2 - c_rf);"
"k_rf=(cutoff^(-3))*(solvent_dielectric-1)/(2*solvent_dielectric+1);"
"c_rf=cutoff^(-1)*(3*solvent_dielectric)/(2*solvent_dielectric+1)"
)
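    # This is the standard reaction-field functional form: a Coulomb term plus
    # a quadratic correction k_rf * r^2, shifted by the constant c_rf so that
    # the energy vanishes at the cutoff distance.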
@pytest.mark.parametrize(
("old_method", "new_method"),
[
("PME", "Ewald3D-ConductingBoundary"),
("Coulomb", "Coulomb"),
("reaction-field", default_reaction_field_expression),
],
)
def test_upconversion(self, old_method, new_method):
handler = ElectrostaticsHandler(version=0.3, method=old_method)
assert handler.version == Version("0.4")
# Only `periodic_potential` is a function of the values in a version 0.3 handler ...
assert handler.periodic_potential == new_method
# ... for everything else, it's the same
assert handler.nonperiodic_potential == "Coulomb"
assert handler.exception_potential == "Coulomb"
def test_invalid_0_4_kwargs(self):
with pytest.raises(SMIRNOFFSpecError, match="removed in version 0.4 of the E"):
ElectrostaticsHandler(version=0.4, method="PME")
class TestVirtualSiteHandler:
"""
Test the creation of a VirtualSiteHandler and the implemented VirtualSiteTypes
"""
@pytest.mark.parametrize(
"parameter, expected_index",
[
(VirtualSiteMocking.bond_charge_parameter("[*:1][*:2]"), 0),
(VirtualSiteMocking.monovalent_parameter("[*:1][*:2][*:3]"), 0),
(VirtualSiteMocking.divalent_parameter("[*:2][*:1][*:3]", "once"), 0),
(VirtualSiteMocking.trivalent_parameter("[*:1][*:2][*:3][*:4]"), 0),
],
)
def test_parent_index(self, parameter, expected_index):
assert parameter.parent_index == expected_index
assert (
VirtualSiteHandler.VirtualSiteType.type_to_parent_index(parameter.type)
== expected_index
)
@pytest.mark.parametrize(
"kwargs, expected_raises",
[
(
VirtualSiteMocking.bond_charge_parameter("[*:1][*:2]").to_dict(),
does_not_raise(),
),
(
VirtualSiteMocking.monovalent_parameter("[*:1][*:2][*:3]").to_dict(),
does_not_raise(),
),
(
VirtualSiteMocking.divalent_parameter(
"[*:1][*:2][*:3]", match="once", angle=0.0 * unit.degrees
).to_dict(),
does_not_raise(),
),
(
VirtualSiteMocking.divalent_parameter(
"[*:1][*:2][*:3]",
match="all_permutations",
angle=2.0 * unit.degrees,
).to_dict(),
does_not_raise(),
),
(
VirtualSiteMocking.trivalent_parameter(
"[*:1][*:2][*:3][*:4]"
).to_dict(),
does_not_raise(),
),
# Validate `type`
(
{},
pytest.raises(SMIRNOFFSpecError, match="the `type` keyword is missing"),
),
(
{"type": "InvalidType"},
pytest.raises(
SMIRNOFFSpecError,
match="'InvalidType' is not a supported virtual site type",
),
),
# Validate `match`
(
{"type": "BondCharge"},
pytest.raises(
SMIRNOFFSpecError, match="the `match` keyword is missing"
),
),
(
{"type": "BondCharge", "match": "once"},
pytest.raises(
SMIRNOFFSpecError,
match="match='once' not supported with type='BondCharge'",
),
),
(
{"type": "MonovalentLonePair", "match": "once"},
pytest.raises(
SMIRNOFFSpecError,
match="match='once' not supported with type='MonovalentLonePair'",
),
),
(
{
"type": "DivalentLonePair",
"match": "once",
"outOfPlaneAngle": 2.0 * unit.degrees,
},
pytest.raises(
SMIRNOFFSpecError,
match="match='once' not supported with "
"type='DivalentLonePair' and is_in_plane=False",
),
),
(
{"type": "TrivalentLonePair", "match": "all_permutations"},
pytest.raises(
SMIRNOFFSpecError,
match="match='all_permutations' not supported with "
"type='TrivalentLonePair'",
),
),
],
)
def test_add_default_init_kwargs_validation(self, kwargs, expected_raises):
with expected_raises:
VirtualSiteHandler.VirtualSiteType._add_default_init_kwargs(kwargs)
@pytest.mark.parametrize(
"kwargs, expected_kwargs",
[
(
{"type": "BondCharge", "match": "all_permutations"},
{
"type": "BondCharge",
"match": "all_permutations",
"outOfPlaneAngle": None,
"inPlaneAngle": None,
"sigma": 0.0 * unit.angstrom,
"epsilon": 0.0 * unit.kilocalorie_per_mole,
},
),
(
{
"type": "BondCharge",
"match": "all_permutations",
"rmin_half": 1.0 * unit.angstrom,
},
{
"type": "BondCharge",
"match": "all_permutations",
"outOfPlaneAngle": None,
"inPlaneAngle": None,
"rmin_half": 1.0 * unit.angstrom,
"epsilon": 0.0 * unit.kilocalorie_per_mole,
},
),
(
{"type": "MonovalentLonePair", "match": "all_permutations"},
{
"type": "MonovalentLonePair",
"match": "all_permutations",
"sigma": 0.0 * unit.angstrom,
"epsilon": 0.0 * unit.kilocalorie_per_mole,
},
),
],
)
def test_add_default_init_kwargs_values(self, kwargs, expected_kwargs):
assert kwargs != expected_kwargs
VirtualSiteHandler.VirtualSiteType._add_default_init_kwargs(kwargs)
assert kwargs == expected_kwargs
@pytest.mark.parametrize(
"parameter, in_plane_angle, expected_raises",
[
(
VirtualSiteMocking.bond_charge_parameter("[*:1][*:2]"),
0.0 * unit.degrees,
pytest.raises(
SMIRNOFFSpecError,
match="'BondCharge' sites do not support `inPlaneAngle`",
),
),
(
VirtualSiteMocking.bond_charge_parameter("[*:1][*:2]"),
None,
does_not_raise(),
),
(
VirtualSiteMocking.monovalent_parameter("[*:1][*:2][*:3]"),
1.0 * unit.angstrom,
pytest.raises(IncompatibleUnitError),
),
(
VirtualSiteMocking.monovalent_parameter("[*:1][*:2][*:3]"),
130.0 * unit.degrees,
does_not_raise(),
),
],
)
def test_in_plane_angle_converter(self, parameter, in_plane_angle, expected_raises):
parameter_dict = parameter.to_dict()
parameter_dict["inPlaneAngle"] = in_plane_angle
with expected_raises:
new_parameter = VirtualSiteHandler.VirtualSiteType(**parameter_dict)
assert new_parameter.inPlaneAngle == in_plane_angle
@pytest.mark.parametrize(
"parameter, out_of_plane_angle, expected_raises",
[
(
VirtualSiteMocking.bond_charge_parameter("[*:1][*:2]"),
0.0 * unit.degrees,
pytest.raises(
SMIRNOFFSpecError,
match="'BondCharge' sites do not support `outOfPlaneAngle`",
),
),
(
VirtualSiteMocking.bond_charge_parameter("[*:1][*:2]"),
None,
does_not_raise(),
),
(
VirtualSiteMocking.monovalent_parameter("[*:1][*:2][*:3]"),
130.0 * unit.degrees,
does_not_raise(),
),
],
)
def test_out_of_plane_angle_converter(
self, parameter, out_of_plane_angle, expected_raises
):
parameter_dict = parameter.to_dict()
parameter_dict["outOfPlaneAngle"] = out_of_plane_angle
with expected_raises:
new_parameter = VirtualSiteHandler.VirtualSiteType(**parameter_dict)
assert new_parameter.outOfPlaneAngle == out_of_plane_angle
def test_serialize_roundtrip(self):
force_field = ForceField()
handler = force_field.get_parameter_handler("VirtualSites")
handler.add_parameter(
parameter=VirtualSiteMocking.bond_charge_parameter("[*:1][*:2]")
)
handler.add_parameter(
parameter=VirtualSiteMocking.monovalent_parameter("[*:1][*:2][*:3]")
)
handler.add_parameter(
parameter=VirtualSiteMocking.divalent_parameter("[*:2][*:1][*:3]", "once")
)
handler.add_parameter(
parameter=VirtualSiteMocking.trivalent_parameter("[*:1][*:2][*:3][*:4]")
)
offxml_string = force_field.to_string()
roundtripped_force_field = ForceField(offxml_string)
assert offxml_string == roundtripped_force_field.to_string()
@pytest.mark.parametrize(
"smiles, matched_indices, parameter, expected_raises",
[
(
"[Cl:1][H:2]",
(1, 2),
VirtualSiteMocking.bond_charge_parameter("[Cl:1][H:2]"),
does_not_raise(),
),
(
"[N:1]([H:2])([H:3])[H:4]",
(1, 2, 3),
VirtualSiteMocking.monovalent_parameter("[*:2][N:1][*:3]"),
pytest.raises(NotImplementedError, match="please describe what it is"),
),
],
)
def test_validate_found_match(
self, smiles, matched_indices, parameter, expected_raises
):
molecule = Molecule.from_smiles(smiles, allow_undefined_stereo=True)
topology: Topology = molecule.to_topology()
atoms = {i: atom for i, atom in enumerate(topology.atoms)}
with expected_raises:
VirtualSiteHandler._validate_found_match(atoms, matched_indices, parameter)
@pytest.mark.parametrize(
"handler_a, handler_b, expected_raises",
[
            # Currently no other test cases exist, as only one `exclusion_policy` is supported
(
VirtualSiteHandler(version="0.3"),
VirtualSiteHandler(version="0.3"),
does_not_raise(),
)
],
)
def test_check_handler_compatability(self, handler_a, handler_b, expected_raises):
with expected_raises:
handler_a.check_handler_compatibility(handler_b)
@pytest.mark.parametrize(
"parameters, smiles, expected_matches",
[
# Check that a basic BondCharge virtual site can be applied
(
[VirtualSiteMocking.bond_charge_parameter("[Cl:1]-[C:2]")],
"[Cl:2][C:1]([H:3])([H:4])[H:5]",
{(1, 0): {("[Cl:1]-[C:2]", "EP")}},
),
# Check that two bond charge vsites with different names can be applied
# to the same atoms
(
[
VirtualSiteMocking.bond_charge_parameter("[Cl:1]-[C:2]", name="EP"),
VirtualSiteMocking.bond_charge_parameter("[Cl:1]-[C:2]", name="LP"),
],
"[Cl:1][C:2]([H:3])([H:4])[H:5]",
{(0, 1): {("[Cl:1]-[C:2]", "EP"), ("[Cl:1]-[C:2]", "LP")}},
),
# Check that a bond charge vsite can be applied to a symmetric moiety
(
[VirtualSiteMocking.bond_charge_parameter("[C:1]#[C:2]")],
"[H:1][C:2]#[C:3][Cl:4]",
{(1, 2): {("[C:1]#[C:2]", "EP")}, (2, 1): {("[C:1]#[C:2]", "EP")}},
),
# Check that a monovalent lone pair vsite can be applied to a relatively symmetric moiety
(
[VirtualSiteMocking.monovalent_parameter("[O:1]=[C:2]-[*:3]")],
"[O:2]=[C:1]([H:3])[Cl:4]",
{
(1, 0, 2): {("[O:1]=[C:2]-[*:3]", "EP")},
(1, 0, 3): {("[O:1]=[C:2]-[*:3]", "EP")},
},
),
# Check that monovalent lone pair vsites override correctly
(
[
VirtualSiteMocking.monovalent_parameter("[O:1]=[C:2]-[*:3]"),
VirtualSiteMocking.monovalent_parameter("[O:1]=[C:2]-[Cl:3]"),
],
"[O:2]=[C:1]([H:3])[Cl:4]",
{(1, 0, 3): {("[O:1]=[C:2]-[Cl:3]", "EP")}},
),
# Check that a single-particle in-plane divalent lone pair vsite
# can be applied
(
[
VirtualSiteMocking.divalent_parameter(
"[H:2][O:1][H:3]", match="once", angle=0.0 * unit.degree
)
],
"[H:1][O:2][H:3]",
{(1, 0, 2): {("[H:2][O:1][H:3]", "EP")}},
),
# Check that a two-particle out-of-plane divalent lone pair vsite can be applied
(
[
VirtualSiteMocking.divalent_parameter(
"[H:2][O:1][H:3]",
match="all_permutations",
angle=30.0 * unit.degree,
)
],
"[H:1][O:2][H:3]",
{
(1, 0, 2): {("[H:2][O:1][H:3]", "EP")},
(1, 2, 0): {("[H:2][O:1][H:3]", "EP")},
},
),
# Check that a single-particle in-plane divalent lone pair site can
# be applied to a symmetric molecule
(
[
VirtualSiteMocking.divalent_parameter(
"[*:2]-[N:1]~[*:3]", match="once", angle=0.0 * unit.degree
)
],
"[H:1][N:2]=[N:3][H:4]",
{
(1, 0, 2): {("[*:2]-[N:1]~[*:3]", "EP")},
(2, 3, 1): {("[*:2]-[N:1]~[*:3]", "EP")},
},
),
# Check that a two-particle out-of-plane divalent lone pair vsite
# can be applied to a symmetric molecule
(
[
VirtualSiteMocking.divalent_parameter(
"[*:2]~[N:1]~[*:3]",
match="all_permutations",
angle=30.0 * unit.degree,
)
],
"[H:1][N:2]=[N:3][H:4]",
{
(1, 0, 2): {("[*:2]~[N:1]~[*:3]", "EP")},
(1, 2, 0): {("[*:2]~[N:1]~[*:3]", "EP")},
(2, 1, 3): {("[*:2]~[N:1]~[*:3]", "EP")},
(2, 3, 1): {("[*:2]~[N:1]~[*:3]", "EP")},
},
),
# Check that a trivalent lone pair vsite can be applied to a simple molecule
(
[VirtualSiteMocking.trivalent_parameter("[N:1]([H:2])([H:3])[H:4]")],
"[N:2]([H:1])([H:3])[H:4]",
{(1, 0, 2, 3): {("[N:1]([H:2])([H:3])[H:4]", "EP")}},
),
],
)
def test_find_matches(
self,
parameters: List[VirtualSiteHandler.VirtualSiteType],
smiles: str,
expected_matches: Dict[Tuple[int, ...], List[Tuple[str, str]]],
):
molecule = Molecule.from_mapped_smiles(smiles, allow_undefined_stereo=True)
handler = VirtualSiteHandler(version="0.3")
for parameter in parameters:
handler.add_parameter(parameter=parameter)
matches = handler.find_matches(molecule.to_topology(), unique=False)
matched_smirks = defaultdict(set)
for match_list in matches.values():
for match in match_list:
matched_smirks[match.environment_match.topology_atom_indices].add(
(match.parameter_type.smirks, match.parameter_type.name)
)
assert {**matched_smirks} == expected_matches
def test_find_matches_multiple_molecules(self):
topology = Topology.from_molecules(
[
Molecule.from_mapped_smiles("[Cl:2][C:1]([H:3])([H:4])[H:5]"),
Molecule.from_mapped_smiles("[O:2]=[C:1]([H:3])[F:4]"),
]
)
handler = VirtualSiteHandler(version="0.3")
handler.add_parameter(
parameter=VirtualSiteMocking.bond_charge_parameter("[Cl:1]-[C:2]")
)
handler.add_parameter(
parameter=VirtualSiteMocking.monovalent_parameter("[O:1]=[C:2]-[*:3]")
)
matches = handler.find_matches(topology, unique=False)
matched_smirks = defaultdict(set)
for match_key in matches:
match_list: List = matches[match_key]
for match in match_list:
matched_smirks[match.environment_match.topology_atom_indices].add(
(match.parameter_type.smirks, match.parameter_type.name)
)
expected_matches = {
(1, 0): {("[Cl:1]-[C:2]", "EP")},
(6, 5, 7): {("[O:1]=[C:2]-[*:3]", "EP")},
(6, 5, 8): {("[O:1]=[C:2]-[*:3]", "EP")},
}
assert {**matched_smirks} == expected_matches
@pytest.mark.parametrize(
"query_parameter, query_key, expected_index",
[
(
VirtualSiteMocking.monovalent_parameter("[*:1][*:2][*:3]", name="LP"),
None,
2,
),
(None, ("BondCharge", "[*:1][*:2]", "EP"), 3),
(None, ("BondCharge", "[*:1][*:2]", "LP"), 1),
(None, ("MonovalentLonePair", "[*:1][*:2][*:3]", "EP"), 0),
(None, ("MonovalentLonePair", "[*:1][*:2][*:3]", "LP"), 2),
(None, ("DivalentLonePair", "[*:1][*:2][*:3]", "LP"), None),
],
)
def test_index_of_parameter(self, query_parameter, query_key, expected_index):
handler = VirtualSiteHandler(version="0.3")
for parameter in [
VirtualSiteMocking.monovalent_parameter("[*:1][*:2][*:3]", name="EP"),
VirtualSiteMocking.bond_charge_parameter("[*:1][*:2]", name="LP"),
VirtualSiteMocking.monovalent_parameter("[*:1][*:2][*:3]", name="LP"),
VirtualSiteMocking.bond_charge_parameter("[*:1][*:2]", name="EP"),
]:
handler.add_parameter(parameter=parameter)
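        # Lookups accept either an equivalent parameter object or a
        # (type, smirks, name) key tuple; a key with no match yields None.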
assert handler._index_of_parameter(query_parameter, query_key) == expected_index
def test_invalid_num_charge_increments(self):
with pytest.raises(
SMIRNOFFSpecError,
match="'BondCharge' virtual sites expect exactly 2 charge increments,",
):
VirtualSiteHandler.VirtualSiteType(
type="BondCharge",
smirks="[*]-[*]",
name="EP",
charge_increment=[0.1, 0.2, 0.3] * unit.elementary_charge,
match="all_permutations",
distance=2.0 * unit.angstrom,
)
class TestLibraryChargeHandler:
def test_create_library_charge_handler(self):
"""Test creation of an empty LibraryChargeHandler"""
LibraryChargeHandler(skip_version_check=True)
def test_library_charge_type_wrong_num_charges(self):
"""Ensure that an error is raised if a LibraryChargeType is initialized with a different number of
tagged atoms and charges"""
LibraryChargeHandler.LibraryChargeType(
smirks="[#6:1]-[#7:2]",
charge1=0.1 * unit.elementary_charge,
charge2=-0.1 * unit.elementary_charge,
)
LibraryChargeHandler.LibraryChargeType(
smirks="[#6:1]-[#7:2]-[#6]",
charge1=0.1 * unit.elementary_charge,
charge2=-0.1 * unit.elementary_charge,
)
with pytest.raises(
SMIRNOFFSpecError,
match="initialized with unequal number of tagged atoms and charges",
):
LibraryChargeHandler.LibraryChargeType(
smirks="[#6:1]-[#7:2]",
charge1=0.05 * unit.elementary_charge,
charge2=0.05 * unit.elementary_charge,
charge3=-0.1 * unit.elementary_charge,
)
with pytest.raises(
SMIRNOFFSpecError,
match="initialized with unequal number of tagged atoms and charges",
):
LibraryChargeHandler.LibraryChargeType(
smirks="[#6:1]-[#7:2]-[#6]",
charge1=0.05 * unit.elementary_charge,
charge2=0.05 * unit.elementary_charge,
charge3=-0.1 * unit.elementary_charge,
)
with pytest.raises(
SMIRNOFFSpecError,
match="initialized with unequal number of tagged atoms and charges",
):
LibraryChargeHandler.LibraryChargeType(
smirks="[#6:1]-[#7:2]-[#6]", charge1=0.05 * unit.elementary_charge
)
def test_library_charge_type_from_molecule(self):
mol = Molecule.from_smiles("CCO")
with pytest.raises(ValueError, match="missing partial"):
LibraryChargeHandler.LibraryChargeType.from_molecule(mol)
mol.partial_charges = numpy.linspace(-0.4, 0.4, 9) * unit.elementary_charge
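        # CCO (ethanol) has 9 atoms (2 C, 1 O, 6 H); numpy.linspace(-0.4, 0.4, 9)
        # assigns one charge per atom and is symmetric about zero, so the
        # partial charges sum to the molecule's net charge of 0.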
library_charges = LibraryChargeHandler.LibraryChargeType.from_molecule(mol)
assert isinstance(library_charges, LibraryChargeHandler.LibraryChargeType)
assert library_charges.smirks == mol.to_smiles(mapped=True)
assert library_charges.charge == [*mol.partial_charges]
class TestChargeIncrementModelHandler:
def test_create_charge_increment_model_handler(self):
"""Test creation of ChargeIncrementModelHandlers"""
handler = ChargeIncrementModelHandler(skip_version_check=True)
assert handler.number_of_conformers == 1
assert handler.partial_charge_method == "AM1-Mulliken"
handler = ChargeIncrementModelHandler(
skip_version_check=True, number_of_conformers=10
)
handler = ChargeIncrementModelHandler(
skip_version_check=True, number_of_conformers=1
)
handler = ChargeIncrementModelHandler(
skip_version_check=True, number_of_conformers="10"
)
handler = ChargeIncrementModelHandler(
skip_version_check=True, number_of_conformers=0
)
handler = ChargeIncrementModelHandler(
skip_version_check=True, number_of_conformers="0"
)
with pytest.raises(TypeError):
handler = ChargeIncrementModelHandler(
skip_version_check=True, number_of_conformers=None
)
with pytest.raises(SMIRNOFFSpecError):
handler = ChargeIncrementModelHandler(
skip_version_check=True, n_conformers=[10]
)
handler = ChargeIncrementModelHandler(
skip_version_check=True, partial_charge_method="AM1-Mulliken"
)
handler = ChargeIncrementModelHandler(
skip_version_check=True, partial_charge_method="Gasteiger"
)
handler = ChargeIncrementModelHandler(
skip_version_check=True, partial_charge_method=None
)
def test_charge_increment_model_handler_getters_setters(self):
"""Test ChargeIncrementModelHandler getters and setters"""
handler = ChargeIncrementModelHandler(skip_version_check=True)
assert handler.number_of_conformers == 1
assert handler.partial_charge_method == "AM1-Mulliken"
handler.number_of_conformers = 2
assert handler.number_of_conformers == 2
handler.number_of_conformers = "3"
assert handler.number_of_conformers == 3
with pytest.raises(ValueError):
handler.number_of_conformers = "string that can't be cast to int"
def test_charge_increment_model_handlers_are_compatible(self):
"""Test creation of ChargeIncrementModelHandlers"""
handler1 = ChargeIncrementModelHandler(skip_version_check=True)
handler2 = ChargeIncrementModelHandler(skip_version_check=True)
handler1.check_handler_compatibility(handler2)
handler3 = ChargeIncrementModelHandler(
skip_version_check=True, number_of_conformers="9"
)
with pytest.raises(IncompatibleParameterError):
handler1.check_handler_compatibility(handler3)
def test_charge_increment_type_wrong_num_increments(self):
"""Ensure that an error is raised if a ChargeIncrementType is initialized with a different number of
        tagged atoms and charge increments"""
ChargeIncrementModelHandler.ChargeIncrementType(
smirks="[#6:1]-[#7:2]",
charge_increment1=0.1 * unit.elementary_charge,
charge_increment2=-0.1 * unit.elementary_charge,
)
ChargeIncrementModelHandler.ChargeIncrementType(
smirks="[#6:1]-[#7:2]-[#6]",
charge_increment1=0.1 * unit.elementary_charge,
charge_increment2=-0.1 * unit.elementary_charge,
)
with pytest.raises(
SMIRNOFFSpecError,
match="an invalid combination of tagged atoms and charge increments",
):
ChargeIncrementModelHandler.ChargeIncrementType(
smirks="[#6:1]-[#7:2]",
charge_increment1=0.05 * unit.elementary_charge,
charge_increment2=0.05 * unit.elementary_charge,
charge_increment3=-0.1 * unit.elementary_charge,
)
with pytest.raises(
SMIRNOFFSpecError,
match="an invalid combination of tagged atoms and charge increments",
):
ChargeIncrementModelHandler.ChargeIncrementType(
smirks="[#6:1]-[#7:2]-[#6]",
charge_increment1=0.05 * unit.elementary_charge,
charge_increment2=0.05 * unit.elementary_charge,
charge_increment3=-0.1 * unit.elementary_charge,
)
ChargeIncrementModelHandler.ChargeIncrementType(
smirks="[#6:1]-[#7:2]-[#6]",
charge_increment1=0.05 * unit.elementary_charge,
)
def test_charge_increment_one_ci_missing(self):
"""Test creating a chargeincrement parameter with a missing value"""
ChargeIncrementModelHandler.ChargeIncrementType(
smirks="[*:1]-[*:2]",
charge_increment=[0.1 * unit.elementary_charge],
)
ChargeIncrementModelHandler.ChargeIncrementType(
smirks="[*:1]-[*:2]",
charge_increment=[
0.1 * unit.elementary_charge,
-0.1 * unit.elementary_charge,
],
)
class TestGBSAHandler:
def test_create_default_gbsahandler(self):
"""Test creation of an empty GBSAHandler, with all default attributes"""
gbsa_handler = GBSAHandler(skip_version_check=True)
assert gbsa_handler.gb_model == "OBC1"
assert gbsa_handler.solvent_dielectric == 78.5
assert gbsa_handler.solute_dielectric == 1
assert gbsa_handler.sa_model == "ACE"
assert gbsa_handler.surface_area_penalty == 5.4 * _cal_mol_a2
assert gbsa_handler.solvent_radius == 1.4 * unit.angstrom
def test_gbsahandler_setters(self):
"""Test creation of an empty GBSAHandler, with all default attributes"""
gbsa_handler = GBSAHandler(skip_version_check=True)
gbsa_handler.gb_model = "OBC2"
gbsa_handler.gb_model = "HCT"
gbsa_handler.gb_model = "OBC1"
with pytest.raises(SMIRNOFFSpecError):
gbsa_handler.gb_model = "Something invalid"
gbsa_handler.solvent_dielectric = 50.0
gbsa_handler.solvent_dielectric = "50.0"
with pytest.raises(ValueError):
gbsa_handler.solvent_dielectric = "string that can not be cast to float"
gbsa_handler.solute_dielectric = 2.5
gbsa_handler.solute_dielectric = "3.5"
with pytest.raises(ValueError):
gbsa_handler.solute_dielectric = "string that can not be cast to float"
gbsa_handler.sa_model = "ACE"
# NOTE -- Right now, the SMIRNOFF spec will implicitly assume these are the same.
gbsa_handler.sa_model = None
gbsa_handler.sa_model = "None"
with pytest.raises(TypeError):
gbsa_handler.sa_model = "Invalid SA option"
gbsa_handler.surface_area_penalty = (
1.23 * unit.kilocalorie / unit.mole / unit.nanometer**2
)
with pytest.raises(IncompatibleUnitError):
gbsa_handler.surface_area_penalty = (
1.23 * unit.degree / unit.mole / unit.nanometer**2
)
gbsa_handler.solvent_radius = 300 * unit.femtometer
with pytest.raises(IncompatibleUnitError):
gbsa_handler.solvent_radius = 3000 * unit.radian
def test_gbsahandlers_are_compatible(self):
"""
Test the check_handler_compatibility function of GBSAHandler
"""
gbsa_handler_1 = GBSAHandler(skip_version_check=True)
gbsa_handler_2 = GBSAHandler(skip_version_check=True)
# Perform a check which should pass
gbsa_handler_1.check_handler_compatibility(gbsa_handler_2)
# Perform a check which should fail
gbsa_handler_3 = GBSAHandler(
skip_version_check=True, solvent_radius=1.3 * unit.angstrom
)
with pytest.raises(
IncompatibleParameterError, match="Difference between 'solvent_radius' "
):
gbsa_handler_1.check_handler_compatibility(gbsa_handler_3)
class TestParameterTypeReExports:
def test_parametertype_reexports(self):
params_module = openff.toolkit.typing.engines.smirnoff.parameters
def subclass_attrs(obj, classinfo):
"""Iterate over members of ``obj`` that are concrete, public subclasses of ``classinfo``"""
return filter(
lambda nv: ( # (name, value)
isclass(nv[1])
and issubclass(nv[1], classinfo)
and not isabstract(nv[1])
and not nv[0].startswith("_")
),
vars(obj).items(),
)
for _, paramhandler in subclass_attrs(params_module, ParameterHandler):
for paramtype_name, paramtype in subclass_attrs(
paramhandler, ParameterType
):
assert paramtype_name in vars(params_module), (
f"ParameterType {paramtype_name!r} is "
f"not re-exported from parameters module"
)
assert vars(params_module)[paramtype_name] is paramtype, (
f"Exported attribute parameters.{paramtype_name} "
f"does not match ParameterType {paramtype_name!r}"
)
assert paramtype_name in params_module.__all__, (
f"ParameterType {paramtype_name!r} "
f"missing from parameters.__all__"
)
# TODO: test_(all attributes of all ParameterTypes)
# TODO: test_add_parameter_fractional_bondorder
# TODO: test_get_indexed_attrib
# TODO: test_set_unitbearing_attrib (requires implementing __getattr__ and __setattr__)
# TODO: test_parametertype_unit_getattr
# TODO: test_parametertype_unit_setattr
# TODO: test_optional_attribs
# TODO: test_optional_indexed_attribs
# TODO: test_(X)handler_compatibility, where X is all handlers
# --- File: telemetry/telemetry/internal/testing/dependency_test_dir/other_animals/moose/moose/horn/horn_object.py
# --- Repo: catapult-project/catapult (BSD-3-Clause)
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Horn(object):
def IsBig(self):
return True
# --- File: sdk/python/pulumi_azure/loganalytics/cluster.py
# --- Repo: pulumi/pulumi-azure (BSD-3-Clause / MPL-2.0 / Apache-2.0)
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ClusterArgs', 'Cluster']
@pulumi.input_type
class ClusterArgs:
def __init__(__self__, *,
identity: pulumi.Input['ClusterIdentityArgs'],
resource_group_name: pulumi.Input[str],
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
size_gb: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Cluster resource.
:param pulumi.Input['ClusterIdentityArgs'] identity: An `identity` block as defined below. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] location: The Azure Region where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] name: The name which should be used for this Log Analytics Cluster. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[int] size_gb: The capacity of the Log Analytics Cluster is specified in GB/day. Possible values include `500`, `1000`, `2000` or `5000`. Defaults to `1000`.
> **NOTE:** The cluster capacity must start at 500 GB and can be set to 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicated clusters](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/cost-logs#dedicated-clusters). In v3.x the default value is `1000` GB, in v4.0 of the provider this will default to `500` GB.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Log Analytics Cluster.
"""
pulumi.set(__self__, "identity", identity)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if size_gb is not None:
pulumi.set(__self__, "size_gb", size_gb)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def identity(self) -> pulumi.Input['ClusterIdentityArgs']:
"""
An `identity` block as defined below. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: pulumi.Input['ClusterIdentityArgs']):
pulumi.set(self, "identity", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the Resource Group where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Region where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Log Analytics Cluster. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="sizeGb")
def size_gb(self) -> Optional[pulumi.Input[int]]:
"""
The capacity of the Log Analytics Cluster is specified in GB/day. Possible values include `500`, `1000`, `2000` or `5000`. Defaults to `1000`.
> **NOTE:** The cluster capacity must start at 500 GB and can be set to 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicated clusters](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/cost-logs#dedicated-clusters). In v3.x the default value is `1000` GB, in v4.0 of the provider this will default to `500` GB.
"""
return pulumi.get(self, "size_gb")
@size_gb.setter
def size_gb(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "size_gb", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags which should be assigned to the Log Analytics Cluster.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _ClusterState:
def __init__(__self__, *,
cluster_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input['ClusterIdentityArgs']] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
size_gb: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Cluster resources.
:param pulumi.Input[str] cluster_id: The GUID of the cluster.
:param pulumi.Input['ClusterIdentityArgs'] identity: An `identity` block as defined below. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] location: The Azure Region where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] name: The name which should be used for this Log Analytics Cluster. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[int] size_gb: The capacity of the Log Analytics Cluster is specified in GB/day. Possible values include `500`, `1000`, `2000` or `5000`. Defaults to `1000`.
> **NOTE:** The cluster capacity must start at 500 GB and can be set to 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicated clusters](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/cost-logs#dedicated-clusters). In v3.x the default value is `1000` GB, in v4.0 of the provider this will default to `500` GB.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Log Analytics Cluster.
"""
if cluster_id is not None:
pulumi.set(__self__, "cluster_id", cluster_id)
if identity is not None:
pulumi.set(__self__, "identity", identity)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if size_gb is not None:
pulumi.set(__self__, "size_gb", size_gb)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
The GUID of the cluster.
"""
return pulumi.get(self, "cluster_id")
@cluster_id.setter
def cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "cluster_id", value)
@property
@pulumi.getter
def identity(self) -> Optional[pulumi.Input['ClusterIdentityArgs']]:
"""
An `identity` block as defined below. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "identity")
@identity.setter
def identity(self, value: Optional[pulumi.Input['ClusterIdentityArgs']]):
pulumi.set(self, "identity", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
The Azure Region where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name which should be used for this Log Analytics Cluster. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the Resource Group where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="sizeGb")
def size_gb(self) -> Optional[pulumi.Input[int]]:
"""
The capacity of the Log Analytics Cluster is specified in GB/day. Possible values include `500`, `1000`, `2000` or `5000`. Defaults to `1000`.
> **NOTE:** The cluster capacity must start at 500 GB and can be set to 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicated clusters](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/cost-logs#dedicated-clusters). In v3.x the default value is `1000` GB, in v4.0 of the provider this will default to `500` GB.
"""
return pulumi.get(self, "size_gb")
@size_gb.setter
def size_gb(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "size_gb", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags which should be assigned to the Log Analytics Cluster.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class Cluster(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
size_gb: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
        > **Note:** Log Analytics Clusters are subject to a 14-day soft delete policy. Clusters created with the same resource group & name as a previously deleted cluster will be recovered rather than created anew.
Manages a Log Analytics Cluster.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_cluster = azure.loganalytics.Cluster("exampleCluster",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
identity=azure.loganalytics.ClusterIdentityArgs(
type="SystemAssigned",
))
```
## Import
Log Analytics Clusters can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/cluster:Cluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.OperationalInsights/clusters/cluster1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ClusterIdentityArgs']] identity: An `identity` block as defined below. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] location: The Azure Region where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] name: The name which should be used for this Log Analytics Cluster. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[int] size_gb: The capacity of the Log Analytics Cluster is specified in GB/day. Possible values include `500`, `1000`, `2000` or `5000`. Defaults to `1000`.
> **NOTE:** The cluster capacity must start at 500 GB and can be set to 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicated clusters](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/cost-logs#dedicated-clusters). In v3.x the default value is `1000` GB, in v4.0 of the provider this will default to `500` GB.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Log Analytics Cluster.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ClusterArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
        > **Note:** Log Analytics Clusters are subject to a 14-day soft delete policy. Clusters created with the same resource group & name as a previously deleted cluster will be recovered rather than created anew.
Manages a Log Analytics Cluster.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_cluster = azure.loganalytics.Cluster("exampleCluster",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
identity=azure.loganalytics.ClusterIdentityArgs(
type="SystemAssigned",
))
```
## Import
Log Analytics Clusters can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:loganalytics/cluster:Cluster example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.OperationalInsights/clusters/cluster1
```
:param str resource_name: The name of the resource.
:param ClusterArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ClusterArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
size_gb: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ClusterArgs.__new__(ClusterArgs)
if identity is None and not opts.urn:
raise TypeError("Missing required property 'identity'")
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["size_gb"] = size_gb
__props__.__dict__["tags"] = tags
__props__.__dict__["cluster_id"] = None
super(Cluster, __self__).__init__(
'azure:loganalytics/cluster:Cluster',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
cluster_id: Optional[pulumi.Input[str]] = None,
identity: Optional[pulumi.Input[pulumi.InputType['ClusterIdentityArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
size_gb: Optional[pulumi.Input[int]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Cluster':
"""
Get an existing Cluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cluster_id: The GUID of the cluster.
:param pulumi.Input[pulumi.InputType['ClusterIdentityArgs']] identity: An `identity` block as defined below. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] location: The Azure Region where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] name: The name which should be used for this Log Analytics Cluster. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[str] resource_group_name: The name of the Resource Group where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
:param pulumi.Input[int] size_gb: The capacity of the Log Analytics Cluster is specified in GB/day. Possible values include `500`, `1000`, `2000` or `5000`. Defaults to `1000`.
               > **NOTE:** The cluster capacity must start at 500 GB and can be set to 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicated clusters](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/cost-logs#dedicated-clusters). In v3.x the default value is `1000` GB; in v4.0 of the provider this will default to `500` GB.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags which should be assigned to the Log Analytics Cluster.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ClusterState.__new__(_ClusterState)
__props__.__dict__["cluster_id"] = cluster_id
__props__.__dict__["identity"] = identity
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["size_gb"] = size_gb
__props__.__dict__["tags"] = tags
return Cluster(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="clusterId")
def cluster_id(self) -> pulumi.Output[str]:
"""
The GUID of the cluster.
"""
return pulumi.get(self, "cluster_id")
@property
@pulumi.getter
def identity(self) -> pulumi.Output['outputs.ClusterIdentity']:
"""
An `identity` block as defined below. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
The Azure Region where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name which should be used for this Log Analytics Cluster. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the Resource Group where the Log Analytics Cluster should exist. Changing this forces a new Log Analytics Cluster to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="sizeGb")
def size_gb(self) -> pulumi.Output[Optional[int]]:
"""
The capacity of the Log Analytics Cluster is specified in GB/day. Possible values include `500`, `1000`, `2000` or `5000`. Defaults to `1000`.
        > **NOTE:** The cluster capacity must start at 500 GB and can be set to 1000, 2000 or 5000 GB/day. For more information on cluster costs, see [Dedicated clusters](https://docs.microsoft.com/en-us/azure/azure-monitor/logs/cost-logs#dedicated-clusters). In v3.x the default value is `1000` GB; in v4.0 of the provider this will default to `500` GB.
"""
return pulumi.get(self, "size_gb")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags which should be assigned to the Log Analytics Cluster.
"""
return pulumi.get(self, "tags")
|
ee750241d1f1b25bd6ac63a8e27cccd357fccb63
|
5c00bd92979f6e20038926ec45068fe8e6a61565
|
/mushroom_rl/algorithms/policy_search/black_box_optimization/pgpe.py
|
9f504fca96e68626571de7494eb654c0a539fa21
|
[
"MIT"
] |
permissive
|
MushroomRL/mushroom-rl
|
2bf34ce38664a114ad37dc0468e1721e048359ab
|
2decae31459a3481130afe1263bc0a5ba7954a99
|
refs/heads/dev
| 2023-08-30T16:33:56.100589
| 2023-08-05T15:24:13
| 2023-08-05T15:24:13
| 83,158,675
| 477
| 128
|
MIT
| 2023-08-27T20:30:43
| 2017-02-25T19:59:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,902
|
py
|
pgpe.py
|
import numpy as np
from mushroom_rl.algorithms.policy_search.black_box_optimization import BlackBoxOptimization
class PGPE(BlackBoxOptimization):
"""
Policy Gradient with Parameter Exploration algorithm.
"A Survey on Policy Search for Robotics", Deisenroth M. P., Neumann G.,
Peters J.. 2013.
"""
def __init__(self, mdp_info, distribution, policy, optimizer,
features=None):
"""
Constructor.
Args:
optimizer: the gradient step optimizer.
"""
self.optimizer = optimizer
self._add_save_attr(optimizer='mushroom')
super().__init__(mdp_info, distribution, policy, features)
def _update(self, Jep, theta):
baseline_num_list = list()
baseline_den_list = list()
diff_log_dist_list = list()
# Compute derivatives of distribution and baseline components
for i in range(len(Jep)):
J_i = Jep[i]
theta_i = theta[i]
diff_log_dist = self.distribution.diff_log(theta_i)
diff_log_dist2 = diff_log_dist**2
diff_log_dist_list.append(diff_log_dist)
baseline_num_list.append(J_i * diff_log_dist2)
baseline_den_list.append(diff_log_dist2)
# Compute baseline
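        # Component-wise, the baseline computed below is
        #   b_k = sum_i(J_i * g_ik^2) / sum_i(g_ik^2)
        # where g_i = d(log p(theta_i | omega))/d(omega), the standard
        # variance-reducing baseline for parameter-based policy gradients;
        # entries with a vanishing denominator are zeroed just afterwards.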
baseline = np.mean(baseline_num_list, axis=0) / \
np.mean(baseline_den_list, axis=0)
baseline[np.logical_not(np.isfinite(baseline))] = 0.
# Compute gradient
grad_J_list = list()
for i in range(len(Jep)):
diff_log_dist = diff_log_dist_list[i]
J_i = Jep[i]
grad_J_list.append(diff_log_dist * (J_i - baseline))
grad_J = np.mean(grad_J_list, axis=0)
omega_old = self.distribution.get_parameters()
omega_new = self.optimizer(omega_old, grad_J)
self.distribution.set_parameters(omega_new)
|
6dfd1553ac3ac689c8a7459122af0820b1bf9843
|
507103d591ed6993203db92fd8dffc992e8bcd5c
|
/k2/python/host/tests/properties_test.py
|
c34eb2cbe27121eb684d0b5496ff31a067a2d088
|
[
"Apache-2.0"
] |
permissive
|
k2-fsa/k2
|
6e661bd505f06583af779f4249bbb8ea87a0d662
|
2b2ac14b326d61d79d04e53fbd69b1ff6d630411
|
refs/heads/master
| 2023-09-03T11:57:13.505432
| 2023-08-23T21:58:26
| 2023-08-23T21:58:26
| 256,463,281
| 851
| 192
|
Apache-2.0
| 2023-08-26T06:51:21
| 2020-04-17T09:44:05
|
Cuda
|
UTF-8
|
Python
| false
| false
| 8,162
|
py
|
properties_test.py
|
#!/usr/bin/env python3
#
# Copyright 2020 Xiaomi Corporation (author: Haowen Qiu)
#
# See ../../../LICENSE for clarification regarding multiple authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# To run this single test, use
#
# ctest --verbose -R host_properties_test_py
#
import unittest
import torch
import k2host
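# Textual FSA format used by k2host.str_to_fsa in these tests: each arc line
# reads "src_state dest_state label score" (the fourth field appears to be a
# per-arc score), and the last line names the final state.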
class TestIsValid(unittest.TestCase):
def test_bad_case1(self):
# fsa should contain at least two states
array_size = k2host.IntArray2Size(1, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertFalse(k2host.is_valid(fsa))
def test_bad_case2(self):
        # invalid: arcs entering the final state must carry the label kFinalSymbol (-1)
s = r'''
0 1 0 0
0 2 1 0
1 2 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_valid(fsa))
def test_bad_case3(self):
# `arc_indexes` and `arcs` in this state are not consistent
arc_indexes = torch.IntTensor([0, 2, 2, 2])
arcs = torch.IntTensor([[0, 1, 0, 0], [0, 2, 1, 0], [1, 2, 0, 0]])
fsa = k2host.Fsa(arc_indexes, arcs)
self.assertFalse(k2host.is_valid(fsa))
def test_good_cases1(self):
# empty fsa is valid
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_valid(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
0 2 0 0
2 3 -1 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_valid(fsa))
def test_good_case3(self):
s = r'''
0 1 0 0
0 2 -1 0
1 2 -1 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_valid(fsa))
class TestIsTopSorted(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 0 0
0 2 0 0
2 1 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_top_sorted(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_top_sorted(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
0 2 0 0
1 2 0 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_top_sorted(fsa))
class TestIsArcSorted(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 1 0
0 2 2 0
1 2 2 0
1 3 1 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_arc_sorted(fsa))
def test_bad_cases2(self):
        # both arcs from state 0 carry the same label, so sorting falls to the
        # destination state, which is out of order here
s = r'''
0 2 0 0
0 1 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_arc_sorted(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_arc_sorted(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
0 2 0 0
1 2 1 0
1 3 2 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_arc_sorted(fsa))
class TestHasSelfLoops(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 0 0
0 2 0 0
1 2 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.has_self_loops(fsa))
def test_bad_cases2(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertFalse(k2host.has_self_loops(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
1 2 0 0
1 1 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.has_self_loops(fsa))
class TestIsDeterministic(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 2 0
1 2 0 0
1 3 0 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_deterministic(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_deterministic(fsa))
def test_good_case2(self):
s = r'''
0 1 2 0
1 2 0 0
1 3 2 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_deterministic(fsa))
class TestIsEpsilonFree(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 2 0
0 2 0 0
1 2 1 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_epsilon_free(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_epsilon_free(fsa))
def test_good_case2(self):
s = r'''
0 1 2 0
0 2 1 0
1 2 1 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_epsilon_free(fsa))
class TestIsConnected(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 2 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_connected(fsa))
def test_bad_cases2(self):
s = r'''
0 1 0 0
0 2 0 0
2
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_connected(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_connected(fsa))
def test_good_case2(self):
s = r'''
0 1 0 0
0 3 0 0
1 2 0 0
2 3 0 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_connected(fsa))
def test_good_case3(self):
s = r'''
0 3 0 0
1 2 0 0
2 3 0 0
2 3 0 0
2 4 0 0
3 1 0 0
4
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_connected(fsa))
class TestIsAcyclic(unittest.TestCase):
def test_bad_cases1(self):
s = r'''
0 1 2 0
0 4 0 0
0 2 0 0
1 2 1 0
1 3 0 0
2 1 0 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_acyclic(fsa))
def test_good_cases1(self):
# empty fsa
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_acyclic(fsa))
def test_good_case2(self):
s = r'''
0 1 2 0
0 2 1 0
1 2 0 0
1 3 5 0
2 3 6 0
3
'''
fsa = k2host.str_to_fsa(s)
self.assertTrue(k2host.is_acyclic(fsa))
class TestIsEmpty(unittest.TestCase):
def test_good_cases1(self):
array_size = k2host.IntArray2Size(0, 0)
fsa = k2host.Fsa.create_fsa_with_size(array_size)
self.assertTrue(k2host.is_empty(fsa))
def test_bad_case1(self):
s = r'''
0 1 2 0
1
'''
fsa = k2host.str_to_fsa(s)
self.assertFalse(k2host.is_empty(fsa))
if __name__ == '__main__':
unittest.main()
|
d8eddc22616deb418cf6836af94374c9c3aeb854
|
aee26a4c731a84481a499679c3d4cef9ec954aed
|
/tacker/vnfm/policy_actions/respawn/respawn.py
|
1a89d0b81e0e7679ffaca52a9382a47a65875b1f
|
[
"Apache-2.0"
] |
permissive
|
openstack/tacker
|
6976cbee3afadfd9390849b56da2837feb93e912
|
9c7918f0b501cdeaffae40f585b76fc92b8e196e
|
refs/heads/master
| 2023-09-04T01:22:43.106241
| 2023-08-31T00:06:42
| 2023-08-31T00:42:20
| 21,259,951
| 125
| 172
|
Apache-2.0
| 2021-05-09T06:13:08
| 2014-06-27T01:11:56
|
Python
|
UTF-8
|
Python
| false
| false
| 3,613
|
py
|
respawn.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from oslo_log import log as logging
from tacker.plugins.common import constants
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.policy_actions import abstract_action
from tacker.vnfm import utils as vnfm_utils
from tacker.vnfm import vim_client
LOG = logging.getLogger(__name__)
class VNFActionRespawn(abstract_action.AbstractPolicyAction):
def get_type(self):
return 'respawn'
def get_name(self):
return 'respawn'
def get_description(self):
return 'Tacker VNF respawning policy'
def execute_action(self, plugin, context, vnf_dict, args):
vnf_id = vnf_dict['id']
LOG.info('vnf %s is dead and needs to be respawned', vnf_id)
attributes = vnf_dict['attributes']
vim_id = vnf_dict['vim_id']
def _update_failure_count():
failure_count = int(attributes.get('failure_count', '0')) + 1
failure_count_str = str(failure_count)
LOG.debug("vnf %(vnf_id)s failure count %(failure_count)s",
{'vnf_id': vnf_id, 'failure_count': failure_count_str})
attributes['failure_count'] = failure_count_str
attributes['dead_instance_id_' + failure_count_str] = vnf_dict[
'instance_id']
def _fetch_vim(vim_uuid):
vim_res = vim_client.VimClient().get_vim(context, vim_uuid)
return vim_res
def _delete_heat_stack(vim_auth):
placement_attr = vnf_dict.get('placement_attr', {})
region_name = placement_attr.get('region_name')
heatclient = hc.HeatClient(auth_attr=vim_auth,
region_name=region_name)
heatclient.delete(vnf_dict['instance_id'])
LOG.debug("Heat stack %s delete initiated",
vnf_dict['instance_id'])
vnfm_utils.log_events(context, vnf_dict,
constants.RES_EVT_MONITOR,
"ActionRespawnHeat invoked")
def _respawn_vnf():
update_vnf_dict = plugin.create_vnf_sync(context, vnf_dict)
LOG.info('respawned new vnf %s', update_vnf_dict['id'])
plugin.config_vnf(context, update_vnf_dict)
return update_vnf_dict
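        # Recovery flow: proceed only if this VNF can be marked dead, then
        # record the failure, delete the backing Heat stack, and respawn a
        # replacement according to the attached monitoring/alarming policy.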
if plugin._mark_vnf_dead(vnf_dict['id']):
_update_failure_count()
vim_res = _fetch_vim(vim_id)
if vnf_dict['attributes'].get('monitoring_policy'):
plugin._vnf_monitor.mark_dead(vnf_dict['id'])
_delete_heat_stack(vim_res['vim_auth'])
updated_vnf = _respawn_vnf()
plugin.add_vnf_to_monitor(context, updated_vnf)
LOG.debug("VNF %s added to monitor thread",
updated_vnf['id'])
if vnf_dict['attributes'].get('alarming_policy'):
_delete_heat_stack(vim_res['vim_auth'])
vnf_dict['attributes'].pop('alarming_policy')
_respawn_vnf()
|
7dd4916ae86f5e33f43fb509fa1cbac449ee64a8
|
f3806d9fb54773908cd9704121a543b114470aca
|
/angr/state_plugins/heap/heap_ptmalloc.py
|
d9e4a7aed9c688dfce7f8751e28a72a2857cb118
|
[
"BSD-2-Clause"
] |
permissive
|
angr/angr
|
8ae95fceca51b0a001de56477d984dd01193ac1d
|
37e8ca1c3308ec601ad1d7c6bc8081ff38a7cffd
|
refs/heads/master
| 2023-08-17T03:15:21.007865
| 2023-08-15T18:44:57
| 2023-08-15T18:44:57
| 40,328,394
| 7,184
| 1,306
|
BSD-2-Clause
| 2023-09-14T20:14:23
| 2015-08-06T21:46:55
|
Python
|
UTF-8
|
Python
| false
| false
| 28,967
|
py
|
heap_ptmalloc.py
|
from ..plugin import SimStatePlugin
from .heap_freelist import SimHeapFreelist, Chunk
from .utils import concretize
from ...errors import SimHeapError, SimMergeError, SimSolverError
import logging
l = logging.getLogger("angr.state_plugins.heap.heap_ptmalloc")
sml = logging.getLogger("angr.state_plugins.symbolic_memory")
CHUNK_FLAGS_MASK = 0x07
CHUNK_P_MASK = 0x01
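# As in ptmalloc, chunk sizes stay aligned so the low bits of the size field
# are free to hold flags; the lowest bit (CHUNK_P_MASK, the "P" flag) records
# whether the *previous* chunk is allocated.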
# These are included as sometimes the heap will touch uninitialized locations, which normally causes a warning
def silence_logger():
level = sml.getEffectiveLevel()
sml.setLevel("ERROR")
return level
def unsilence_logger(level):
sml.setLevel(level)
class PTChunk(Chunk):
"""
A chunk, inspired by the implementation of chunks in ptmalloc. Provides a representation of a chunk via a view into
the memory plugin. For the chunk definitions and docs that this was loosely based off of, see glibc malloc/malloc.c,
line 1033, as of commit 5a580643111ef6081be7b4c7bd1997a5447c903f. Alternatively, take the following link.
https://sourceware.org/git/?p=glibc.git;a=blob;f=malloc/malloc.c;h=67cdfd0ad2f003964cd0f7dfe3bcd85ca98528a7;hb=5a580643111ef6081be7b4c7bd1997a5447c903f#l1033
:ivar base: the location of the base of the chunk in memory
:ivar state: the program state that the chunk is resident in
:ivar heap: the heap plugin that the chunk is managed by
"""
def __init__(self, base, sim_state, heap=None):
super().__init__(base, sim_state)
# This is necessary since the heap can't always be referenced through the state, e.g. during heap initialization
self.heap = self.state.heap if heap is None else heap
# Size in bytes of the type used to store a piece of metadata
self._chunk_size_t_size = self.heap._chunk_size_t_size
self._chunk_min_size = self.heap._chunk_min_size
self._chunk_align_mask = self.heap._chunk_align_mask
def get_size(self):
return self.state.memory.load(self.base + self._chunk_size_t_size, self._chunk_size_t_size) & ~CHUNK_FLAGS_MASK
def get_data_size(self):
chunk_size = self.get_size()
if self.is_free():
return chunk_size - 4 * self._chunk_size_t_size
else:
return chunk_size - 2 * self._chunk_size_t_size
def _set_leading_size(self, size):
level = silence_logger()
chunk_flags = (
self.state.memory.load(self.base + self._chunk_size_t_size, self._chunk_size_t_size) & CHUNK_FLAGS_MASK
)
unsilence_logger(level)
self.state.memory.store(self.base + self._chunk_size_t_size, size | chunk_flags, size=self.state.arch.bytes)
def _set_trailing_size(self, size):
if self.is_free():
next_chunk = self.next_chunk()
if next_chunk is not None:
self.state.memory.store(next_chunk.base, size, self.state.arch.bytes)
def set_size(self, size, is_free=None): # pylint:disable=arguments-differ
"""
Use this to set the size on a chunk. When the chunk is new (such as when a free chunk is shrunk to form an
allocated chunk and a remainder free chunk) it is recommended that the is_free hint be used since setting the
size depends on the chunk's freeness, and vice versa.
:param size: size of the chunk
:param is_free: boolean indicating the chunk's freeness
"""
self._set_leading_size(size)
next_chunk = self.next_chunk()
if is_free is not None:
if next_chunk is not None:
next_chunk.set_prev_freeness(is_free)
else:
self.heap._set_final_freeness(is_free)
        if (is_free is not None and is_free) or self.is_free():
if next_chunk is not None:
self.state.memory.store(next_chunk.base, size, size=self.state.arch.bytes)
def set_prev_freeness(self, is_free):
"""
Sets (or unsets) the flag controlling whether the previous chunk is free.
:param is_free: if True, sets the previous chunk to be free; if False, sets it to be allocated
"""
level = silence_logger()
size_field = self.state.memory.load(self.base + self._chunk_size_t_size, self._chunk_size_t_size)
unsilence_logger(level)
if is_free:
self.state.memory.store(
self.base + self._chunk_size_t_size, size_field & ~CHUNK_P_MASK, size=self.state.arch.bytes
)
else:
self.state.memory.store(
self.base + self._chunk_size_t_size, size_field | CHUNK_P_MASK, size=self.state.arch.bytes
)
def is_prev_free(self):
"""
Returns a concrete state of the flag indicating whether the previous chunk is free or not. Issues a warning if
that flag is symbolic and has multiple solutions, and then assumes that the previous chunk is free.
:returns: True if the previous chunk is free; False otherwise
"""
flag = self.state.memory.load(self.base + self._chunk_size_t_size, self._chunk_size_t_size) & CHUNK_P_MASK
def sym_flag_handler(flag):
l.warning("A chunk's P flag is symbolic; assuming it is not set")
return self.state.solver.min_int(flag)
flag = concretize(flag, self.state.solver, sym_flag_handler)
return not flag
def prev_size(self):
"""
Returns the size of the previous chunk, masking off what would be the flag bits if it were in the actual size
field. Performs NO CHECKING to determine whether the previous chunk size is valid (for example, when the
previous chunk is not free, its size cannot be determined).
"""
return self.state.memory.load(self.base, self._chunk_size_t_size) & ~CHUNK_FLAGS_MASK
def is_free(self):
next_chunk = self.next_chunk()
if next_chunk is not None:
return next_chunk.is_prev_free()
else:
flag = (
self.state.memory.load(
self.heap.heap_base + self.heap.heap_size - self._chunk_size_t_size, self._chunk_size_t_size
)
& CHUNK_P_MASK
)
def sym_flag_handler(flag):
l.warning("The final P flag is symbolic; assuming it is not set")
return self.state.solver.min_int(flag)
flag = concretize(flag, self.state.solver, sym_flag_handler)
return not flag
def data_ptr(self):
return self.base + (2 * self._chunk_size_t_size)
def next_chunk(self):
"""
Returns the chunk immediately following (and adjacent to) this one, if it exists.
:returns: The following chunk, or None if applicable
"""
def sym_base_handler(base):
l.warning("A computed chunk base is symbolic; maximizing it")
return self.state.solver.max_int(base)
base = concretize(self.base + self.get_size(), self.state.solver, sym_base_handler)
if base >= self.heap.heap_base + self.heap.heap_size - 2 * self._chunk_size_t_size:
return None
else:
return PTChunk(base, self.state)
def prev_chunk(self):
"""
Returns the chunk immediately prior (and adjacent) to this one, if that chunk is free. If the prior chunk is not
free, then its base cannot be located and this method raises an error.
:returns: If possible, the previous chunk; otherwise, raises an error
"""
if self.is_prev_free():
return PTChunk(self.base - self.prev_size(), self.state)
else:
raise SimHeapError("Attempted to access the previous chunk, but it was not free")
def fwd_chunk(self):
"""
Returns the chunk following this chunk in the list of free chunks. If this chunk is not free, then it resides in
no such list and this method raises an error.
:returns: If possible, the forward chunk; otherwise, raises an error
"""
if self.is_free():
base = self.state.memory.load(
self.base + 2 * self._chunk_size_t_size, self._chunk_size_t_size, endness=self.state.arch.memory_endness
)
return PTChunk(base, self.state)
else:
raise SimHeapError("Attempted to access the forward chunk of an allocated chunk")
def set_fwd_chunk(self, fwd):
self.state.memory.store(
self.base + 2 * self._chunk_size_t_size,
fwd.base,
endness=self.state.arch.memory_endness,
size=self.state.arch.bytes,
)
def bck_chunk(self):
"""
Returns the chunk backward from this chunk in the list of free chunks. If this chunk is not free, then it
resides in no such list and this method raises an error.
:returns: If possible, the backward chunk; otherwise, raises an error
"""
if self.is_free():
base = self.state.memory.load(
self.base + 3 * self._chunk_size_t_size, self._chunk_size_t_size, endness=self.state.arch.memory_endness
)
return PTChunk(base, self.state)
else:
raise SimHeapError("Attempted to access the backward chunk of an allocated chunk")
def set_bck_chunk(self, bck):
self.state.memory.store(
self.base + 3 * self._chunk_size_t_size,
bck.base,
endness=self.state.arch.memory_endness,
size=self.state.arch.bytes,
)
class PTChunkIterator:
def __init__(self, chunk, cond=lambda chunk: True):
self.chunk = chunk
self.cond = cond
def __iter__(self):
return self
def __next__(self):
if self.chunk is None:
raise StopIteration
if self.cond(self.chunk):
ret = self.chunk
self.chunk = self.chunk.next_chunk()
else:
while self.chunk is not None and not self.cond(self.chunk):
self.chunk = self.chunk.next_chunk()
if self.chunk is None:
raise StopIteration
ret = self.chunk
self.chunk = self.chunk.next_chunk()
return ret
class SimHeapPTMalloc(SimHeapFreelist):
"""
A freelist-style heap implementation inspired by ptmalloc. The chunks used by this heap contain heap metadata in
addition to user data. While the real-world ptmalloc is implemented using multiple lists of free chunks
(corresponding to their different sizes), this more basic model uses a single list of chunks and searches for free
chunks using a first-fit algorithm.
**NOTE:** The plugin must be registered using ``register_plugin`` with name ``heap`` in order to function properly.
:ivar heap_base: the address of the base of the heap in memory
:ivar heap_size: the total size of the main memory region managed by the heap in memory
:ivar mmap_base: the address of the region from which large mmap allocations will be made
:ivar free_head_chunk: the head of the linked list of free chunks in the heap
"""
def __init__(self, heap_base=None, heap_size=None):
super().__init__(heap_base, heap_size)
# All of these depend on the state and so are initialized in init_state
self._free_head_chunk_exists = True # Only used during plugin copy due to the dependency on the memory plugin
self._free_head_chunk_init_base = None # Same as above
self._chunk_size_t_size = None # Size (bytes) of the type used to store a piece of metadata
self._chunk_min_size = None # Based on needed fields for any chunk
self._chunk_align_mask = 0
self.free_head_chunk = None
self._initialized = False
@SimStatePlugin.memo
def copy(self, memo): # pylint: disable=unused-argument
o = super().copy(memo)
o._free_head_chunk_exists = self.free_head_chunk is not None
o._free_head_chunk_init_base = self.free_head_chunk.base if self.free_head_chunk is not None else None
o._initialized = self._initialized
return o
def chunks(self):
return PTChunkIterator(PTChunk(self.heap_base, self.state))
def allocated_chunks(self):
return PTChunkIterator(PTChunk(self.heap_base, self.state), lambda chunk: not chunk.is_free())
def free_chunks(self):
return PTChunkIterator(PTChunk(self.heap_base, self.state), lambda chunk: chunk.is_free())
def chunk_from_mem(self, ptr):
"""
Given a pointer to a user payload, return the base of the chunk associated with that payload (i.e. the chunk
pointer). Returns None if ptr is null.
:param ptr: a pointer to the base of a user payload in the heap
:returns: a pointer to the base of the associated heap chunk, or None if ptr is null
"""
if self.state.solver.symbolic(ptr):
try:
ptr = self.state.solver.eval_one(ptr)
except SimSolverError:
l.warning("A pointer to a chunk is symbolic; maximizing it")
ptr = self.state.solver.max_int(ptr)
else:
ptr = self.state.solver.eval(ptr)
return PTChunk(ptr - (2 * self._chunk_size_t_size), self.state) if ptr != 0 else None
def _find_bck(self, chunk):
"""
        Simply finds the free chunk that would be the backwards chunk relative to the given chunk. Hence, the free head
and all other metadata are unaltered by this function.
"""
cur = self.free_head_chunk
if cur is None:
return None
fwd = cur.fwd_chunk()
if cur == fwd:
return cur
# At this point there should be at least two free chunks in the heap
if cur < chunk:
while cur < fwd < chunk:
cur = fwd
fwd = cur.fwd_chunk()
return cur
else:
while fwd != self.free_head_chunk:
cur = fwd
fwd = cur.fwd_chunk()
return cur
def _set_final_freeness(self, flag):
"""
Sets the freedom of the final chunk. Since no proper chunk follows the final chunk, the heap itself manages
this. Nonetheless, for now it is implemented as if an additional chunk followed the final chunk.
"""
if flag:
self.state.memory.store(
self.heap_base + self.heap_size - self._chunk_size_t_size, ~CHUNK_P_MASK, size=self.state.arch.bytes
)
else:
self.state.memory.store(
self.heap_base + self.heap_size - self._chunk_size_t_size, CHUNK_P_MASK, size=self.state.arch.bytes
)
def _make_chunk_size(self, req_size):
"""
Takes an allocation size as requested by the user and modifies it to be a suitable chunk size.
"""
size = req_size
size += 2 * self._chunk_size_t_size # Two size fields
size = self._chunk_min_size if size < self._chunk_min_size else size
if size & self._chunk_align_mask: # If the chunk would not be aligned
size = (size & ~self._chunk_align_mask) + self._chunk_align_mask + 1 # Fix it
return size
def malloc(self, sim_size):
size = self._conc_alloc_size(sim_size)
req_size = size
size = self._make_chunk_size(size)
chunk = None # This will be the resulting allocation
free_chunk = self.free_head_chunk
if free_chunk is None:
l.warning("No free chunks available; heap space exhausted")
return 0 # No free chunks available
# This handler will be necessary as we'll be checking the size fields of many free chunks
def sym_free_size_handler(size):
l.warning("A free chunk's size field is symbolic; maximizing it")
return self.state.solver.max_int(size)
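        # First-fit search: walk the free list until a chunk at least as large
        # as the aligned request turns up, splitting off the unused tail as a
        # free "remainder" chunk whenever it is big enough to stand alone.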
while chunk is None:
free_size = free_chunk.get_size()
free_size = concretize(free_size, self.state.solver, sym_free_size_handler)
if free_size < size:
# Chunk is too small to be used; move to the next or fail
fwd = free_chunk.fwd_chunk()
if fwd <= free_chunk:
l.debug("No free chunks of sufficient size available")
return 0
else:
free_chunk = fwd
elif free_size > size and free_size - size >= self._chunk_min_size:
                # Chunk is larger than needed and the excess is big enough to split off as a free remainder
chunk = free_chunk
bck = free_chunk.bck_chunk() # Store these now as we'll have to remove this chunk from the list
fwd = free_chunk.fwd_chunk()
rem_chunk = PTChunk(chunk.base + size, chunk.state) # The "remainder" chunk is the unused portion after
rem_chunk.set_size(free_size - size, True) # the allocation is made. Since it follows the used
rem_chunk.set_prev_freeness(False) # portion, we can set the used chunk as not free.
if free_chunk == self.free_head_chunk:
self.free_head_chunk = rem_chunk # If the used chunk had been the head, now the remainder is
chunk.set_size(size)
if free_chunk == bck and free_chunk == fwd: # If the free chunk had been the only free chunk, then the
rem_chunk.set_bck_chunk(rem_chunk) # remainder chunk is now the only free chunk
rem_chunk.set_fwd_chunk(rem_chunk)
else:
rem_chunk.set_bck_chunk(bck) # Otherwise there was at least one other chunk, and the
rem_chunk.set_fwd_chunk(fwd) # remainder chunk may safely replace the original in the
bck.set_fwd_chunk(rem_chunk) # list
fwd.set_bck_chunk(rem_chunk)
else:
# Chunk is a perfect fit, or the remainder would be too small to split off as a free chunk
chunk = free_chunk
fwd = free_chunk.fwd_chunk() # Once again we store these in advance
bck = free_chunk.bck_chunk()
if bck == fwd and free_chunk == fwd: # Last chunk being used up
self.free_head_chunk = None
else:
if free_chunk == self.free_head_chunk:
self.free_head_chunk = fwd
bck.set_fwd_chunk(fwd) # We can safely remove the chunk from the list
fwd.set_bck_chunk(bck)
next_chunk = chunk.next_chunk() # Now we set the new chunk to be allocated, using a different
if next_chunk is not None: # approach depending on whether the chunk was the last in the
next_chunk.set_prev_freeness(False) # heap or not
else:
self._set_final_freeness(False)
addr = chunk.data_ptr()
l.debug("Requested: %4d; Allocated: %4d; Returned: %#08x; Chunk: %#08x", req_size, size, addr, chunk.base)
return addr
def free(self, ptr):
# In the following, "next" and "previous" (and their abbreviations) refer to the adjacent chunks in memory,
# while forward and backward (and their abbreviations) refer to the adjacent chunks in the list of free chunks
req = ptr
chunk = self.chunk_from_mem(ptr)
if chunk is None or chunk.is_free():
return
size = chunk.get_size()
p_in_use = not chunk.is_prev_free()
n_ptr = chunk.next_chunk()
        n_in_use = n_ptr is None or not n_ptr.is_free()  # a missing next chunk counts as in use
        if p_in_use and n_in_use:
# When both adjacent chunks are in use, no merging will be
# necessary between the freed chunk and another free chunk
if n_ptr is not None:
n_ptr.set_prev_freeness(True) # Set the chunk to be free
else:
self._set_final_freeness(True)
chunk.set_size(size) # Reset the chunk's size to account for the trailing size field
bck = self._find_bck(chunk) # Scan the free list to determine where to insert the newly freed chunk
if bck is None:
# There was no other chunk in the free list
self.free_head_chunk = chunk
bck = chunk
fwd = chunk
else:
# Insert the chunk after the bck chunk that was found
fwd = bck.fwd_chunk()
bck.set_fwd_chunk(chunk)
fwd.set_bck_chunk(chunk)
chunk.set_bck_chunk(bck)
chunk.set_fwd_chunk(fwd)
if chunk < self.free_head_chunk:
self.free_head_chunk = chunk
elif not p_in_use and not n_in_use:
# If both the adjacent chunks are free, merging between all three will be needed
p_ptr = chunk.prev_chunk() # The previous chunk will be the base of the overall new chunk
p_ptr.set_size(p_ptr.get_size() + size + n_ptr.get_size())
n_fwd = n_ptr.fwd_chunk() # The chunk forward from the chunk that's next after the freed chunk, which is
p_ptr.set_fwd_chunk(n_fwd) # needed since we're removing a free chunk (chunk.next_chunk()) from the linked
n_fwd.set_bck_chunk(p_ptr) # list
else:
# There are two remaining cases, but we handle them generically below by deciding on a base for a new chunk,
# determining its size, and updating all metadata around it (even though sometimes it isn't necessary).
if not p_in_use:
base = chunk.prev_chunk()
new_size = size + base.get_size()
bck = base.bck_chunk()
fwd = base.fwd_chunk()
else:
n_size = n_ptr.get_size()
base = chunk
new_size = size + n_size
bck = n_ptr.bck_chunk()
fwd = n_ptr.fwd_chunk()
# In case the freed chunk preceded the free head
if base < self.free_head_chunk:
self.free_head_chunk = base
# In case the following chunk was the last free chunk, we can't use its links due to the merge
if bck == fwd and bck == n_ptr:
bck = base
fwd = base
base.set_size(new_size)
base.set_bck_chunk(bck)
base.set_fwd_chunk(fwd)
bck.set_fwd_chunk(base)
fwd.set_bck_chunk(base)
new_next = base.next_chunk()
if new_next is not None:
new_next.set_prev_freeness(True)
else:
self._set_final_freeness(True)
# FIXME: must set size twice so that once free, the trailing size field is updated; more elegant way?
base.set_size(new_size)
l.debug(
"Free request: %#08x; Freed chunk: %#08x", self.state.solver.eval(req), self.state.solver.eval(chunk.base)
)
def calloc(self, sim_nmemb, sim_size):
size = self._conc_alloc_size(sim_nmemb * sim_size)
addr = self.malloc(size)
if addr == 0:
return 0
if size != 0:
z = self.state.solver.BVV(0, size * 8)
self.state.memory.store(addr, z)
return addr
def realloc(self, ptr, size):
chunk = self.chunk_from_mem(ptr)
if chunk is None: # ptr is null
return self.malloc(size)
size = self._conc_alloc_size(size)
if size == 0:
# assumes that REALLOC_ZERO_BYTES_FREES is set for ptmalloc
self.free(ptr)
return 0
old_size = chunk.get_size()
def sym_size_handler(sym_size):
l.warning("An allocated chunk's size field is symbolic; maximizing it")
return self.state.solver.max_int(sym_size)
old_size = concretize(old_size, self.state.solver, sym_size_handler)
new_size = self._make_chunk_size(size)
if new_size > old_size:
# If more space is needed, will have to reallocate
# TODO: this could be made more complex to make better usage of remaining heap space when it runs out by
# TODO: checking for smaller adjacent free chunks that could be amalgamated (rather than malloc, copy, free)
new_data_ptr = self.malloc(size) # Make the new allocation
if new_data_ptr == 0: # Check for failure
return 0
# Copy the old data over
old_data_ptr = chunk.data_ptr()
level = silence_logger()
old_data = self.state.memory.load(old_data_ptr, size=old_size - 2 * self._chunk_size_t_size)
unsilence_logger(level)
self.state.memory.store(new_data_ptr, old_data)
self.free(old_data_ptr) # Free the old chunk
return new_data_ptr
elif new_size < old_size and old_size - new_size >= self._chunk_min_size:
# Less space is needed, so just shrink the chunk and create a new free chunk from the freed space
chunk.set_size(new_size, False)
new_next_chunk = chunk.next_chunk()
new_next_chunk.set_size(old_size - new_size, False)
new_next_chunk.set_prev_freeness(False)
self.free(new_next_chunk.data_ptr())
return chunk.data_ptr()
else:
# No changes needed; we're already the right size
return chunk.data_ptr()
def _malloc(self, sim_size):
return self.malloc(sim_size)
def _free(self, ptr):
return self.free(ptr)
def _calloc(self, sim_nmemb, sim_size):
return self.calloc(sim_nmemb, sim_size)
def _realloc(self, ptr, size):
return self.realloc(ptr, size)
def _combine(self, others):
if any(o.heap_base != self.heap_base for o in others):
raise SimMergeError("Cannot merge heaps with different bases")
# When heaps become more dynamic, this next one can probably change
if any(o.heap_size != self.heap_size for o in others):
raise SimMergeError("Cannot merge heaps with different sizes")
if any(o.free_head_chunk != self.free_head_chunk for o in others):
raise SimMergeError("Cannot merge heaps with different freelist head chunks")
if any(o.mmap_base != self.mmap_base for o in others):
raise SimMergeError("Cannot merge heaps with different mmap bases")
# These are definitely sanity checks
if any(o._chunk_size_t_size != self._chunk_size_t_size for o in others):
raise SimMergeError("Cannot merge heaps with different chunk size_t sizes")
if any(o._chunk_min_size != self._chunk_min_size for o in others):
raise SimMergeError("Cannot merge heaps with different minimum chunk sizes")
if any(o._chunk_align_mask != self._chunk_align_mask for o in others):
raise SimMergeError("Cannot merge heaps with different chunk alignments")
return False
def merge(self, others, merge_conditions, common_ancestor=None): # pylint:disable=unused-argument
return self._combine(others)
def widen(self, others):
return self._combine(others)
def init_state(self):
super().init_state()
self._chunk_size_t_size = self.state.arch.bytes
self._chunk_min_size = 4 * self._chunk_size_t_size
self._chunk_align_mask = 2 * self._chunk_size_t_size - 1
# TODO: where are bin metadata stored in reality?
if self._free_head_chunk_exists and self._free_head_chunk_init_base is None:
free_base = self.heap_base
if self.heap_base & self._chunk_align_mask:
free_base = (self.heap_base & ~self._chunk_align_mask) + self._chunk_align_mask + 1
self.free_head_chunk = PTChunk(free_base, self.state, self)
elif not self._free_head_chunk_exists:
self.free_head_chunk = None
else:
self.free_head_chunk = PTChunk(self._free_head_chunk_init_base, self.state)
# We reserve enough space at the top of the heap to simulate the presence of another chunk, for the purpose of
# storing the usage information of the real final chunk
if not self._initialized:
self.state.memory.store(
self.free_head_chunk.base + self._chunk_size_t_size,
((self.heap_size - 2 * self._chunk_size_t_size) & ~self._chunk_align_mask) | CHUNK_P_MASK,
size=self.state.arch.bytes,
)
self._set_final_freeness(True)
self.free_head_chunk.set_fwd_chunk(self.free_head_chunk)
self.free_head_chunk.set_bck_chunk(self.free_head_chunk)
self._initialized = True
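# A minimal usage sketch (assumes an existing angr Project named `proj`,
# which is hypothetical here); per the class docstring the plugin must be
# registered under the name "heap":
#
#   state = proj.factory.entry_state()
#   state.register_plugin("heap", SimHeapPTMalloc())
#   buf = state.heap.malloc(24)
#   state.heap.free(buf)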
|
0d4fa6f52b42aa9d3ee3ea267af1e0975dddc915
|
ea68fad8ce90c488fe9846d6fe636ef94d35fc8c
|
/tests/split-test.py
|
6c283eccb758af4f7bd0b8a3340271e550ee86e3
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
runtimeverification/iele-semantics
|
03eb1bf28c1afbf682bb83f86f269f36f374adaf
|
e030f7592753ee3dd6452757a5fdcfe3baacf037
|
refs/heads/master
| 2023-08-04T15:02:32.863523
| 2023-07-20T13:54:42
| 2023-07-20T13:54:42
| 105,186,520
| 131
| 39
|
NOASSERTION
| 2023-07-18T16:39:18
| 2017-09-28T18:44:13
|
HTML
|
UTF-8
|
Python
| false
| false
| 823
|
py
|
split-test.py
|
#!/usr/bin/env python
import sys
import json
import os
import subprocess
# Example usage: tests/ethereum-tests/VMTests/abc.json tests/VMTests/abc/
source_file = sys.argv[1]
target_dir = sys.argv[2]
evm_test_to_iele = os.path.join(os.path.dirname(__file__), "evm-to-iele", "evm-test-to-iele")
with open (source_file, "r") as source:
original_test = json.load(source)
for subtest in original_test.keys():
target_file = os.path.join(target_dir, subtest + ".json")
target_iele_file = os.path.join(target_dir, subtest + ".iele.json")
with open (target_file, "w+") as target:
json.dump({ subtest: original_test[subtest] }, target, indent=4)
with open (target_iele_file, "w+") as target:
subprocess.check_call([evm_test_to_iele, target_file, target_iele_file])
|
6c281812cf84e31b24676ed2a9c1ff1462bdb61a
|
df87814cb32990ad8c27d0b13a821aabce012819
|
/kolibri/core/auth/test/test_permissions_classes.py
|
ee80adc3851fae57c6f719bfd32889ac1aec6801
|
[
"MIT"
] |
permissive
|
learningequality/kolibri
|
26812d4ae771f3b389d3317a586bc032fc84866b
|
cc9da2a6acd139acac3cd71c4cb05c15d4465712
|
refs/heads/release-v0.16.x
| 2023-09-01T18:07:29.720772
| 2023-08-31T15:43:47
| 2023-08-31T15:43:47
| 49,976,939
| 689
| 682
|
MIT
| 2023-09-14T20:02:29
| 2016-01-19T19:22:07
|
Python
|
UTF-8
|
Python
| false
| false
| 5,952
|
py
|
test_permissions_classes.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.test import TestCase
from mock import Mock
from ..api import KolibriAuthPermissions
from ..models import Facility
from ..models import FacilityUser
from ..models import KolibriAnonymousUser
from ..permissions.base import BasePermissions
from ..permissions.general import AllowAll
from ..permissions.general import DenyAll
from .helpers import create_superuser
class BasePermissionsThrowExceptionsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.facility = Facility.objects.create()
cls.object = object() # shouldn't matter what the object is, for these tests
cls.facility_user = FacilityUser.objects.create(
username="qqq", facility=cls.facility
)
cls.superuser = create_superuser(cls.facility)
cls.anon_user = KolibriAnonymousUser()
cls.permissions = BasePermissions()
def test_user_cannot_create(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_create_object(self.facility_user, self.object)
)
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_create_object(self.superuser, self.object)
)
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_create_object(self.anon_user, self.object)
)
def test_user_cannot_read(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_read_object(self.facility_user, self.object)
)
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_read_object(self.superuser, self.object)
)
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_read_object(self.anon_user, self.object)
)
def test_user_cannot_update(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_update_object(self.facility_user, self.object)
)
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_update_object(self.superuser, self.object)
)
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_update_object(self.anon_user, self.object)
)
def test_user_cannot_delete(self):
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_delete_object(self.facility_user, self.object)
)
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_delete_object(self.superuser, self.object)
)
with self.assertRaises(NotImplementedError):
self.assertFalse(
self.permissions.user_can_delete_object(self.anon_user, self.object)
)
class TestBooleanOperationsOnPermissionClassesTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.facility = Facility.objects.create()
cls.obj = object()
cls.user = FacilityUser.objects.create(
username="dummyuser", facility=cls.facility
)
cls.queryset = FacilityUser.objects.all()
def assertAllowAll(self, perms):
self.assertTrue(perms.user_can_create_object(self.user, self.obj))
self.assertTrue(perms.user_can_read_object(self.user, self.obj))
self.assertTrue(perms.user_can_update_object(self.user, self.obj))
self.assertTrue(perms.user_can_delete_object(self.user, self.obj))
def assertDenyAll(self, perms):
self.assertFalse(perms.user_can_create_object(self.user, self.obj))
self.assertFalse(perms.user_can_read_object(self.user, self.obj))
self.assertFalse(perms.user_can_update_object(self.user, self.obj))
self.assertFalse(perms.user_can_delete_object(self.user, self.obj))
def test_allow_or_allow(self):
self.assertAllowAll(AllowAll() | AllowAll())
def test_allow_or_deny(self):
self.assertAllowAll(AllowAll() | DenyAll())
def test_deny_or_allow(self):
self.assertAllowAll(DenyAll() | AllowAll())
def test_deny_or_deny(self):
self.assertDenyAll(DenyAll() | DenyAll())
def test_allow_and_allow(self):
self.assertAllowAll(AllowAll() & AllowAll())
def test_allow_and_deny(self):
self.assertDenyAll(AllowAll() & DenyAll())
def test_deny_and_allow(self):
self.assertDenyAll(DenyAll() & AllowAll())
def test_deny_and_deny(self):
self.assertDenyAll(DenyAll() & DenyAll())
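    # The composed permission objects evaluate left to right and
    # short-circuit: an AllowAll on the left of `|` (or a DenyAll on the left
    # of `&`) means the right-hand class is never consulted. Pairing them
    # with the NotImplementedError-raising BasePermissions makes this
    # observable, as the next four tests show.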
def test_or_is_shortcircuited_for_efficiency(self):
self.assertAllowAll(AllowAll() | BasePermissions())
def test_and_is_shortcircuited_for_efficiency(self):
self.assertDenyAll(DenyAll() & BasePermissions())
def test_or_is_not_shortcircuited_inappropriately(self):
with self.assertRaises(NotImplementedError):
self.assertAllowAll(BasePermissions() | AllowAll())
def test_and_is_not_shortcircuited_inappropriately(self):
with self.assertRaises(NotImplementedError):
self.assertDenyAll(BasePermissions() & DenyAll())
class KolibriAuthPermissionsTestCase(TestCase):
def test_bad_request_method(self):
request = Mock(method="BADWOLF")
view = Mock()
obj = Mock()
perm_obj = KolibriAuthPermissions()
self.assertFalse(perm_obj.has_object_permission(request, view, obj))
|
f8aeeb64d2cb99ca7b104c8edaa13fcfde2592c1
|
41198b450282c36a1d39f361dd99fe423de989da
|
/mantraml/core/management/command_parser.py
|
f35573257337e2e5a25be69820e8a1f3074e73bf
|
[
"Apache-2.0"
] |
permissive
|
RJT1990/mantra
|
4ff49742e68471a11fc6b6060a9b8b1cd3ab3b88
|
7db4d272a1625c33eaa681b8c2e75c0aa57c6952
|
refs/heads/master
| 2022-12-10T07:29:27.803262
| 2019-12-10T23:05:23
| 2019-12-10T23:05:23
| 146,776,545
| 332
| 22
|
Apache-2.0
| 2022-12-08T03:00:07
| 2018-08-30T16:25:10
|
Python
|
UTF-8
|
Python
| false
| false
| 1,779
|
py
|
command_parser.py
|
import argparse
from mantraml.core.management.commands.cloud import CloudCmd
from mantraml.core.management.commands.importcmd import ImportCmd
from mantraml.core.management.commands.makedata import MakeDataCmd
from mantraml.core.management.commands.makemodel import MakeModelCmd
from mantraml.core.management.commands.maketask import MakeTaskCmd
from mantraml.core.management.commands.sync import SyncCmd
from mantraml.core.management.commands.testdata import TestDataCmd
from mantraml.core.management.commands.train import TrainCmd
from mantraml.core.management.commands.ui import UICmd
from mantraml.core.management.commands.launch import LaunchCmd
from mantraml.core.management.commands.upload import UploadCmd
def cmd_line():
"""
Parse the root command line
:return:
"""
parser = argparse.ArgumentParser(prog='mantra')
# register all the subcommands here
subcommands = {
"cloud": CloudCmd(),
"ui": UICmd(),
"launch": LaunchCmd(),
"makemodel": MakeModelCmd(),
"makedata": MakeDataCmd(),
"maketask": MakeTaskCmd(),
"sync": SyncCmd(),
"testdata": TestDataCmd(),
"train": TrainCmd(),
"import": ImportCmd(),
"upload": UploadCmd(),
}
subparsers = parser.add_subparsers(help='sub-command help')
# register all the subparsers
for key,obj in subcommands.items():
subparser = subparsers.add_parser(key, help="%s help" % key)
subparser = obj.add_arguments(subparser)
subparser.set_defaults(func=obj.handle)
obj.parser = parser
    # parse the arguments and call the appropriate function
args, unknown = parser.parse_known_args()
if "func" in args:
args.func(args, unknown)
else:
parser.print_help()
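# Dispatch sketch: running `mantra train <args>` selects the "train"
# subparser, so args.func resolves to TrainCmd().handle, which receives the
# parsed args plus any unknown extras from parse_known_args().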
|
77064d15f2f036033e61f9d1ed835795c24fbbf3
|
9eb4da8fe0eb56a0b0e4c4d660f52f52838c91da
|
/tests/modules/core/test_pasource.py
|
88ef4653931bdc0021dd64b3a1b17eda8e2da363
|
[
"MIT"
] |
permissive
|
tobi-wan-kenobi/bumblebee-status
|
bf53b44341f4d84c4684675af3dcb8c675579f23
|
d03e6307f5e8c0b1c0451636ac9b1e84f3529a73
|
refs/heads/main
| 2023-08-31T11:52:12.140284
| 2023-07-21T12:18:17
| 2023-07-21T12:18:17
| 72,353,166
| 1,345
| 361
|
MIT
| 2023-09-13T19:25:17
| 2016-10-30T14:07:20
|
Python
|
UTF-8
|
Python
| false
| false
| 80
|
py
|
test_pasource.py
|
import pytest
def test_load_module():
__import__("modules.core.pasource")
|
e4e9e7656f17f5eb251ef81dd4ed9c1afdc53e8f
|
22b8c680d7787cc9fcee678cdeed73dc685a4d0f
|
/sdk/python/core/samples/hello_xr.py
|
89a109167c2bee45fe38698e3eeb48aaccb8becf
|
[
"Apache-2.0"
] |
permissive
|
CiscoDevNet/ydk-gen
|
cf36433acb8d90b514f8748531a2cb06e66f7f2d
|
27dd7d85134a62aa9e9fa48edc0359d32b6a31ec
|
refs/heads/master
| 2023-05-13T22:52:26.135573
| 2023-02-01T03:27:31
| 2023-02-01T03:27:31
| 53,680,541
| 138
| 98
|
Apache-2.0
| 2023-09-07T21:55:37
| 2016-03-11T16:27:20
|
C++
|
UTF-8
|
Python
| false
| false
| 2,857
|
py
|
hello_xr.py
|
#!/usr/bin/env python
#
# ========================================================================
# Copyright 2018 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ========================================================================
'''
hello_xr.py
Read all data for model Cisco-IOS-XR-shellutil-oper.yang and print system name and uptime.
Usage: hello_xr.py [-h] [-v] device
Positional arguments:
  device         NETCONF enabled device (ssh://user:password@host:port)
Optional arguments:
  -h, --help     show this help message and exit
  -v, --verbose  print debugging messages
Example:
  hello_xr.py -v ssh://root:Cisco123!@172.27.150.154:830
'''
import logging
from datetime import timedelta
from argparse import ArgumentParser
from urllib.parse import urlparse
from ydk.services import CRUDService
from ydk.providers import NetconfServiceProvider
from ydk.models.cisco_ios_xr import Cisco_IOS_XR_shellutil_oper as xr_shellutil_oper
def enable_logging(level):
log = logging.getLogger('ydk')
log.setLevel(level)
handler = logging.StreamHandler()
formatter = logging.Formatter(("%(asctime)s - %(name)s - %(levelname)s - %(message)s"))
handler.setFormatter(formatter)
log.addHandler(handler)
if __name__ == "__main__":
"""Main execution path"""
parser = ArgumentParser()
parser.add_argument("-v", "--verbose", help="print debugging messages",
action="store_true")
parser.add_argument("device",
help="Netconf enabled device (ssh://user:password@host:port)")
args = parser.parse_args()
device = urlparse(args.device)
if args.verbose:
enable_logging(logging.INFO)
# create NETCONF session
    provider = NetconfServiceProvider(address=device.hostname,
                                      port=device.port,
                                      username=device.username,
                                      password=device.password)
# create CRUD service
crud = CRUDService()
# create system time object
system_time = xr_shellutil_oper.SystemTime()
# read system time from device
system_time = crud.read(provider, system_time)
# print system uptime
print("\nSystem '%s' uptime is "%system_time.uptime.host_name +
str(timedelta(seconds=system_time.uptime.uptime)))
|
34c12360ad0deb0e11dd41141d571d411f73c477
|
e3a012bb52c519dce49d2e18893c336f64826bd8
|
/pendulum/parsing/iso8601.py
|
cc4dd7aa07061cec5cb8683b0d87d9ef075784b0
|
[
"MIT"
] |
permissive
|
sdispater/pendulum
|
3058257d15d7bc58d3f8f4eebf755fad262454d9
|
d1a874dfc079d544e7a96478e2258bb2b01c3e72
|
refs/heads/master
| 2023-08-31T07:04:27.162631
| 2023-08-17T23:23:46
| 2023-08-17T23:23:46
| 62,095,504
| 5,914
| 445
|
MIT
| 2023-08-19T19:59:58
| 2016-06-27T23:37:53
|
Python
|
UTF-8
|
Python
| false
| false
| 13,886
|
py
|
iso8601.py
|
from __future__ import annotations
import datetime
import re
from typing import cast
from pendulum.constants import HOURS_PER_DAY
from pendulum.constants import MINUTES_PER_HOUR
from pendulum.constants import MONTHS_OFFSETS
from pendulum.constants import SECONDS_PER_MINUTE
from pendulum.duration import Duration
from pendulum.helpers import days_in_year
from pendulum.helpers import is_leap
from pendulum.helpers import is_long_year
from pendulum.helpers import week_day
from pendulum.parsing.exceptions import ParserError
from pendulum.tz.timezone import UTC
from pendulum.tz.timezone import FixedTimezone
from pendulum.tz.timezone import Timezone
ISO8601_DT = re.compile(
# Date (optional) # noqa: ERA001
"^"
"(?P<date>"
" (?P<classic>" # Classic date (YYYY-MM-DD) or ordinal (YYYY-DDD)
r" (?P<year>\d{4})" # Year
" (?P<monthday>"
r" (?P<monthsep>-)?(?P<month>\d{2})" # Month (optional)
r" ((?P<daysep>-)?(?P<day>\d{1,2}))?" # Day (optional)
" )?"
" )"
" |"
" (?P<isocalendar>" # Calendar date (2016-W05 or 2016-W05-5)
r" (?P<isoyear>\d{4})" # Year
" (?P<weeksep>-)?" # Separator (optional)
" W" # W separator
r" (?P<isoweek>\d{2})" # Week number
" (?P<weekdaysep>-)?" # Separator (optional)
r" (?P<isoweekday>\d)?" # Weekday (optional)
" )"
")?"
# Time (optional) # noqa: ERA001
"(?P<time>" r" (?P<timesep>[T\ ])?" # Separator (T or space)
# HH:mm:ss (optional mm and ss)
r" (?P<hour>\d{1,2})(?P<minsep>:)?(?P<minute>\d{1,2})?(?P<secsep>:)?(?P<second>\d{1,2})?" # noqa: E501
# Subsecond part (optional)
" (?P<subsecondsection>"
" (?:[.,])" # Subsecond separator (optional)
r" (?P<subsecond>\d{1,9})" # Subsecond
" )?"
# Timezone offset
" (?P<tz>"
r" (?:[-+])\d{2}:?(?:\d{2})?|Z" # Offset (+HH:mm or +HHmm or +HH or Z)
" )?"
")?"
"$",
re.VERBOSE,
)
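# Examples the pattern above accepts: "2016-10-06", "2016-W40-4",
# "2016-280" (ordinal day), "2016-10-06T12:30:45.123456+05:30", and a bare
# time such as "12:30:45".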
ISO8601_DURATION = re.compile(
"^P" # Duration P indicator
# Years, months and days (optional) # noqa: ERA001
"(?P<w>"
r" (?P<weeks>\d+(?:[.,]\d+)?W)"
")?"
"(?P<ymd>"
r" (?P<years>\d+(?:[.,]\d+)?Y)?"
r" (?P<months>\d+(?:[.,]\d+)?M)?"
r" (?P<days>\d+(?:[.,]\d+)?D)?"
")?"
"(?P<hms>"
" (?P<timesep>T)" # Separator (T)
r" (?P<hours>\d+(?:[.,]\d+)?H)?"
r" (?P<minutes>\d+(?:[.,]\d+)?M)?"
r" (?P<seconds>\d+(?:[.,]\d+)?S)?"
")?"
"$",
re.VERBOSE,
)
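# Examples the duration pattern accepts: "P2W", "P3Y6M4D",
# "P3Y6M4DT12H30M5S", "PT10.5S".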
def parse_iso8601(
text: str,
) -> datetime.datetime | datetime.date | datetime.time | Duration:
"""
ISO 8601 compliant parser.
:param text: The string to parse
:type text: str
:rtype: datetime.datetime or datetime.time or datetime.date
"""
parsed = _parse_iso8601_duration(text)
if parsed is not None:
return parsed
m = ISO8601_DT.match(text)
if not m:
raise ParserError("Invalid ISO 8601 string")
ambiguous_date = False
is_date = False
is_time = False
year = 0
month = 1
day = 1
minute = 0
second = 0
microsecond = 0
tzinfo: FixedTimezone | Timezone | None = None
if m.group("date"):
# A date has been specified
is_date = True
if m.group("isocalendar"):
# We have a ISO 8601 string defined
# by week number
if (
m.group("weeksep")
and not m.group("weekdaysep")
and m.group("isoweekday")
):
raise ParserError(f"Invalid date string: {text}")
if not m.group("weeksep") and m.group("weekdaysep"):
raise ParserError(f"Invalid date string: {text}")
try:
date = _get_iso_8601_week(
m.group("isoyear"), m.group("isoweek"), m.group("isoweekday")
)
except ParserError:
raise
except ValueError:
raise ParserError(f"Invalid date string: {text}")
year = date["year"]
month = date["month"]
day = date["day"]
else:
# We have a classic date representation
year = int(m.group("year"))
if not m.group("monthday"):
# No month and day
month = 1
day = 1
else:
if m.group("month") and m.group("day"):
# Month and day
if not m.group("daysep") and len(m.group("day")) == 1:
# Ordinal day
ordinal = int(m.group("month") + m.group("day"))
leap = is_leap(year)
months_offsets = MONTHS_OFFSETS[leap]
if ordinal > months_offsets[13]:
raise ParserError("Ordinal day is out of range")
for i in range(1, 14):
if ordinal <= months_offsets[i]:
day = ordinal - months_offsets[i - 1]
month = i - 1
break
else:
month = int(m.group("month"))
day = int(m.group("day"))
else:
# Only month
if not m.group("monthsep"):
# The date looks like 201207
# which is invalid for a date
# But it might be a time in the form hhmmss
ambiguous_date = True
month = int(m.group("month"))
day = 1
if not m.group("time"):
# No time has been specified
if ambiguous_date:
# We can "safely" assume that the ambiguous date
# was actually a time in the form hhmmss
hhmmss = f"{year!s}{month!s:0>2}"
return datetime.time(int(hhmmss[:2]), int(hhmmss[2:4]), int(hhmmss[4:]))
return datetime.date(year, month, day)
if ambiguous_date:
raise ParserError(f"Invalid date string: {text}")
if is_date and not m.group("timesep"):
raise ParserError(f"Invalid date string: {text}")
if not is_date:
is_time = True
# Grabbing hh:mm:ss
hour = int(m.group("hour"))
minsep = m.group("minsep")
if m.group("minute"):
minute = int(m.group("minute"))
elif minsep:
raise ParserError("Invalid ISO 8601 time part")
secsep = m.group("secsep")
if secsep and not minsep and m.group("minute"):
# minute/second separator but no hour/minute separator
raise ParserError("Invalid ISO 8601 time part")
if m.group("second"):
if not secsep and minsep:
# No minute/second separator but hour/minute separator
raise ParserError("Invalid ISO 8601 time part")
second = int(m.group("second"))
elif secsep:
raise ParserError("Invalid ISO 8601 time part")
# Grabbing subseconds, if any
if m.group("subsecondsection"):
# Limiting to 6 chars
subsecond = m.group("subsecond")[:6]
microsecond = int(f"{subsecond:0<6}")
# Grabbing timezone, if any
tz = m.group("tz")
if tz:
if tz == "Z":
tzinfo = UTC
else:
negative = bool(tz.startswith("-"))
tz = tz[1:]
if ":" not in tz:
if len(tz) == 2:
tz = f"{tz}00"
off_hour = tz[0:2]
off_minute = tz[2:4]
else:
off_hour, off_minute = tz.split(":")
offset = ((int(off_hour) * 60) + int(off_minute)) * 60
if negative:
offset = -1 * offset
tzinfo = FixedTimezone(offset)
if is_time:
return datetime.time(hour, minute, second, microsecond, tzinfo=tzinfo)
return datetime.datetime(
year, month, day, hour, minute, second, microsecond, tzinfo=tzinfo
)
def _parse_iso8601_duration(text: str, **options: str) -> Duration | None:
m = ISO8601_DURATION.match(text)
if not m:
return None
years = 0
months = 0
weeks = 0
days: int | float = 0
hours: int | float = 0
minutes: int | float = 0
seconds: int | float = 0
microseconds: int | float = 0
fractional = False
_days: str | float
_hour: str | int | None
_minutes: str | int | None
_seconds: str | int | None
if m.group("w"):
# Weeks
if m.group("ymd") or m.group("hms"):
# Specifying anything more than weeks is not supported
raise ParserError("Invalid duration string")
_weeks = m.group("weeks")
if not _weeks:
raise ParserError("Invalid duration string")
_weeks = _weeks.replace(",", ".").replace("W", "")
if "." in _weeks:
_weeks, portion = _weeks.split(".")
weeks = int(_weeks)
_days = int(portion) / 10 * 7
days, hours = int(_days // 1), int(_days % 1 * HOURS_PER_DAY)
else:
weeks = int(_weeks)
if m.group("ymd"):
# Years, months and/or days
_years = m.group("years")
_months = m.group("months")
_days = m.group("days")
# Checking order
years_start = m.start("years") if _years else -3
months_start = m.start("months") if _months else years_start + 1
days_start = m.start("days") if _days else months_start + 1
# Check correct order
if not (years_start < months_start < days_start):
raise ParserError("Invalid duration")
if _years:
_years = _years.replace(",", ".").replace("Y", "")
if "." in _years:
raise ParserError("Float years in duration are not supported")
else:
years = int(_years)
if _months:
if fractional:
raise ParserError("Invalid duration")
_months = _months.replace(",", ".").replace("M", "")
if "." in _months:
raise ParserError("Float months in duration are not supported")
else:
months = int(_months)
if _days:
if fractional:
raise ParserError("Invalid duration")
_days = _days.replace(",", ".").replace("D", "")
if "." in _days:
fractional = True
_days, _hours = _days.split(".")
days = int(_days)
hours = int(_hours) / 10 * HOURS_PER_DAY
else:
days = int(_days)
if m.group("hms"):
# Hours, minutes and/or seconds
_hours = m.group("hours") or 0
_minutes = m.group("minutes") or 0
_seconds = m.group("seconds") or 0
# Checking order
hours_start = m.start("hours") if _hours else -3
minutes_start = m.start("minutes") if _minutes else hours_start + 1
seconds_start = m.start("seconds") if _seconds else minutes_start + 1
# Check correct order
if not (hours_start < minutes_start < seconds_start):
raise ParserError("Invalid duration")
if _hours:
if fractional:
raise ParserError("Invalid duration")
_hours = cast(str, _hours).replace(",", ".").replace("H", "")
if "." in _hours:
fractional = True
_hours, _mins = _hours.split(".")
hours += int(_hours)
minutes += int(_mins) / 10 * MINUTES_PER_HOUR
else:
hours += int(_hours)
if _minutes:
if fractional:
raise ParserError("Invalid duration")
_minutes = cast(str, _minutes).replace(",", ".").replace("M", "")
if "." in _minutes:
fractional = True
_minutes, _secs = _minutes.split(".")
minutes += int(_minutes)
seconds += int(_secs) / 10 * SECONDS_PER_MINUTE
else:
minutes += int(_minutes)
if _seconds:
if fractional:
raise ParserError("Invalid duration")
_seconds = cast(str, _seconds).replace(",", ".").replace("S", "")
if "." in _seconds:
_seconds, _microseconds = _seconds.split(".")
seconds += int(_seconds)
microseconds += int(f"{_microseconds[:6]:0<6}")
else:
seconds += int(_seconds)
return Duration(
years=years,
months=months,
weeks=weeks,
days=days,
hours=hours,
minutes=minutes,
seconds=seconds,
microseconds=microseconds,
)
def _get_iso_8601_week(
year: int | str, week: int | str, weekday: int | str
) -> dict[str, int]:
weekday = 1 if not weekday else int(weekday)
year = int(year)
week = int(week)
if week > 53 or week > 52 and not is_long_year(year):
raise ParserError("Invalid week for week date")
if weekday > 7:
raise ParserError("Invalid weekday for week date")
# We can't rely on strptime directly here since
# it does not support ISO week date
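    # January 4 always falls in ISO week 1, so the ordinal day-of-year for
    # (week, weekday) can be anchored on week_day(year, 1, 4), the ISO
    # weekday (Monday=1) of January 4.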
ordinal = week * 7 + weekday - (week_day(year, 1, 4) + 3)
if ordinal < 1:
# Previous year
ordinal += days_in_year(year - 1)
year -= 1
if ordinal > days_in_year(year):
# Next year
ordinal -= days_in_year(year)
year += 1
fmt = "%Y-%j"
string = f"{year}-{ordinal}"
dt = datetime.datetime.strptime(string, fmt)
return {"year": dt.year, "month": dt.month, "day": dt.day}
|
37dad8d8041bc0f61e52372eceaac5fa13e58021
|
d7949f5b2075384075fa066d571144bbbe02ffd8
|
/supervised/validation/validator_custom.py
|
e69e0864e2aea7a3854f508fc5751c9732ddadcc
|
[
"MIT"
] |
permissive
|
mljar/mljar-supervised
|
57fb56b05b1a53ea979bf9cb9b127f314853bdbd
|
6722eb1e6441c11990f2aed01a444ddcae478c09
|
refs/heads/master
| 2023-08-30T23:48:28.692945
| 2023-08-28T15:09:39
| 2023-08-28T15:09:39
| 156,218,203
| 2,759
| 388
|
MIT
| 2023-08-28T10:24:12
| 2018-11-05T12:58:04
|
Python
|
UTF-8
|
Python
| false
| false
| 4,267
|
py
|
validator_custom.py
|
import os
import gc
import joblib
import logging
import numpy as np
import pandas as pd
import warnings
log = logging.getLogger(__name__)
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import KFold
from supervised.validation.validator_base import BaseValidator
from supervised.exceptions import AutoMLException
from supervised.utils.utils import load_data
from supervised.utils.config import mem
import time
class CustomValidator(BaseValidator):
def __init__(self, params):
BaseValidator.__init__(self, params)
cv_path = self.params.get("cv_path")
if cv_path is None:
raise AutoMLException("You need to specify `cv` as list or iterable")
self.cv = joblib.load(cv_path)
self.cv = list(self.cv)
self._results_path = self.params.get("results_path")
self._X_path = self.params.get("X_path")
self._y_path = self.params.get("y_path")
self._sample_weight_path = self.params.get("sample_weight_path")
self._sensitive_features_path = self.params.get("sensitive_features_path")
if self._X_path is None or self._y_path is None:
raise AutoMLException("No data path set in CustomValidator params")
folds_path = os.path.join(self._results_path, "folds")
if not os.path.exists(folds_path):
os.mkdir(folds_path)
print("Custom validation strategy")
for fold_cnt, (train_index, validation_index) in enumerate(self.cv):
print(f"Split {fold_cnt}.")
print(f"Train {train_index.shape[0]} samples.")
print(f"Validation {validation_index.shape[0]} samples.")
train_index_file = os.path.join(
self._results_path,
"folds",
f"fold_{fold_cnt}_train_indices.npy",
)
validation_index_file = os.path.join(
self._results_path,
"folds",
f"fold_{fold_cnt}_validation_indices.npy",
)
np.save(train_index_file, train_index)
np.save(validation_index_file, validation_index)
else:
log.debug("Folds split already done, reuse it")
def get_split(self, k, repeat=0):
try:
train_index_file = os.path.join(
self._results_path, "folds", f"fold_{k}_train_indices.npy"
)
validation_index_file = os.path.join(
self._results_path, "folds", f"fold_{k}_validation_indices.npy"
)
train_index = np.load(train_index_file)
validation_index = np.load(validation_index_file)
X = load_data(self._X_path)
y = load_data(self._y_path)
y = y["target"]
sample_weight = None
if self._sample_weight_path is not None:
sample_weight = load_data(self._sample_weight_path)
sample_weight = sample_weight["sample_weight"]
sensitive_features = None
if self._sensitive_features_path is not None:
sensitive_features = load_data(self._sensitive_features_path)
train_data = {"X": X.iloc[train_index], "y": y.iloc[train_index]}
validation_data = {
"X": X.iloc[validation_index],
"y": y.iloc[validation_index],
}
if sample_weight is not None:
train_data["sample_weight"] = sample_weight.iloc[train_index]
validation_data["sample_weight"] = sample_weight.iloc[validation_index]
if sensitive_features is not None:
train_data["sensitive_features"] = sensitive_features.iloc[train_index]
validation_data["sensitive_features"] = sensitive_features.iloc[
validation_index
]
except Exception as e:
import traceback
print(traceback.format_exc())
raise AutoMLException("Problem with custom validation. " + str(e))
return (train_data, validation_data)
def get_n_splits(self):
return len(self.cv)
def get_repeats(self):
return 1
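# A minimal, self-contained construction sketch (illustrative only): the X/y
# paths below are placeholders, since __init__ only checks that they are set.
# get_split() would additionally need real data readable by load_data, which
# this sketch does not exercise.
if __name__ == "__main__":
    import tempfile
    results_path = tempfile.mkdtemp()
    cv = [(np.array([0, 1, 2]), np.array([3, 4]))]
    cv_file = os.path.join(results_path, "cv.joblib")
    joblib.dump(cv, cv_file)
    validator = CustomValidator(
        params={
            "cv_path": cv_file,
            "results_path": results_path,
            "X_path": "X.placeholder",  # placeholder, not read in __init__
            "y_path": "y.placeholder",  # placeholder, not read in __init__
        }
    )
    assert validator.get_n_splits() == 1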
|
829f0d8ec6aea62aaf236e6b5abc4c486ea6d048
|
ab40571d5051ad53c0f205fa797ba36eac516d06
|
/language/compgen/nqg/model/parser/training/input_utils.py
|
f642190b66f620db18170595d8db69090f77afc7
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/language
|
e941b1a92ab46d40d8d03bb0c314905cb6902ce2
|
ac9447064195e06de48cc91ff642f7fffa28ffe8
|
refs/heads/master
| 2023-08-24T23:10:13.207294
| 2023-05-25T20:47:18
| 2023-05-25T22:29:27
| 153,201,352
| 1,567
| 371
|
Apache-2.0
| 2023-07-06T23:03:15
| 2018-10-16T00:58:14
|
Python
|
UTF-8
|
Python
| false
| false
| 4,185
|
py
|
input_utils.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for input pipeline.
The input pipeline should be both GPU and TPU friendly.
"""
import tensorflow as tf
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.io.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but tf.int32 can be faster and more
# memory efficient on certain hardware.
for name in list(example.keys()):
tensor = example[name]
if tensor.dtype == tf.int64:
tensor = tf.cast(tensor, dtype=tf.int32)
example[name] = tensor
return example
def _create_int_feature(length):
return tf.io.FixedLenFeature([length], tf.int64)
def create_training_dataset(input_file, batch_size, config):
"""Returns `tf.data.Dataset` for training."""
name_to_features = {}
name_to_features["wordpiece_ids"] = _create_int_feature(
config["max_num_wordpieces"])
name_to_features["num_wordpieces"] = _create_int_feature(1)
name_to_features["application_span_begin"] = _create_int_feature(
config["max_num_applications"])
name_to_features["application_span_end"] = _create_int_feature(
config["max_num_applications"])
name_to_features["application_rule_idx"] = _create_int_feature(
config["max_num_applications"])
name_to_features["nu_node_type"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_node_1_idx"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_node_2_idx"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_application_idx"] = _create_int_feature(
config["max_num_numerator_nodes"])
name_to_features["nu_num_nodes"] = _create_int_feature(1)
name_to_features["de_node_type"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_node_1_idx"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_node_2_idx"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_application_idx"] = _create_int_feature(
config["max_num_denominator_nodes"])
name_to_features["de_num_nodes"] = _create_int_feature(1)
if "*" in input_file:
# Potentially match multiple input files.
files = tf.io.matching_files(input_file)
files = tf.random.shuffle(files)
shards = tf.data.Dataset.from_tensor_slices(files)
dataset = shards.interleave(tf.data.TFRecordDataset)
else:
# Only using single input file.
dataset = tf.data.TFRecordDataset(input_file)
dataset = dataset.repeat()
dataset = dataset.shuffle(buffer_size=1000)
decode_fn = lambda record: _decode_record(record, name_to_features)
dataset = dataset.map(
decode_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)
# Send the single file to all workers.
options = tf.data.Options()
options.experimental_distribute.auto_shard_policy = (
tf.data.experimental.AutoShardPolicy.OFF)
dataset = dataset.with_options(options)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(1024)
return dataset
def get_dataset_fn(input_file, config):
"""Gets a closure to create a dataset.."""
global_batch_size = config["batch_size"]
def dataset_fn(ctx=None):
"""Returns tf.data.Dataset for distributed BERT pretraining."""
batch_size = ctx.get_per_replica_batch_size(
global_batch_size) if ctx else global_batch_size
dataset = create_training_dataset(input_file, batch_size, config)
return dataset
return dataset_fn
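# A minimal configuration sketch (illustrative values; the keys mirror those
# read by create_training_dataset above). get_dataset_fn only builds a
# closure, so no files are touched until the returned function is called.
if __name__ == "__main__":
  example_config = {
      "batch_size": 32,
      "max_num_wordpieces": 128,
      "max_num_applications": 64,
      "max_num_numerator_nodes": 96,
      "max_num_denominator_nodes": 96,
  }
  dataset_fn = get_dataset_fn("/path/to/examples.tfrecord*", example_config)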
|
46a8d90cabe677ea681c74f928e64f1da4af8a3a
|
9ed3b16b3da72e4c47a04f2f2e3ef395e9fd9f20
|
/main/libwacom/template.py
|
78c9e5e23e3e4785690b1f9a79605b729a4093b6
|
[
"BSD-2-Clause"
] |
permissive
|
chimera-linux/cports
|
fdae59dc25856942be3041e10e3533dbf8f883c3
|
714680161cd719dd047452c95fbb9b447bc23a86
|
refs/heads/master
| 2023-09-03T19:30:40.720670
| 2023-09-03T15:07:40
| 2023-09-03T15:07:40
| 374,000,317
| 118
| 37
|
BSD-2-Clause
| 2023-09-14T20:31:08
| 2021-06-05T02:07:34
|
Python
|
UTF-8
|
Python
| false
| false
| 714
|
py
|
template.py
|
pkgname = "libwacom"
pkgver = "2.8.0"
pkgrel = 0
build_style = "meson"
configure_args = ["-Ddocumentation=disabled", "-Dtests=enabled"]
hostmakedepends = ["meson", "pkgconf"]
makedepends = ["libgudev-devel", "glib-devel", "libxml2-devel"]
checkdepends = ["bash"]
pkgdesc = "Library to handle Wacom tablets"
maintainer = "q66 <q66@chimera-linux.org>"
license = "MIT"
url = "https://github.com/linuxwacom/libwacom"
source = f"{url}/releases/download/{pkgname}-{pkgver}/{pkgname}-{pkgver}.tar.xz"
sha256 = "bb04b12c8688d0ff6a108d47a38d2057d572c4d7227d78138abd5fd0ba59f215"
def post_install(self):
self.install_license("COPYING")
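# Note: `subpackage` below is not imported; like the `self` handle passed to
# the hook functions, it is provided by the cports build system when this
# template is evaluated. The decorated function registers a libwacom-devel
# split package that reuses the stock -devel file list.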
@subpackage("libwacom-devel")
def _devel(self):
return self.default_devel()
|
034eab0ae10f793818530ab79d3a7f6bea076321
|
12f0bd77926127cdacc2452d6f9cfed91806b2fe
|
/idaes/core/surrogate/base/surrogate_base.py
|
cfc4d27e19785e6227cdcfa868de34db73444c18
|
[
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
IDAES/idaes-pse
|
e03d2583ae1ba968a7099f9f439fd8c3efa12904
|
deacf4c422bc9e50cb347e11a8cbfa0195bd4274
|
refs/heads/main
| 2023-08-16T19:13:00.355572
| 2023-08-04T04:19:29
| 2023-08-04T04:19:29
| 168,622,088
| 173
| 227
|
NOASSERTION
| 2023-09-11T16:04:55
| 2019-02-01T01:12:51
|
Python
|
UTF-8
|
Python
| false
| false
| 14,453
|
py
|
surrogate_base.py
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES).
#
# Copyright (c) 2018-2023 by the software owners: The Regents of the
# University of California, through Lawrence Berkeley National Laboratory,
# National Technology & Engineering Solutions of Sandia, LLC, Carnegie Mellon
# University, West Virginia University Research Corporation, et al.
# All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md
# for full copyright and license information.
#################################################################################
"""
Common Surrogate interface for IDAES.
"""
from pyomo.common.config import ConfigBlock
class SurrogateTrainer(object):
"""
Base class for IDAES Surrogate Trainers.
"""
CONFIG = ConfigBlock()
def __init__(
self,
input_labels,
output_labels,
training_dataframe,
validation_dataframe=None,
input_bounds=None,
**settings
):
"""
This is the base class for IDAES surrogate training objects.
Args:
input_labels: list
list of labels corresponding to the inputs (in order)
output_labels: list
list of labels corresponding to the outputs (in order)
training_dataframe: pandas DataFrame
Pandas DataFrame corresponding to the training data. Columns must
include all the labels in input_labels and output_labels
validation_dataframe: pandas DataFrame or None
                Pandas DataFrame corresponding to the validation data. Columns
must include all the labels in input_labels and output_labels. If
None is passed, then no validation data will be used. Some
derived surrogate trainers may require validation data, while
others may not.
input_bounds: None, or dict of tuples
if None, these are set later from the provided data
if provided, it should be a dictionary where the keys correspond
to the input label, and the values are tuples of bounds
(lower, upper)
settings: additional keyword arguments
These are additional keyword arguments that are passed to the CONFIG
for the derived class.
"""
# Set the config block from passed settings
self.config = self.CONFIG(settings)
# We must have at least one input label and one output label
if (
input_labels is None
or len(input_labels) < 1
or output_labels is None
or len(output_labels) < 1
):
raise ValueError(
"SurrogateTrainer requires a list of input_labels and a list "
"of output_labels which must both have a length of at "
"least one"
)
self._input_labels = list(input_labels)
self._output_labels = list(output_labels)
# check that the input and output labels do not overlap
all_labels = set(self._input_labels)
all_labels.update(self._output_labels)
if len(all_labels) != (len(self._input_labels) + len(self._output_labels)):
raise ValueError(
"Duplicate label found in input_labels and/or output_labels."
)
# create the data members for training and validation data
self._training_dataframe = training_dataframe
self._validation_dataframe = validation_dataframe
# check that all input labels and output labels are in the dataframes
diff = set(self._input_labels) - set(self._training_dataframe.columns)
if diff:
raise ValueError(
"The following input labels were not found in "
"the training data columns: {}.".format(diff)
)
if self._validation_dataframe is not None:
diff = set(self._input_labels) - set(self._validation_dataframe.columns)
if diff:
raise ValueError(
"The following input labels were not found in "
"the validation data columns: {}.".format(diff)
)
diff = set(self._output_labels) - set(self._training_dataframe.columns)
if diff:
raise ValueError(
"The following output labels were not found in "
"the training data columns: {}.".format(diff)
)
if self._validation_dataframe is not None:
diff = set(self._output_labels) - set(self._validation_dataframe.columns)
if diff:
raise ValueError(
"The following output labels were not found in "
"the validation data columns: {}.".format(diff)
)
if input_bounds is not None:
self._input_bounds = dict(input_bounds)
# check that the labels match the input labels
if sorted(input_bounds.keys()) != sorted(self._input_labels):
raise ValueError(
"The input_labels did not match the keys in input_bounds.\n"
"input_bounds.keys(): {}\n"
"input_labels: {}".format(
sorted(input_bounds.keys()), sorted(input_labels)
)
)
else:
# get the bounds from the data
mx = self._training_dataframe.max().to_dict()
mn = self._training_dataframe.min().to_dict()
self._input_bounds = {k: (mn[k], mx[k]) for k in self._input_labels}
def n_inputs(self):
"""
The number of inputs for the surrogate
        Returns: int
"""
return len(self._input_labels)
def n_outputs(self):
"""
The number of outputs for the surrogate
        Returns: int
"""
return len(self._output_labels)
def input_labels(self):
"""
The ordered list of labels for the inputs
Returns: list of strings
"""
return list(self._input_labels)
def output_labels(self):
"""
The ordered list of labels for the outputs
Returns: list of strings
"""
return list(self._output_labels)
def input_bounds(self):
"""
The dictionary of input bounds. The keys of the dictionary correspond
to the labels for the inputs. The values are tuples of (lower_bound, upper_bound)
Returns: dict
"""
if self._input_bounds:
return dict(self._input_bounds)
return None
def train_surrogate(self):
"""
        The ``train_surrogate`` method is used to train a surrogate model
        using the training data provided to the constructor. It should return
        an instance of a derived surrogate object (from SurrogateBase).
        This method should be overridden by the derived classes.
        Returns:
            surrogate object : an instance of a class derived from
            SurrogateBase representing the trained surrogate.
"""
raise NotImplementedError(
"train_surrogate called, but not implemented on the derived class"
)
class SurrogateBase:
"""
Base class for Surrogate object.
"""
def __init__(self, input_labels=None, output_labels=None, input_bounds=None):
"""
Base class for standard IDAES Surrogate object. This class is
responsible for being able to load/save a surrogate model, evaluate
the model given an input dataframe, and populating a block to provide
an EO representation of the surrogate for solving in IDAES.
Args:
input_labels: list
list of labels corresponding to the inputs (in order)
output_labels: list
list of labels corresponding to the outputs (in order)
input_bounds: dict of tuples
A dictionary where the keys correspond to the input label,
and the values are tuples of bounds (lower,upper). These
should represent the valid range for the input variables
"""
self._input_labels = input_labels
self._output_labels = output_labels
self._input_bounds = input_bounds
# check that the input and output labels do not overlap
all_labels = set(self._input_labels)
all_labels.update(self._output_labels)
if len(all_labels) != (len(self._input_labels) + len(self._output_labels)):
raise ValueError(
"Duplicate label found in input_labels and/or output_labels."
)
if input_bounds is not None:
self._input_bounds = dict(input_bounds)
# check that the labels match the input labels
if sorted(input_bounds.keys()) != sorted(self._input_labels):
raise ValueError(
"The input_labels did not match the keys in input_bounds.\n"
"input_bounds.keys(): {}\n"
"input_labels: {}".format(
sorted(input_bounds.keys()), sorted(input_labels)
)
)
def n_inputs(self):
"""
The number of inputs for the surrogate
        Returns: int
"""
return len(self._input_labels)
def n_outputs(self):
"""
The number of outputs for the surrogate
        Returns: int
"""
return len(self._output_labels)
def input_labels(self):
"""
The ordered list of labels for the inputs
Returns: list of strings
"""
return list(self._input_labels)
def output_labels(self):
"""
The ordered list of labels for the outputs
Returns: list of strings
"""
return list(self._output_labels)
def input_bounds(self):
"""
The dictionary of input bounds. The keys of the dictionary correspond
to the labels for the inputs. The values are tuples of (lower_bound, upper_bound)
Returns: dict
"""
if self._input_bounds:
return dict(self._input_bounds)
return None
def populate_block(self, block, additional_options=None):
"""
Method to populate a Pyomo Block with surrogate model
constraints and variables.
Derived classes must overload this method.
Args:
block: Pyomo Block
Component to be populated with constraints.
additional_options: dict
Additional options passed through from SurrogateBlock.build_model
Returns:
None
"""
raise NotImplementedError(
"SurrogateModel class has not implemented populate_block method."
)
def evaluate_surrogate(self, dataframe):
"""
        Method to evaluate the surrogate model at a set of user-provided
        values.
Derived classes must overload this method
Args:
dataframe: pandas DataFrame
The dataframe of input values to be used in the evaluation. The dataframe
needs to contain a column corresponding to each of the input labels. Additional
columns are fine, but are not used.
Returns:
            output: pandas DataFrame
                Returns a dataframe of the output values evaluated at the provided inputs.
The index of the output dataframe should match the index of the provided inputs.
"""
raise NotImplementedError(
"SurrogateModel class has not implemented an evaluate_surrogate " "method."
)
def save_to_file(self, filename, overwrite=False):
"""
This method saves an instance of the surrogate to a file so the model
can be used later.
Args:
filename : str
The path of the filename where the model will be saved
overwrite : bool
If True, this method will overwrite the file if it exists. If False
and the file already exists, this will throw an error.
"""
arg = "x"
if overwrite:
arg = "w"
with open(filename, arg) as fd:
self.save(fd)
def save(self, strm):
"""
Save an instance of this surrogate to the strm so the model can be used later.
This method should be overloaded in derived surrogate classes.
Args:
strm: IO.TextIO
This is the python stream like a file object or StringIO that will be used
to serialize the surrogate object. This methods will often write a string
of json data to the stream, but the format for derived classes need not be json.
"""
raise NotImplementedError(
'"save" should be implemented in the' " class derived from SurrogateBase"
)
@classmethod
def load_from_file(cls, filename):
"""
This method creates a new surrogate object by loading the model from the provided file.
Args:
filename : str
The name of the file from which to load the model.
Returns: an instance of the derived class or None if it failed to load
"""
with open(filename, "r") as fd:
return cls.load(fd)
@classmethod
def load(cls, strm):
"""
Create an instance of a surrogate from a stream. This method should
be overloaded in derived surrogate classes.
Args:
strm: IO.TextIO
This is the python stream like a file object or StringIO containing
the data required to load the surrogate. This is often, but does not
need to be json data.
Returns: an instance of the derived class or None if it failed to load
"""
raise NotImplementedError(
'"load" should be implemented in the' " class derived from SurrogateBase"
)
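# A minimal illustrative subclass (not part of IDAES): a "surrogate" that
# returns a constant for every output label, showing which hooks a derived
# class typically overrides.
if __name__ == "__main__":
    import io
    import json
    import pandas as pd
    class ConstantSurrogate(SurrogateBase):
        def evaluate_surrogate(self, dataframe):
            # Echo 0.0 for every output label at each input row
            return pd.DataFrame(
                0.0, index=dataframe.index, columns=self._output_labels
            )
        def save(self, strm):
            json.dump(
                {"inputs": self._input_labels, "outputs": self._output_labels},
                strm,
            )
    surr = ConstantSurrogate(
        input_labels=["x1"],
        output_labels=["y1"],
        input_bounds={"x1": (0.0, 1.0)},
    )
    out = surr.evaluate_surrogate(pd.DataFrame({"x1": [0.1, 0.9]}))
    assert list(out.columns) == ["y1"] and len(out) == 2
    buf = io.StringIO()
    surr.save(buf)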
|
ab7f7d1fffccbe12942b7eaa3a9768005e395df0
|
10ddfb2d43a8ec5d47ce35dc0b8acf4fd58dea94
|
/Python/is-subsequence.py
|
7f5086668d0cee86cf8ceecca1c8c61f87a52e7b
|
[
"MIT"
] |
permissive
|
kamyu104/LeetCode-Solutions
|
f54822059405ef4df737d2e9898b024f051fd525
|
4dc4e6642dc92f1983c13564cc0fd99917cab358
|
refs/heads/master
| 2023-09-02T13:48:26.830566
| 2023-08-28T10:11:12
| 2023-08-28T10:11:12
| 152,631,182
| 4,549
| 1,651
|
MIT
| 2023-05-31T06:10:33
| 2018-10-11T17:38:35
|
C++
|
UTF-8
|
Python
| false
| false
| 379
|
py
|
is-subsequence.py
|
# Time: O(n)
# Space: O(1)
class Solution(object):
def isSubsequence(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if not s:
return True
i = 0
for c in t:
if c == s[i]:
i += 1
if i == len(s):
break
return i == len(s)
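# Illustrative usage (not part of the original solution):
if __name__ == "__main__":
    assert Solution().isSubsequence("abc", "ahbgdc") is True
    assert Solution().isSubsequence("axc", "ahbgdc") is False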
|
b891ba9f6022076c33dc994e250c27828b225fa0
|
9ed4d46aedd4d4acadb48d610e940594b5b7b3fd
|
/graphs/g_topological_sort.py
|
77543d51f61d91ba84acfe1b8bae6ef74c58166e
|
[
"MIT"
] |
permissive
|
TheAlgorithms/Python
|
7596a0e236ed12a61f9db19a7ea68309779cc85b
|
421ace81edb0d9af3a173f4ca7e66cc900078c1d
|
refs/heads/master
| 2023-09-01T17:32:20.190949
| 2023-08-29T13:18:10
| 2023-08-29T13:18:10
| 63,476,337
| 184,217
| 48,615
|
MIT
| 2023-09-14T02:05:29
| 2016-07-16T09:44:01
|
Python
|
UTF-8
|
Python
| false
| false
| 946
|
py
|
g_topological_sort.py
|
# Author: Phyllipe Bezerra (https://github.com/pmba)
clothes = {
0: "underwear",
1: "pants",
2: "belt",
3: "suit",
4: "shoe",
5: "socks",
6: "shirt",
7: "tie",
8: "watch",
}
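# graph[u] lists the items that can only be put on after item u,
# e.g. underwear (0) must precede pants (1) and shoes (4).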
graph = [[1, 4], [2, 4], [3], [], [], [4], [2, 7], [3], []]
visited = [0 for x in range(len(graph))]
stack = []
def print_stack(stack, clothes):
order = 1
while stack:
current_clothing = stack.pop()
print(order, clothes[current_clothing])
order += 1
def depth_first_search(u, visited, graph):
visited[u] = 1
for v in graph[u]:
if not visited[v]:
depth_first_search(v, visited, graph)
stack.append(u)
def topological_sort(graph, visited):
for v in range(len(graph)):
if not visited[v]:
depth_first_search(v, visited, graph)
if __name__ == "__main__":
topological_sort(graph, visited)
print(stack)
print_stack(stack, clothes)
|
2e30c67c084c8eadda2f0d6dacb9c8fd6a34f965
|
9ba214bbcdb58f5134b96bd41f5f54b60faf77a4
|
/avalanche/benchmarks/utils/data.py
|
824aac473dd8cb76b2258558cf3ea5bf8e55439b
|
[
"MIT"
] |
permissive
|
ContinualAI/avalanche
|
aedc55059d7b952beb50898f213b9c66953cb764
|
deb2b3e842046f48efc96e55a16d7a566e022c72
|
refs/heads/master
| 2023-08-08T23:26:10.835875
| 2023-08-02T14:39:40
| 2023-08-02T14:39:40
| 245,145,949
| 1,424
| 270
|
MIT
| 2023-09-04T04:17:05
| 2020-03-05T11:32:13
|
Python
|
UTF-8
|
Python
| false
| false
| 27,305
|
py
|
data.py
|
################################################################################
# Copyright (c) 2022 ContinualAI. #
# Copyrights licensed under the MIT License. #
# See the accompanying LICENSE file for terms. #
# #
# Date: 19-07-2022 #
# Author(s): Antonio Carta #
# E-mail: contact@continualai.org #
# Website: avalanche.continualai.org #
################################################################################
"""
This module contains the implementation of ``AvalancheDataset``, an
Avalanche dataset class which extends PyTorch's Dataset.
AvalancheDataset offers additional features like the
management of preprocessing pipelines and task/class labels.
"""
import copy
import warnings
import numpy as np
from torch.utils.data.dataloader import default_collate
from avalanche.benchmarks.utils.dataset_definitions import IDataset
from .data_attribute import DataAttribute
from typing import (
Dict,
List,
Any,
Optional,
Sequence,
TypeVar,
Callable,
Union,
overload,
)
from .flat_data import FlatData
from .transform_groups import TransformGroups, EmptyTransformGroups
from torch.utils.data import Dataset as TorchDataset
from collections import OrderedDict
T_co = TypeVar("T_co", covariant=True)
TAvalancheDataset = TypeVar("TAvalancheDataset", bound="AvalancheDataset")
TDataWTransform = TypeVar("TDataWTransform", bound="_FlatDataWithTransform")
class AvalancheDataset(IDataset[T_co]):
"""Avalanche Dataset.
    Avalanche datasets are PyTorch-compatible datasets with some additional
functionality such as:
- management of transformation groups via :class:`AvalancheTransform`
- support for sample attributes such as class targets and task labels
Data Attributes
---------------
Avalanche datasets manage sample-wise information such as class or task
labels via :class:`DataAttribute`.
Transformation Groups
---------------------
Avalanche datasets manage transformation via transformation groups.
Simply put, a transformation group is a named preprocessing function
(as in torchvision datasets). By default, Avalanche expects
two transformation groups:
- 'train', which contains transformations applied to training patterns.
    - 'eval', which contains transformations applied to test patterns.
    Having both groups makes it possible to use different transformations
    during training and evaluation and to seamlessly switch between them by
    using the :func:`train` and :func:`eval` methods. Arbitrary
    transformation groups can be added and used. If you define custom
    groups, you can use them by calling the :func:`with_transforms` method.
    Switching to a different transformation group by calling the ``train()``,
    ``eval()`` or ``with_transforms()`` methods always returns a new dataset,
    leaving the original one unchanged.
    Transformation groups can be manipulated by removing, freezing, or
replacing transformations. Each operation returns a new dataset, leaving
the original one unchanged.
"""
def __init__(
self,
datasets: Sequence[IDataset[T_co]],
*,
indices: Optional[List[int]] = None,
data_attributes: Optional[List[DataAttribute]] = None,
transform_groups: Optional[TransformGroups] = None,
frozen_transform_groups: Optional[TransformGroups] = None,
collate_fn: Optional[Callable[[List], Any]] = None,
):
"""Creates a ``AvalancheDataset`` instance.
        :param datasets: The original datasets. Beware that
            AvalancheDataset will not overwrite transformations already
            applied by these datasets.
:param transform_groups: Avalanche transform groups.
"""
if isinstance(datasets, TorchDataset) or isinstance(datasets, AvalancheDataset):
warnings.warn(
"AvalancheDataset constructor has been changed. "
"Please check the documentation for the correct usage. You can"
" use `avalanche.benchmarks.utils.make_classification_dataset "
"if you need the old behavior.",
DeprecationWarning,
stacklevel=2,
)
if issubclass(type(datasets), TorchDataset) or issubclass(
type(datasets), AvalancheDataset
):
datasets = [datasets] # type: ignore
# NOTES on implementation:
# - raw datasets operations are implemented by _FlatData
# - data attributes are implemented by DataAttribute
# - transformations are implemented by TransformGroups
# AvalancheDataset just takes care to manage all of these attributes
# together and decides how the information propagates through
# operations (e.g. how to pass attributes after concat/subset
# operations).
flat_datas = []
for d in datasets:
if len(d) > 0:
if isinstance(d, AvalancheDataset):
flat_datas.append(d._flat_data)
elif not isinstance(d, _FlatDataWithTransform):
flat_datas.append(_FlatDataWithTransform([d]))
else:
flat_datas.append(d)
if (
transform_groups is None
and frozen_transform_groups is None
and indices is not None
and len(flat_datas) == 1
):
# TODO: remove. shouldn't be needed but helps with flattening
assert len(flat_datas) == 1
self._flat_data = flat_datas[0].subset(indices)
elif (
transform_groups is None
and frozen_transform_groups is None
and indices is None
and len(flat_datas) >= 1
):
# TODO: remove. shouldn't be needed but helps with flattening
if len(flat_datas) == 0:
self._flat_data = _FlatDataWithTransform([])
self._flat_data = flat_datas[0]
if not isinstance(self._flat_data, _FlatDataWithTransform):
self._flat_data = _FlatDataWithTransform([self._flat_data])
for d in flat_datas[1:]:
if not isinstance(d, _FlatDataWithTransform):
d = _FlatDataWithTransform([d])
self._flat_data = self._flat_data.concat(d)
else:
self._flat_data: _FlatDataWithTransform[T_co] = _FlatDataWithTransform(
flat_datas,
indices=indices,
transform_groups=transform_groups,
frozen_transform_groups=frozen_transform_groups,
)
self.collate_fn = collate_fn
####################################
# Init collate_fn
####################################
if len(datasets) > 0:
self.collate_fn = self._init_collate_fn(datasets[0], collate_fn)
else:
self.collate_fn = default_collate
"""
The collate function to use when creating mini-batches from this
dataset.
"""
####################################
# Init data attributes
####################################
# concat attributes from child datasets
new_data_attributes: Dict[str, DataAttribute] = dict()
if data_attributes is not None:
new_data_attributes = {da.name: da for da in data_attributes}
ld = sum(len(d) for d in datasets)
for da in data_attributes:
if len(da) != ld:
raise ValueError(
"Data attribute {} has length {} but the dataset "
"has length {}".format(da.name, len(da), ld)
)
self._data_attributes: Dict[str, DataAttribute] = OrderedDict()
first_dataset = datasets[0] if len(datasets) > 0 else None
if isinstance(first_dataset, AvalancheDataset):
for attr in first_dataset._data_attributes.values():
if attr.name in new_data_attributes:
# Keep overridden attributes in their previous position
self._data_attributes[attr.name] = new_data_attributes.pop(
attr.name
)
continue
acat = attr
found_all = True
for d2 in datasets[1:]:
if hasattr(d2, attr.name):
acat = acat.concat(getattr(d2, attr.name))
elif len(d2) > 0: # if empty we allow missing attributes
found_all = False
break
if found_all:
self._data_attributes[attr.name] = acat
# Insert new data attributes after inherited ones
for da in new_data_attributes.values():
self._data_attributes[da.name] = da
if indices is not None: # subset operation for attributes
for da in self._data_attributes.values():
# TODO: this was the old behavior. How do we know what to do if
# we permute the entire dataset?
# DEPRECATED! always subset attributes
# we keep this behavior only for `classification_subset`
# if len(da) != sum([len(d) for d in datasets]):
# self._data_attributes[da.name] = da
# else:
# self._data_attributes[da.name] = da.subset(self._indices)
#
# dasub = da.subset(indices)
# self._data_attributes[da.name] = dasub
dasub = da.subset(indices)
self._data_attributes[da.name] = dasub
# set attributes dynamically
for el in self._data_attributes.values():
assert len(el) == len(self), f"BUG: Wrong size for attribute {el.name}"
is_property = False
if hasattr(self, el.name):
is_property = True
# Do not raise an error if a property.
# Any check related to the property will be done
# in the property setter method.
if not isinstance(getattr(type(self), el.name, None), property):
raise ValueError(
f"Trying to add DataAttribute `{el.name}` to "
f"AvalancheDataset but the attribute name is "
f"already used."
)
if not is_property:
setattr(self, el.name, el)
def __len__(self) -> int:
return len(self._flat_data)
def __add__(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:
return self.concat(other)
def __radd__(
self: TAvalancheDataset, other: TAvalancheDataset
) -> TAvalancheDataset:
return other.concat(self)
@property
def _datasets(self):
"""Only for backward compatibility of old unit tests. Do not use."""
return self._flat_data._datasets
def concat(self: TAvalancheDataset, other: TAvalancheDataset) -> TAvalancheDataset:
"""Concatenate this dataset with other.
:param other: Other dataset to concatenate.
:return: A new dataset.
"""
return self.__class__([self, other])
def subset(self: TAvalancheDataset, indices: Sequence[int]) -> TAvalancheDataset:
"""Subset this dataset.
:param indices: The indices to keep.
:return: A new dataset.
"""
return self.__class__([self], indices=indices)
@property
def transform(self):
raise AttributeError(
"Cannot access or modify transform directly. Use transform_groups "
"methods such as `replace_current_transform_group`. "
"See the documentation for more info."
)
def update_data_attribute(
self: TAvalancheDataset, name: str, new_value
) -> TAvalancheDataset:
"""
        Return a new dataset with the added or replaced data attribute.
        If an object of type :class:`DataAttribute` is passed, then the data
        attribute is set as is.
        Otherwise, if a raw value is passed, a new DataAttribute is created.
        If a DataAttribute with the same name already exists, the
        use_in_getitem flag is inherited, otherwise it is set to False.
:param name: The name of the data attribute to add/replace.
:param new_value: Either a :class:`DataAttribute` or a sequence
containing as many elements as the datasets.
:returns: A copy of this dataset with the given data attribute set.
"""
assert len(new_value) == len(
self
), f"Size mismatch when updating data attribute {name}"
datacopy = self._shallow_clone_dataset()
datacopy._data_attributes = copy.copy(datacopy._data_attributes)
if isinstance(new_value, DataAttribute):
assert name == new_value.name
datacopy._data_attributes[name] = new_value
else:
use_in_getitem = False
prev_attr = datacopy._data_attributes.get(name, None)
if prev_attr is not None:
use_in_getitem = prev_attr.use_in_getitem
datacopy._data_attributes[name] = DataAttribute(
new_value, name=name, use_in_getitem=use_in_getitem
)
if not hasattr(datacopy, name):
# Creates the field if it does not exist
setattr(datacopy, name, datacopy._data_attributes[name])
return datacopy
def __eq__(self, other: object):
for required_attr in ["_flat_data", "_data_attributes", "collate_fn"]:
if not hasattr(other, required_attr):
return False
return (
other._flat_data == self._flat_data
and self._data_attributes == other._data_attributes # type: ignore
and self.collate_fn == other.collate_fn # type: ignore
)
@overload
def __getitem__(self, exp_id: int) -> T_co:
...
@overload
def __getitem__(self: TAvalancheDataset, exp_id: slice) -> TAvalancheDataset:
...
def __getitem__(
self: TAvalancheDataset, idx: Union[int, slice]
) -> Union[T_co, TAvalancheDataset]:
elem = self._flat_data[idx]
for da in self._data_attributes.values():
if da.use_in_getitem:
if isinstance(elem, dict):
elem[da.name] = da[idx]
elif isinstance(elem, tuple):
elem = list(elem) # type: ignore
elem.append(da[idx]) # type: ignore
else:
elem.append(da[idx]) # type: ignore
return elem
def train(self):
"""Returns a new dataset with the transformations of the 'train' group
loaded.
The current dataset will not be affected.
:return: A new dataset with the training transformations loaded.
"""
return self.with_transforms("train")
def eval(self):
"""
Returns a new dataset with the transformations of the 'eval' group
loaded.
Eval transformations usually don't contain augmentation procedures.
This function may be useful when in need to test on training data
(for instance, in order to run a validation pass).
The current dataset will not be affected.
:return: A new dataset with the eval transformations loaded.
"""
return self.with_transforms("eval")
def with_transforms(self: TAvalancheDataset, group_name: str) -> TAvalancheDataset:
"""
Returns a new dataset with the transformations of a different group
loaded.
The current dataset will not be affected.
:param group_name: The name of the transformations group to use.
:return: A new dataset with the new transformations.
"""
datacopy = self._shallow_clone_dataset()
datacopy._flat_data = datacopy._flat_data.with_transforms(group_name)
return datacopy
def freeze_transforms(self: TAvalancheDataset) -> TAvalancheDataset:
"""Returns a new dataset with the transformation groups frozen."""
datacopy = self._shallow_clone_dataset()
datacopy._flat_data = datacopy._flat_data.freeze_transforms()
return datacopy
def remove_current_transform_group(self):
"""Recursively remove transformation groups from dataset tree."""
datacopy = self._shallow_clone_dataset()
fdata = datacopy._flat_data
datacopy._flat_data = fdata.remove_current_transform_group()
return datacopy
def replace_current_transform_group(self, transform):
"""Recursively remove the current transformation group from the
dataset tree and replaces it."""
datacopy = self._shallow_clone_dataset()
fdata = datacopy._flat_data
datacopy._flat_data = fdata.replace_current_transform_group(transform)
return datacopy
def _shallow_clone_dataset(self: TAvalancheDataset) -> TAvalancheDataset:
"""Clone dataset.
This is a shallow copy, i.e. the data attributes are not copied.
"""
dataset_copy = copy.copy(self)
dataset_copy._flat_data = self._flat_data._shallow_clone_dataset()
return dataset_copy
def _init_collate_fn(self, dataset, collate_fn):
if collate_fn is not None:
return collate_fn
if hasattr(dataset, "collate_fn"):
return getattr(dataset, "collate_fn")
return default_collate
def __repr__(self):
return repr(self._flat_data)
def _tree_depth(self):
"""Return the depth of the tree of datasets.
Use only to debug performance issues.
"""
return self._flat_data._tree_depth()
class _FlatDataWithTransform(FlatData[T_co]):
"""Private class used to wrap a dataset with a transformation group.
Do not use outside of this file.
"""
def __init__(
self,
datasets: Sequence[IDataset[T_co]],
*,
indices: Optional[List[int]] = None,
transform_groups: Optional[TransformGroups] = None,
frozen_transform_groups: Optional[TransformGroups] = None,
):
can_flatten = (transform_groups is None) and (frozen_transform_groups is None)
super().__init__(datasets, indices=indices, can_flatten=can_flatten)
if isinstance(transform_groups, dict):
transform_groups = TransformGroups(transform_groups)
if isinstance(frozen_transform_groups, dict):
frozen_transform_groups = TransformGroups(frozen_transform_groups)
if transform_groups is None:
transform_groups = EmptyTransformGroups()
if frozen_transform_groups is None:
frozen_transform_groups = EmptyTransformGroups()
self._transform_groups: TransformGroups = transform_groups
self._frozen_transform_groups: TransformGroups = frozen_transform_groups
####################################
# Init transformations
####################################
cgroup = None
# inherit transformation group from original dataset
for dd in datasets:
if isinstance(dd, _FlatDataWithTransform):
if cgroup is None and dd._transform_groups is not None:
cgroup = dd._transform_groups.current_group
elif (
dd._transform_groups is not None
and dd._transform_groups.current_group != cgroup
):
# all datasets must have the same transformation group
warnings.warn(
f"Concatenated datasets have different transformation "
f"groups. Using group={cgroup}."
)
if cgroup is None:
cgroup = "train"
self._frozen_transform_groups.current_group = cgroup
self._transform_groups.current_group = cgroup
def __eq__(self, other):
for required_attr in [
"_datasets",
"_transform_groups",
"_frozen_transform_groups",
]:
if not hasattr(other, required_attr):
return False
eq_datasets = len(self._datasets) == len(other._datasets) # type: ignore
eq_datasets = eq_datasets and all(
d1 == d2 for d1, d2 in zip(self._datasets, other._datasets) # type: ignore
)
ftg = other._frozen_transform_groups # type: ignore
return (
eq_datasets
and self._transform_groups == other._transform_groups # type: ignore
and self._frozen_transform_groups == ftg # type: ignore
)
def _getitem_recursive_call(self, idx, group_name) -> T_co:
"""Private method only for internal use.
We need this recursive call to avoid appending task
label multiple times inside the __getitem__.
"""
dataset_idx, idx = self._get_idx(idx)
dd = self._datasets[dataset_idx]
if isinstance(dd, _FlatDataWithTransform):
element = dd._getitem_recursive_call(idx, group_name=group_name)
else:
element = dd[idx]
if self._frozen_transform_groups is not None:
element = self._frozen_transform_groups(element, group_name=group_name)
if self._transform_groups is not None:
element = self._transform_groups(element, group_name=group_name)
return element
def __getitem__(
self: TDataWTransform, idx: Union[int, slice]
) -> Union[T_co, TDataWTransform]:
if isinstance(idx, (int, np.integer)):
elem = self._getitem_recursive_call(
idx, self._transform_groups.current_group
)
return elem # type: ignore
else:
return super().__getitem__(idx)
def with_transforms(self: TDataWTransform, group_name: str) -> TDataWTransform:
"""
Returns a new dataset with the transformations of a different group
loaded.
The current dataset will not be affected.
:param group_name: The name of the transformations group to use.
:return: A new dataset with the new transformations.
"""
datacopy = self._shallow_clone_dataset()
datacopy._frozen_transform_groups.with_transform(group_name)
datacopy._transform_groups.with_transform(group_name)
return datacopy
def freeze_transforms(self: TDataWTransform) -> TDataWTransform:
"""Returns a new dataset with the transformation groups frozen."""
tgroups = copy.copy(self._transform_groups)
frozen_tgroups = copy.copy(self._frozen_transform_groups)
datacopy = self._shallow_clone_dataset()
datacopy._frozen_transform_groups = frozen_tgroups + tgroups
datacopy._transform_groups = EmptyTransformGroups()
dds: List[IDataset] = []
for dd in datacopy._datasets:
if isinstance(dd, _FlatDataWithTransform):
dds.append(dd.freeze_transforms())
else:
dds.append(dd)
datacopy._datasets = dds
return datacopy
def remove_current_transform_group(self):
"""Recursively remove transformation groups from dataset tree."""
dataset_copy = self._shallow_clone_dataset()
cgroup = dataset_copy._transform_groups.current_group
dataset_copy._transform_groups[cgroup] = None
dds = []
for dd in dataset_copy._datasets:
if isinstance(dd, _FlatDataWithTransform):
dds.append(dd.remove_current_transform_group())
else:
dds.append(dd)
dataset_copy._datasets = dds
return dataset_copy
def replace_current_transform_group(self, transform):
"""Recursively remove the current transformation group from the
dataset tree and replaces it."""
dataset_copy = self.remove_current_transform_group()
cgroup = dataset_copy._transform_groups.current_group
dataset_copy._transform_groups[cgroup] = transform
dds = []
for dd in dataset_copy._datasets:
if isinstance(dd, _FlatDataWithTransform):
dds.append(dd.remove_current_transform_group())
else:
dds.append(dd)
dataset_copy._datasets = dds
return dataset_copy
def _shallow_clone_dataset(self: TDataWTransform) -> TDataWTransform:
"""Clone dataset.
This is a shallow copy, i.e. the data attributes are not copied.
"""
dataset_copy = copy.copy(self)
dataset_copy._transform_groups = copy.copy(dataset_copy._transform_groups)
dataset_copy._frozen_transform_groups = copy.copy(
dataset_copy._frozen_transform_groups
)
return dataset_copy
def make_avalanche_dataset(
dataset: IDataset[T_co],
*,
data_attributes: Optional[List[DataAttribute]] = None,
transform_groups: Optional[TransformGroups] = None,
frozen_transform_groups: Optional[TransformGroups] = None,
collate_fn: Optional[Callable[[List], Any]] = None,
) -> AvalancheDataset[T_co]:
"""Avalanche Dataset.
Creates a ``AvalancheDataset`` instance.
See ``AvalancheDataset`` for more details.
:param dataset: Original dataset. Beware that
AvalancheDataset will not overwrite transformations already
applied by this dataset.
:param transform_groups: Avalanche transform groups.
"""
return AvalancheDataset(
[dataset],
data_attributes=data_attributes,
transform_groups=transform_groups,
frozen_transform_groups=frozen_transform_groups,
collate_fn=collate_fn,
)
def _print_frozen_transforms(self):
"""Internal debugging method. Do not use it.
Prints the current frozen transformations."""
print("FROZEN TRANSFORMS:\n" + str(self._frozen_transform_groups))
for dd in self._datasets:
if isinstance(dd, AvalancheDataset):
print("PARENT FROZEN:\n")
_print_frozen_transforms(dd)
def _print_nonfrozen_transforms(self):
"""Internal debugging method. Do not use it.
Prints the current non-frozen transformations."""
print("TRANSFORMS:\n" + str(self._transform_groups))
for dd in self._datasets:
if isinstance(dd, AvalancheDataset):
print("PARENT TRANSFORMS:\n")
_print_nonfrozen_transforms(dd)
def _print_transforms(self):
"""Internal debugging method. Do not use it.
Prints the current transformations."""
self._print_frozen_transforms()
self._print_nonfrozen_transforms()
__all__ = ["AvalancheDataset", "make_avalanche_dataset"]
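# A minimal usage sketch (illustrative only; it assumes a plain indexable
# sequence is acceptable wherever an IDataset is expected, as in the
# constructor above):
if __name__ == "__main__":
    raw = [(x, 2 * x) for x in range(4)]
    targets = DataAttribute([0, 0, 1, 1], name="targets")
    data = make_avalanche_dataset(raw, data_attributes=[targets])
    assert len(data) == 4
    eval_view = data.eval()  # returns a new dataset; `data` is unchanged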
|
7e596ec1717ac4d7f71dbbd07f0f64c3fc962f89
|
6c8305ea1df9687df1c0d2b0ace56733516c6322
|
/readthedocs/search/tests/test_api.py
|
123b0bbd652adf4d3d5948caacc7690eacef70e0
|
[
"MIT"
] |
permissive
|
readthedocs/readthedocs.org
|
9806083aa744c2308267919480a692e1e003e45d
|
bf88ce6d1085d922322a5fadce63a22c5544c830
|
refs/heads/main
| 2023-09-05T20:22:34.281891
| 2023-09-05T12:41:52
| 2023-09-05T12:41:52
| 841,835
| 2,894
| 1,509
|
MIT
| 2023-09-14T20:36:00
| 2010-08-16T19:18:06
|
Python
|
UTF-8
|
Python
| false
| false
| 28,852
|
py
|
test_api.py
|
import re
from unittest import mock
import pytest
from django.urls import reverse
from django_dynamic_fixture import get
from readthedocs.builds.models import Version
from readthedocs.projects.constants import (
MKDOCS,
MKDOCS_HTML,
PUBLIC,
SPHINX,
SPHINX_HTMLDIR,
SPHINX_SINGLEHTML,
)
from readthedocs.projects.models import Feature, HTMLFile, Project
from readthedocs.search.api.v2.views import PageSearchAPIView
from readthedocs.search.documents import PageDocument
from readthedocs.search.tests.utils import (
SECTION_FIELDS,
get_search_query_from_project_file,
)
from readthedocs.search.utils import index_new_files, remove_indexed_files
@pytest.mark.django_db
@pytest.mark.search
@pytest.mark.usefixtures("all_projects")
class BaseTestDocumentSearch:
def setup_method(self, method):
        # This reverse needs to be inside the ``setup_method`` method because
        # on the Corporate site this URL is not defined if the ``-ext`` module
        # is not installed.
self.url = reverse("search_api")
@pytest.fixture(autouse=True)
def setup_settings(self, settings):
settings.PUBLIC_DOMAIN = "readthedocs.io"
settings.USE_SUBDOMAIN = True
def get_search(self, api_client, search_params):
return api_client.get(self.url, search_params)
@pytest.mark.parametrize("page_num", [0, 1])
def test_search_works_with_title_query(self, api_client, project, page_num):
query = get_search_query_from_project_file(
project_slug=project.slug, page_num=page_num, field="title"
)
version = project.versions.all().first()
search_params = {"project": project.slug, "version": version.slug, "q": query}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
data = resp.data["results"]
assert len(data) >= 1
# Matching first result
project_data = data[0]
assert project_data["project"] == project.slug
# Check highlight return correct object of first result
title_highlight = project_data["highlights"]["title"]
assert len(title_highlight) == 1
assert query.lower() in title_highlight[0].lower()
@pytest.mark.parametrize("data_type", SECTION_FIELDS)
@pytest.mark.parametrize("page_num", [0, 1])
def test_search_works_with_sections(self, api_client, project, page_num, data_type):
type, field = data_type.split(".")
query = get_search_query_from_project_file(
project_slug=project.slug,
page_num=page_num,
type=type,
field=field,
)
version = project.versions.all().first()
search_params = {"project": project.slug, "version": version.slug, "q": query}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
data = resp.data["results"]
assert len(data) >= 1
# Matching first result
project_data = data[0]
assert project_data["project"] == project.slug
blocks = project_data["blocks"]
# since there was a nested query,
# blocks should not be empty
assert len(blocks) >= 1
block_0 = blocks[0]
assert block_0["type"] == type
highlights = block_0["highlights"][field]
assert len(highlights) == 1, "number_of_fragments is set to 1"
# checking highlighting of results
        highlighted_words = re.findall(  # this gets all words inside <span> tags
"<span>(.*?)</span>", highlights[0]
)
assert len(highlighted_words) > 0
for word in highlighted_words:
# Make it lower because our search is case insensitive
assert word.lower() in query.lower()
def test_doc_search_filter_by_project(self, api_client):
"""Test Doc search results are filtered according to project"""
# `documentation` word is present both in `kuma` and `docs` files
# and not in `pipeline`, so search with this phrase but filter through project
search_params = {"q": "documentation", "project": "docs", "version": "latest"}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
data = resp.data["results"]
        assert len(data) == 2  # both pages of `docs` contain the word `documentation`
# all results must be from same project
for res in data:
assert res["project"] == "docs"
def test_doc_search_filter_by_version(self, api_client, project):
"""Test Doc search result are filtered according to version"""
query = get_search_query_from_project_file(project_slug=project.slug)
latest_version = project.versions.all()[0]
# Create another version
dummy_version = get(
Version,
project=project,
active=True,
privacy_level=PUBLIC,
)
# Create HTMLFile same as the latest version
latest_version_files = HTMLFile.objects.all().filter(version=latest_version)
for f in latest_version_files:
f.version = dummy_version
# Make primary key to None, so django will create new object
f.pk = None
f.save()
PageDocument().update(f)
search_params = {
"q": query,
"project": project.slug,
"version": dummy_version.slug,
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
data = resp.data["results"]
assert len(data) == 1
assert data[0]["project"] == project.slug
assert data[0]["project_alias"] is None
def test_doc_search_pagination(self, api_client, project):
"""Test Doc search result can be paginated"""
latest_version = project.versions.all()[0]
html_file = HTMLFile.objects.filter(version=latest_version)[0]
title = html_file.processed_json["title"]
query = title.split()[0]
# Create 60 more same html file
for _ in range(60):
# Make primary key to None, so django will create new object
html_file.pk = None
html_file.save()
PageDocument().update(html_file)
search_params = {
"q": query,
"project": project.slug,
"version": latest_version.slug,
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
# Check the count is 61 (1 existing and 60 new created)
assert resp.data["count"] == 61
# Check there are next url
assert resp.data["next"] is not None
        # There should be only 50 results, as the default page size is 50
assert len(resp.data["results"]) == 50
# Check for page 2
search_params["page"] = 2
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
# Check the count is 61 (1 existing and 60 new created)
assert resp.data["count"] == 61
# We don't have more results after this page
assert resp.data["next"] is None
        # There should be only the remaining 11 results
assert len(resp.data["results"]) == 11
# Add `page_size` parameter and check the data is paginated accordingly
search_params["page_size"] = 5
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
assert len(resp.data["results"]) == 5
def test_doc_search_without_parameters(self, api_client, project):
"""Hitting Document Search endpoint without project and version should return 404."""
resp = self.get_search(api_client, {})
assert resp.status_code == 404
def test_doc_search_without_query(self, api_client, project):
"""Hitting Document Search endpoint without a query should return error."""
resp = self.get_search(
api_client,
{"project": project.slug, "version": project.versions.first().slug},
)
assert resp.status_code == 400
# Check error message is there
assert "q" in resp.data.keys()
def test_doc_search_subprojects(self, api_client, all_projects):
"""Test Document search return results from subprojects also"""
project = all_projects[0]
subproject = all_projects[1]
version = project.versions.all()[0]
# Add another project as subproject of the project
project.add_subproject(subproject)
# Now search with subproject content but explicitly filter by the parent project
query = get_search_query_from_project_file(project_slug=subproject.slug)
search_params = {"q": query, "project": project.slug, "version": version.slug}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
data = resp.data["results"]
        assert len(data) >= 1  # there may be results from other projects
# First result should be the subproject
first_result = data[0]
assert first_result["project"] == subproject.slug
assert first_result["project_alias"] == subproject.slug
# The result is from the same version as the main project.
assert first_result["version"] == version.slug
# Check the link is the subproject document link
document_link = subproject.get_docs_url(version_slug=version.slug)
link = first_result["domain"] + first_result["path"]
assert document_link in link
def test_doc_search_subprojects_default_version(self, api_client, all_projects):
"""Return results from subprojects that match the version from the main project or fallback to its default version."""
project = all_projects[0]
version = project.versions.all()[0]
subproject = all_projects[1]
subproject_version = subproject.versions.all()[0]
# Change the name of the version, and make it default.
subproject_version.slug = "different"
subproject_version.save()
subproject.default_version = subproject_version.slug
subproject.save()
subproject.versions.filter(slug=version.slug).delete()
# Refresh index
version_files = HTMLFile.objects.all().filter(version=subproject_version)
for f in version_files:
PageDocument().update(f)
# Add another project as subproject of the project
project.add_subproject(subproject)
# Now search with subproject content but explicitly filter by the parent project
query = get_search_query_from_project_file(project_slug=subproject.slug)
search_params = {"q": query, "project": project.slug, "version": version.slug}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
data = resp.data["results"]
        assert len(data) >= 1  # there may be results from other projects
# First result should be the subproject
first_result = data[0]
assert first_result["project"] == subproject.slug
assert first_result["version"] == "different"
# Check the link is the subproject document link
document_link = subproject.get_docs_url(version_slug=subproject_version.slug)
link = first_result["domain"] + first_result["path"]
assert document_link in link
def test_doc_search_unexisting_project(self, api_client):
project = "notfound"
assert not Project.objects.filter(slug=project).exists()
search_params = {
"q": "documentation",
"project": project,
"version": "latest",
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 404
def test_doc_search_unexisting_version(self, api_client, project):
version = "notfound"
assert not project.versions.filter(slug=version).exists()
search_params = {
"q": "documentation",
"project": project.slug,
"version": version,
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 404
@mock.patch.object(PageSearchAPIView, "_get_projects_to_search", list)
def test_get_all_projects_returns_empty_results(self, api_client, project):
"""If there is a case where `_get_projects_to_search` returns empty, we could be querying all projects."""
# `documentation` word is present both in `kuma` and `docs` files
# and not in `pipeline`, so search with this phrase but filter through project
search_params = {"q": "documentation", "project": "docs", "version": "latest"}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
data = resp.data["results"]
assert len(data) == 0
def test_doc_search_hidden_versions(self, api_client, all_projects):
"""Test Document search return results from subprojects also"""
project = all_projects[0]
subproject = all_projects[1]
version = project.versions.all()[0]
# Add another project as subproject of the project
project.add_subproject(subproject)
version_subproject = subproject.versions.first()
version_subproject.hidden = True
version_subproject.save()
# Now search with subproject content but explicitly filter by the parent project
query = get_search_query_from_project_file(project_slug=subproject.slug)
search_params = {"q": query, "project": project.slug, "version": version.slug}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
# The version from the subproject is hidden, so isn't show on the results.
data = resp.data["results"]
assert len(data) == 0
# Now search on the subproject with hidden version
query = get_search_query_from_project_file(project_slug=subproject.slug)
search_params = {
"q": query,
"project": subproject.slug,
"version": version_subproject.slug,
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
# We can still search inside the hidden version
data = resp.data["results"]
assert len(data) == 1
first_result = data[0]
assert first_result["project"] == subproject.slug
@pytest.mark.parametrize("doctype", [SPHINX, SPHINX_SINGLEHTML, MKDOCS_HTML])
def test_search_correct_link_for_normal_page_html_projects(
self, api_client, doctype
):
project = Project.objects.get(slug="docs")
project.versions.update(documentation_type=doctype)
version = project.versions.all().first()
# Refresh index
version_files = HTMLFile.objects.all().filter(version=version)
for f in version_files:
PageDocument().update(f)
search_params = {
"project": project.slug,
"version": version.slug,
"q": "Support",
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
result = resp.data["results"][0]
assert result["project"] == project.slug
assert result["path"] == "/en/latest/support.html"
@pytest.mark.parametrize("doctype", [SPHINX, SPHINX_SINGLEHTML, MKDOCS_HTML])
def test_search_correct_link_for_index_page_html_projects(
self, api_client, doctype
):
project = Project.objects.get(slug="docs")
project.versions.update(documentation_type=doctype)
version = project.versions.all().first()
# Refresh index
version_files = HTMLFile.objects.all().filter(version=version)
for f in version_files:
PageDocument().update(f)
search_params = {
"project": project.slug,
"version": version.slug,
"q": "Some content from index",
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
result = resp.data["results"][0]
assert result["project"] == project.slug
assert result["path"] == "/en/latest/index.html"
@pytest.mark.parametrize("doctype", [SPHINX, SPHINX_SINGLEHTML, MKDOCS_HTML])
def test_search_correct_link_for_index_page_subdirectory_html_projects(
self, api_client, doctype
):
project = Project.objects.get(slug="docs")
project.versions.update(documentation_type=doctype)
version = project.versions.all().first()
# Refresh index
version_files = HTMLFile.objects.all().filter(version=version)
for f in version_files:
PageDocument().update(f)
search_params = {
"project": project.slug,
"version": version.slug,
"q": "Some content from guides/index",
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
result = resp.data["results"][0]
assert result["project"] == project.slug
assert result["path"] == "/en/latest/guides/index.html"
@pytest.mark.parametrize("doctype", [SPHINX_HTMLDIR, MKDOCS])
def test_search_correct_link_for_normal_page_htmldir_projects(
self, api_client, doctype
):
project = Project.objects.get(slug="docs")
project.versions.update(documentation_type=doctype)
version = project.versions.all().first()
# Refresh index
version_files = HTMLFile.objects.all().filter(version=version)
for f in version_files:
PageDocument().update(f)
search_params = {
"project": project.slug,
"version": version.slug,
"q": "Support",
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
result = resp.data["results"][0]
assert result["project"] == project.slug
assert result["path"] == "/en/latest/support.html"
@pytest.mark.parametrize("doctype", [SPHINX_HTMLDIR, MKDOCS])
def test_search_correct_link_for_index_page_htmldir_projects(
self, api_client, doctype
):
project = Project.objects.get(slug="docs")
project.versions.update(documentation_type=doctype)
version = project.versions.all().first()
# Refresh index
version_files = HTMLFile.objects.all().filter(version=version)
for f in version_files:
PageDocument().update(f)
search_params = {
"project": project.slug,
"version": version.slug,
"q": "Some content from index",
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
result = resp.data["results"][0]
assert result["project"] == project.slug
assert result["path"] == "/en/latest/"
@pytest.mark.parametrize("doctype", [SPHINX_HTMLDIR, MKDOCS])
def test_search_correct_link_for_index_page_subdirectory_htmldir_projects(
self, api_client, doctype
):
project = Project.objects.get(slug="docs")
project.versions.update(documentation_type=doctype)
version = project.versions.all().first()
# Refresh index
version_files = HTMLFile.objects.all().filter(version=version)
for f in version_files:
PageDocument().update(f)
search_params = {
"project": project.slug,
"version": version.slug,
"q": "Some content from guides/index",
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
result = resp.data["results"][0]
assert result["project"] == project.slug
assert result["path"] == "/en/latest/guides/"
def test_search_advanced_query_detection(self, api_client):
project = Project.objects.get(slug="docs")
feature, _ = Feature.objects.get_or_create(
feature_id=Feature.DEFAULT_TO_FUZZY_SEARCH,
)
project.feature_set.add(feature)
project.save()
version = project.versions.all().first()
# Query with a typo should return results
search_params = {
"project": project.slug,
"version": version.slug,
"q": "indx",
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) > 0
assert "Index" in results[0]["title"]
# Query with a typo, but we want to match that
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"indx"',
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
assert len(resp.data["results"]) == 0
# Exact query still works
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"index"',
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) > 0
assert "Index" in results[0]["title"]
def test_search_single_query(self, api_client):
"""A single query matches substrings."""
project = Project.objects.get(slug="docs")
feature, _ = Feature.objects.get_or_create(
feature_id=Feature.DEFAULT_TO_FUZZY_SEARCH,
)
project.feature_set.add(feature)
project.save()
version = project.versions.all().first()
# Query with a partial word should return results
search_params = {
"project": project.slug,
"version": version.slug,
"q": "ind",
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) > 0
assert "Index" in results[0]["title"]
highlights = results[0]["blocks"][0]["highlights"]
assert "<span>index</span>" in highlights["content"][0]
assert "Guides" in results[1]["title"]
highlights = results[1]["blocks"][0]["highlights"]
assert "<span>index</span>" in highlights["content"][0]
# Query with a partial word, but we want to match that
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"ind"',
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
assert len(resp.data["results"]) == 0
# Exact query still works
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"index"',
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) > 0
assert "Index" in results[0]["title"]
def test_search_custom_ranking(self, api_client):
project = Project.objects.get(slug="docs")
version = project.versions.all().first()
page_index = HTMLFile.objects.get(
version=version,
path="index.html",
)
page_guides = HTMLFile.objects.get(
version=version,
path="guides/index.html",
)
# Query with the default ranking
assert page_index.rank == 0
assert page_guides.rank == 0
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"content from"',
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) == 2
assert results[0]["path"] == "/en/latest/index.html"
assert results[1]["path"] == "/en/latest/guides/index.html"
# Query with a higher rank over guides/index.html
page_guides.rank = 5
page_guides.save()
PageDocument().update(page_guides)
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"content from"',
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) == 2
assert results[0]["path"] == "/en/latest/guides/index.html"
assert results[1]["path"] == "/en/latest/index.html"
# Query with a lower rank over index.html
page_index.rank = -2
page_index.save()
page_guides.rank = 4
page_guides.save()
PageDocument().update(page_index)
PageDocument().update(page_guides)
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"content from"',
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) == 2
assert results[0]["path"] == "/en/latest/guides/index.html"
assert results[1]["path"] == "/en/latest/index.html"
# Query with a lower rank over index.html
page_index.rank = 3
page_index.save()
page_guides.rank = 6
page_guides.save()
PageDocument().update(page_index)
PageDocument().update(page_guides)
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"content from"',
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) == 2
assert results[0]["path"] == "/en/latest/guides/index.html"
assert results[1]["path"] == "/en/latest/index.html"
# Query with a same rank over guides/index.html and index.html
page_index.rank = -10
page_index.save()
page_guides.rank = -10
page_guides.save()
PageDocument().update(page_index)
PageDocument().update(page_guides)
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"content from"',
}
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) == 2
assert results[0]["path"] == "/en/latest/index.html"
assert results[1]["path"] == "/en/latest/guides/index.html"
def test_search_ignore(self, api_client):
project = Project.objects.get(slug="docs")
version = project.versions.all().first()
page_index = HTMLFile.objects.get(
version=version,
path="index.html",
)
page_guides = HTMLFile.objects.get(
version=version,
path="guides/index.html",
)
search_params = {
"project": project.slug,
"version": version.slug,
"q": '"content from"',
}
# Query with files not ignored.
assert page_index.ignore is None
assert page_guides.ignore is None
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) == 2
assert results[0]["path"] == "/en/latest/index.html"
assert results[1]["path"] == "/en/latest/guides/index.html"
# Query with guides/index.html ignored.
page_guides.ignore = True
page_guides.save()
remove_indexed_files(HTMLFile, project.slug, version.slug)
index_new_files(HTMLFile, version, page_index.build)
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) == 1
assert results[0]["path"] == "/en/latest/index.html"
# Query with index.html and guides/index.html ignored.
page_index.ignore = True
page_index.save()
remove_indexed_files(HTMLFile, project.slug, version.slug)
index_new_files(HTMLFile, version, page_index.build)
resp = self.get_search(api_client, search_params)
assert resp.status_code == 200
results = resp.data["results"]
assert len(results) == 0
class TestDocumentSearch(BaseTestDocumentSearch):
pass
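# A hypothetical invocation sketch (added for illustration; not part of the
# original module): pytest collects the class above through the markers at
# the top of the file, so a focused run can select tests by marker. Paths
# and the Django settings wiring are assumptions of the surrounding project.
if __name__ == "__main__":
    import sys

    import pytest

    # -m search selects tests carrying the @pytest.mark.search marker.
    sys.exit(pytest.main(["-m", "search", __file__]))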
blob_id: 0bdd617c41fc85fcb8aebfd87172cb5089c1545c
directory_id: 6cf2903f7384272cf270f4a16e9b010b8778c165
path: /mangle-infra-agent/Faults/DiskFillFault.py
content_id: bb4d3a2a4619b4badb0c99f70a10e8f88cc8a3fd
detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"]
license_type: permissive
repo_name: vmware/mangle
snapshot_id: f0a814391e61936bd16ff88bdd1fe88139e5a2a3
revision_id: aa8e22adfd08f825d4d80be143c707f4fd2c5d86
branch_name: refs/heads/master
visit_date: 2023-08-17T15:34:04.628269
revision_date: 2023-04-06T10:07:47
committer_date: 2023-04-06T10:07:47
github_id: 172835741
star_events_count: 166
fork_events_count: 52
gha_license_id: NOASSERTION
gha_event_created_at: 2023-06-14T22:33:56
gha_created_at: 2019-02-27T03:25:47
gha_language: Java
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 5752
extension: py
filename: DiskFillFault.py
content:
from Faults import InfraFault
from Faults import FaultStatus
import time
import psutil
import subprocess
import os
import logging
import threading
import math
from Faults.helper import FaultHelper
log = logging.getLogger("python_agent")
class DiskFillFault(InfraFault.InfraFault):
def __init__(self, fault_args):
super().__init__(fault_args)
self.threads = []
self.sudo_command = ''
def prereq_check(self):
self.sudo_command = FaultHelper.is_sudo_available()
pre_req_error_msg = ''
dd_res = subprocess.call(self.sudo_command + ' dd --version >/dev/null 2>&1', shell=True)
if dd_res != 0:
pre_req_error_msg += "dd command required"
if os.path.isdir(self.fault_args.get("--directoryPath")):
if os.access(self.fault_args.get("--directoryPath"), os.W_OK):
_, _, _, use_percentage = psutil.disk_usage(self.fault_args.get("--directoryPath"))
if float(self.fault_args.get("--diskFillSize")) <= use_percentage:
pre_req_error_msg += "The Provided diskFill percentage should be greater than used disk percentage"
else:
pre_req_error_msg += "The Provided user does not have permission on given directory"
else:
pre_req_error_msg += "The Provided directory path not found:" + self.fault_args.get("--directoryPath")
if len(pre_req_error_msg) > 0:
return pre_req_error_msg
def get_status(self, faultId):
log.info("status of {} is {}".format(faultId, self.faultinfo.status))
if self.faultinfo.status == FaultStatus.FaultStatus.COMPLETED.name:
return self.faultinfo.status
_, used, _, percentage = psutil.disk_usage(self.fault_args.get("--directoryPath"))
current_disk_status = "Current disk usage is {} and Current disk used Percentage is {}. "\
.format(used, percentage)
return self.faultinfo.status +" " +" ".join(str(x) for x in self.faultinfo.activity) + current_disk_status
def remediate(self):
log.info("remediation of disk fill called")
if len(self.threads) > 0:
for th in self.threads:
if th:
th.stop = True
print("Close threads")
self.threads.clear()
for proc in psutil.process_iter(['pid', 'name', 'username', 'cmdline']):
if 'mangleDumpFile.txt' in proc.info['cmdline']:
proc.kill()
file_path = '{}/mangleDumpFile.txt'.format(self.fault_args.get("--directoryPath"))
if os.path.isfile(file_path):
log.info("removing dummyfile")
            remove_dummyfile_cmd = self.sudo_command + " rm -rf {}".format(file_path)
subprocess.call(remove_dummyfile_cmd, shell=True)
else:
log.info("Error: %s file already deleted" % file_path)
self.faultinfo.status = FaultStatus.FaultStatus.COMPLETED.name
def trigger_injection(self):
log.info("Filling Disk")
thread = threading.Thread(target=self.fill_disk)
self.threads.append(thread)
thread.start()
print("Thread creation done")
def fill_disk(self):
total, _, _, use_percentage = psutil.disk_usage(self.fault_args.get("--directoryPath"))
if use_percentage < 100:
disk_fill_percentage = float(self.fault_args.get("--diskFillSize"))
of_cmd = 'of={}/mangleDumpFile.txt'.format(self.fault_args.get("--directoryPath"))
log.info(of_cmd)
if disk_fill_percentage != 0 and disk_fill_percentage != 100:
percentage_to_fill = disk_fill_percentage - use_percentage
bytes_to_fill = round(percentage_to_fill*total/100)
log.info("Bytes to fill:{}".format(bytes_to_fill))
count_cmd = 'count={}'.format(round(bytes_to_fill/(1024*1024)))
log.info("Count: {}".format(count_cmd))
dd_command = "{} dd if=/dev/zero {} oflag=append bs=1MB {} conv=notrunc".format(self.sudo_command,
of_cmd,count_cmd)
cmd_return = subprocess.call(dd_command, shell=True)
log.info(str(cmd_return))
if cmd_return != 0:
self.faultinfo.status = FaultStatus.FaultStatus.INJECTION_FAILED.name
return
time.sleep(round(float(self.fault_args.get("--timeout"))/1000))
else:
dd_command = "{} dd if=/dev/zero {} oflag=append bs=1GB conv=notrunc".format(self.sudo_command,of_cmd)
cmd_return = subprocess.call(dd_command, shell=True)
log.info(str(cmd_return))
_, _, _, use_percentage = psutil.disk_usage(self.fault_args.get("--directoryPath"))
log.info("Used:{}".format(str(use_percentage)))
if cmd_return != 0:
                    if math.ceil(use_percentage) != 100:
self.faultinfo.status = FaultStatus.FaultStatus.INJECTION_FAILED.name
return
time.sleep(round(float(self.fault_args.get("--timeout")) / 1000))
else:
log.info("Disk is already full")
if __name__ == '__main__':
print("Happy Filling")
fault_args = {'--operation': 'inject', '--faultname': "diskSpaceFault", "--directoryPath": "mangletestDir",
"--timeout": "15000","--diskFillSize":"75", "--faultId": "abcdefgDiskio"}
diskobj = DiskFillFault(fault_args)
process = []
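    # Hypothetical driver continuation (added for illustration; not in the
    # original file). The calls below would exercise the fault lifecycle
    # defined above, but actually running them fills the disk, so they are
    # left commented out; the 5-second wait is an assumption.
    # diskobj.trigger_injection()
    # time.sleep(5)
    # print(diskobj.get_status(fault_args["--faultId"]))
    # diskobj.remediate()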
blob_id: e084cfd1974239e5612e763b0acb5a14e6aacf96
directory_id: 0760fb4901a75766921a205b55686d6d6f049b30
path: /python/ray/experimental/job/example_job/demo_script.py
content_id: 8bcc79611e26d404a7f7d0dbd86b22f1424420c9
detected_licenses: ["MIT", "BSD-3-Clause", "Apache-2.0"]
license_type: permissive
repo_name: ray-project/ray
snapshot_id: a4bb6940b08b59a61ef0b8e755a52d8563a2f867
revision_id: edba68c3e7cf255d1d6479329f305adb7fa4c3ed
branch_name: refs/heads/master
visit_date: 2023-08-31T03:36:48.164405
revision_date: 2023-08-31T03:20:38
committer_date: 2023-08-31T03:20:38
github_id: 71932349
star_events_count: 29482
fork_events_count: 5669
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T21:48:14
gha_created_at: 2016-10-25T19:38:30
gha_language: Python
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 2258
extension: py
filename: demo_script.py
content:
# Regular Ray application that a user wrote and runs on a local cluster.
# Intermediate statuses are dumped to GCS.
import argparse
import time
import ray
import ray.experimental.internal_kv as ray_kv
@ray.remote
class StepActor:
def __init__(self, interval_s=1, total_steps=3):
self.interval_s = interval_s
self.stopped = False
self.current_step = 1
self.total_steps = total_steps
worker = ray._private.worker.global_worker
worker_id = worker.core_worker.get_actor_id()
ray_kv._internal_kv_put(f"JOB:{worker_id}", self.current_step, overwrite=True)
def run(self):
worker = ray._private.worker.global_worker
worker_id = worker.core_worker.get_actor_id()
while self.current_step <= self.total_steps:
if not self.stopped:
print(
f"Sleeping {self.interval_s} secs to executing "
f"step {self.current_step}"
)
time.sleep(self.interval_s)
self.current_step += 1
ray_kv._internal_kv_put(
f"JOB:{worker_id}", self.current_step, overwrite=True
)
else:
print("Stop called or reached final step.")
break
self.stopped = True
ray_kv._internal_kv_put(f"JOB:{worker_id}", "DONE", overwrite=True)
return "DONE"
def get_step(self):
return self.current_step
def stop(self):
self.stopped = True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--interval-s",
required=False,
type=int,
default=1,
help="Address to use to connect to Ray",
)
parser.add_argument(
"--total-steps",
required=False,
type=int,
default=3,
help="Password for connecting to Redis",
)
args, _ = parser.parse_known_args()
ray.init()
step_actor = StepActor.remote(
interval_s=args.interval_s, total_steps=args.total_steps
)
ref = step_actor.run.remote()
print(ray.get([ref]))
job_key = ray_kv._internal_kv_list("JOB:")[0]
print(f"{job_key}, {ray_kv._internal_kv_get(job_key)}")
blob_id: 9ad556b2adaa62767d8a063a59a9292f9cffe5f6
directory_id: 8d1c7fba7cd15f8a1e33fd27d11eefd1c67d579f
path: /src/test/py/bazel/bazel_windows_cpp_test.py
content_id: f2b2b8792d0992d01869710da17c65cc4b6bf360
detected_licenses: ["Apache-2.0"]
license_type: permissive
repo_name: bazelbuild/bazel
snapshot_id: 5896162455f032efc899b8de60aa39b9d2cad4a6
revision_id: 171aae3f9c57b41089e25ec61fc84c35baa3079d
branch_name: refs/heads/master
visit_date: 2023-08-22T22:52:48.714735
revision_date: 2023-08-22T18:01:53
committer_date: 2023-08-22T18:01:53
github_id: 20773773
star_events_count: 20294
fork_events_count: 4383
gha_license_id: Apache-2.0
gha_event_created_at: 2023-09-14T18:38:44
gha_created_at: 2014-06-12T16:00:38
gha_language: Java
src_encoding: UTF-8
language: Python
is_vendor: false
is_generated: false
length_bytes: 41655
extension: py
filename: bazel_windows_cpp_test.py
content:
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import os
import unittest
from src.test.py.bazel import test_base
class BazelWindowsCppTest(test_base.TestBase):
def createProjectFiles(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'package(',
' default_visibility = ["//visibility:public"],',
' features=["windows_export_all_symbols"]',
')',
'',
'cc_library(',
' name = "A",',
' srcs = ["a.cc"],',
' hdrs = ["a.h"],',
' copts = ["/DCOMPILING_A_DLL"],',
' features = ["no_windows_export_all_symbols"],',
')',
'',
'cc_library(',
' name = "B",',
' srcs = ["b.cc"],',
' hdrs = ["b.h"],',
' deps = [":A"],',
' copts = ["/DNO_DLLEXPORT"],',
')',
'',
'cc_binary(',
' name = "C",',
' srcs = ["c.cc"],',
' deps = [":A", ":B" ],',
' linkstatic = 0,',
')',
])
self.ScratchFile('a.cc', [
'#include <stdio.h>',
'#include "a.h"',
'int a = 0;',
'void hello_A() {',
' a++;',
' printf("Hello A, %d\\n", a);',
'}',
])
self.ScratchFile('b.cc', [
'#include <stdio.h>',
'#include "a.h"',
'#include "b.h"',
'void hello_B() {',
' hello_A();',
' printf("Hello B\\n");',
'}',
])
header_temp = [
'#ifndef %{name}_H',
'#define %{name}_H',
'',
'#if NO_DLLEXPORT',
' #define DLLEXPORT',
'#elif COMPILING_%{name}_DLL',
' #define DLLEXPORT __declspec(dllexport)',
'#else',
' #define DLLEXPORT __declspec(dllimport)',
'#endif',
'',
'DLLEXPORT void hello_%{name}();',
'',
'#endif',
]
self.ScratchFile('a.h',
[line.replace('%{name}', 'A') for line in header_temp])
self.ScratchFile('b.h',
[line.replace('%{name}', 'B') for line in header_temp])
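    # Note (added for clarity): the header template expands DLLEXPORT to
    # __declspec(dllexport) while compiling the owning DLL, to
    # __declspec(dllimport) for consumers, and to nothing when NO_DLLEXPORT
    # is defined; this is what the copts in the BUILD file above toggle.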
c_cc_content = [
'#include <stdio.h>',
'#include "a.h"',
'#include "b.h"',
'',
'void hello_C() {',
' hello_A();',
' hello_B();',
' printf("Hello C\\n");',
'}',
'',
'int main() {',
' hello_C();',
' return 0;',
'}',
]
self.ScratchFile('c.cc', c_cc_content)
self.ScratchFile('lib/BUILD', [
'cc_library(',
' name = "A",',
' srcs = ["dummy.cc"],',
' features = ["windows_export_all_symbols"],',
' visibility = ["//visibility:public"],',
')',
])
self.ScratchFile('lib/dummy.cc', ['void dummy() {}'])
self.ScratchFile('main/main.cc', c_cc_content)
def getBazelInfo(self, info_key):
_, stdout, _ = self.RunBazel(['info', info_key])
return stdout[0]
def testBuildDynamicLibraryWithUserExportedSymbol(self):
self.createProjectFiles()
bazel_bin = self.getBazelInfo('bazel-bin')
    # //:A exports symbols by itself using __declspec(dllexport), so it doesn't
# need Bazel to export symbols using DEF file.
self.RunBazel(['build', '//:A', '--output_groups=dynamic_library'])
# TODO(pcloudy): change suffixes to .lib and .dll after making DLL
# extensions correct on Windows.
import_library = os.path.join(bazel_bin, 'A.if.lib')
shared_library = os.path.join(bazel_bin, 'A_0.dll')
empty_def_file = os.path.join(bazel_bin, 'A.gen.empty.def')
self.assertTrue(os.path.exists(import_library))
self.assertTrue(os.path.exists(shared_library))
# An empty DEF file should be generated for //:A
self.assertTrue(os.path.exists(empty_def_file))
def testBuildDynamicLibraryWithExportSymbolFeature(self):
self.createProjectFiles()
bazel_bin = self.getBazelInfo('bazel-bin')
    # //:B doesn't export symbols by itself, so it needs Bazel to export symbols
# using DEF file.
self.RunBazel(['build', '//:B', '--output_groups=dynamic_library'])
# TODO(pcloudy): change suffixes to .lib and .dll after making DLL
# extensions correct on Windows.
import_library = os.path.join(bazel_bin, 'B.if.lib')
shared_library = os.path.join(bazel_bin, 'B_0.dll')
def_file = os.path.join(bazel_bin, 'B.gen.def')
self.assertTrue(os.path.exists(import_library))
self.assertTrue(os.path.exists(shared_library))
# DEF file should be generated for //:B
self.assertTrue(os.path.exists(def_file))
# Test build //:B if windows_export_all_symbols feature is disabled by
# no_windows_export_all_symbols.
self.RunBazel([
'build',
'//:B',
'--output_groups=dynamic_library',
'--features=no_windows_export_all_symbols',
])
import_library = os.path.join(bazel_bin, 'B.if.lib')
shared_library = os.path.join(bazel_bin, 'B_0.dll')
empty_def_file = os.path.join(bazel_bin, 'B.gen.empty.def')
self.assertTrue(os.path.exists(import_library))
self.assertTrue(os.path.exists(shared_library))
# An empty DEF file should be generated for //:B
self.assertTrue(os.path.exists(empty_def_file))
self.AssertFileContentNotContains(empty_def_file, 'hello_B')
def testBuildCcBinaryWithDependenciesDynamicallyLinked(self):
self.createProjectFiles()
bazel_bin = self.getBazelInfo('bazel-bin')
    # Since linkstatic=0 is specified for //:C, its dependencies should be
# dynamically linked.
self.RunBazel(['build', '//:C'])
    # TODO(pcloudy): change suffixes to .lib and .dll after making DLL
    # extensions correct on Windows.
# a_import_library
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.if.lib')))
# a_shared_library
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A_0.dll')))
# a_def_file
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A.gen.empty.def')))
# b_import_library
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.if.lib')))
# b_shared_library
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B_0.dll')))
# b_def_file
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'B.gen.def')))
# c_exe
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'C.exe')))
def testBuildCcBinaryFromDifferentPackage(self):
self.createProjectFiles()
self.ScratchFile('main/BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["//:B"],',
' linkstatic = 0,',
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
self.RunBazel(['build', '//main:main'])
# Test if A.dll and B.dll are copied to the directory of main.exe
main_bin = os.path.join(bazel_bin, 'main/main.exe')
self.assertTrue(os.path.exists(main_bin))
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/A_0.dll')))
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/B_0.dll')))
# Run the binary to see if it runs successfully
_, stdout, _ = self.RunProgram([main_bin])
self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'], stdout)
def testBuildCcBinaryDependsOnConflictDLLs(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["//:B", "//lib:A"],', # Transitively depends on //:A
' linkstatic = 0,',
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
# //main:main depends on both //lib:A and //:A
self.RunBazel(['build', '//main:main'])
# Run the binary to see if it runs successfully
main_bin = os.path.join(bazel_bin, 'main/main.exe')
_, stdout, _ = self.RunProgram([main_bin])
self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'], stdout)
# There are 2 A_{hash}.dll since //main:main depends on both //lib:A and
# //:A
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'A_*.dll'))), 2)
# There is only 1 B_{hash}.dll
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'B_*.dll'))), 1)
def testBuildDifferentCcBinariesDependOnConflictDLLs(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["//:B"],', # Transitively depends on //:A
' linkstatic = 0,',
')',
'',
'cc_binary(',
' name = "other_main",',
' srcs = ["other_main.cc"],',
' deps = ["//lib:A"],',
' linkstatic = 0,',
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
self.ScratchFile('main/other_main.cc', ['int main() {return 0;}'])
# Building //main:main should succeed
self.RunBazel(
['build', '//main:main', '--incompatible_avoid_conflict_dlls']
)
main_bin = os.path.join(bazel_bin, 'main/main.exe')
# Run the main_bin binary to see if it runs successfully
_, stdout, _ = self.RunProgram([main_bin])
self.assertEqual(['Hello A, 1', 'Hello A, 2', 'Hello B', 'Hello C'], stdout)
# There is only 1 A_{hash}.dll since //main:main depends transitively on
# //:A
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'A_*.dll'))), 1)
# There is only 1 B_{hash}.dll
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'B_*.dll'))), 1)
# Building //main:other_main should succeed
self.RunBazel([
'build',
'//main:main',
'//main:other_main',
'--incompatible_avoid_conflict_dlls',
])
other_main_bin = os.path.join(bazel_bin, 'main/other_main.exe')
# Run the other_main_bin binary to see if it runs successfully
self.RunProgram([other_main_bin])
# There are 2 A_{hash}.dll since //main:main depends on //:A
# and //main:other_main depends on //lib:A
self.assertEqual(
len(glob.glob(os.path.join(bazel_bin, 'main', 'A_*.dll'))), 2)
def testDLLIsCopiedFromExternalRepo(self):
self.ScratchFile('ext_repo/WORKSPACE')
self.ScratchFile('ext_repo/BUILD', [
'cc_library(',
' name = "A",',
' srcs = ["a.cc"],',
' features = ["windows_export_all_symbols"],',
' visibility = ["//visibility:public"],',
')',
])
self.ScratchFile('ext_repo/a.cc', [
'#include <stdio.h>',
'void hello_A() {',
' printf("Hello A\\n");',
'}',
])
self.ScratchFile('WORKSPACE', [
'local_repository(',
' name = "ext_repo",',
' path = "ext_repo",',
')',
])
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
' deps = ["@ext_repo//:A"],',
' linkstatic = 0,',
')',
])
self.ScratchFile('main.cc', [
'extern void hello_A();',
'',
'int main() {',
' hello_A();',
' return 0;',
'}',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel(['build', '//:main', '-s'])
self.AssertExitCode(exit_code, 0, stderr)
# Test if A.dll is copied to the directory of main.exe
main_bin = os.path.join(bazel_bin, 'main.exe')
self.assertTrue(os.path.exists(main_bin))
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'A_9324b6d0.dll')))
# Run the binary to see if it runs successfully
_, stdout, _ = self.RunProgram([main_bin])
self.assertEqual(['Hello A'], stdout)
def testDynamicLinkingMSVCRT(self):
self.createProjectFiles()
bazel_output = self.getBazelInfo('output_path')
# By default, it should link to msvcrt dynamically.
exit_code, _, stderr = self.RunBazel(
['build', '//:A', '--output_groups=dynamic_library', '-s'])
compile_params = os.path.join(
bazel_output, 'x64_windows-fastbuild/bin/_objs/A/a.obj.params'
)
link_params = os.path.join(
bazel_output, 'x64_windows-fastbuild/bin/A_0.dll-2.params'
)
self.AssertExitCode(exit_code, 0, stderr)
self.AssertFileContentContains(compile_params, '/MD')
self.AssertFileContentContains(link_params, '/DEFAULTLIB:msvcrt.lib')
self.AssertFileContentNotContains(compile_params, '/MT')
self.AssertFileContentNotContains(link_params, '/DEFAULTLIB:libcmt.lib')
# Test build in debug mode.
exit_code, _, stderr = self.RunBazel(
['build', '-c', 'dbg', '//:A', '--output_groups=dynamic_library', '-s'])
compile_params = os.path.join(
bazel_output, 'x64_windows-dbg/bin/_objs/A/a.obj.params'
)
link_params = os.path.join(
bazel_output, 'x64_windows-dbg/bin/A_0.dll-2.params'
)
self.AssertExitCode(exit_code, 0, stderr)
self.AssertFileContentContains(compile_params, '/MDd')
self.AssertFileContentContains(link_params, '/DEFAULTLIB:msvcrtd.lib')
self.AssertFileContentNotContains(compile_params, '/MTd')
self.AssertFileContentNotContains(link_params, '/DEFAULTLIB:libcmtd.lib')
def testStaticLinkingMSVCRT(self):
self.createProjectFiles()
bazel_output = self.getBazelInfo('output_path')
# With static_link_msvcrt feature, it should link to msvcrt statically.
exit_code, _, stderr = self.RunBazel([
'build', '//:A', '--output_groups=dynamic_library',
'--features=static_link_msvcrt', '-s'
])
compile_params = os.path.join(
bazel_output, 'x64_windows-fastbuild/bin/_objs/A/a.obj.params'
)
link_params = os.path.join(
bazel_output, 'x64_windows-fastbuild/bin/A_0.dll-2.params'
)
self.AssertExitCode(exit_code, 0, stderr)
self.AssertFileContentNotContains(compile_params, '/MD')
self.AssertFileContentNotContains(link_params, '/DEFAULTLIB:msvcrt.lib')
self.AssertFileContentContains(compile_params, '/MT')
self.AssertFileContentContains(link_params, '/DEFAULTLIB:libcmt.lib')
# Test build in debug mode.
exit_code, _, stderr = self.RunBazel([
'build', '-c', 'dbg', '//:A', '--output_groups=dynamic_library',
'--features=static_link_msvcrt', '-s'
])
compile_params = os.path.join(
bazel_output, 'x64_windows-dbg/bin/_objs/A/a.obj.params'
)
link_params = os.path.join(
bazel_output, 'x64_windows-dbg/bin/A_0.dll-2.params'
)
self.AssertExitCode(exit_code, 0, stderr)
self.AssertFileContentNotContains(compile_params, '/MDd')
self.AssertFileContentNotContains(link_params, '/DEFAULTLIB:msvcrtd.lib')
self.AssertFileContentContains(compile_params, '/MTd')
self.AssertFileContentContains(link_params, '/DEFAULTLIB:libcmtd.lib')
def testBuildSharedLibraryFromCcBinaryWithStaticLink(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main.dll",',
' srcs = ["main.cc"],',
' deps = ["//:B"],', # Transitively depends on //:A
' linkstatic = 1,',
' linkshared = 1,',
' features=["windows_export_all_symbols"]',
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel([
'build', '//main:main.dll',
'--output_groups=default,runtime_dynamic_libraries,interface_library'
])
self.AssertExitCode(exit_code, 0, stderr)
main_library = os.path.join(bazel_bin, 'main/main.dll')
main_interface = os.path.join(bazel_bin, 'main/main.dll.if.lib')
def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def')
self.assertTrue(os.path.exists(main_library))
self.assertTrue(os.path.exists(main_interface))
self.assertTrue(os.path.exists(def_file))
# A.dll and B.dll should not be copied.
self.assertFalse(os.path.exists(os.path.join(bazel_bin, 'main/A.dll')))
self.assertFalse(os.path.exists(os.path.join(bazel_bin, 'main/B.dll')))
self.AssertFileContentContains(def_file, 'hello_A')
self.AssertFileContentContains(def_file, 'hello_B')
self.AssertFileContentContains(def_file, 'hello_C')
def testBuildSharedLibraryFromCcBinaryWithDynamicLink(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main.dll",',
' srcs = ["main.cc"],',
' deps = ["//:B"],', # Transitively depends on //:A
' linkstatic = 0,',
' linkshared = 1,',
' features=["windows_export_all_symbols"]',
')',
'',
'genrule(',
' name = "renamed_main",',
' srcs = [":main.dll"],',
' outs = ["main_renamed.dll"],',
' cmd = "cp $< $@",',
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel([
'build', '//main:main.dll',
'--output_groups=default,runtime_dynamic_libraries,interface_library'
])
self.AssertExitCode(exit_code, 0, stderr)
main_library = os.path.join(bazel_bin, 'main/main.dll')
main_interface = os.path.join(bazel_bin, 'main/main.dll.if.lib')
def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def')
self.assertTrue(os.path.exists(main_library))
self.assertTrue(os.path.exists(main_interface))
self.assertTrue(os.path.exists(def_file))
# A.dll and B.dll should be built and copied because they belong to
# runtime_dynamic_libraries output group.
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/A_0.dll')))
self.assertTrue(os.path.exists(os.path.join(bazel_bin, 'main/B_0.dll')))
# hello_A and hello_B should not be exported.
self.AssertFileContentNotContains(def_file, 'hello_A')
self.AssertFileContentNotContains(def_file, 'hello_B')
self.AssertFileContentContains(def_file, 'hello_C')
# The copy should succeed since //main:main.dll is only supposed to refer to
# main.dll, A.dll and B.dll should be in a separate output group.
exit_code, _, stderr = self.RunBazel(['build', '//main:renamed_main'])
self.AssertExitCode(exit_code, 0, stderr)
def testGetDefFileOfSharedLibraryFromCcBinary(self):
self.createProjectFiles()
self.ScratchFile(
'main/BUILD',
[
'cc_binary(',
' name = "main.dll",',
' srcs = ["main.cc"],',
' deps = ["//:B"],', # Transitively depends on //:A
' linkstatic = 1,',
' linkshared = 1,',
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel(
['build', '//main:main.dll', '--output_groups=def_file'])
self.AssertExitCode(exit_code, 0, stderr)
# Although windows_export_all_symbols is not specified for this target,
# we should still be able to get the DEF file by def_file output group.
def_file = os.path.join(bazel_bin, 'main/main.dll.gen.def')
self.assertTrue(os.path.exists(def_file))
self.AssertFileContentContains(def_file, 'hello_A')
self.AssertFileContentContains(def_file, 'hello_B')
self.AssertFileContentContains(def_file, 'hello_C')
def testBuildSharedLibraryWithoutAnySymbolExported(self):
self.createProjectFiles()
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "A.dll",',
' srcs = ["a.cc", "a.h"],',
' copts = ["/DNO_DLLEXPORT"],',
' linkshared = 1,'
')',
])
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel(['build', '//:A.dll'])
self.AssertExitCode(exit_code, 0, stderr)
# Although windows_export_all_symbols is not specified for this target,
# we should still be able to build a DLL without any symbol exported.
empty_def_file = os.path.join(bazel_bin, 'A.dll.gen.empty.def')
self.assertTrue(os.path.exists(empty_def_file))
self.AssertFileContentNotContains(empty_def_file, 'hello_A')
def testUsingDefFileGeneratedFromCcLibrary(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('lib_A.cc', ['void hello_A() {}'])
self.ScratchFile('lib_B.cc', ['void hello_B() {}'])
self.ScratchFile('BUILD', [
'cc_library(',
' name = "lib_A",',
' srcs = ["lib_A.cc"],',
')',
'',
'cc_library(',
' name = "lib_B",',
' srcs = ["lib_B.cc"],',
' deps = [":lib_A"]',
')',
'',
'filegroup(',
' name = "lib_B_symbols",',
' srcs = [":lib_B"],',
' output_group = "def_file",',
')',
'',
'cc_binary(',
' name = "lib.dll",',
' deps = [":lib_B"],',
' win_def_file = ":lib_B_symbols",',
' linkshared = 1,',
')',
])
# Test specifying DEF file in cc_binary
bazel_bin = self.getBazelInfo('bazel-bin')
exit_code, _, stderr = self.RunBazel(['build', '//:lib.dll', '-s'])
self.AssertExitCode(exit_code, 0, stderr)
def_file = bazel_bin + '/lib_B.gen.def'
self.assertTrue(os.path.exists(def_file))
# hello_A should not be exported
self.AssertFileContentNotContains(def_file, 'hello_A')
# hello_B should be exported
self.AssertFileContentContains(def_file, 'hello_B')
def testWinDefFileAttribute(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('lib.cc', ['void hello() {}'])
self.ScratchFile('my_lib.def', [
'EXPORTS',
' ?hello@@YAXXZ',
])
self.ScratchFile('BUILD', [
'cc_library(',
' name = "lib",',
' srcs = ["lib.cc"],',
' win_def_file = "my_lib.def",',
')',
'',
'cc_binary(',
' name = "lib_dy.dll",',
' srcs = ["lib.cc"],',
' win_def_file = "my_lib.def",',
' linkshared = 1,',
')',
])
# Test exporting symbols using custom DEF file in cc_library.
    # Auto-generation of the DEF file should be disabled when a custom DEF file is specified.
    # DLL renaming should be disabled when a custom DEF file is specified.
exit_code, _, stderr = self.RunBazel([
'build', '//:lib', '-s', '--output_groups=dynamic_library',
'--features=windows_export_all_symbols'
])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = self.getBazelInfo('bazel-bin')
lib_if = os.path.join(bazel_bin, 'lib.if.lib')
lib_def = os.path.join(bazel_bin, 'lib.gen.def')
lib_dll = os.path.join(bazel_bin, 'lib.dll')
self.assertTrue(os.path.exists(lib_if))
self.assertFalse(os.path.exists(lib_def))
self.assertTrue(os.path.exists(lib_dll))
# Test specifying DEF file in cc_binary
exit_code, _, stderr = self.RunBazel(['build', '//:lib_dy.dll', '-s'])
self.AssertExitCode(exit_code, 0, stderr)
filepath = bazel_bin + '/lib_dy.dll-2.params'
with open(filepath, 'r', encoding='latin-1') as param_file:
self.assertIn('/DEF:my_lib.def', param_file.read())
def testCcImportRule(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('A.lib', [])
self.ScratchFile('A.dll', [])
self.ScratchFile('A.if.lib', [])
self.ScratchFile('BUILD', [
'cc_import(',
' name = "a_import",',
' static_library = "A.lib",',
' shared_library = "A.dll",',
' interface_library = "A.if.lib",',
' hdrs = ["a.h"],',
' alwayslink = 1,',
')',
])
exit_code, _, stderr = self.RunBazel([
'build', '//:a_import',
])
self.AssertExitCode(exit_code, 0, stderr)
def testCopyDLLAsSource(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_import(',
' name = "a_import",',
' shared_library = "A.dll",',
' visibility = ["//:__subpackages__"],',
')',
'',
'filegroup(',
' name = "bin_src",',
' srcs = ["bin.cc"],',
' visibility = ["//:__subpackages__"],',
')',
'',
'cc_binary(',
' name = "bin",',
' srcs = ["//:bin_src"],',
' deps = ["//:a_import"],',
')',
])
self.ScratchFile('package/BUILD', [
'cc_binary(',
' name = "dir1/dir2/bin",',
' srcs = ["//:bin_src"],',
' deps = ["//:a_import"],',
')',
])
self.ScratchFile('A.dll')
self.ScratchFile('bin.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build',
'//:bin',
'//package:dir1/dir2/bin',
])
self.AssertExitCode(exit_code, 0, stderr)
bazel_bin = self.getBazelInfo('bazel-bin')
# Even though A.dll is in the same package as bin.exe, it still should
# be copied to the output directory of bin.exe.
a_dll = os.path.join(bazel_bin, 'A.dll')
self.assertTrue(os.path.exists(a_dll))
nested_a_dll = os.path.join(bazel_bin, 'package/dir1/dir2/A.dll')
self.assertTrue(os.path.exists(nested_a_dll))
def testCppErrorShouldBeVisible(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "bad",',
' srcs = ["bad.cc"],',
')',
])
self.ScratchFile('bad.cc', [
'int main(int argc, char** argv) {',
' this_is_an_error();',
'}',
])
exit_code, stdout, stderr = self.RunBazel(
['build', '//:bad'], allow_failure=True
)
self.AssertExitCode(exit_code, 1, stderr)
self.assertIn('this_is_an_error', ''.join(stdout))
def testBuildWithClangClByCompilerFlag(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--compiler=clang-cl',
'--incompatible_enable_cc_toolchain_resolution=false', '//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('clang-cl.exe', ''.join(stderr))
def testBuildWithClangClByToolchainResolution(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE', [
'register_execution_platforms(',
' ":windows_clang"',
')',
'',
'register_toolchains(',
' "@local_config_cc//:cc-toolchain-x64_windows-clang-cl",',
')',
])
self.ScratchFile('BUILD', [
'platform(',
' name = "windows_clang",',
' constraint_values = [',
' "@platforms//cpu:x86_64",',
' "@platforms//os:windows",',
' "@bazel_tools//tools/cpp:clang-cl",',
' ]',
')',
'',
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--incompatible_enable_cc_toolchain_resolution=true',
'//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('clang-cl.exe', ''.join(stderr))
def createSimpleCppWorkspace(self, name):
work_dir = self.ScratchDir(name)
self.ScratchFile(name + '/WORKSPACE', ['workspace(name = "%s")' % name])
self.ScratchFile(
name + '/BUILD',
['cc_library(name = "lib", srcs = ["lib.cc"], hdrs = ["lib.h"])'])
self.ScratchFile(name + '/lib.h', ['void hello();'])
self.ScratchFile(name + '/lib.cc', ['#include "lib.h"', 'void hello() {}'])
return work_dir
# Regression test for https://github.com/bazelbuild/bazel/issues/9172
def testCacheBetweenWorkspaceWithDifferentNames(self):
cache_dir = self.ScratchDir('cache')
dir_a = self.createSimpleCppWorkspace('A')
dir_b = self.createSimpleCppWorkspace('B')
exit_code, _, stderr = self.RunBazel(
['build', '--disk_cache=' + cache_dir, ':lib'], cwd=dir_a)
self.AssertExitCode(exit_code, 0, stderr)
exit_code, _, stderr = self.RunBazel(
['build', '--disk_cache=' + cache_dir, ':lib'], cwd=dir_b)
self.AssertExitCode(exit_code, 0, stderr)
# Regression test for https://github.com/bazelbuild/bazel/issues/9321
def testCcCompileWithTreeArtifactAsSource(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'load(":genccs.bzl", "genccs")',
'',
'genccs(',
' name = "gen_tree",',
')',
'',
'cc_library(',
' name = "main",',
' srcs = [ "gen_tree" ]',
')',
'',
'cc_binary(',
' name = "genccs",',
' srcs = [ "genccs.cpp" ],',
')',
])
self.ScratchFile('genccs.bzl', [
'def _impl(ctx):',
' tree = ctx.actions.declare_directory(ctx.attr.name + ".cc")',
' ctx.actions.run(',
' inputs = [],',
' outputs = [ tree ],',
' arguments = [ tree.path ],',
' progress_message = "Generating cc files into \'%s\'" % tree.path,',
' executable = ctx.executable._tool,',
' )',
'',
' return [ DefaultInfo(files = depset([ tree ])) ]',
'',
'genccs = rule(',
' implementation = _impl,',
' attrs = {',
' "_tool": attr.label(',
' executable = True,',
' cfg = "exec",',
' allow_files = True,',
' default = Label("//:genccs"),',
' )',
' }',
')',
])
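    # Note (added for clarity): declare_directory creates a tree artifact, a
    # directory whose contents are produced at execution time by the genccs
    # tool; cc_library then compiles whatever sources appear inside it.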
self.ScratchFile('genccs.cpp', [
'#include <fstream>',
'#include <Windows.h>',
'using namespace std;',
'',
'int main (int argc, char *argv[]) {',
' CreateDirectory(argv[1], NULL);',
' ofstream myfile;',
' myfile.open(string(argv[1]) + string("/foo.cpp"));',
' myfile << "int main() { return 42; }";',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel(['build', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
def testBuild32BitCppBinaryWithMsvcCL(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--cpu=x64_x86_windows',
'--noincompatible_enable_cc_toolchain_resolution', '//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('x86\\cl.exe', '\n'.join(stderr))
def testBuildArmCppBinaryWithMsvcCL(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--cpu=x64_arm_windows',
'--noincompatible_enable_cc_toolchain_resolution', '//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('arm\\cl.exe', '\n'.join(stderr))
def testBuildArm64CppBinaryWithMsvcCLAndCpuX64Arm64Windows(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--cpu=x64_arm64_windows',
'--noincompatible_enable_cc_toolchain_resolution', '//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('arm64\\cl.exe', '\n'.join(stderr))
def testBuildCppBinaryWithMingwGCC(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
# Test build without debug and optimize modes.
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--compiler=mingw-gcc',
'--noincompatible_enable_cc_toolchain_resolution', '//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('mingw64\\bin\\gcc', '\n'.join(stderr))
self.assertNotIn('-g -Og', ''.join(stderr))
self.assertNotIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.assertNotIn('-Wl,--gc-sections', ''.join(stderr))
# Test build in debug mode.
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--compiler=mingw-gcc',
'--noincompatible_enable_cc_toolchain_resolution', '-c', 'dbg',
'//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('mingw64\\bin\\gcc', '\n'.join(stderr))
self.assertIn('-g -Og', ''.join(stderr))
self.assertNotIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.assertNotIn('-Wl,--gc-sections', ''.join(stderr))
# Test build in optimize mode.
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--compiler=mingw-gcc',
'--noincompatible_enable_cc_toolchain_resolution', '-c', 'opt',
'//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('mingw64\\bin\\gcc', '\n'.join(stderr))
self.assertNotIn('-g -Og', ''.join(stderr))
self.assertIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.assertIn('-Wl,--gc-sections', ''.join(stderr))
def testBuildCppBinaryWithMsysGCC(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
bazel_output = self.getBazelInfo('output_path')
paramfile = 'x64_windows-%s/bin/main.exe-2.params'
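    # MinGW-style builds pass linker flags through a params file rather than
    # the command line, so the -Wl,--gc-sections checks below read the
    # generated .params file instead of stderr.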
# Test build without debug and optimize modes.
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--compiler=msys-gcc',
'--noincompatible_enable_cc_toolchain_resolution', '//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('usr\\bin\\gcc', '\n'.join(stderr))
self.assertNotIn('-g -Og', ''.join(stderr))
self.assertNotIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.AssertFileContentNotContains(
os.path.join(bazel_output, paramfile % 'fastbuild'),
'-Wl,--gc-sections')
# Test build in debug mode.
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--compiler=msys-gcc',
'--noincompatible_enable_cc_toolchain_resolution', '-c', 'dbg',
'//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('usr\\bin\\gcc', '\n'.join(stderr))
self.assertIn('-g -Og', ''.join(stderr))
self.assertNotIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.AssertFileContentNotContains(
os.path.join(bazel_output, paramfile % 'dbg'), '-Wl,--gc-sections')
# Test build in optimize mode.
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--compiler=msys-gcc',
'--noincompatible_enable_cc_toolchain_resolution', '-c', 'opt',
'//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('usr\\bin\\gcc', '\n'.join(stderr))
self.assertNotIn('-g -Og', ''.join(stderr))
self.assertIn('-g0 -O3 -DNDEBUG -ffunction-sections -fdata-sections',
''.join(stderr))
self.AssertFileContentContains(
os.path.join(bazel_output, paramfile % 'opt'), '-Wl,--gc-sections')
def testBuildArm64CppBinaryWithMsvcCLAndCpuArm64Windows(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile('BUILD', [
'cc_binary(',
' name = "main",',
' srcs = ["main.cc"],',
')',
])
self.ScratchFile('main.cc', [
'int main() {',
' return 0;',
'}',
])
exit_code, _, stderr = self.RunBazel([
'build', '-s', '--cpu=arm64_windows',
'--noincompatible_enable_cc_toolchain_resolution', '//:main'
])
self.AssertExitCode(exit_code, 0, stderr)
self.assertIn('arm64\\cl.exe', ''.join(stderr))
def testLongCompileCommandLines(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'BUILD',
[
'cc_binary(',
' name = "long",',
' srcs = ["long.cc"],',
# Creates a command that is longer than 32767 characters, which is
# the maximum length of a command line on Windows.
' includes = [str(i) + 450 * "a" for i in range(120)],',
')',
],
)
self.ScratchFile('long.cc', ['int main() { return 0; }'])
exit_code, _, stderr = self.RunBazel(
['build', '--verbose_failures', '//:long']
)
self.AssertExitCode(exit_code, 0, stderr)
def testCompilerSettingMsvc(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'BUILD',
[
'config_setting(',
' name = "msvc_compiler",',
(
' flag_values = {"@bazel_tools//tools/cpp:compiler":'
' "msvc-cl"},'
),
')',
'cc_binary(',
' name = "main",',
' srcs = select({":msvc_compiler": ["main.cc"]}),',
')',
],
)
self.ScratchFile('main.cc', ['int main() { return 0; }'])
exit_code, _, stderr = self.RunBazel(['build', '//:main'])
self.AssertExitCode(exit_code, 0, stderr)
def testCompilerSettingClangCl(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'BUILD',
[
'platform(',
' name = "x64_windows-clang-cl",',
' constraint_values = [',
' "@platforms//cpu:x86_64",',
' "@platforms//os:windows",',
' "@bazel_tools//tools/cpp:clang-cl",',
' ],',
')',
'config_setting(',
' name = "clang_cl_compiler",',
(
' flag_values = {"@bazel_tools//tools/cpp:compiler":'
' "clang-cl"},'
),
')',
'cc_binary(',
' name = "main",',
' srcs = select({":clang_cl_compiler": ["main.cc"]}),',
')',
],
)
self.ScratchFile('main.cc', ['int main() { return 0; }'])
exit_code, _, stderr = self.RunBazel([
'build',
'--incompatible_enable_cc_toolchain_resolution',
'--extra_toolchains=@local_config_cc//:cc-toolchain-x64_windows-clang-cl',
'--extra_execution_platforms=//:x64_windows-clang-cl',
'//:main',
])
self.AssertExitCode(exit_code, 0, stderr)
def testCompilerSettingMingwGcc(self):
self.CreateWorkspaceWithDefaultRepos('WORKSPACE')
self.ScratchFile(
'BUILD',
[
'platform(',
' name = "x64_windows-mingw-gcc",',
' constraint_values = [',
' "@platforms//cpu:x86_64",',
' "@platforms//os:windows",',
' "@bazel_tools//tools/cpp:mingw",',
' ],',
')',
'config_setting(',
' name = "mingw_gcc_compiler",',
(
' flag_values = {"@bazel_tools//tools/cpp:compiler":'
' "mingw-gcc"},'
),
')',
'cc_binary(',
' name = "main",',
' srcs = select({":mingw_gcc_compiler": ["main.cc"]}),',
')',
],
)
self.ScratchFile('main.cc', ['int main() { return 0; }'])
exit_code, _, stderr = self.RunBazel([
'build',
'--incompatible_enable_cc_toolchain_resolution',
'--extra_toolchains=@local_config_cc//:cc-toolchain-x64_windows_mingw',
'--extra_execution_platforms=//:x64_windows-mingw-gcc',
'//:main',
])
self.AssertExitCode(exit_code, 0, stderr)
if __name__ == '__main__':
unittest.main()
# ===== /examples/finished/gcp.py (repo: scipopt/PySCIPOpt, license: MIT) =====
##@file gcp.py
#@brief model for the graph coloring problem
"""
Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict
def gcp(V,E,K):
"""gcp -- model for minimizing the number of colors in a graph
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
- K: upper bound on the number of colors
Returns a model, ready to be solved.
"""
model = Model("gcp")
x,y = {},{}
for k in range(K):
y[k] = model.addVar(vtype="B", name="y(%s)"%k)
for i in V:
x[i,k] = model.addVar(vtype="B", name="x(%s,%s)"%(i,k))
for i in V:
model.addCons(quicksum(x[i,k] for k in range(K)) == 1, "AssignColor(%s)"%i)
for (i,j) in E:
for k in range(K):
model.addCons(x[i,k] + x[j,k] <= y[k], "NotSameColor(%s,%s,%s)"%(i,j,k))
model.setObjective(quicksum(y[k] for k in range(K)), "minimize")
model.data = x
return model
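# A minimal usage sketch (hypothetical instance, not part of the original
# module): a triangle graph needs exactly three colors.  Wrapped in a helper
# so importing the module stays side-effect free.
def _example_gcp_triangle():
    V = [1, 2, 3]
    E = [(1, 2), (2, 3), (1, 3)]
    model = gcp(V, E, K=3)
    model.hideOutput()  # silent mode, as in the benchmark below
    model.optimize()
    return model.getObjVal()  # expected objective value: 3.0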
def gcp_low(V,E,K):
"""gcp_low -- model for minimizing the number of colors in a graph
(use colors with low indices)
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
    - K: upper bound on the number of colors
Returns a model, ready to be solved.
"""
model = Model("gcp - low colors")
x,y = {},{}
for k in range(K):
y[k] = model.addVar(vtype="B", name="y(%s)"%k)
for i in V:
x[i,k] = model.addVar(vtype="B", name="x(%s,%s)"%(i,k))
for i in V:
model.addCons(quicksum(x[i,k] for k in range(K)) == 1, "AssignColor(%s)" % i)
for (i,j) in E:
for k in range(K):
model.addCons(x[i,k] + x[j,k] <= y[k], "NotSameColor(%s,%s,%s)"%(i,j,k))
for k in range(K-1):
model.addCons(y[k] >= y[k+1], "LowColor(%s)"%k)
model.setObjective(quicksum(y[k] for k in range(K)), "minimize")
model.data = x
return model
def gcp_sos(V,E,K):
"""gcp_sos -- model for minimizing the number of colors in a graph
(use sos type 1 constraints)
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
    - K: upper bound on the number of colors
Returns a model, ready to be solved.
"""
model = Model("gcp - sos constraints")
x,y = {},{}
for k in range(K):
y[k] = model.addVar(vtype="B", name="y(%s)"%k)
for i in V:
x[i,k] = model.addVar(vtype="B", name="x(%s,%s)"%(i,k))
for i in V:
model.addCons(quicksum(x[i,k] for k in range(K)) == 1, "AssignColor(%s)" % i)
model.addConsSOS1([x[i,k] for k in range(K)])
for (i,j) in E:
for k in range(K):
model.addCons(x[i,k] + x[j,k] <= y[k], "NotSameColor(%s,%s,%s)"%(i,j,k))
for k in range(K-1):
model.addCons(y[k] >= y[k+1], "LowColor(%s)"%k)
model.setObjective(quicksum(y[k] for k in range(K)), "minimize")
model.data = x
return model
import random
def make_data(n,prob):
"""make_data: prepare data for a random graph
Parameters:
- n: number of vertices
- prob: probability of existence of an edge, for each pair of vertices
    Returns a tuple with a list of vertices and a list of edges.
"""
V = range(1,n+1)
E = [(i,j) for i in V for j in V if i < j and random.random() < prob]
return V,E
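# For instance, make_data(4, 0.5) returns V = range(1, 5) and a random subset
# of the six candidate edges, each kept independently with probability 0.5.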
if __name__ == "__main__":
random.seed(1)
V,E = make_data(20,.5)
    K = 10 # upper bound on the number of colors
print("n,K=",len(V),K)
model = gcp_low(V,E,K)
model.optimize()
print("Optimal value:", model.getObjVal())
x = model.data
color = {}
for i in V:
for k in range(K):
if model.getVal(x[i,k]) > 0.5:
color[i] = k
print("colors:",color)
    import time, sys
    models = [gcp, gcp_low, gcp_sos]
    cpu = {}
    N = 25  # number of observations
    print("#size\t%s\t%s\t%s" % tuple(m.__name__ for m in models))
    for size in range(250):
        print(size, end="\t")
        K = size
        for prob in [0.1]:
            for m in models:
                name = m.__name__
                if (name, size-1, prob) not in cpu or cpu[name, size-1, prob] < 100:
                    cpu[name, size, prob] = 0.
                    for t in range(N):
                        tinit = time.perf_counter()  # time.clock() was removed in Python 3.8
                        random.seed(t)
                        V, E = make_data(size, prob)
                        model = m(V, E, K)
                        model.hideOutput()  # silent mode
                        model.optimize()
                        assert model.getObjVal() >= 0 and model.getObjVal() <= K
                        tend = time.perf_counter()
                        cpu[name, size, prob] += tend - tinit
                    cpu[name, size, prob] /= N
                else:
                    cpu[name, size, prob] = "-"
                print(cpu[name, size, prob], end="\t")
        print()
        sys.stdout.flush()
# ===== /PyTorch/nlp/bert/pack_pretraining_data_pytorch.py (repo: HabanaAI/Model-References, license: Apache-2.0) =====
###############################################################################
# Copyright (c) 2021, Habana Labs Ltd. All rights reserved.
###############################################################################
import os
import time
import argparse
import random
import h5py
from tqdm import tqdm, trange
import os
import numpy as np
import torch
from torch.utils.data import Dataset
from scipy import optimize
from itertools import repeat, chain
from functools import lru_cache, reduce
from collections import defaultdict, OrderedDict
from concurrent.futures import ProcessPoolExecutor
import gc
import json
@lru_cache(maxsize=None)
def packing_strategies(start, previous, target, depth):
gap = target - start
strategies = []
# Complete the packing with exactly 1 number
if depth == 1:
if gap >= previous:
strategies.append([gap])
# Complete the sample in "depth" steps, recursively
else:
for new in range(previous, gap + 1):
new_gap = target - start - new
if new_gap == 0:
strategies.append([new])
else:
options = packing_strategies(start + new, new, target, depth - 1)
for option in options:
if len(option) > 0:
strategies.append([new] + option)
return strategies
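# Hand-checked illustration: packing at most two sequences into a target
# length of 4 enumerates the non-decreasing partitions with up to two parts:
#   packing_strategies(0, 1, 4, 2)  ->  [[1, 3], [2, 2], [4]]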
def create_json_metadata(
        sequences_dropped,
        num_strategies_utilized,
        new_number_of_samples,
        original_number_of_samples,
        compression_ratio,
        expected_speedup,
        theoretical_speedup,
        avg_sequence_per_sample,
        padding_tokens_packed_dataset,
        padding_tokens_original_dataset,
        packing_efficiency,
        top_8_strategies):
    # convert to a JSON-serializable format
    top_8_strategies = top_8_strategies.tolist()
    packing_efficiency = float(packing_efficiency)
    padding_tokens_original_dataset = int(padding_tokens_original_dataset)
    padding_tokens_packed_dataset = float(padding_tokens_packed_dataset)
    avg_sequence_per_sample = float(avg_sequence_per_sample)
    theoretical_speedup = float(theoretical_speedup)
    json_object = json.dumps(
        {'number_of_sequences_dropped': sequences_dropped,
         'number_of_strategies_utilized': num_strategies_utilized,
         'new_number_of_samples': new_number_of_samples,
         'original_number_of_samples': original_number_of_samples,
         'compression_ratio': compression_ratio,
         'expected_speed_up': expected_speedup,
         'theoretical_speed_up': theoretical_speedup,
         'avg_seq_per_sample': avg_sequence_per_sample,
         'padding_tokens_packed_dataset': padding_tokens_packed_dataset,
         'padding_tokens_original_dataset': padding_tokens_original_dataset,
         'packing_efficiency': packing_efficiency,
         'top_8_strategies': top_8_strategies},
        sort_keys=True, indent=2)
    return json_object
def get_packing_recipe(output_dir, sequence_lengths, max_sequence_length, max_sequences_per_pack=3):
# Histogram of sequence lengths
histogram, bins = np.histogram(sequence_lengths, bins=np.arange(1, max_sequence_length + 2))
print("Begin packing pass".center(80, "_"))
print(f"Unpacked mean sequence length: {sequence_lengths.mean():3.2f}")
# Make sure all strategies are recipes to pack to the correct sequence length
strategy_set = packing_strategies(0, 1, max_sequence_length, max_sequences_per_pack)
for strategy in strategy_set:
assert(sum(strategy) == max_sequence_length)
num_strategies = len(strategy_set)
print(f"Found {num_strategies} unique packing strategies.")
# Solve the packing equation A@mixture = histogram
A = np.zeros((max_sequence_length, num_strategies), dtype=np.int32)
for i in range(num_strategies):
strategy = strategy_set[i]
for seq_len in strategy:
A[seq_len - 1, i] += 1
# short sequences are inexpensive to add, so should have low residual weights
# to exactly minimize padding use w0 = np.arange(1, max_sequence_length + 1)
# in practice the difference is negligible, but this converges faster
padding_cutoff = 8
w0 = np.ones([max_sequence_length])
# w0 = np.linspace(1, max_sequence_length+1, max_sequence_length)/max_sequence_length # padding minimization weight
w0[:padding_cutoff] = padding_cutoff / (2 * max_sequence_length)
w0 = np.sqrt(w0)
# Starting values for the padding and the mixture
padding = np.zeros([max_sequence_length], dtype=np.int32)
mixture = np.zeros([num_strategies], dtype=np.int32)
b = histogram + padding
# Pack sequences as best as possible, then increase padding accordingly and repeat
for i in range(0, 20):
print(f"\nIteration: {i}: sequences still to pack: ", b.sum())
start = time.time()
partial_mixture, rnorm = optimize.nnls(np.expand_dims(w0, -1) * A, w0 * b)
print(f"Solving nnls took {time.time() - start:3.2f} seconds.")
print(f"Residual norm: {rnorm:3.5e}")
# Update mixture (round the floating point solution to integers)
partial_mixture = np.where(partial_mixture < 2, np.rint(partial_mixture), np.floor(partial_mixture))
# If partial mixture is empty (due to rounding) we follow the gradient
# this usually happens when the number of examples is small i.e. ~100
if partial_mixture.max() == 0:
grad = A.T @ (b * np.arange(1, max_sequence_length + 1))
k = int(b.sum() // 2) + 1
topk = np.argsort(-grad)[:k]
partial_mixture[topk] += 1
# Update mixture
mixture = mixture + partial_mixture
# Compute the residuals
residual = b - A @ partial_mixture
print(f"Max residual: {abs(residual).max()}")
print(f"Residual on first 8 categories: {np.around(residual[:8], 4)}")
print(f"Residual on last 8 categories: {np.around(residual[-8:], 4)}")
# Add padding based on deficit (negative residual)
partial_padding = np.where(residual < 0, -residual, 0)
print(f"Added {(partial_padding*np.arange(1,max_sequence_length+1)).sum():3.2e} tokens of padding.")
padding = padding + partial_padding
# Update the rhs vector (remaining surplus sequences)
b = histogram + padding - A @ mixture
assert np.all(b >= 0), b
# Done iterating
if b.sum() < 100:
break
# Make sure there is no remainder
unpacked_seqlen = np.arange(1, max_sequence_length + 1)[b > 0]
    # Update the mixture to also cover the unpacked sequences
for l in unpacked_seqlen:
# Get the depth 1 strategy
strategy = sorted([l, max_sequence_length - l])
strategy_index = strategy_set.index(strategy)
mixture[strategy_index] += b[l-1]
b = histogram - A @ mixture
padding = np.where(b < 0, -b, 0)
b = histogram + padding - A @ mixture
assert b.sum() == 0
# Analyze result
print("Done solving for packing order".center(80, "_"))
num_padding_tokens = (np.arange(1, max_sequence_length + 1) * padding).sum()
num_padding_tokens_original = (max_sequence_length - sequence_lengths).sum()
number_of_sequences_dropped = b.sum()
print(f"Number of sequences dropped: {number_of_sequences_dropped}")
number_of_strategies_utilized = np.count_nonzero(mixture)
print(f"Number of strategies utilized: {number_of_strategies_utilized}")
new_number_of_samples = int(mixture.sum())
original_number_of_samples = len(sequence_lengths)
compression = 1 - new_number_of_samples / original_number_of_samples
print(f"New number of samples: {new_number_of_samples:3.2f}, original {original_number_of_samples}. A compression ratio of {compression:3.3f}")
expected_speedup_from_packing = 1 / (1 - compression)
print(f"The expected speed-up from packing: {expected_speedup_from_packing}")
upper_bound = 1.0 / (1 - ((1 - sequence_lengths / max_sequence_length).mean()))
print(f"Theoretical upper bound on speed-up: {upper_bound:3.3f}")
avg_sequences_per_sample = ((A.sum(0) * mixture).sum() - padding.sum()) / new_number_of_samples
print(f"Average sequences/sample {avg_sequences_per_sample:3.5f}")
print(f"Added {num_padding_tokens:3.2e} padding tokens. Original dataset used {num_padding_tokens_original:3.2e} padding tokens")
efficiency = (new_number_of_samples*max_sequence_length - num_padding_tokens)/(new_number_of_samples*max_sequence_length)
print(f"Packing efficiency (fraction of real tokens): {efficiency:3.4f}")
print(f"Top 8 strategies")
topK = np.argsort(-mixture)[:8]
for i in topK:
print(f"Strategy {strategy_set[i]} which is used {int(mixture[i])} times")
print("".center(80, "_"))
# Figure out the slicing that each strategy should use
slicing = np.zeros_like(A)
slicing[:, 1:] = np.cumsum(A * mixture, axis=1)[:, :-1]
slicing = slicing.T
mixture = mixture.astype(np.int64)
norm_path = os.path.normpath(output_dir)
head_tail = os.path.split(norm_path)
metadata_file_name = head_tail[1]
metadata_file_name = metadata_file_name + '_metadata.json'
metadata_file_path = os.path.join(head_tail[0],metadata_file_name)
print(f"Saving metadata to file: {metadata_file_path}")
with open(metadata_file_path,mode='w') as file_handle:
        json_content = create_json_metadata(sequences_dropped=int(number_of_sequences_dropped),
num_strategies_utilized=number_of_strategies_utilized,
new_number_of_samples=new_number_of_samples,
original_number_of_samples=original_number_of_samples,
compression_ratio=compression,
expected_speedup=expected_speedup_from_packing,
theoretical_speedup=upper_bound,
avg_sequence_per_sample=avg_sequences_per_sample,
padding_tokens_original_dataset=num_padding_tokens_original,
padding_tokens_packed_dataset=num_padding_tokens,
packing_efficiency=efficiency,
top_8_strategies=topK)
file_handle.write(json_content)
return strategy_set, mixture, padding, slicing
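# How the returned pieces are used downstream: strategy_set lists the packing
# recipes, mixture[i] says how many packs follow strategy_set[i], padding[l-1]
# is the number of synthetic length-l pad entries to add, and slicing holds
# the per-strategy start offsets into the per-length example lists.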
def slice_examples_mult_strategies_shuffle(examples_by_length, slicing, strategy_set, repeat_counts):
# Divide the work, firstly between the strategies and then into chunks of 50k
strategies_slices = defaultdict(list)
for strategy, slice_offsets, repeat_count in zip(strategy_set, slicing, repeat_counts):
if repeat_count == 0:
continue
        # Slice out the sequences allocated to this strategy, one pack at a time
        subcounts = (min(1, repeat_count - 1 * (i - 1)) for i in range(1, repeat_count + 1))
for part_id, part_count in enumerate(subcounts):
for k, seq_len in enumerate(strategy):
slice_start = int(slice_offsets[seq_len - 1])
slice_end = slice_start + int(part_count)
slice_offsets[seq_len - 1] = slice_end
strategies_slices[str(strategy)+'_'+str(seq_len)].append([slice_start,slice_end])
slices = []
examples_batch = []
slice_offsets=slicing[0]
total_num_samples=[len(examples_by_length[sl]) for sl in examples_by_length.keys()]
    shuffle_samples_ind = np.random.permutation(sum(repeat_counts))
strategies = [[st]*rp for st,rp in zip(strategy_set,repeat_counts)]
strategies = list(chain.from_iterable(strategies))
num_sample_per_slice=4480
counter=0; count_samples=0
    for ind in shuffle_samples_ind:
strategy=strategies[ind]
if len(strategy) == 0:
continue
        # Pull one sequence of each length required by this strategy
counter+=1
examples=[]
for k, seq_len in enumerate(strategy):
count_samples+=1
[slice_start,slice_end]=strategies_slices[str(strategy)+'_'+str(seq_len)].pop()
examples.append(examples_by_length[seq_len][slice_start:slice_end][0])
examples_batch.append(examples)
if counter%num_sample_per_slice==0:
slices.append(examples_batch)
examples_batch=[]
assert sum(total_num_samples)==count_samples, "Possibly not using all samples"
examples_by_length = None
return slices
def parallel_pack_according_to_strategy(args, part_idx, examples):
# Pack the sequences according to the strategy and write them to disk
filename = os.path.join(args.output_dir, "mixed_strategies_part_%d.hdf5"%part_idx)
features = defaultdict(list)
for inst_index, multi_sequence in enumerate(examples):
features_packed = create_multi_sequence_example(multi_sequence, args.max_predictions_per_sequence,
args.max_sequence_length, args.max_sequences_per_pack)
#if features_packed['next_sentence_weights'].sum()>1:
# print(features_packed['next_sentence_weights'],filename)
features["input_ids"].append(features_packed["input_ids"])
features["input_mask"].append(features_packed["input_mask"])
features["segment_ids"].append(features_packed["segment_ids"])
features["positions"].append(features_packed["positions"])
features["masked_lm_positions"].append(features_packed["masked_lm_positions"])
features["masked_lm_ids"].append(features_packed["masked_lm_ids"])
features["next_sentence_positions"].append(features_packed["next_sentence_positions"])
features["next_sentence_labels"].append(features_packed["next_sentence_labels"])
features["next_sentence_weights"].append(features_packed["next_sentence_weights"])
f= h5py.File(filename, 'w')
f.create_dataset("input_ids", data=np.array(features["input_ids"]), dtype='i4', compression='gzip')
f.create_dataset("input_mask", data=np.array(features["input_mask"]), dtype='i4', compression='gzip')
f.create_dataset("segment_ids", data=np.array(features["segment_ids"]), dtype='i1', compression='gzip')
f.create_dataset("positions", data=np.array(features["positions"]), dtype='i4', compression='gzip')
f.create_dataset("masked_lm_positions", data=np.array(features["masked_lm_positions"]), dtype='i4', compression='gzip')
f.create_dataset("masked_lm_ids", data=np.array(features["masked_lm_ids"]), dtype='i4', compression='gzip')
f.create_dataset("next_sentence_positions", data=np.array(features["next_sentence_positions"]), dtype='i4', compression='gzip')
f.create_dataset("next_sentence_labels", data=np.array(features["next_sentence_labels"]), dtype='i1', compression='gzip')
f.create_dataset("next_sentence_weights", data=np.array(features["next_sentence_weights"]), dtype='i4', compression='gzip')
f.flush()
f.close()
def create_multi_sequence_example(multi_sequence, max_predictions_per_sequence, max_sequence_length, max_sequences_per_pack):
# SEQ
packed_input_ids = np.zeros(max_sequence_length, dtype=np.int32)
packed_input_mask = np.zeros(max_sequence_length, dtype=np.int32)
packed_segment_ids = np.zeros(max_sequence_length, dtype=np.int32)
packed_positions = np.zeros(max_sequence_length, dtype=np.int32)
# MLM
# we are packing up to max_sequences_per_pack, each with a certain percentage of masked tokens
    # in case that percentage is rounded up for all sequences in the pack, we need to add an extra token for
# each sequence in the pack
packed_masked_lm_positions = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
packed_masked_lm_ids = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
#packed_masked_lm_weights = np.zeros(max_predictions_per_sequence + max_sequences_per_pack, dtype=np.int32)
# NSP
packed_next_sentence_positions = np.zeros(max_sequences_per_pack, dtype=np.int32)
packed_next_sentence_labels = np.zeros(max_sequences_per_pack, dtype=np.int32)
packed_next_sentence_weights = np.zeros(max_sequences_per_pack, dtype=np.int32)
offset = 0
mlm_offset = 0
sequence_index = 1 # used in the input mask
for sequence in multi_sequence:
        # Padding sequences are denoted by None
if sequence is not None:
input_ids = np.array(sequence['input_ids'])
input_mask = np.array(sequence['input_mask'])
segment_ids = np.array(sequence['segment_ids'])
masked_lm_positions = np.array(sequence['masked_lm_positions'])
masked_lm_ids = np.array(sequence['masked_lm_ids'])
#masked_lm_weights = np.array(sequence['masked_lm_weights'])
next_sentence_labels = np.array(sequence['next_sentence_labels'])
#input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids, masked_lm_weights, next_sentence_labels = sequence
seq_len = input_mask.sum()
# SEQ
packed_input_ids[offset:offset + seq_len] = input_ids[:seq_len]
packed_input_mask[offset:offset + seq_len] = sequence_index
packed_segment_ids[offset:offset + seq_len] = segment_ids[:seq_len]
packed_positions[offset:offset + seq_len] = np.arange(0, seq_len)
# MLM
mlm_len= (masked_lm_ids!=0).sum()
#mlm_len = int(masked_lm_weights.sum())
            assert mlm_offset + mlm_len < max_predictions_per_sequence + max_sequences_per_pack, "Too many LM predictions per sequence"
max_mlm = mlm_offset + mlm_len
packed_masked_lm_positions[mlm_offset:max_mlm] = offset + masked_lm_positions[:mlm_len]
packed_masked_lm_ids[mlm_offset:max_mlm] = masked_lm_ids[:mlm_len]
#packed_masked_lm_weights[mlm_offset:max_mlm] = sequence_index
# NSP
packed_next_sentence_positions[sequence_index - 1] = offset
packed_next_sentence_labels[sequence_index - 1] = next_sentence_labels
packed_next_sentence_weights[sequence_index - 1] = 1
# Update offsets
sequence_index += 1
offset += seq_len
mlm_offset = max_mlm
input_ids = None; input_mask = None; segment_ids = None; masked_lm_positions = None;
masked_lm_ids = None; next_sentence_labels = None; seq_len = None
# Pack into tfrecord format:
features = OrderedDict()
features["input_ids"] = packed_input_ids
features["input_mask"] = packed_input_mask
features["segment_ids"] = packed_segment_ids
features["positions"] = packed_positions
features["masked_lm_positions"] = packed_masked_lm_positions
features["masked_lm_ids"] = packed_masked_lm_ids
features["next_sentence_positions"] = packed_next_sentence_positions
features["next_sentence_labels"] = packed_next_sentence_labels
features["next_sentence_weights"] = packed_next_sentence_weights
del packed_input_ids; del packed_input_mask; del packed_segment_ids; del packed_positions; del packed_masked_lm_positions; del packed_masked_lm_ids;
del packed_next_sentence_positions; del packed_next_sentence_labels; del packed_next_sentence_weights
return features
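# Resulting array shapes (for the script defaults max_sequence_length=512,
# max_predictions_per_sequence=80, max_sequences_per_pack=3): the four
# sequence-level arrays have length 512, the masked-LM arrays have length 83
# (80 + 3 spillover slots), and the three next_sentence_* arrays have length
# 3, one slot per packed sequence.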
class pretraining_dataset(Dataset):
def __init__(self, input_file, max_pred_length):
self.input_file = input_file
self.max_pred_length = max_pred_length
f = h5py.File(input_file, "r")
keys = ['input_ids', 'input_mask', 'segment_ids', 'masked_lm_positions', 'masked_lm_ids',
'next_sentence_labels']
self.keys_exist = list(f.keys())
self.inputs = [np.asarray(f[key][:]) for key in keys]
self.len_dict={}
for key in keys:
self.len_dict[key] = np.asarray(f[key][:]).shape
f.close()
def __len__(self):
'Denotes the total number of samples'
return len(self.inputs[0])
def __getitem__(self, index):
[input_ids, input_mask, segment_ids, masked_lm_positions, masked_lm_ids,next_sentence_labels] = [input[index] if indice < 5 else
np.asarray(input[index]) for indice, input in enumerate(self.inputs)]
return [input_ids, input_mask, segment_ids,masked_lm_positions, masked_lm_ids,
next_sentence_labels]
class WorkerInitObj(object):
def __init__(self, seed):
self.seed = seed
def __call__(self, id):
np.random.seed(seed=self.seed + id)
random.seed(self.seed + id)
def parse_arguments():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--input_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain .hdf5 files for the task.")
parser.add_argument("--max_sequence_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--max_predictions_per_sequence",
default=80,
type=int,
help="The maximum total of masked tokens in input sequence")
parser.add_argument("--max_sequences_per_pack",
default=3,
type=int,
help="The maximum number of sequences to pack in multi-sequence")
parser.add_argument("--train_batch_size",
default=8,
type=int,
help="Total batch size for training.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the packed dataset will be written.")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument("--local_rank",
type=int,
default=os.getenv('LOCAL_RANK', -1),
help="local_rank for distributed training on gpus")
parser.add_argument('--disable_progress_bar',
default=False,
action='store_true',
help='Disable tqdm progress bar')
args = parser.parse_args()
return args
def main():
args = parse_arguments()
random.seed(args.seed + args.local_rank)
np.random.seed(args.seed + args.local_rank)
torch.manual_seed(args.seed + args.local_rank)
torch.cuda.manual_seed(args.seed + args.local_rank)
worker_init = WorkerInitObj(args.seed + args.local_rank)
device = torch.device("cpu")
print("args.max_sequence_length={}, args.max_sequences_per_pack={},args.max_predictions_per_sequence={}".format(args.max_sequence_length, args.max_sequences_per_pack,args.max_predictions_per_sequence))
files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if
os.path.isfile(os.path.join(args.input_dir, f)) and 'training' in f]
print("files={}".format(files))
sequence_lengths = []
examples_by_length = defaultdict(list)
print("Looping through dataset to collect sequence length information...")
for f_id in range(len(files)):
#single card
data_file = files[f_id]
print("-- loading data_file={}".format(data_file))
train_data = pretraining_dataset(data_file, args.max_predictions_per_sequence)
for step, batch in enumerate(train_data):
input_ids, input_mask, segment_ids,masked_lm_positions, masked_lm_ids, next_sentence_labels = batch
features = OrderedDict()
features["input_ids"] = input_ids
features["input_mask"] = input_mask
features["segment_ids"] = segment_ids
features["masked_lm_positions"] = masked_lm_positions
features["masked_lm_ids"] = masked_lm_ids
#features["masked_lm_weights"] = masked_lm_weights
features["next_sentence_labels"] = next_sentence_labels
im_length = sum(input_mask)
examples_by_length[im_length].append(features)
sequence_lengths.append(im_length)
sequence_lengths = np.array(sequence_lengths)
# Pass the array of sequence lengths to the packing algorithm
strategy_set, mixture, padding, slicing = get_packing_recipe(args.output_dir, sequence_lengths, args.max_sequence_length, args.max_sequences_per_pack)
# Add the calculated padding
for i in range(1, args.max_sequence_length + 1):
if i not in examples_by_length.keys():
examples_by_length[i]=[]
examples_by_length[i].extend([None] * int(padding[i - 1]))
# Shuffle the data
for key in examples_by_length:
random.shuffle(examples_by_length[key])
# Pack and store the data
print(f"\nPacking and writing packed dataset to {args.output_dir}.")
    # Slice the data into per-file chunks of packed examples (4480 per chunk)
    example_slices = slice_examples_mult_strategies_shuffle(examples_by_length, slicing, strategy_set, mixture)
part_idx = [i for i in range(len(example_slices))]
gc.collect()
print('Done slice_examples !!!')
del examples_by_length; del slicing; del strategy_set; del mixture
gc.collect()
start = time.time()
print(f"Splitting work into {len(part_idx)} parts.")
split_write_sessions_size = 1000
for rr in range(1+len(example_slices)//split_write_sessions_size):
print(rr,'out of',1+len(example_slices)//split_write_sessions_size)
str_idx,stp_idx=rr*split_write_sessions_size,min((rr+1)*split_write_sessions_size,len(example_slices))
example_slices_prt,part_idx_prt = example_slices[str_idx:stp_idx], part_idx[str_idx:stp_idx]
with ProcessPoolExecutor(50) as executor:
work = repeat(args), part_idx_prt, example_slices_prt
for partial_result in executor.map(parallel_pack_according_to_strategy, *work):
pass
print('------')
del work
print(f"\nDone. Took: {time.time() - start:3.2f} seconds to pack and write dataset.")
print('-------------',str_idx,stp_idx)
print('Done Cleaning')
if __name__ == "__main__":
main()
# ===== /courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/oauth2client/contrib/devshell.py (repo: GoogleCloudPlatform/training-data-analyst, license: Apache-2.0) =====
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""OAuth 2.0 utitilies for Google Developer Shell environment."""
import datetime
import json
import os
import socket
from oauth2client import _helpers
from oauth2client import client
DEVSHELL_ENV = 'DEVSHELL_CLIENT_PORT'
class Error(Exception):
"""Errors for this module."""
pass
class CommunicationError(Error):
"""Errors for communication with the Developer Shell server."""
class NoDevshellServer(Error):
"""Error when no Developer Shell server can be contacted."""
# The request for credential information to the Developer Shell client socket
# is always an empty PBLite-formatted JSON object, so just define it as a
# constant.
CREDENTIAL_INFO_REQUEST_JSON = '[]'
class CredentialInfoResponse(object):
"""Credential information response from Developer Shell server.
The credential information response from Developer Shell socket is a
PBLite-formatted JSON array with fields encoded by their index in the
array:
* Index 0 - user email
* Index 1 - default project ID. None if the project context is not known.
* Index 2 - OAuth2 access token. None if there is no valid auth context.
* Index 3 - Seconds until the access token expires. None if not present.
"""
def __init__(self, json_string):
"""Initialize the response data from JSON PBLite array."""
pbl = json.loads(json_string)
if not isinstance(pbl, list):
raise ValueError('Not a list: ' + str(pbl))
pbl_len = len(pbl)
self.user_email = pbl[0] if pbl_len > 0 else None
self.project_id = pbl[1] if pbl_len > 1 else None
self.access_token = pbl[2] if pbl_len > 2 else None
self.expires_in = pbl[3] if pbl_len > 3 else None
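# For example (made-up values), the PBLite string
#   '["user@example.com", "my-project", "ya29.abc", 1800]'
# parses into user_email='user@example.com', project_id='my-project',
# access_token='ya29.abc' and expires_in=1800; shorter arrays leave the
# trailing attributes as None.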
def _SendRecv():
"""Communicate with the Developer Shell server socket."""
port = int(os.getenv(DEVSHELL_ENV, 0))
if port == 0:
raise NoDevshellServer()
sock = socket.socket()
sock.connect(('localhost', port))
data = CREDENTIAL_INFO_REQUEST_JSON
msg = '{0}\n{1}'.format(len(data), data)
sock.sendall(_helpers._to_bytes(msg, encoding='utf-8'))
header = sock.recv(6).decode()
if '\n' not in header:
raise CommunicationError('saw no newline in the first 6 bytes')
len_str, json_str = header.split('\n', 1)
to_read = int(len_str) - len(json_str)
if to_read > 0:
json_str += sock.recv(to_read, socket.MSG_WAITALL).decode()
return CredentialInfoResponse(json_str)
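# Wire format sketch (illustrative bytes): the request above is sent as
# '2\n[]' (payload length, newline, payload), and the server replies in the
# same shape, e.g. '29\n["a@b.com", null, null, null]'.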
class DevshellCredentials(client.GoogleCredentials):
"""Credentials object for Google Developer Shell environment.
This object will allow a Google Developer Shell session to identify its
user to Google and other OAuth 2.0 servers that can verify assertions. It
can be used for the purpose of accessing data stored under the user
account.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens.
"""
def __init__(self, user_agent=None):
super(DevshellCredentials, self).__init__(
None, # access_token, initialized below
None, # client_id
None, # client_secret
None, # refresh_token
None, # token_expiry
None, # token_uri
user_agent)
self._refresh(None)
def _refresh(self, http):
"""Refreshes the access token.
Args:
http: unused HTTP object
"""
self.devshell_response = _SendRecv()
self.access_token = self.devshell_response.access_token
expires_in = self.devshell_response.expires_in
if expires_in is not None:
delta = datetime.timedelta(seconds=expires_in)
self.token_expiry = client._UTCNOW() + delta
else:
self.token_expiry = None
@property
def user_email(self):
return self.devshell_response.user_email
@property
def project_id(self):
return self.devshell_response.project_id
@classmethod
def from_json(cls, json_data):
raise NotImplementedError(
'Cannot load Developer Shell credentials from JSON.')
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize Developer Shell credentials.')
# ===== /Selenium_PhantomJS/source/pip/_vendor/re-vendor.py (repo: ryfeus/lambda-packs, license: MIT) =====
import os
import sys
import pip
import glob
import shutil
here = os.path.abspath(os.path.dirname(__file__))
def usage():
print("Usage: re-vendor.py [clean|vendor]")
sys.exit(1)
def clean():
for fn in os.listdir(here):
dirname = os.path.join(here, fn)
if os.path.isdir(dirname):
shutil.rmtree(dirname)
# six is a single file, not a package
os.unlink(os.path.join(here, 'six.py'))
def vendor():
pip.main(['install', '-t', here, '-r', 'vendor.txt'])
for dirname in glob.glob('*.egg-info'):
shutil.rmtree(dirname)
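# Note: pip.main() was removed from pip's public API in pip 10, so on a
# modern pip this script would need to shell out to pip instead.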
if __name__ == '__main__':
if len(sys.argv) != 2:
usage()
if sys.argv[1] == 'clean':
clean()
elif sys.argv[1] == 'vendor':
vendor()
else:
usage()
# ===== /direct/src/gui/DirectGuiBase.py (repo: panda3d/panda3d, license: BSD-3-Clause/BSD-2-Clause) =====
"""
Base class for all DirectGui items. Handles composite widgets and
command line argument parsing.
Code overview:
1) Each widget defines a set of options (optiondefs) as a list of tuples
of the form ``('name', defaultValue, handler)``.
'name' is the name of the option (used during construction or configure)
handler can be: None, method, or INITOPT. If a method is specified, it
will be called during widget construction (via initialiseoptions); if the
handler is specified as INITOPT, this is an option that can only be set
during widget construction.
2) :func:`~DirectGuiBase.defineoptions` is called. defineoptions creates:
self._constructorKeywords = { keyword: [value, useFlag] }
A dictionary of the keyword options specified as part of the
constructor.  Keywords can be of the form 'component_option', where
component is the name of a widget's component, a component group or a
component alias.
self._dynamicGroups
A list of group names for which it is permissible to specify options
before components of that group are created.
If a widget is a derived class the order of execution would be::
foo.optiondefs = {}
foo.defineoptions()
fooParent()
fooParent.optiondefs = {}
fooParent.defineoptions()
3) :func:`~DirectGuiBase.addoptions` is called. This combines options
specified as keywords to the widget constructor (stored in
self._constructorKeywords) with the default options (stored in optiondefs).
Results are stored in
``self._optionInfo = { keyword: [default, current, handler] }``.
If a keyword is of the form 'component_option' it is left in the
self._constructorKeywords dictionary (for use by component constructors),
otherwise it is 'used', and deleted from self._constructorKeywords.
Notes:
- constructor keywords override the defaults.
- derived class default values override parent class defaults
- derived class handler functions override parent class functions
4) Superclass initialization methods are called (resulting in nested calls
to define options (see 2 above)
5) Widget components are created via calls to
:func:`~DirectGuiBase.createcomponent`. User can specify aliases and groups
for each component created.
Aliases are alternate names for components, e.g. a widget may have a
component with a name 'entryField', which itself may have a component
named 'entry', you could add an alias 'entry' for the 'entryField_entry'
These are stored in self.__componentAliases. If an alias is found,
all keyword entries which use that alias are expanded to their full
form (to avoid conversion later)
Groups allow option specifications that apply to all members of the group.
If a widget has components: 'text1', 'text2', and 'text3' which all belong
to the 'text' group, they can be all configured with keywords of the form:
'text_keyword' (e.g. ``text_font='comic.rgb'``). A component's group
is stored as the fourth element of its entry in self.__componentInfo.
Note: the widget constructors have access to all remaining keywords in
_constructorKeywords (those not transferred to _optionInfo by
define/addoptions). If a component defines an alias that applies to
one of the keywords, that keyword is replaced with a new keyword with
the alias expanded.
If a keyword (or substituted alias keyword) is used during creation of the
component, it is deleted from self._constructorKeywords. If a group
keyword applies to the component, that keyword is marked as used, but is
not deleted from self._constructorKeywords, in case it applies to another
component. If any constructor keywords remain at the end of component
construction (and initialisation), an error is raised.
6) :func:`~DirectGuiBase.initialiseoptions` is called. This method calls any
option handlers to respond to any keyword/default values, then checks to
see if any keywords are left unused. If so, an error is raised.
"""
from __future__ import annotations
__all__ = ['DirectGuiBase', 'DirectGuiWidget']
from panda3d.core import (
ConfigVariableBool,
KeyboardButton,
MouseWatcherRegion,
NodePath,
PGFrameStyle,
PGItem,
Point3,
Texture,
Vec3,
)
from direct.showbase import ShowBaseGlobal
from direct.showbase.ShowBase import ShowBase
from direct.showbase.MessengerGlobal import messenger
from . import DirectGuiGlobals as DGG
from direct.directtools.DirectUtil import ROUND_TO
from direct.showbase import DirectObject
from direct.task import Task
from direct.task.TaskManagerGlobal import taskMgr
_track_gui_items = ConfigVariableBool('track-gui-items', False)
class DirectGuiBase(DirectObject.DirectObject):
"""Base class of all DirectGUI widgets."""
def __init__(self):
# Default id of all gui object, subclasses should override this
self.guiId = 'guiObject'
# List of all post initialization functions
self.postInitialiseFuncList = []
# To avoid doing things redundantly during initialisation
self.fInit = 1
# Mapping from each megawidget option to a list of information
# about the option
# - default value
# - current value
# - function to call when the option is initialised in the
# call to initialiseoptions() in the constructor or
# modified via configure(). If this is INITOPT, the
# option is an initialisation option (an option that can
# be set by the call to the constructor but can not be
# used with configure).
# This mapping is not initialised here, but in the call to
# defineoptions() which precedes construction of this base class.
#
# self._optionInfo = {}
# Mapping from each component name to a tuple of information
# about the component.
# - component widget instance
# - configure function of widget instance
# - the class of the widget (Frame, EntryField, etc)
# - cget function of widget instance
# - the name of the component group of this component, if any
self.__componentInfo = {}
# Mapping from alias names to the names of components or
# sub-components.
self.__componentAliases = {}
# Contains information about the keywords provided to the
# constructor. It is a mapping from the keyword to a tuple
# containing:
# - value of keyword
# - a boolean indicating if the keyword has been used.
# A keyword is used if, during the construction of a megawidget,
# - it is defined in a call to defineoptions() or addoptions(), or
# - it references, by name, a component of the megawidget, or
# - it references, by group, at least one component
# At the end of megawidget construction, a call is made to
# initialiseoptions() which reports an error if there are
# unused options given to the constructor.
#
# self._constructorKeywords = {}
# List of dynamic component groups. If a group is included in
# this list, then it not an error if a keyword argument for
# the group is given to the constructor or to configure(), but
# no components with this group have been created.
# self._dynamicGroups = ()
def defineoptions(self, keywords, optionDefs, dynamicGroups = ()):
""" defineoptions(keywords, optionDefs, dynamicGroups = {}) """
# Create options, providing the default value and the method
# to call when the value is changed. If any option created by
# base classes has the same name as one in <optionDefs>, the
        # base class's value and function will be overridden.
# keywords is a dictionary of keyword/value pairs from the constructor
# optionDefs is a dictionary of default options for the widget
# dynamicGroups is a tuple of component groups for which you can
# specify options even though no components of this group have
# been created
# This should be called before the constructor of the base
# class, so that default values defined in the derived class
# override those in the base class.
if not hasattr(self, '_constructorKeywords'):
tmp = {}
for option, value in keywords.items():
tmp[option] = [value, 0]
self._constructorKeywords = tmp
self._optionInfo = {}
# Initialize dictionary of dynamic groups
if not hasattr(self, '_dynamicGroups'):
self._dynamicGroups = ()
self._dynamicGroups = self._dynamicGroups + tuple(dynamicGroups)
# Reconcile command line and default options
self.addoptions(optionDefs, keywords)
def addoptions(self, optionDefs, optionkeywords):
""" addoptions(optionDefs) - add option def to option info """
# Add additional options, providing the default value and the
# method to call when the value is changed. See
# "defineoptions" for more details
# optimisations:
optionInfo = self._optionInfo
optionInfo_has_key = optionInfo.__contains__
keywords = self._constructorKeywords
keywords_has_key = keywords.__contains__
FUNCTION = DGG._OPT_FUNCTION
for name, default, function in optionDefs:
if '_' not in name:
default = optionkeywords.get(name, default)
# The option will already exist if it has been defined
# in a derived class. In this case, do not override the
# default value of the option or the callback function
# if it is not None.
if not optionInfo_has_key(name):
if keywords_has_key(name):
# Overridden by keyword, use keyword value
value = keywords[name][0]
optionInfo[name] = [default, value, function]
# Delete it from self._constructorKeywords
del keywords[name]
else:
# Use optionDefs value
optionInfo[name] = [default, default, function]
elif optionInfo[name][FUNCTION] is None:
# Only override function if not defined by derived class
optionInfo[name][FUNCTION] = function
else:
# This option is of the form "component_option". If this is
# not already defined in self._constructorKeywords add it.
# This allows a derived class to override the default value
# of an option of a component of a base class.
if not keywords_has_key(name):
keywords[name] = [default, 0]
def initialiseoptions(self, myClass):
"""
Call all initialisation functions to initialize widget
options to default of keyword value
"""
# This is to make sure this method class is only called by
# the most specific class in the class hierarchy
if self.__class__ is myClass:
# Call the configuration callback function for every option.
FUNCTION = DGG._OPT_FUNCTION
self.fInit = 1
for info in self._optionInfo.values():
func = info[FUNCTION]
if func is not None and func is not DGG.INITOPT:
func()
self.fInit = 0
# Now check if anything is left over
unusedOptions = []
keywords = self._constructorKeywords
for name in keywords:
used = keywords[name][1]
if not used:
# This keyword argument has not been used. If it
# does not refer to a dynamic group, mark it as
# unused.
index = name.find('_')
if index < 0 or name[:index] not in self._dynamicGroups:
unusedOptions.append(name)
self._constructorKeywords = {}
if len(unusedOptions) > 0:
if len(unusedOptions) == 1:
text = 'Unknown option "'
else:
text = 'Unknown options "'
raise KeyError(text + ', '.join(unusedOptions) + \
'" for ' + myClass.__name__)
# Can now call post init func
self.postInitialiseFunc()
def postInitialiseFunc(self):
for func in self.postInitialiseFuncList:
func()
def isinitoption(self, option):
"""
        Is this option one that can only be specified at construction?
"""
return self._optionInfo[option][DGG._OPT_FUNCTION] is DGG.INITOPT
def options(self):
"""
Print out a list of available widget options.
Does not include subcomponent options.
"""
options = []
if hasattr(self, '_optionInfo'):
for option, info in self._optionInfo.items():
isinit = info[DGG._OPT_FUNCTION] is DGG.INITOPT
default = info[DGG._OPT_DEFAULT]
options.append((option, default, isinit))
options.sort()
return options
def configure(self, option=None, **kw):
"""
configure(option = None)
Query or configure the megawidget options.
"""
#
# If not empty, *kw* is a dictionary giving new
# values for some of the options of this gui item
# For options defined for this widget, set
# the value of the option to the new value and call the
# configuration callback function, if any.
#
# If *option* is None, return all gui item configuration
# options and settings. Options are returned as standard 3
# element tuples
#
# If *option* is a string, return the 3 element tuple for the
# given configuration option.
# First, deal with the option queries.
if len(kw) == 0:
# This configure call is querying the values of one or all options.
# Return 3-tuples:
# (optionName, default, value)
if option is None:
rtn = {}
for option, config in self._optionInfo.items():
rtn[option] = (option,
config[DGG._OPT_DEFAULT],
config[DGG._OPT_VALUE])
return rtn
else:
config = self._optionInfo[option]
return (option, config[DGG._OPT_DEFAULT], config[DGG._OPT_VALUE])
# optimizations:
optionInfo = self._optionInfo
optionInfo_has_key = optionInfo.__contains__
componentInfo = self.__componentInfo
componentInfo_has_key = componentInfo.__contains__
componentAliases = self.__componentAliases
componentAliases_has_key = componentAliases.__contains__
VALUE = DGG._OPT_VALUE
FUNCTION = DGG._OPT_FUNCTION
# This will contain a list of options in *kw* which
# are known to this gui item.
directOptions = []
# This will contain information about the options in
# *kw* of the form <component>_<option>, where
# <component> is a component of this megawidget. It is a
# dictionary whose keys are the configure method of each
# component and whose values are a dictionary of options and
# values for the component.
indirectOptions = {}
indirectOptions_has_key = indirectOptions.__contains__
for option, value in kw.items():
if optionInfo_has_key(option):
# This is one of the options of this gui item.
# Check it is an initialisation option.
if optionInfo[option][FUNCTION] is DGG.INITOPT:
print('Cannot configure initialisation option "' \
+ option + '" for ' + self.__class__.__name__)
break
#raise KeyError, \
# 'Cannot configure initialisation option "' \
# + option + '" for ' + self.__class__.__name__
optionInfo[option][VALUE] = value
directOptions.append(option)
else:
index = option.find('_')
if index >= 0:
# This option may be of the form <component>_<option>.
# e.g. if alias ('efEntry', 'entryField_entry')
# and option = efEntry_width
# component = efEntry, componentOption = width
component = option[:index]
componentOption = option[(index + 1):]
# Expand component alias
if componentAliases_has_key(component):
# component = entryField, subcomponent = entry
component, subComponent = componentAliases[component]
if subComponent is not None:
# componentOption becomes entry_width
componentOption = subComponent + '_' \
+ componentOption
# Expand option string to write on error
# option = entryField_entry_width
option = component + '_' + componentOption
# Does this component exist
if componentInfo_has_key(component):
# Get the configure func for the named component
# component = entryField
componentConfigFuncs = [componentInfo[component][1]]
else:
# Check if this is a group name and configure all
# components in the group.
componentConfigFuncs = []
# For each component
for info in componentInfo.values():
# Check if it is a member of this group
if info[4] == component:
# Yes, append its config func
componentConfigFuncs.append(info[1])
if len(componentConfigFuncs) == 0 and \
component not in self._dynamicGroups:
raise KeyError('Unknown option "' + option + \
'" for ' + self.__class__.__name__)
# Add the configure method(s) (may be more than
# one if this is configuring a component group)
# and option/value to dictionary.
for componentConfigFunc in componentConfigFuncs:
if not indirectOptions_has_key(componentConfigFunc):
indirectOptions[componentConfigFunc] = {}
# Create a dictionary of keyword/values keyed
# on configuration function
indirectOptions[componentConfigFunc][componentOption] \
= value
else:
raise KeyError('Unknown option "' + option + \
'" for ' + self.__class__.__name__)
# Call the configure methods for any components.
# Pass in the dictionary of keyword/values created above
for func, options in indirectOptions.items():
func(**options)
# Call the configuration callback function for each option.
for option in directOptions:
info = optionInfo[option]
func = info[DGG._OPT_FUNCTION]
if func is not None:
func()
# Allow index style references
def __setitem__(self, key, value):
self.configure(**{key: value})
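    # For example, myWidget['text'] = 'Hello' is shorthand for
    # myWidget.configure(text = 'Hello').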
def cget(self, option):
"""
Get current configuration setting for this option
"""
# Return the value of an option, for example myWidget['font'].
if option in self._optionInfo:
return self._optionInfo[option][DGG._OPT_VALUE]
else:
index = option.find('_')
if index >= 0:
component = option[:index]
componentOption = option[(index + 1):]
# Expand component alias
if component in self.__componentAliases:
component, subComponent = self.__componentAliases[
component]
if subComponent is not None:
componentOption = subComponent + '_' + componentOption
# Expand option string to write on error
option = component + '_' + componentOption
if component in self.__componentInfo:
# Call cget on the component.
componentCget = self.__componentInfo[component][3]
return componentCget(componentOption)
else:
# If this is a group name, call cget for one of
# the components in the group.
for info in self.__componentInfo.values():
if info[4] == component:
componentCget = info[3]
return componentCget(componentOption)
# Option not found
raise KeyError('Unknown option "' + option + \
'" for ' + self.__class__.__name__)
    # Allow index style references
__getitem__ = cget
def createcomponent(self, componentName, componentAliases, componentGroup,
widgetClass, *widgetArgs, **kw):
"""
Create a component (during construction or later) for this widget.
"""
# Check for invalid component name
if '_' in componentName:
raise ValueError('Component name "%s" must not contain "_"' % componentName)
# Get construction keywords
if hasattr(self, '_constructorKeywords'):
keywords = self._constructorKeywords
else:
keywords = {}
for alias, component in componentAliases:
# Create aliases to the component and its sub-components.
index = component.find('_')
if index < 0:
# Just a shorter name for one of this widget's components
self.__componentAliases[alias] = (component, None)
else:
# An alias for a component of one of this widget's components
mainComponent = component[:index]
subComponent = component[(index + 1):]
self.__componentAliases[alias] = (mainComponent, subComponent)
# Remove aliases from the constructor keyword arguments by
# replacing any keyword arguments that begin with *alias*
# with corresponding keys beginning with *component*.
alias = alias + '_'
aliasLen = len(alias)
for option in keywords.copy():
if len(option) > aliasLen and option[:aliasLen] == alias:
newkey = component + '_' + option[aliasLen:]
keywords[newkey] = keywords[option]
del keywords[option]
# Find any keyword arguments for this component
componentPrefix = componentName + '_'
nameLen = len(componentPrefix)
# First, walk through the option list looking for arguments
        # that refer to this component's group.
for option in keywords:
# Check if this keyword argument refers to the group
# of this component. If so, add this to the options
# to use when constructing the widget. Mark the
# keyword argument as being used, but do not remove it
# since it may be required when creating another
# component.
index = option.find('_')
if index >= 0 and componentGroup == option[:index]:
rest = option[(index + 1):]
kw[rest] = keywords[option][0]
keywords[option][1] = 1
# Now that we've got the group arguments, walk through the
# option list again and get out the arguments that refer to
# this component specifically by name. These are more
# specific than the group arguments, above; we walk through
# the list afterwards so they will override.
for option in keywords.copy():
if len(option) > nameLen and option[:nameLen] == componentPrefix:
# The keyword argument refers to this component, so add
# this to the options to use when constructing the widget.
kw[option[nameLen:]] = keywords[option][0]
# And delete it from main construction keywords
del keywords[option]
# Return None if no widget class is specified
if widgetClass is None:
return None
# Get arguments for widget constructor
if len(widgetArgs) == 1 and isinstance(widgetArgs[0], tuple):
# Arguments to the constructor can be specified as either
# multiple trailing arguments to createcomponent() or as a
# single tuple argument.
widgetArgs = widgetArgs[0]
# Create the widget
widget = widgetClass(*widgetArgs, **kw)
componentClass = widget.__class__.__name__
self.__componentInfo[componentName] = (widget, widget.configure,
componentClass, widget.cget, componentGroup)
return widget
def component(self, name):
# Return a component widget of the megawidget given the
# component's name
# This allows the user of a megawidget to access and configure
# widget components directly.
# Find the main component and any subcomponents
index = name.find('_')
if index < 0:
component = name
remainingComponents = None
else:
component = name[:index]
remainingComponents = name[(index + 1):]
# Expand component alias
# Example entry which is an alias for entryField_entry
if component in self.__componentAliases:
# component = entryField, subComponent = entry
component, subComponent = self.__componentAliases[component]
if subComponent is not None:
if remainingComponents is None:
# remainingComponents = entry
remainingComponents = subComponent
else:
remainingComponents = subComponent + '_' \
+ remainingComponents
# Get the component from __componentInfo dictionary
widget = self.__componentInfo[component][0]
if remainingComponents is None:
# Not looking for subcomponent
return widget
else:
# Recursive call on subcomponent
return widget.component(remainingComponents)
def components(self):
# Return a list of all components.
return sorted(self.__componentInfo)
def hascomponent(self, component):
return component in self.__componentInfo
def destroycomponent(self, name):
# Remove a megawidget component.
# This command is for use by megawidget designers to destroy a
# megawidget component.
self.__componentInfo[name][0].destroy()
del self.__componentInfo[name]
def destroy(self):
# Clean out any hooks
self.ignoreAll()
del self._optionInfo
del self.__componentInfo
del self.postInitialiseFuncList
def bind(self, event, command, extraArgs = []):
"""
Bind the command (which should expect one arg) to the specified
event (such as ENTER, EXIT, B1PRESS, B1CLICK, etc.)
See DirectGuiGlobals for possible events
"""
# Need to tack on gui item specific id
gEvent = event + self.guiId
if ConfigVariableBool('debug-directgui-msgs', False):
from direct.showbase.PythonUtil import StackTrace
print(gEvent)
print(StackTrace())
self.accept(gEvent, command, extraArgs = extraArgs)
def unbind(self, event):
"""
Unbind the specified event
"""
# Need to tack on gui item specific id
gEvent = event + self.guiId
self.ignore(gEvent)
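# Usage sketch (not part of the original module; assumes a widget instance
# built from one of the DirectGuiWidget subclasses, e.g. a DirectButton
# named `myButton`):
#
#     def onClick(event):
#         print('clicked!', event)
#
#     myButton.bind(DGG.B1CLICK, onClick)   # fire handler on left click
#     myButton.unbind(DGG.B1CLICK)          # remove the handler again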
def toggleGuiGridSnap():
DirectGuiWidget.snapToGrid = 1 - DirectGuiWidget.snapToGrid
def setGuiGridSpacing(spacing):
DirectGuiWidget.gridSpacing = spacing
class DirectGuiWidget(DirectGuiBase, NodePath):
    # Toggle if you wish widgets to snap to the grid when dragging
snapToGrid = 0
gridSpacing = 0.05
# Determine the default initial state for inactive (or
# unclickable) components. If we are in edit mode, these are
# actually clickable by default.
guiEdit = ConfigVariableBool('direct-gui-edit', False)
if guiEdit:
inactiveInitState = DGG.NORMAL
else:
inactiveInitState = DGG.DISABLED
guiDict: dict[str, DirectGuiWidget] = {}
def __init__(self, parent = None, **kw):
# Direct gui widgets are node paths
# Direct gui widgets have:
# - stateNodePaths (to hold visible representation of widget)
# State node paths can have:
# - a frame of type (None, FLAT, RAISED, GROOVE, RIDGE)
# - arbitrary geometry for each state
# They inherit from DirectGuiWidget
# - Can create components (with aliases and groups)
# - Can bind to mouse events
# They inherit from NodePath
# - Can position/scale them
optiondefs = (
# Widget's constructor
('pgFunc', PGItem, None),
('numStates', 1, None),
('invertedFrames', (), None),
('sortOrder', 0, None),
# Widget's initial state
('state', DGG.NORMAL, self.setState),
# Widget's frame characteristics
('relief', DGG.FLAT, self.setRelief),
('borderWidth', (.1, .1), self.setBorderWidth),
('borderUvWidth', (.1, .1), self.setBorderUvWidth),
('frameSize', None, self.setFrameSize),
('frameColor', (.8, .8, .8, 1), self.setFrameColor),
('frameTexture', None, self.setFrameTexture),
('frameVisibleScale', (1, 1), self.setFrameVisibleScale),
('pad', (0, 0), self.resetFrameSize),
# Override button id (beware! your name may not be unique!)
('guiId', None, DGG.INITOPT),
# Initial pos/scale of the widget
('pos', None, DGG.INITOPT),
('hpr', None, DGG.INITOPT),
('scale', None, DGG.INITOPT),
('color', None, DGG.INITOPT),
# Do events pass through this widget?
('suppressMouse', 1, DGG.INITOPT),
('suppressKeys', 0, DGG.INITOPT),
('enableEdit', 1, DGG.INITOPT),
)
# Merge keyword options with default options
self.defineoptions(kw, optiondefs)
# Initialize the base classes (after defining the options).
DirectGuiBase.__init__(self)
NodePath.__init__(self)
# Create a button
self.guiItem = self['pgFunc']('')
# Override automatically generated guiId
if self['guiId']:
self.guiItem.setId(self['guiId'])
self.guiId = self.guiItem.getId()
if ShowBaseGlobal.__dev__:
# track gui items by guiId for tracking down leaks
if _track_gui_items:
if not hasattr(ShowBase, 'guiItems'):
ShowBase.guiItems = {}
if self.guiId in ShowBase.guiItems:
ShowBase.notify.warning('duplicate guiId: %s (%s stomping %s)' %
(self.guiId, self,
ShowBase.guiItems[self.guiId]))
ShowBase.guiItems[self.guiId] = self
# Attach button to parent and make that self
if parent is None:
parent = ShowBaseGlobal.aspect2d
self.assign(parent.attachNewNode(self.guiItem, self['sortOrder']))
# Update pose to initial values
if self['pos']:
self.setPos(self['pos'])
if self['hpr']:
self.setHpr(self['hpr'])
if self['scale']:
self.setScale(self['scale'])
if self['color']:
self.setColor(self['color'])
# Initialize names
# Putting the class name in helps with debugging.
self.setName("%s-%s" % (self.__class__.__name__, self.guiId))
# Create
self.stateNodePath = []
for i in range(self['numStates']):
self.stateNodePath.append(NodePath(self.guiItem.getStateDef(i)))
# Initialize frame style
self.frameStyle = []
for i in range(self['numStates']):
self.frameStyle.append(PGFrameStyle())
# For holding bounds info
self.ll = Point3(0)
self.ur = Point3(0)
# Is drag and drop enabled?
if self['enableEdit'] and self.guiEdit:
self.enableEdit()
# Set up event handling
suppressFlags = 0
if self['suppressMouse']:
suppressFlags |= MouseWatcherRegion.SFMouseButton
suppressFlags |= MouseWatcherRegion.SFMousePosition
if self['suppressKeys']:
suppressFlags |= MouseWatcherRegion.SFOtherButton
self.guiItem.setSuppressFlags(suppressFlags)
# Bind destroy hook
self.guiDict[self.guiId] = self
# self.bind(DGG.DESTROY, self.destroy)
# Update frame when everything has been initialized
self.postInitialiseFuncList.append(self.frameInitialiseFunc)
# Call option initialization functions
self.initialiseoptions(DirectGuiWidget)
def frameInitialiseFunc(self):
# Now allow changes to take effect
self.updateFrameStyle()
if not self['frameSize']:
self.resetFrameSize()
def enableEdit(self):
self.bind(DGG.B2PRESS, self.editStart)
self.bind(DGG.B2RELEASE, self.editStop)
self.bind(DGG.PRINT, self.printConfig)
# Can we move this to showbase
# Certainly we don't need to do this for every button!
#mb = base.mouseWatcherNode.getModifierButtons()
#mb.addButton(KeyboardButton.control())
#base.mouseWatcherNode.setModifierButtons(mb)
def disableEdit(self):
self.unbind(DGG.B2PRESS)
self.unbind(DGG.B2RELEASE)
self.unbind(DGG.PRINT)
#mb = base.mouseWatcherNode.getModifierButtons()
#mb.removeButton(KeyboardButton.control())
#base.mouseWatcherNode.setModifierButtons(mb)
def editStart(self, event):
taskMgr.remove('guiEditTask')
vWidget2render2d = self.getPos(ShowBaseGlobal.render2d)
vMouse2render2d = Point3(event.getMouse()[0], 0, event.getMouse()[1])
editVec = Vec3(vWidget2render2d - vMouse2render2d)
if base.mouseWatcherNode.getModifierButtons().isDown(
KeyboardButton.control()):
t = taskMgr.add(self.guiScaleTask, 'guiEditTask')
t.refPos = vWidget2render2d
t.editVecLen = editVec.length()
t.initScale = self.getScale()
else:
t = taskMgr.add(self.guiDragTask, 'guiEditTask')
t.editVec = editVec
def guiScaleTask(self, state):
mwn = base.mouseWatcherNode
if mwn.hasMouse():
vMouse2render2d = Point3(mwn.getMouse()[0], 0, mwn.getMouse()[1])
newEditVecLen = Vec3(state.refPos - vMouse2render2d).length()
self.setScale(state.initScale * (newEditVecLen/state.editVecLen))
return Task.cont
def guiDragTask(self, state):
mwn = base.mouseWatcherNode
if mwn.hasMouse():
vMouse2render2d = Point3(mwn.getMouse()[0], 0, mwn.getMouse()[1])
newPos = vMouse2render2d + state.editVec
self.setPos(ShowBaseGlobal.render2d, newPos)
if DirectGuiWidget.snapToGrid:
newPos = self.getPos()
newPos.set(
ROUND_TO(newPos[0], DirectGuiWidget.gridSpacing),
ROUND_TO(newPos[1], DirectGuiWidget.gridSpacing),
ROUND_TO(newPos[2], DirectGuiWidget.gridSpacing))
self.setPos(newPos)
return Task.cont
def editStop(self, event):
taskMgr.remove('guiEditTask')
def setState(self):
if isinstance(self['state'], int):
self.guiItem.setActive(self['state'])
elif self['state'] == DGG.NORMAL or self['state'] == 'normal':
self.guiItem.setActive(1)
else:
self.guiItem.setActive(0)
def resetFrameSize(self):
if not self.fInit:
self.setFrameSize(fClearFrame = 1)
def setFrameSize(self, fClearFrame = 0):
# Use ready state to determine frame Type
frameType = self.getFrameType()
if self['frameSize']:
# Use user specified bounds
self.bounds = self['frameSize']
#print "%s bounds = %s" % (self.getName(), self.bounds)
bw = (0, 0)
else:
if fClearFrame and frameType != PGFrameStyle.TNone:
self.frameStyle[0].setType(PGFrameStyle.TNone)
self.guiItem.setFrameStyle(0, self.frameStyle[0])
# To force an update of the button
self.guiItem.getStateDef(0)
# Clear out frame before computing bounds
self.getBounds()
# Restore frame style if necessary
if frameType != PGFrameStyle.TNone:
self.frameStyle[0].setType(frameType)
self.guiItem.setFrameStyle(0, self.frameStyle[0])
if frameType != PGFrameStyle.TNone and \
frameType != PGFrameStyle.TFlat:
bw = self['borderWidth']
else:
bw = (0, 0)
# Set frame to new dimensions
self.guiItem.setFrame(
self.bounds[0] - bw[0],
self.bounds[1] + bw[0],
self.bounds[2] - bw[1],
self.bounds[3] + bw[1])
def getBounds(self, state = 0):
self.stateNodePath[state].calcTightBounds(self.ll, self.ur)
# Scale bounds to give a pad around graphics
vec_right = Vec3.right()
vec_up = Vec3.up()
left = (vec_right[0] * self.ll[0]
+ vec_right[1] * self.ll[1]
+ vec_right[2] * self.ll[2])
right = (vec_right[0] * self.ur[0]
+ vec_right[1] * self.ur[1]
+ vec_right[2] * self.ur[2])
bottom = (vec_up[0] * self.ll[0]
+ vec_up[1] * self.ll[1]
+ vec_up[2] * self.ll[2])
top = (vec_up[0] * self.ur[0]
+ vec_up[1] * self.ur[1]
+ vec_up[2] * self.ur[2])
self.ll = Point3(left, 0.0, bottom)
self.ur = Point3(right, 0.0, top)
self.bounds = [self.ll[0] - self['pad'][0],
self.ur[0] + self['pad'][0],
self.ll[2] - self['pad'][1],
self.ur[2] + self['pad'][1]]
return self.bounds
def getWidth(self):
return self.bounds[1] - self.bounds[0]
def getHeight(self):
return self.bounds[3] - self.bounds[2]
def getCenter(self):
x = self.bounds[0] + (self.bounds[1] - self.bounds[0])/2.0
y = self.bounds[2] + (self.bounds[3] - self.bounds[2])/2.0
return (x, y)
def getFrameType(self, state = 0):
return self.frameStyle[state].getType()
def updateFrameStyle(self):
if not self.fInit:
for i in range(self['numStates']):
self.guiItem.setFrameStyle(i, self.frameStyle[i])
def setRelief(self, fSetStyle = 1):
relief = self['relief']
# Convert None, and string arguments
if relief is None:
relief = PGFrameStyle.TNone
elif isinstance(relief, str):
# Convert string to frame style int
relief = DGG.FrameStyleDict[relief]
# Set style
if relief == DGG.RAISED:
for i in range(self['numStates']):
if i in self['invertedFrames']:
self.frameStyle[1].setType(DGG.SUNKEN)
else:
self.frameStyle[i].setType(DGG.RAISED)
elif relief == DGG.SUNKEN:
for i in range(self['numStates']):
if i in self['invertedFrames']:
self.frameStyle[1].setType(DGG.RAISED)
else:
self.frameStyle[i].setType(DGG.SUNKEN)
else:
for i in range(self['numStates']):
self.frameStyle[i].setType(relief)
# Apply styles
self.updateFrameStyle()
def setFrameColor(self):
# this might be a single color or a list of colors
colors = self['frameColor']
if isinstance(colors[0], (int, float)):
colors = (colors,)
for i in range(self['numStates']):
if i >= len(colors):
color = colors[-1]
else:
color = colors[i]
self.frameStyle[i].setColor(color[0], color[1], color[2], color[3])
self.updateFrameStyle()
def setFrameTexture(self):
# this might be a single texture or a list of textures
textures = self['frameTexture']
if textures is None or \
isinstance(textures, (Texture, str)):
textures = (textures,) * self['numStates']
for i in range(self['numStates']):
if i >= len(textures):
texture = textures[-1]
else:
texture = textures[i]
if isinstance(texture, str):
texture = base.loader.loadTexture(texture)
if texture:
self.frameStyle[i].setTexture(texture)
else:
self.frameStyle[i].clearTexture()
self.updateFrameStyle()
def setFrameVisibleScale(self):
scale = self['frameVisibleScale']
for i in range(self['numStates']):
self.frameStyle[i].setVisibleScale(scale[0], scale[1])
self.updateFrameStyle()
def setBorderWidth(self):
width = self['borderWidth']
for i in range(self['numStates']):
self.frameStyle[i].setWidth(width[0], width[1])
self.updateFrameStyle()
def setBorderUvWidth(self):
uvWidth = self['borderUvWidth']
for i in range(self['numStates']):
self.frameStyle[i].setUvWidth(uvWidth[0], uvWidth[1])
self.updateFrameStyle()
def destroy(self):
if hasattr(self, "frameStyle"):
if ShowBaseGlobal.__dev__:
if hasattr(ShowBase, 'guiItems'):
ShowBase.guiItems.pop(self.guiId, None)
# Destroy children
for child in self.getChildren():
childGui = self.guiDict.get(child.getName())
if childGui:
childGui.destroy()
else:
# RAU since we added the class to the name, try
# it with the original name
parts = child.getName().split('-')
simpleChildGui = self.guiDict.get(parts[-1])
if simpleChildGui:
simpleChildGui.destroy()
# messenger.send(DESTROY + child.getName())
del self.guiDict[self.guiId]
del self.frameStyle
# Get rid of node path
self.removeNode()
for nodePath in self.stateNodePath:
nodePath.removeNode()
del self.stateNodePath
del self.guiItem
# Call superclass destruction method (clears out hooks)
DirectGuiBase.destroy(self)
def printConfig(self, indent = 0):
space = ' ' * indent
print('%s%s - %s' % (space, self.guiId, self.__class__.__name__))
print('%sPos: %s' % (space, tuple(self.getPos())))
print('%sScale: %s' % (space, tuple(self.getScale())))
# Print out children info
for child in self.getChildren():
messenger.send(DGG.PRINT + child.getName(), [indent + 2])
def copyOptions(self, other):
"""
Copy other's options into our self so we look and feel like other
"""
for key, value in other._optionInfo.items():
self[key] = value[1]
def taskName(self, idString):
return idString + "-" + str(self.guiId)
def uniqueName(self, idString):
return idString + "-" + str(self.guiId)
def setProp(self, propString, value):
"""
        Allows you to set a property like frame['text'] = 'Joe' in
        a function call instead of an assignment.
        This is useful for setting properties inside function intervals,
        where you must supply a function and extraArgs rather than an
        assignment.
"""
self[propString] = value
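# Subclass sketch (not from the original module): a minimal megawidget built
# on the defineoptions / createcomponent / initialiseoptions protocol above.
# The option, component, and class names here are hypothetical.
#
#     from direct.gui.OnscreenText import OnscreenText
#
#     class LabeledFrame(DirectGuiWidget):
#         def __init__(self, parent=None, **kw):
#             optiondefs = (
#                 # Reconfigurable at any time; DGG.INITOPT options are not.
#                 ('labelText', 'label', self.setLabelText),
#             )
#             # Merge caller keywords with the defaults above
#             self.defineoptions(kw, optiondefs)
#             DirectGuiWidget.__init__(self, parent)
#             # 'label_*' keywords are routed to this OnscreenText component
#             self.label = self.createcomponent(
#                 'label', (), None, OnscreenText, (), parent=self)
#             # Must be called exactly once, by the most-derived class
#             self.initialiseoptions(LabeledFrame)
#
#         def setLabelText(self):
#             self.label.setText(self['labelText'])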
| 4675eb63dbbe904ffd9261ec934829869977bf7e | 0760fb4901a75766921a205b55686d6d6f049b30 | /python/ray/serve/tests/test_deployment_graph_config.py | 2e0c80a5998ae1e90832d630ebf344668f01b79c | ["MIT", "BSD-3-Clause", "Apache-2.0"] | permissive | ray-project/ray | a4bb6940b08b59a61ef0b8e755a52d8563a2f867 | edba68c3e7cf255d1d6479329f305adb7fa4c3ed | refs/heads/master | 2023-08-31T03:36:48.164405 | 2023-08-31T03:20:38 | 2023-08-31T03:20:38 | 71,932,349 | 29,482 | 5,669 | Apache-2.0 | 2023-09-14T21:48:14 | 2016-10-25T19:38:30 | Python | UTF-8 | Python | false | false | 2,743 | py | test_deployment_graph_config.py |
import pytest
import os
import sys
from typing import Any
from ray import serve
from ray.serve.deployment_graph import RayServeDAGHandle
from ray.serve.dag import InputNode
from ray.serve._private.deployment_graph_build import build as pipeline_build
@serve.deployment(name="counter", num_replicas=2, user_config={"count": 123, "b": 2})
class Counter:
def __init__(self):
self.count = 10
def __call__(self, *args):
return self.count, os.getpid()
def reconfigure(self, config):
self.count = config["count"]
@serve.deployment
class Model:
def __init__(self, weight: int, ratio: float = None):
self.weight = weight
self.ratio = ratio or 1
def forward(self, input: int):
return self.ratio * self.weight * input
def __call__(self, request):
input_data = request
return self.ratio * self.weight * input_data
@serve.deployment
class Driver:
def __init__(self, dag: RayServeDAGHandle):
self.dag = dag
async def __call__(self, inp: Any) -> Any:
print(f"Driver got {inp}")
return await (await self.dag.remote(inp))
@serve.deployment
def combine(m1_output, m2_output, kwargs_output=0):
return m1_output + m2_output + kwargs_output
def test_deployment_options_func_class_with_class_method():
with InputNode() as dag_input:
counter = Counter.bind()
m1 = Model.options(name="m1", max_concurrent_queries=3).bind(1)
m2 = Model.options(name="m2", max_concurrent_queries=5).bind(2)
m1_output = m1.forward.bind(dag_input[0])
m2_output = m2.forward.bind(dag_input[1])
combine_output = combine.options(num_replicas=3, max_concurrent_queries=7).bind(
m1_output, m2_output, kwargs_output=dag_input[2]
)
dag = counter.__call__.bind(combine_output)
serve_dag = Driver.bind(dag)
deployments = pipeline_build(serve_dag)
hit_count = 0
for deployment in deployments:
if deployment.name == "counter":
assert deployment.num_replicas == 2
assert deployment.user_config == {"count": 123, "b": 2}
hit_count += 1
elif deployment.name == "m1":
assert deployment.max_concurrent_queries == 3
hit_count += 1
elif deployment.name == "m2":
assert deployment.max_concurrent_queries == 5
hit_count += 1
elif deployment.name == "combine":
assert deployment.num_replicas == 3
assert deployment.max_concurrent_queries == 7
hit_count += 1
assert hit_count == 4, "Not all deployments with expected name were found."
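# Serving sketch (not part of the original test): a DAG built this way can
# also be deployed for real with the public Ray Serve API, e.g.
#     serve.run(serve_dag)
# inside a live Ray cluster, after which the Driver deployment answers
# requests. `serve_dag` here refers to the binding built inside the test.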
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| 3aa4850b92b3b2db49e2cd808accc5cf05aca971 | 93a116442c782730ef774688b8f41a8c4bdc2a59 | /lldbhelpers/iblog.py | 11c139d4bbb0b4d70a4bbb6cfc48c74294e413e3 | ["MIT"] | permissive | keith/dotfiles | 016dddc45d194bd3f2b3224b3d830c833b0c9175 | 7ca92029cd03905159fa6433de915666c340d491 | refs/heads/main | 2023-08-04T00:49:38.487154 | 2023-07-26T04:05:51 | 2023-07-26T04:05:51 | 5,795,829 | 226 | 29 | MIT | 2022-07-25T15:00:22 | 2012-09-13T14:26:33 | Shell | UTF-8 | Python | false | false | 1,913 | py | iblog.py |
import lldb
import re
class SectionNotFound(Exception):
pass
def _extract_address_from(memory_string):
result = re.match(
r"^data found at location: (0x[0-9a-fA-F]+)$",
memory_string,
re.MULTILINE,
)
return result.group(1)
def _find_section(module, section_name, segment_name):
section = module.section[section_name]
if not section:
raise SectionNotFound()
try:
return next(x for x in section if x.GetName() == segment_name)
except StopIteration:
raise SectionNotFound()
def _output_for_command(debugger, command):
interpreter = debugger.GetCommandInterpreter()
result = lldb.SBCommandReturnObject()
interpreter.HandleCommand(command, result)
if result.Succeeded():
return str(result.GetOutput())
else:
return ""
def _add_breakpoint_for_string(debugger, target, section, string):
load_address = section.GetLoadAddress(target)
end_address = load_address + section.GetByteSize()
command = "memory find --count 1 --string '{}' {} {}".format(
string, hex(load_address), hex(end_address)
)
output = _output_for_command(debugger, command)
string_addr = _extract_address_from(output)
debugger.HandleCommand(
"br set --name NSLog --condition '(void *)[$arg1 cString] == {}'".format(
string_addr
)
)
@lldb.command()
def iblog(debugger, _ignored, context, result, _):
target = context.target
module = target.module["UIKit"]
section = _find_section(module, "__TEXT", "__cstring")
_add_breakpoint_for_string(
debugger,
target,
section,
'Could not load the "%@" image referenced from a nib in the bundle with identifier "%@"',
)
_add_breakpoint_for_string(
debugger,
target,
section,
"Unknown class %@ in Interface Builder file.\n",
)
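# Usage sketch (assumes this file lives at an importable path):
#   (lldb) command script import ~/lldbhelpers/iblog.py
#   (lldb) iblog
# The @lldb.command() decorator above registers `iblog` as an lldb command;
# running it installs conditional NSLog breakpoints for the two Interface
# Builder diagnostics, so execution stops at the offending nib load.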
| 9616d4add8e898369a8c1773989b22256172d5cf | d6a43cbb975c0d5dd2465d6f09c43767d35c121a | /tests/test_sql_utils.py | 9fafcc61fe72e2d9bcfc56eaa9bd788ae75e9c10 | ["BSD-3-Clause"] | permissive | lemon24/reader | a76f5fd3f8dbf9d86e3627bbf9a60732414721cb | 5e1682c9bfa36d341c03ab804adfb95cfc53f26e | refs/heads/master | 2023-08-17T00:38:03.405077 | 2023-08-16T21:11:25 | 2023-08-16T21:11:25 | 115,272,183 | 349 | 24 | BSD-3-Clause | 2022-06-20T19:37:32 | 2017-12-24T15:36:43 | Python | UTF-8 | Python | false | false | 4,724 | py | test_sql_utils.py |
from copy import deepcopy
from textwrap import dedent
import pytest
from reader._sql_utils import BaseQuery
from reader._sql_utils import Query
def test_query_simple():
query = BaseQuery().SELECT('select').FROM('from').JOIN('join').WHERE('where')
assert str(query) == dedent(
"""\
SELECT
select
FROM
from
JOIN
join
WHERE
where
"""
)
def test_query_complicated():
"""Test a complicated query:
* order between different (known) keywords does not matter
* arguments of repeated calls get appended, with the order preserved
* SELECT can receive 2-tuples
* WHERE and HAVING arguments are separated by AND
* JOIN arguments are separated by the keyword
* no-argument keywords have no effect
"""
query = (
BaseQuery()
.WHERE()
.OUTER_JOIN('outer join')
.JOIN('join')
.LIMIT('limit')
.JOIN()
.ORDER_BY('first', 'second')
.SELECT('one')
.HAVING('having')
.SELECT(('two', 'expr'))
.GROUP_BY('group by')
.FROM('from')
.SELECT('three', 'four')
.FROM('another from')
.WHERE('where')
.ORDER_BY('third')
.OUTER_JOIN('another outer join')
# this isn't technically valid
.WITH('first cte')
.GROUP_BY('another group by')
.HAVING('another having')
.WITH(('fancy', 'second cte'))
.JOIN('another join')
.WHERE('another where')
.NATURAL_JOIN('natural join')
.SELECT()
.SELECT()
)
assert str(query) == dedent(
"""\
WITH
(
first cte
),
fancy AS (
second cte
)
SELECT
one,
expr AS two,
three,
four
FROM
from,
another from
OUTER JOIN
outer join
JOIN
join
OUTER JOIN
another outer join
JOIN
another join
NATURAL JOIN
natural join
WHERE
where AND
another where
GROUP BY
group by,
another group by
HAVING
having AND
another having
ORDER BY
first,
second,
third
LIMIT
limit
"""
)
def test_query_flag():
query = BaseQuery().SELECT('one').SELECT('two').SELECT_DISTINCT()
assert str(query) == dedent(
"""\
SELECT DISTINCT
one,
two
"""
)
with pytest.raises(ValueError):
BaseQuery().SELECT_MAGIC('one')
def test_query_deepcopy():
deepcopy(Query())
def test_scrolling_window():
def make_query(cls=Query):
return cls().SELECT('select').FROM('from')
query = make_query()
query.scrolling_window_order_by()
assert str(query) == str(make_query(BaseQuery))
query = make_query()
query.scrolling_window_order_by('one')
assert str(query) == str(make_query(BaseQuery).ORDER_BY('one ASC'))
query = make_query()
query.LIMIT('limit')
assert str(query) == str(make_query(BaseQuery).LIMIT('limit'))
query = make_query()
query.scrolling_window_order_by('one')
query.LIMIT('limit')
assert str(query) == str(make_query(BaseQuery).ORDER_BY('one ASC').LIMIT('limit'))
query = make_query()
query.scrolling_window_order_by('one')
query.LIMIT('limit')
query.add_last([])
assert str(query) == str(
make_query(BaseQuery)
.WHERE(
"""
(
one
) > (
:last_0
)
"""
)
.ORDER_BY('one ASC')
.LIMIT('limit')
)
query = make_query()
query.scrolling_window_order_by('one', desc=True, keyword='HAVING')
query.LIMIT('limit')
query.add_last([])
assert str(query) == str(
make_query(BaseQuery)
.HAVING(
"""
(
one
) < (
:last_0
)
"""
)
.ORDER_BY('one DESC')
.LIMIT('limit')
)
def test_scrolling_window_last():
query = Query().SELECT()
query.scrolling_window_order_by()
    assert query.extract_last([1, 2, 3]) is None
assert dict(query.add_last(None)) == {}
query = Query().SELECT('one', 'two', ('three', 'max(3)'))
query.scrolling_window_order_by('one', 'three')
assert query.extract_last([1, 2, 3]) == (1, 3)
assert dict(query.add_last([1, 3])) == {'last_0': 1, 'last_1': 3}
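def test_scrolling_window_roundtrip():
    # A roundtrip sketch (not in the original suite): the values extracted
    # from the last row of one page become the :last_N parameters that
    # anchor the next page's query.
    query = Query().SELECT('one', 'two')
    query.scrolling_window_order_by('one')
    last = query.extract_last([10, 20])
    assert last == (10,)
    assert dict(query.add_last(last)) == {'last_0': 10}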
| d4c7b3aeac7c08c1909d13fa6fdfa79162078b3a | b5ce6908490cfb8e6a1e1cbe4745d675122ddce0 | /questions/maximum-difference-between-node-and-ancestor/Solution.py | 871c09b00a785d24b2f547e3ab0510571a40feff | ["MIT"] | permissive | franklingu/leetcode-solutions | 8895910f13208e1d8e604100d84c2dd35684cde4 | 7ad7e5c1c040510b7b7bd225ed4297054464dbc6 | refs/heads/master | 2023-01-09T01:34:08.097518 | 2023-01-02T02:05:35 | 2023-01-02T02:05:35 | 43,345,677 | 155 | 66 | MIT | 2020-10-02T03:41:36 | 2015-09-29T04:54:38 | Python | UTF-8 | Python | false | false | 1,562 | py | Solution.py |
"""
Given the root of a binary tree, find the maximum value v for which there exist different nodes a and b where v = |a.val - b.val| and a is an ancestor of b.
A node a is an ancestor of b if either: any child of a is equal to b or any child of a is an ancestor of b.
Example 1:
Input: root = [8,3,10,1,6,null,14,null,null,4,7,13]
Output: 7
Explanation: We have various ancestor-node differences, some of which are given below :
|8 - 3| = 5
|3 - 7| = 4
|8 - 1| = 7
|10 - 13| = 3
Among all possible differences, the maximum value of 7 is obtained by |8 - 1| = 7.
Example 2:
Input: root = [1,null,2,null,0,3]
Output: 3
Constraints:
The number of nodes in the tree is in the range [2, 5000].
0 <= Node.val <= 105
"""
from typing import Optional
# Definition for a binary tree node (defined here, rather than left as a
# comment, so the annotation on maxAncestorDiff resolves and the file runs
# standalone).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
def maxAncestorDiff(self, root: Optional[TreeNode]) -> int:
def maxDiffAndRange(node, mx, mn):
ret = 0
if node is None:
return ret
nmx, nmn = node.val, node.val
if mx is not None:
ret = max(ret, abs(mx - node.val))
nmx = max(mx, nmx)
if mn is not None:
ret = max(ret, abs(node.val - mn))
nmn = min(mn, nmn)
return max(ret, maxDiffAndRange(node.left, nmx, nmn), maxDiffAndRange(node.right, nmx, nmn))
return maxDiffAndRange(root, None, None)
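if __name__ == '__main__':
    # Quick self-check (not part of the original solution), using the tree
    # from Example 1 above; the expected answer is 7, obtained from |8 - 1|.
    n6 = TreeNode(6, TreeNode(4), TreeNode(7))
    n3 = TreeNode(3, TreeNode(1), n6)
    n10 = TreeNode(10, None, TreeNode(14, TreeNode(13)))
    assert Solution().maxAncestorDiff(TreeNode(8, n3, n10)) == 7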
| 24234dd23ae3eb71b736bea1c860cd650f23b213 | 4c6d31e726ee988ae652cbcc81c34fd95a279dc7 | /parent/uav/src/main/python/pyspookystuff/uav/utils.py | 8679755ebc8dfe4cdb6ac00511f4fa3f1f880e00 | ["LicenseRef-scancode-unknown-license-reference", "Apache-2.0"] | permissive | tribbloid/spookystuff | 4198a9a469b4d648e7b73a4f2faab035fabd0b91 | 477dabeba07519a9c042bb1e20c35f6b6be253cc | refs/heads/master | 2023-07-05T18:24:13.574276 | 2023-05-28T20:57:14 | 2023-06-11T17:59:52 | 20,600,369 | 120 | 42 | Apache-2.0 | 2023-01-19T21:37:42 | 2014-06-07T18:55:20 | Scala | UTF-8 | Python | false | false | 4,975 | py | utils.py |
from __future__ import print_function
import os
import math
import time
from dronekit import LocationGlobal, LocationGlobalRelative
from math import radians, cos, sin, asin, sqrt
earth_radius = 6378137.0 # Radius of "spherical" earth
DEVNULL = open(os.devnull, 'w')
def _groundDistance(lon1, lat1, lon2, lat2):
"""
haversine formula:
Calculate the great circle distance between two points
on the earth (specified in decimal degrees)
"""
# convert decimal degrees to radians
lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
# haversine formula
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * asin(sqrt(a))
return c * earth_radius
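# Sanity check (a sketch): on this spherical-earth model, one degree of
# longitude along the equator spans about 111.32 km:
#     >>> round(_groundDistance(0.0, 0.0, 1.0, 0.0), 1)
#     111319.5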
def groundDistance(p1, p2):
return _groundDistance(p1.lon, p1.lat, p2.lon, p2.lat)
# TODO: INACCURATE! not considering curvature
def airDistance(p1, p2):
# type: (LocationGlobal, LocationGlobal) -> (float, float, float)
haversine = groundDistance(p1, p2)
altDist = p2.alt - p1.alt
result = sqrt(haversine * haversine + altDist * altDist)
return result, haversine, abs(altDist)
def retry(maxTrial=3):
def decorate(fn):
def retryFn(*args, **kargs):
for i in range(maxTrial, -1, -1):
try:
result = fn(*args, **kargs)
return result
except BaseException as e:
if i <= 1:
print("All retrie(s) has failed", str(e), "...", str(i - 1), "time(s) left")
raise
else:
print("Retrying locally on", str(e), "...", str(i - 1), "time(s) left")
return retryFn
return decorate
def get_location_metres(original_location, dNorth, dEast):
"""
    from http://python.dronekit.io/guide/copter/guided_mode.html
Returns a LocationGlobal object containing the latitude/longitude `dNorth` and `dEast` metres from the
specified `original_location`. The returned LocationGlobal has the same `alt` value
as `original_location`.
The function is useful when you want to move the vehicle around specifying locations relative to
the current vehicle position.
The algorithm is relatively accurate over small distances (10m within 1km) except close to the poles.
For more information see:
http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
"""
# Coordinate offsets in radians
dLat = dNorth / earth_radius
dLon = dEast / (earth_radius * math.cos(math.pi * original_location.lat / 180))
# New position in decimal degrees
newlat = original_location.lat + (dLat * 180 / math.pi)
newlon = original_location.lon + (dLon * 180 / math.pi)
if type(original_location) is LocationGlobal:
targetlocation = LocationGlobal(newlat, newlon, original_location.alt)
elif type(original_location) is LocationGlobalRelative:
targetlocation = LocationGlobalRelative(newlat, newlon, original_location.alt)
else:
raise Exception("Invalid Location object passed")
return targetlocation
def waitFor(condition, duration=60):
# type: (function, int) -> None
for i in range(1, duration):
v = condition(i)
try:
v = v[0]
comment = v[1]
except:
comment = ""
if v:
return
time.sleep(1)
if i % 10 == 0:
print("waiting for", condition.func_name, "\t|", i, "second(s)", comment)
raise os.error("timeout waiting for " + condition.func_name)
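if __name__ == '__main__':
    # Demo of the retry decorator above (a sketch, not part of the original
    # module): `flaky` is a hypothetical function that fails twice and then
    # succeeds, so retry(maxTrial=3) absorbs the two transient failures.
    attempts = []
    @retry(maxTrial=3)
    def flaky():
        attempts.append(1)
        if len(attempts) < 3:
            raise RuntimeError('transient failure')
        return 'ok'
    assert flaky() == 'ok' and len(attempts) == 3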
# not accurate! should use ground dist
# def get_distance_m(aLocation1, aLocation2):
# """
# Returns the ground distance in metres between two LocationGlobal objects.
#
# This method is an approximation, and will not be accurate over large distances and close to the
# earth's poles. It comes from the ArduPilot test code:
# https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
# """
# dlat = aLocation2.lat - aLocation1.lat
# dlong = aLocation2.lon - aLocation1.lon
# return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5
| 3834e33d62fbb5370f81c7dcf21d30cd1b3ff75a | 617f9a5c9ff8e710de7b43c031cdd97a671c68c3 | /tests/features/test_neurite.py | 90bea2a3b335113b15fba005885cd08dcdef2099 | ["BSD-3-Clause", "LicenseRef-scancode-unknown-license-reference"] | permissive | BlueBrain/NeuroM | e136fc4a36fe1b5d8e590179dc3d8a111d6e5282 | 34d202cfb95d32cd985a423b65349aa1b01957d6 | refs/heads/master | 2023-08-17T18:42:43.125992 | 2023-08-08T07:14:38 | 2023-08-08T07:14:38 | 34,906,350 | 106 | 75 | BSD-3-Clause | 2023-09-14T10:58:38 | 2015-05-01T14:21:43 | Python | UTF-8 | Python | false | false | 9,400 | py | test_neurite.py |
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test features.neuritefunc."""
from math import pi, sqrt
from pathlib import Path
import neurom as nm
import numpy as np
import scipy
from mock import patch
from neurom.features import neurite, morphology
from neurom.geom import convex_hull
import pytest
from numpy.testing import assert_allclose
DATA_PATH = Path(__file__).parent.parent / 'data'
H5_PATH = DATA_PATH / 'h5/v1'
SWC_PATH = DATA_PATH / 'swc'
SIMPLE = nm.load_morphology(SWC_PATH / 'simple.swc')
NRN = nm.load_morphology(H5_PATH / 'Neuron.h5')
def test_number_of_bifurcations():
assert neurite.number_of_bifurcations(SIMPLE.neurites[0]) == 1
assert neurite.number_of_bifurcations(SIMPLE.neurites[1]) == 1
def test_number_of_forking_points():
assert neurite.number_of_forking_points(SIMPLE.neurites[0]) == 1
assert neurite.number_of_forking_points(SIMPLE.neurites[1]) == 1
def test_number_of_leaves():
assert neurite.number_of_leaves(SIMPLE.neurites[0]) == 2
assert neurite.number_of_leaves(SIMPLE.neurites[1]) == 2
def test_neurite_volume_density():
vol = np.array(morphology.total_volume_per_neurite(NRN))
hull_vol = np.array([convex_hull(n).volume for n in nm.iter_neurites(NRN)])
vol_density = [neurite.volume_density(s) for s in NRN.neurites]
assert len(vol_density) == 4
assert np.allclose(vol_density, vol / hull_vol)
ref_density = [0.43756606998299519, 0.52464681266899216,
0.24068543213643726, 0.26289304906104355]
assert_allclose(vol_density, ref_density)
def test_neurite_volume_density_failed_convex_hull():
flat_neuron = nm.load_morphology(
"""
1 1 0 0 0 0.5 -1
2 3 1 0 0 0.1 1
3 3 2 0 0 0.1 2
""",
reader="swc")
assert np.isnan(
neurite.volume_density(flat_neuron.neurites[0])
)
def test_terminal_path_length_per_neurite():
terminal_distances = [neurite.terminal_path_lengths(s) for s in SIMPLE.neurites]
assert terminal_distances == [[10, 11], [10, 9]]
def test_max_radial_distance():
assert_allclose([neurite.max_radial_distance(s) for s in SIMPLE.neurites],
[7.81025, 7.2111025])
def test_number_of_segments():
assert [neurite.number_of_segments(s) for s in SIMPLE.neurites] == [3, 3]
def test_number_of_sections():
assert [neurite.number_of_sections(s) for s in SIMPLE.neurites] == [3, 3]
def test_section_path_distances():
path_lengths = [neurite.section_path_distances(s) for s in SIMPLE.neurites]
assert path_lengths == [[5., 10., 11.], [4., 10., 9.]]
def test_section_term_lengths():
term_lengths = [neurite.section_term_lengths(s) for s in SIMPLE.neurites]
assert term_lengths == [[5., 6.], [6., 5.]]
def test_section_bif_lengths():
bif_lengths = [neurite.section_bif_lengths(s) for s in SIMPLE.neurites]
assert bif_lengths == [[5.], [4.]]
def test_section_end_distances():
end_dist = [neurite.section_end_distances(s) for s in SIMPLE.neurites]
assert end_dist == [[5.0, 5.0, 6.0], [4.0, 6.0, 5.0]]
def test_section_partition_pairs():
part_pairs = [neurite.partition_pairs(s) for s in SIMPLE.neurites]
assert part_pairs == [[(1.0, 1.0)], [(1.0, 1.0)]]
def test_section_bif_radial_distances():
bif_rads = [neurite.section_bif_radial_distances(s) for s in SIMPLE.neurites]
assert bif_rads == [[5.], [4.]]
def test_section_term_radial_distances():
trm_rads = [neurite.section_term_radial_distances(s) for s in SIMPLE.neurites]
assert_allclose(trm_rads, [[7.0710678118654755, 7.810249675906654], [7.211102550927978, 6.4031242374328485]])
def test_section_branch_orders():
branch_orders = [neurite.section_branch_orders(s) for s in SIMPLE.neurites]
assert_allclose(branch_orders, [[0, 1, 1], [0, 1, 1]])
def test_section_bif_branch_orders():
bif_branch_orders = [neurite.section_bif_branch_orders(s) for s in SIMPLE.neurites]
assert bif_branch_orders == [[0], [0]]
def test_section_term_branch_orders():
term_branch_orders = [neurite.section_term_branch_orders(s) for s in SIMPLE.neurites]
assert term_branch_orders == [[1, 1], [1, 1]]
def test_section_radial_distances():
radial_distances = [neurite.section_radial_distances(s) for s in SIMPLE.neurites]
assert_allclose(radial_distances,
[[5.0, sqrt(5**2 + 5**2), sqrt(6**2 + 5**2)],
[4.0, sqrt(6**2 + 4**2), sqrt(5**2 + 4**2)]])
def test_local_bifurcation_angles():
local_bif_angles = [neurite.local_bifurcation_angles(s) for s in SIMPLE.neurites]
assert_allclose(local_bif_angles, [[pi], [pi]])
def test_remote_bifurcation_angles():
remote_bif_angles = [neurite.remote_bifurcation_angles(s) for s in SIMPLE.neurites]
assert_allclose(remote_bif_angles, [[pi], [pi]])
def test_partition():
partition = [neurite.bifurcation_partitions(s) for s in SIMPLE.neurites]
assert_allclose(partition, [[1.0], [1.0]])
def test_partition_asymmetry():
partition = [neurite.partition_asymmetry(s) for s in SIMPLE.neurites]
assert_allclose(partition, [[0.0], [0.0]])
partition = [neurite.partition_asymmetry(s, variant='length') for s in SIMPLE.neurites]
assert_allclose(partition, [[0.0625], [0.06666666666666667]])
with pytest.raises(ValueError):
neurite.partition_asymmetry(SIMPLE, variant='invalid-variant')
with pytest.raises(ValueError):
neurite.partition_asymmetry(SIMPLE, method='invalid-method')
def test_segment_lengths():
segment_lengths = [neurite.segment_lengths(s) for s in SIMPLE.neurites]
assert_allclose(segment_lengths, [[5.0, 5.0, 6.0], [4.0, 6.0, 5.0]])
def test_segment_areas():
result = [neurite.segment_areas(s) for s in SIMPLE.neurites]
assert_allclose(result, [[31.415927, 16.019042, 19.109562], [25.132741, 19.109562, 16.019042]])
def test_segment_volumes():
expected = [[15.70796327, 5.23598776, 6.28318531], [12.56637061, 6.28318531, 5.23598776]]
result = [neurite.segment_volumes(s) for s in SIMPLE.neurites]
assert_allclose(result, expected)
def test_segment_midpoints():
midpoints = [neurite.segment_midpoints(s) for s in SIMPLE.neurites]
assert_allclose(midpoints,
[[[0., (5. + 0) / 2, 0.], # trunk type 2
[-2.5, 5., 0.],
[3., 5., 0.]],
[[0., (-4. + 0) / 2., 0.], # trunk type 3
[3., -4., 0.],
[-2.5, -4., 0.]]])
def test_segment_radial_distances():
"""midpoints on segments."""
radial_distances = [neurite.segment_radial_distances(s) for s in SIMPLE.neurites]
assert_allclose(radial_distances,
[[2.5, sqrt(2.5**2 + 5**2), sqrt(3**2 + 5**2)], [2.0, 5.0, sqrt(2.5**2 + 4**2)]])
def test_segment_path_lengths():
pathlengths = [neurite.segment_path_lengths(s) for s in SIMPLE.neurites]
assert_allclose(pathlengths, [[5., 10., 11.], [4., 10., 9.]])
pathlengths = neurite.segment_path_lengths(NRN.neurites[0])[:5]
assert_allclose(pathlengths, [0.1, 1.332525, 2.5301487, 3.267878, 4.471462])
def test_section_taper_rates():
assert_allclose(neurite.section_taper_rates(NRN.neurites[0])[:10],
[0.06776235492169848,
0.0588716599404923,
0.03791571485186163,
0.04674653812192691,
-0.026399800285566058,
-0.026547582897720887,
-0.045038414440432537,
0.02083822978267914,
-0.0027721371791201038,
0.0803069042861474],
atol=1e-4)
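def test_segment_lengths_total_sketch():
    # A consistency sketch (not in the original suite): the segment lengths
    # asserted above ([5, 5, 6] and [4, 6, 5]) must sum to each SIMPLE
    # neurite's total length of 16 and 15 respectively.
    totals = [sum(neurite.segment_lengths(s)) for s in SIMPLE.neurites]
    assert_allclose(totals, [16.0, 15.0])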
| b1a9afa70bfc77545507bbc488493403b56225c1 | c803d90920e31aa96a458ef217875ddd784cbd22 | /bindings/python/py_src/tokenizers/implementations/byte_level_bpe.py | c7e3dbc466259795ed9d168f57d8fcabe947e96e | ["Apache-2.0"] | permissive | huggingface/tokenizers | ae906154bf31d3aca9f8eee1a9a95e6e76e18a93 | 8e522a38d9721796f39d62fc67bfc8900a923de1 | refs/heads/main | 2023-09-04T06:26:20.217139 | 2023-08-29T11:15:26 | 2023-08-29T11:15:26 | 219,035,799 | 7,761 | 722 | Apache-2.0 | 2023-09-07T14:25:38 | 2019-11-01T17:52:20 | Rust | UTF-8 | Python | false | false | 4,289 | py | byte_level_bpe.py |
from typing import Dict, Iterator, List, Optional, Tuple, Union
from tokenizers import AddedToken, Tokenizer, decoders, pre_tokenizers, processors, trainers
from tokenizers.models import BPE
from tokenizers.normalizers import Lowercase, Sequence, unicode_normalizer_from_str
from .base_tokenizer import BaseTokenizer
class ByteLevelBPETokenizer(BaseTokenizer):
"""ByteLevelBPETokenizer
Represents a Byte-level BPE as introduced by OpenAI with their GPT-2 model
"""
def __init__(
self,
vocab: Optional[Union[str, Dict[str, int]]] = None,
merges: Optional[Union[str, Dict[Tuple[int, int], Tuple[int, int]]]] = None,
add_prefix_space: bool = False,
lowercase: bool = False,
dropout: Optional[float] = None,
unicode_normalizer: Optional[str] = None,
continuing_subword_prefix: Optional[str] = None,
end_of_word_suffix: Optional[str] = None,
trim_offsets: bool = False,
):
if vocab is not None and merges is not None:
tokenizer = Tokenizer(
BPE(
vocab,
merges,
dropout=dropout,
continuing_subword_prefix=continuing_subword_prefix or "",
end_of_word_suffix=end_of_word_suffix or "",
)
)
else:
tokenizer = Tokenizer(BPE())
# Check for Unicode normalization first (before everything else)
normalizers = []
if unicode_normalizer:
normalizers += [unicode_normalizer_from_str(unicode_normalizer)]
if lowercase:
normalizers += [Lowercase()]
# Create the normalizer structure
if len(normalizers) > 0:
if len(normalizers) > 1:
tokenizer.normalizer = Sequence(normalizers)
else:
tokenizer.normalizer = normalizers[0]
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=add_prefix_space)
tokenizer.decoder = decoders.ByteLevel()
tokenizer.post_processor = processors.ByteLevel(trim_offsets=trim_offsets)
parameters = {
"model": "ByteLevelBPE",
"add_prefix_space": add_prefix_space,
"lowercase": lowercase,
"dropout": dropout,
"unicode_normalizer": unicode_normalizer,
"continuing_subword_prefix": continuing_subword_prefix,
"end_of_word_suffix": end_of_word_suffix,
"trim_offsets": trim_offsets,
}
super().__init__(tokenizer, parameters)
@staticmethod
def from_file(vocab_filename: str, merges_filename: str, **kwargs):
vocab, merges = BPE.read_file(vocab_filename, merges_filename)
return ByteLevelBPETokenizer(vocab, merges, **kwargs)
def train(
self,
files: Union[str, List[str]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
):
"""Train the model using the given files"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
if isinstance(files, str):
files = [files]
self._tokenizer.train(files, trainer=trainer)
def train_from_iterator(
self,
iterator: Union[Iterator[str], Iterator[Iterator[str]]],
vocab_size: int = 30000,
min_frequency: int = 2,
show_progress: bool = True,
special_tokens: List[Union[str, AddedToken]] = [],
length: Optional[int] = None,
):
"""Train the model using the given iterator"""
trainer = trainers.BpeTrainer(
vocab_size=vocab_size,
min_frequency=min_frequency,
show_progress=show_progress,
special_tokens=special_tokens,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
)
self._tokenizer.train_from_iterator(
iterator,
trainer=trainer,
length=length,
)
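# Usage sketch (hypothetical corpus path; assumes the `tokenizers` wheel is
# installed):
#
#     tokenizer = ByteLevelBPETokenizer()
#     tokenizer.train(files=["data/corpus.txt"], vocab_size=5000,
#                     special_tokens=["<pad>", "<unk>"])
#     encoding = tokenizer.encode("Hello, world!")
#     print(encoding.tokens)  # byte-level subword pieces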
| 516fb0ef4f35ea0f37c0a758e89bd649e33c627e | 1634f33c5021e8465a695fb5244504e2eeeecff5 | /kitsune/products/tests/test_templates.py | 86eba4bffcc7b9fa6c8e150123d8b7753a109c8c | [] | permissive | mozilla/kitsune | fee4b8598eb01f5b4add00f2f010b45e2a6ca901 | 67ec527bfc32c715bf9f29d5e01362c4903aebd2 | refs/heads/main | 2023-09-01T21:41:59.076570 | 2023-08-31T22:34:05 | 2023-08-31T22:34:05 | 489,645 | 1,218 | 697 | BSD-3-Clause | 2023-09-14T08:43:19 | 2010-01-26T18:53:57 | Python | UTF-8 | Python | false | false | 6,908 | py | test_templates.py |
from django.conf import settings
from django.core.cache import cache
from pyquery import PyQuery as pq
from kitsune.products.models import HOT_TOPIC_SLUG
from kitsune.products.tests import ProductFactory, TopicFactory
from kitsune.questions.models import QuestionLocale
from kitsune.search.tests import Elastic7TestCase
from kitsune.sumo.urlresolvers import reverse
from kitsune.wiki.tests import ApprovedRevisionFactory, DocumentFactory, HelpfulVoteFactory
class ProductViewsTestCase(Elastic7TestCase):
search_tests = True
def test_products(self):
"""Verify that /products page renders products."""
# Create some products.
locale, _ = QuestionLocale.objects.get_or_create(locale=settings.LANGUAGE_CODE)
for i in range(3):
p = ProductFactory(visible=True)
p.questions_locales.add(locale)
# GET the products page and verify the content.
r = self.client.get(reverse("products"), follow=True)
self.assertEqual(200, r.status_code)
doc = pq(r.content)
self.assertEqual(3, len(doc(".card--product")))
def test_product_landing(self):
"""Verify that /products/<slug> page renders topics."""
# Create a product.
p = ProductFactory()
locale, _ = QuestionLocale.objects.get_or_create(locale=settings.LANGUAGE_CODE)
p.questions_locales.add(locale)
# Create some topics.
TopicFactory(slug=HOT_TOPIC_SLUG, product=p, visible=True)
topics = TopicFactory.create_batch(11, product=p, visible=True)
# Create a document and assign the product and 10 topics.
d = DocumentFactory(products=[p], topics=topics[:10])
ApprovedRevisionFactory(document=d)
# GET the product landing page and verify the content.
url = reverse("products.product", args=[p.slug])
r = self.client.get(url, follow=True)
self.assertEqual(200, r.status_code)
doc = pq(r.content)
self.assertEqual(10, len(doc("div.card--topic")))
self.assertEqual(p.slug, doc("#support-search input[name=product]").attr["value"])
def test_firefox_product_landing(self):
"""Verify that there is no firefox button visible at header in the firefox landing page"""
p = ProductFactory(slug="firefox")
url = reverse("products.product", args=[p.slug])
r = self.client.get(url, follow=True)
self.assertEqual(200, r.status_code)
doc = pq(r.content)
self.assertEqual(1, doc(".firefox-download-button.hidden").length)
def test_document_listing(self):
"""Verify /products/<product slug>/<topic slug> renders articles."""
# Create a topic and product.
p = ProductFactory()
t1 = TopicFactory(product=p)
# Create 3 documents with the topic and product and one without.
ApprovedRevisionFactory.create_batch(3, document__products=[p], document__topics=[t1])
ApprovedRevisionFactory()
# GET the page and verify the content.
url = reverse("products.documents", args=[p.slug, t1.slug])
r = self.client.get(url, follow=True)
self.assertEqual(200, r.status_code)
doc = pq(r.content)
self.assertEqual(3, len(doc("#document-list > section > article")))
self.assertEqual(p.slug, doc("#support-search input[name=product]").attr["value"])
def test_document_listing_order(self):
"""Verify documents are sorted by display_order and number of helpful votes."""
# Create topic, product and documents.
p = ProductFactory()
t = TopicFactory(product=p)
docs = []
        # FIXME: Can't we do this with create_batch and build the document
        # in the ApprovedRevisionFactory?
for i in range(3):
doc = DocumentFactory(products=[p], topics=[t])
ApprovedRevisionFactory(document=doc)
docs.append(doc)
# Add a lower display order to the second document. It should be first now.
docs[1].display_order = 0
docs[1].save()
url = reverse("products.documents", args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
self.assertEqual(200, r.status_code)
doc = pq(r.content)
self.assertEqual(
doc("#document-list > section > article:first-child > div > h2 > a").text(),
docs[1].title,
)
# Add a helpful vote to the third document. It should be second now.
rev = docs[2].current_revision
HelpfulVoteFactory(revision=rev, helpful=True)
docs[2].save() # Votes don't trigger a reindex.
cache.clear() # documents_for() is cached
url = reverse("products.documents", args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
self.assertEqual(200, r.status_code)
doc = pq(r.content)
self.assertEqual(
doc("#document-list > section > article:nth-child(2) > div > h2> a").text(),
docs[2].title,
)
# Add 2 helpful votes the first document. It should be second now.
rev = docs[0].current_revision
HelpfulVoteFactory(revision=rev, helpful=True)
HelpfulVoteFactory(revision=rev, helpful=True)
docs[0].save() # Votes don't trigger a reindex.
cache.clear() # documents_for() is cached
r = self.client.get(url, follow=True)
self.assertEqual(200, r.status_code)
doc = pq(r.content)
self.assertEqual(
doc("#document-list > section > article:nth-child(2) > div > h2").text(), docs[0].title
)
def test_subtopics(self):
"""Verifies subtopics appear on document listing page."""
# Create a topic and product.
p = ProductFactory()
t = TopicFactory(product=p, visible=True)
# Create a documents with the topic and product
doc = DocumentFactory(products=[p], topics=[t])
ApprovedRevisionFactory(document=doc)
# GET the page and verify no subtopics yet.
url = reverse("products.documents", args=[p.slug, t.slug])
r = self.client.get(url, follow=True)
self.assertEqual(200, r.status_code)
pqdoc = pq(r.content)
self.assertEqual(1, len(pqdoc(".subtopics>li")))
# Create a subtopic, it still shouldn't show up because no
# articles are assigned.
subtopic = TopicFactory(parent=t, product=p, visible=True)
r = self.client.get(url, follow=True)
self.assertEqual(200, r.status_code)
pqdoc = pq(r.content)
self.assertEqual(1, len(pqdoc(".subtopics>li")))
# Add a document to the subtopic, now it should appear.
doc.topics.add(subtopic)
r = self.client.get(url, follow=True)
self.assertEqual(200, r.status_code)
pqdoc = pq(r.content)
self.assertEqual(2, len(pqdoc(".subtopics>li")))
| e83cf13803cbef6d5421ddf9e56f1f28fa1e4c73 | b50df8a902f4e2c1ecd8667b7b97937da3371caf | /Parte002/ex1030_hora_actual_actualizada_por_segundo.py | 7bbfff7c36f5b217fe1c2b4f5793194ccf091efd | [] | no_license | Fhernd/PythonEjercicios | 5a5633855979baec89a3c257eb57aac076a7465f | 204d3d59ddeed6cbf263b23f14e950c20f81f608 | refs/heads/master | 2021-11-23T00:26:28.861302 | 2021-10-14T16:50:27 | 2021-10-14T16:50:27 | 230,629,743 | 124 | 84 | null | 2021-09-15T18:45:25 | 2019-12-28T15:45:28 | Python | UTF-8 | Python | false | false | 205 | py | ex1030_hora_actual_actualizada_por_segundo.py |
# Exercise 1030: Use the time module to display the current time, updated every second.
import time
while True:
hora_actual = time.strftime('%H:%M:%S')
print(hora_actual)
time.sleep(1)
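# Variant (a sketch): overwrite a single line instead of printing a new one
# each second, by replacing the print call above with:
#     print(hora_actual, end='\r', flush=True)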
| 23c48116934763785d3daa1374d1b0e67deca3db | 5111b0c881c8d86705f2b237e14024396e34091a | /accounting_pdf_reports/__init__.py | ceea55ab52ece56c3b000518b2009c6c0efe8e04 | [] | no_license | odoomates/odooapps | a22fa15346694563733008c42549ebc0da7fc9f6 | 68061b6fa79818d17727ef620e28fff44b48df72 | refs/heads/16.0 | 2023-08-11T15:25:28.508718 | 2023-08-10T17:58:45 | 2023-08-10T17:58:45 | 173,598,986 | 182 | 306 | null | 2023-08-10T17:58:46 | 2019-03-03T16:20:23 | Python | UTF-8 | Python | false | false | 219 | py | __init__.py |
# -*- coding: utf-8 -*-
from . import wizard
from . import models
from . import report
def _pre_init_clean_m2m_models(cr):
cr.execute("""DROP TABLE IF EXISTS account_journal_account_report_partner_ledger_rel""")
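# Note (a sketch, not part of this module): Odoo only calls this hook before
# install/upgrade if the module's __manifest__.py declares it, e.g.:
#     'pre_init_hook': '_pre_init_clean_m2m_models',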
| 8532e46cc7f81fc59739f2cf405548f1891b36f7 | 6c3d132dc176c6bec03482318753e8c2dca6e5f9 | /test/test_remove_object.py | d4b454186c444dcde026e2f78430443fc24e372b | ["MIT"] | permissive | dflook/python-minifier | c396952f2c5ad0a2743b4d7610375f27a506a4c8 | 22513c33b3638e42802e2a2062ec800ff7aed3f6 | refs/heads/main | 2023-06-07T07:28:38.829374 | 2023-05-09T19:54:28 | 2023-05-09T19:54:28 | 137,584,140 | 482 | 42 | MIT | 2023-09-02T17:19:49 | 2018-06-16T13:53:14 | Python | UTF-8 | Python | false | false | 1,482 | py | test_remove_object.py |
import ast
import pytest
import sys
from python_minifier.transforms.remove_object_base import RemoveObject
from python_minifier.ast_compare import compare_ast
def test_remove_object_py3():
if sys.version_info < (3, 0):
pytest.skip('This test is python3 only')
source = '''
class Test(object):
pass
'''
expected = '''
class Test:
pass
'''
expected_ast = ast.parse(expected)
actual_ast = RemoveObject()(ast.parse(source))
compare_ast(expected_ast, actual_ast)
source = '''
class Test(another_base, object, third_base):
pass
'''
expected = '''
class Test(another_base, third_base):
pass
'''
expected_ast = ast.parse(expected)
actual_ast = RemoveObject()(ast.parse(source))
compare_ast(expected_ast, actual_ast)
source = '''
class Test(other_base):
pass
'''
expected = source
expected_ast = ast.parse(expected)
actual_ast = RemoveObject()(ast.parse(source))
compare_ast(expected_ast, actual_ast)
def test_no_remove_object_py2():
if sys.version_info >= (3, 0):
pytest.skip('This test is python2 only')
source = '''
class Test(object):
pass
'''
expected = '''
class Test(object):
pass
'''
expected_ast = ast.parse(expected)
actual_ast = RemoveObject()(ast.parse(source))
compare_ast(expected_ast, actual_ast)
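# For intuition, a minimal sketch of the kind of transform being tested --
# not python_minifier's actual implementation -- dropping a bare `object`
# base from class definitions with a standard ast.NodeTransformer:
import ast

class StripObjectBase(ast.NodeTransformer):
    def visit_ClassDef(self, node):
        self.generic_visit(node)
        # Keep every base except a plain `object` name (Python 3 semantics).
        node.bases = [
            b for b in node.bases
            if not (isinstance(b, ast.Name) and b.id == 'object')
        ]
        return node

tree = StripObjectBase().visit(ast.parse('class Test(object):\n    pass\n'))
assert isinstance(tree.body[0], ast.ClassDef) and tree.body[0].bases == []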
|
fa1f15cba63bc385ff8f93828d963119ae92a774
|
b39b31269db70d72d57959feaa0b063eb2b37e12
|
/test/framework/functional/base_functional_test_case.py
|
85bd6a187eee061ae7c31aba28daba9ce4edc121
|
[
"Apache-2.0"
] |
permissive
|
box/ClusterRunner
|
477a1d1fa02d5cb5819ccdea891ea73ced347ffd
|
55d18016f2c7d2dbb8aec5879459cae654edb045
|
refs/heads/master
| 2023-08-16T21:23:11.232430
| 2022-02-08T16:28:01
| 2022-02-09T06:40:51
| 25,371,175
| 168
| 45
|
Apache-2.0
| 2022-10-15T03:04:49
| 2014-10-17T18:20:39
|
Python
|
UTF-8
|
Python
| false
| false
| 9,844
|
py
|
base_functional_test_case.py
|
from contextlib import suppress
import http.client
import os
import tempfile
from unittest import TestCase
from app.common.build_artifact import BuildArtifact
from app.master.slave import SlaveRegistry
from app.util import fs, log
from app.util.conf.base_config_loader import BaseConfigLoader
from app.util.conf.configuration import Configuration
from app.util.process_utils import is_windows
from app.util.network import Network
from app.util.secret import Secret
from app.util.url_builder import UrlBuilder
from test.framework.functional.fs_item import Directory
from test.framework.functional.functional_test_cluster import FunctionalTestCluster, TestClusterTimeoutError
class BaseFunctionalTestCase(TestCase):
"""
This is the base class for all functional tests. This class has two main purposes:
- Make available a `FunctionalTestCluster` object for use in functional tests (self.cluster)
- Implement any helper assertion methods that might be useful for making our tests easier to read and write
"""
def setUp(self):
# Configure logging to go to stdout. This makes debugging easier by allowing us to see logs for failed tests.
log.configure_logging('DEBUG')
self._reset_config()
Secret.set('testsecret')
SlaveRegistry.reset_singleton()
self.cluster = FunctionalTestCluster(verbose=self._get_test_verbosity())
self._network = Network()
def _reset_config(self):
Configuration.reset_singleton()
config = Configuration.singleton()
conf_loader = BaseConfigLoader()
conf_loader.configure_defaults(config)
conf_loader.configure_postload(config)
def tearDown(self):
# Give the cluster a bit of extra time to finish working (before forcefully killing it and failing the test)
with suppress(TestClusterTimeoutError):
self.cluster.block_until_build_queue_empty(timeout=5)
# Kill processes and make sure all processes exited with 0 exit code
services = self.cluster.kill()
# only check the exit code if not on Windows as Popen.terminate kills the process on Windows and the exit
# code is not zero.
# TODO: remove the is_windows() check after we can handle exit on Windows gracefully.
if not is_windows():
for service in services:
self.assertEqual(
service.return_code,
0,
'Service running on url: {} should exit with code 0, but exited with code {}.'.format(
service.url,
service.return_code,
),
)
# Remove the temp dir. This will delete the log files, so should be run after cluster shuts down.
self.cluster.master_app_base_dir.cleanup()
[slave_app_base_dir.cleanup() for slave_app_base_dir in self.cluster.slaves_app_base_dirs]
def _get_test_verbosity(self):
"""
Get test verbosity from an env variable. We need to use an env var since Nose does not support specifying
command-line test configuration natively. (But if we need more of these configuration parameters, we should
look at the 'nose-testconfig' plugin instead of adding tons of environment variables.)
:return: Whether or not tests should be run verbosely
:rtype: bool
"""
is_verbose = os.getenv('CR_VERBOSE') not in ('0', '', None) # default value of is_verbose is False
return is_verbose
def assert_build_status_contains_expected_data(self, build_id, expected_data):
"""
Assert that the build status endpoint contains the expected fields and values. This assertion does an API
request to the master service of self.cluster.
:param build_id: The id of the build whose status to check
:type build_id: int
:param expected_data: A dict of expected keys and values in the build status response
:type expected_data: dict
"""
build_status = self.cluster.master_api_client.get_build_status(build_id).get('build')
self.assertIsInstance(build_status, dict, 'Build status API request should return a dict.')
self.assertDictContainsSubset(expected_data, build_status,
'Build status API response should contain the expected status data.')
def assert_build_has_successful_status(self, build_id):
"""
Assert that the build status endpoint contains fields signifying the build was successful (had no failures).
This assertion does an API request to the master service of self.cluster.
:param build_id: The id of the build whose status to check
:type build_id: int
"""
expected_successful_build_params = {
'result': 'NO_FAILURES',
'status': 'FINISHED',
}
self.assert_build_status_contains_expected_data(build_id, expected_successful_build_params)
def assert_build_has_failure_status(self, build_id):
"""
Assert that the build status endpoint contains fields signifying the build failed. This assertion does an
API request to the master service of self.cluster.
:param build_id: The id of the build whose status to check
:type build_id: int
"""
expected_failure_build_params = {
'result': 'FAILURE',
'status': 'FINISHED',
}
self.assert_build_status_contains_expected_data(build_id, expected_failure_build_params)
def assert_build_has_canceled_status(self, build_id):
"""
Assert that the build status endpoint contains fields signifying the build was canceled. This assertion does an
API request to the master service of self.cluster.
:param build_id: The id of the build whose status to check
:type build_id: int
"""
expected_canceled_build_params = {
'result': 'FAILURE',
'status': 'CANCELED',
}
self.assert_build_status_contains_expected_data(build_id, expected_canceled_build_params)
def assert_build_artifact_contents_match_expected(self, master_api, build_id, expected_build_artifact_contents):
"""
Assert that artifact files for this build have the expected contents.
:type master_api: app.util.url_builder.UrlBuilder
:param build_id: The id of the build whose artifacts to check
:type build_id: int
:param expected_build_artifact_contents: A list of FSItems corresponding to the expected artifact dir contents
:type expected_build_artifact_contents: list[FSItem]
"""
with tempfile.TemporaryDirectory() as build_artifacts_dir_path:
self._download_and_extract_zip_results(master_api, build_id, build_artifacts_dir_path)
self.assert_directory_contents_match_expected(build_artifacts_dir_path, expected_build_artifact_contents)
# Also check the tar archive even though it is deprecated.
with tempfile.TemporaryDirectory() as build_artifacts_dir_path:
self._download_and_extract_tar_results(master_api, build_id, build_artifacts_dir_path)
self.assert_directory_contents_match_expected(build_artifacts_dir_path, expected_build_artifact_contents)
def assert_directory_contents_match_expected(self, dir_path, expected_dir_contents):
"""
Assert that the specified directory has the expected contents.
:param dir_path: The path of the directory whose artifacts to check
:type dir_path: string
:param expected_dir_contents: A list of FSItems corresponding to the expected directory contents
:type expected_dir_contents: list[FSItem]
"""
if expected_dir_contents is not None:
dir_path = os.path.abspath(dir_path) # converts path to absolute, removes trailing slash if present
expected_dir_name = os.path.basename(dir_path)
expected_build_artifacts = Directory(expected_dir_name, expected_dir_contents)
expected_build_artifacts.assert_matches_path(dir_path, allow_extra_items=False)
def _download_and_extract_tar_results(self, master_api, build_id, download_dir):
"""
:type master_api: app.util.url_builder.UrlBuilder
:type build_id: int
:type download_dir: str
"""
download_artifacts_url = master_api.url('build', build_id, 'result')
download_filepath = os.path.join(download_dir, BuildArtifact.ARTIFACT_TARFILE_NAME)
response = self._network.get(download_artifacts_url)
if response.status_code == http.client.OK:
# save tar file to disk, decompress, and delete
with open(download_filepath, 'wb') as file:
chunk_size = 500 * 1024
for chunk in response.iter_content(chunk_size):
file.write(chunk)
fs.extract_tar(download_filepath, delete=True)
def _download_and_extract_zip_results(self, master_api: UrlBuilder, build_id: int, download_dir: str):
"""Download the artifacts.zip from the master and extract it."""
download_artifacts_url = master_api.url('build', build_id, 'artifacts.zip')
download_filepath = os.path.join(download_dir, BuildArtifact.ARTIFACT_ZIPFILE_NAME)
response = self._network.get(download_artifacts_url)
if response.status_code == http.client.OK:
# save file to disk, decompress, and delete
with open(download_filepath, 'wb') as file:
chunk_size = 500 * 1024
for chunk in response.iter_content(chunk_size):
file.write(chunk)
fs.unzip_directory(download_filepath, delete=True)
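# Both download helpers above share one streaming pattern: request the archive,
# then write it to disk in 500 KiB chunks before extracting. A minimal
# standalone sketch of that pattern using requests directly (an assumption --
# the real code goes through app.util.network.Network):
import requests

def save_streamed(url, path, chunk_size=500 * 1024):
    """Stream a (possibly large) archive to disk in fixed-size chunks."""
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(path, 'wb') as file:
        for chunk in response.iter_content(chunk_size):
            file.write(chunk)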
|
d4358ffbc42101443b0645641608822050abe368
|
73a0f661f1423d63e86489d4b2673f0103698aab
|
/python/oneflow/test/modules/test_higher_derivative_conv.py
|
05c5f5592749452b9544283050a1ef7576247881
|
[
"Apache-2.0"
] |
permissive
|
Oneflow-Inc/oneflow
|
4fc3e081e45db0242a465c4330d8bcc8b21ee924
|
0aab78ea24d4b1c784c30c57d33ec69fe5605e4a
|
refs/heads/master
| 2023-08-25T16:58:30.576596
| 2023-08-22T14:15:46
| 2023-08-22T14:15:46
| 81,634,683
| 5,495
| 786
|
Apache-2.0
| 2023-09-14T09:44:31
| 2017-02-11T06:09:53
|
C++
|
UTF-8
|
Python
| false
| false
| 4,759
|
py
|
test_higher_derivative_conv.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
import torch as pytorch_origin
import oneflow as oneflow_origin
def _test_convnd_grad_grad_impl(test_case, ndim, rtol=1e-4, atol=1e-5):
minibatch = np.random.randint(1, 5)
groups = np.random.randint(1, 5)
in_channels = np.random.randint(1, 5) * groups
out_channels = in_channels * np.random.randint(1, 5)
padding = np.random.randint(1, 3)
stride = np.random.randint(1, 3)
dilation = np.random.randint(1, 3)
x_shape = [minibatch, in_channels] + [np.random.randint(8, 12) for i in range(ndim)]
w_shape = [out_channels, in_channels // groups] + [
np.random.randint(2, 5) for i in range(ndim)
]
x = random_tensor(len(x_shape), *x_shape)
w = random_tensor(len(w_shape), *w_shape)
init_grad_x = random_tensor(len(x_shape), *x_shape)
init_grad_w = random_tensor(len(w_shape), *w_shape)
y = eval(f"torch.nn.functional.conv{ndim}d")(
x, w, stride=stride, padding=padding, groups=groups, dilation=dilation
)
init_grad_y = random_tensor(len(y.oneflow.shape), *y.oneflow.shape)
dx = torch.autograd.grad(
outputs=y,
inputs=x,
grad_outputs=init_grad_y,
create_graph=True,
retain_graph=True,
)[0]
test_case.assertTrue(
np.allclose(
dx.pytorch.detach().cpu().numpy(),
dx.oneflow.detach().numpy(),
rtol=rtol,
atol=atol,
)
)
dw = torch.autograd.grad(
outputs=y,
inputs=w,
grad_outputs=init_grad_y,
create_graph=True,
retain_graph=True,
)[0]
test_case.assertTrue(
np.allclose(
dw.pytorch.detach().cpu().numpy(),
dw.oneflow.detach().numpy(),
rtol=rtol,
atol=atol,
)
)
# torch.autograd.grad in autotest does not support inputs/outputs/grad_outputs as a list,
# so use the original pytorch/oneflow modules
ddx_pytorch, ddw_pytorch = pytorch_origin.autograd.grad(
outputs=[dx.pytorch, dw.pytorch],
inputs=[x.pytorch, w.pytorch],
grad_outputs=[init_grad_x.pytorch, init_grad_w.pytorch],
create_graph=True,
retain_graph=True,
)
ddx_oneflow, ddw_oneflow = oneflow_origin.autograd.grad(
outputs=[dx.oneflow, dw.oneflow],
inputs=[x.oneflow, w.oneflow],
grad_outputs=[init_grad_x.oneflow, init_grad_w.oneflow],
create_graph=True,
retain_graph=True,
)
test_case.assertTrue(
np.allclose(
ddw_pytorch.detach().cpu().numpy(),
ddw_oneflow.detach().numpy(),
rtol=rtol,
atol=atol,
)
)
test_case.assertTrue(
np.allclose(
ddx_pytorch.detach().cpu().numpy(),
ddx_oneflow.detach().numpy(),
rtol=rtol,
atol=atol,
)
)
dgrad_dx = torch.autograd.grad(
outputs=dx,
inputs=init_grad_y,
grad_outputs=init_grad_x,
create_graph=True,
retain_graph=True,
)[0]
test_case.assertTrue(
np.allclose(
dgrad_dx.pytorch.detach().cpu().numpy(),
dgrad_dx.oneflow.detach().numpy(),
rtol=rtol,
atol=atol,
)
)
dgrad_dw = torch.autograd.grad(
outputs=dw,
inputs=init_grad_y,
grad_outputs=init_grad_w,
create_graph=True,
retain_graph=True,
)[0]
test_case.assertTrue(
np.allclose(
dgrad_dw.pytorch.detach().cpu().numpy(),
dgrad_dw.oneflow.detach().numpy(),
rtol=rtol,
atol=atol,
)
)
class TestConvHigherDerivative(flow.unittest.TestCase):
def test_conv1d_grad_grad(test_case):
_test_convnd_grad_grad_impl(test_case, 1)
def test_conv2d_grad_grad(test_case):
_test_convnd_grad_grad_impl(test_case, 2)
def test_conv3d_grad_grad(test_case):
_test_convnd_grad_grad_impl(test_case, 3, atol=1e-3)
if __name__ == "__main__":
unittest.main()
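# The create_graph=True / retain_graph=True pattern used throughout the test
# above is what makes a second autograd.grad call possible: the first gradient
# itself remains part of the graph. A minimal scalar illustration with plain
# PyTorch (the values are chosen just for this example):
import torch

x = torch.tensor(2.0, requires_grad=True)
y = x ** 3
(dy,) = torch.autograd.grad(y, x, create_graph=True)  # dy/dx = 3*x**2 = 12
(d2y,) = torch.autograd.grad(dy, x)                    # d2y/dx2 = 6*x = 12
assert dy.item() == 12.0 and d2y.item() == 12.0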
|
38c5536f1157a86e87b82ed7a89a3c8c6136564f
|
0fd8922e6b9c6ed20a9c89fb2887056aad16b5e6
|
/examples/bidirect.py
|
e2bbdb0a501dcbfb33a5927879a83e735f1be3ce
|
[
"MIT"
] |
permissive
|
philipperemy/cond_rnn
|
52b1fbe4bdf44927d73b7de31b68f5b3ff69b778
|
fd6b2c33f0d961b6bab78255a950deb4e51b87b9
|
refs/heads/master
| 2023-08-16T14:14:34.440172
| 2023-08-08T04:49:03
| 2023-08-08T04:49:03
| 191,509,198
| 219
| 37
|
MIT
| 2023-08-07T08:05:53
| 2019-06-12T06:15:04
|
Python
|
UTF-8
|
Python
| false
| false
| 790
|
py
|
bidirect.py
|
import numpy as np
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Bidirectional, GRU
from cond_rnn import ConditionalRecurrent
model = Sequential()
forward_layer = ConditionalRecurrent(GRU(units=12, return_sequences=True))
backward_layer = ConditionalRecurrent(GRU(units=13, return_sequences=True, go_backwards=True))
# concat mode.
model.add(Bidirectional(
layer=forward_layer,
backward_layer=backward_layer
))
model.compile(loss='categorical_crossentropy')
NUM_SAMPLES = 100
TIME_STEPS = 10
INPUT_DIM = 3
NUM_CLASSES = 2
train_inputs = np.random.uniform(size=(NUM_SAMPLES, TIME_STEPS, INPUT_DIM))
train_targets = np.zeros(shape=[NUM_SAMPLES, NUM_CLASSES])
assert model.predict(x=[train_inputs, train_targets]).shape == (NUM_SAMPLES, 10, 12 + 13)
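# The trailing 12 + 13 in the assertion reflects Bidirectional's default
# merge_mode='concat': forward features (12 units) and backward features
# (13 units) are concatenated on the last axis. A sketch of the same shape
# check with plain, unconditioned GRUs (the layer sizes are just reused from
# the example above):
plain = Sequential()
plain.add(Bidirectional(
    layer=GRU(units=12, return_sequences=True),
    backward_layer=GRU(units=13, return_sequences=True, go_backwards=True)
))
assert plain.predict(train_inputs).shape == (NUM_SAMPLES, TIME_STEPS, 12 + 13)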
|
e67917fe2f1dc9f3fa8c8dc0e4dc44d594c284a1
|
c6759b857e55991fea3ef0b465dbcee53fa38714
|
/tools/nntool/nntool/generation/generators/general/dsp_generators.py
|
566dffb5c41bdd112f50dedeb1d6eebc9ceaabf5
|
[
"Apache-2.0",
"AGPL-3.0-or-later",
"AGPL-3.0-only",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft"
] |
permissive
|
GreenWaves-Technologies/gap_sdk
|
1b343bba97b7a5ce62a24162bd72eef5cc67e269
|
3fea306d52ee33f923f2423c5a75d9eb1c07e904
|
refs/heads/master
| 2023-09-01T14:38:34.270427
| 2023-08-10T09:04:44
| 2023-08-10T09:04:44
| 133,324,605
| 145
| 96
|
Apache-2.0
| 2023-08-27T19:03:52
| 2018-05-14T07:50:29
|
C
|
UTF-8
|
Python
| false
| false
| 7,634
|
py
|
dsp_generators.py
|
# Copyright (C) 2020 GreenWaves Technologies, SAS
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import numpy as np
from nntool.graph.types.dsp_preprocessing import MFCCPreprocessingNode
import logging
from nntool.generation.at_types.gen_ctrl import GenCtrl
from nntool.generation.bindings import (CommentBindingList, GNodeArgEdge,
GNodeArgNode, Imm, NodeBindingList)
from nntool.generation.generators.general.autotiler_kernel import NewAutoTilerKernel
from nntool.generation.generators.generator_base import GeneratorBase, paramstype
from nntool.generation.generators.helpers.in_out_bindings_mixin import \
InOutBindingsMixin
from nntool.graph.types import RFFT2DPreprocessingNode
from bfloat16 import bfloat16
LOG = logging.getLogger(__name__)
DSP_DTYPE = {
np.int16: "FIX16",
np.int32: "FIX16",
np.uint32: "FIX16",
bfloat16: "FLOAT16",
np.float16: "FLOAT16",
np.float32: "FLOAT32",
}
@paramstype(RFFT2DPreprocessingNode)
class GenRfftParameters(GeneratorBase):
@classmethod
def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
return True
@classmethod
def bindings_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
gen.bindings.append(
CommentBindingList(
f"Node {cname} input_q {qrec.in_qs[0].q} window_q {qrec.in_qs[1].q} fft_twiddles_q {qrec.in_qs[2].q} rfft_twiddles {qrec.in_qs[4].q} -> out_q {qrec.out_qs[0]}")
)
binding_list = [
GNodeArgEdge(in_eparams[0]),
GNodeArgEdge(out_eparams[0], "GNA_OUT"),
GNodeArgEdge(in_eparams[2]),
GNodeArgEdge(in_eparams[4]),
GNodeArgEdge(in_eparams[3])
]
if node.win_fn is not None:
binding_list.append(GNodeArgEdge(in_eparams[1]))
gen.bindings.append(NodeBindingList(cname, *binding_list))
return True
@classmethod
def kernel_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
del in_eparams, out_eparams
gen.kernels.append(RFFTKernel(cname, node, qrec))
return True
class RFFTKernel(NewAutoTilerKernel):
CALL_TEMPLATE = '''
// generator for {node_name}: N_FRAMES={n_frames}, FRAME_SIZE={frame_size}, FRAME_STEP={frame_stride}, N_FFT={n_fft}, PREEMP_FACTOR={preemp_factor}, SKIP_PREEMP={skip_preemp}, NO_WIN={no_window}, OUT_FFT={out_fft}, SPECT_MAGSQUARED={magsquared}, DTYPE={data_type}
RFFT_2D_Generator("{cname}", {gen_ctrl}, {n_frames}, {frame_size}, {frame_stride}, {n_fft}, {preemp_factor}, {skip_preemp}, {no_window}, {out_fft}, {magsquared}, {data_type});
'''
def __init__(self, cname, params, qrec, gen_ctrl=None):
if gen_ctrl is None:
gen_ctrl = GenCtrl(None, cname=cname)
else:
gen_ctrl.cname = cname
if qrec.out_qs[0].is_floating:
gen_ctrl.float_dump = 1
attrs = {
'n_frames': params.n_frames,
'frame_size': params.frame_size,
'frame_stride': params.frame_step,
'n_fft': params.n_fft,
'preemp_factor': params.preemp_factor,
'skip_preemp': 0,
'no_window': int(params.win_fn is None),
'out_fft': 0,
'magsquared': int(params.magsquared),
'data_type': DSP_DTYPE[qrec.out_qs[0].dtype],
}
# other attributes
extra_attrs = {
'cname': cname,
'node_name': params.name
}
super().__init__(attrs, extra_attrs, gen_ctrl=gen_ctrl)
@paramstype(MFCCPreprocessingNode)
class GenMfccParameters(GeneratorBase):
@classmethod
def globals_generator(cls, gen, node, qrec, pnode, fnode) -> bool:
return True
@classmethod
def bindings_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
gen.bindings.append(
CommentBindingList(
f"Node {cname} input_q {qrec.in_qs[0].q} window_q {qrec.in_qs[1].q} fft_twiddles_q {qrec.in_qs[2].q} rfft_twiddles {qrec.in_qs[4].q} -> out_q {qrec.out_qs[0]}")
)
binding_list = [
GNodeArgEdge(in_eparams[0]),
GNodeArgEdge(out_eparams[0], "GNA_OUT"),
GNodeArgEdge(in_eparams[2]),
GNodeArgEdge(in_eparams[4]),
GNodeArgEdge(in_eparams[3]),
GNodeArgEdge(in_eparams[5]),
GNodeArgEdge(in_eparams[6])
]
if node.win_fn is not None:
binding_list.insert(5, GNodeArgEdge(in_eparams[1]))
if qrec.out_qs[0].dtype not in [np.float16, np.float32, bfloat16]:
binding_list.append(Imm(node.quant_norm))
if node.n_dct:
binding_list.append(GNodeArgEdge(in_eparams[7]))
gen.bindings.append(NodeBindingList(cname, *binding_list))
return True
@classmethod
def kernel_generator(cls, gen, node, qrec, in_eparams, out_eparams, cname) -> bool:
del in_eparams, out_eparams
gen.kernels.append(MFCCKernel(cname, node, qrec))
return True
class MFCCKernel(NewAutoTilerKernel):
CALL_TEMPLATE = '''
// generator for {node_name}: N_FRAMES={n_frames}, FRAME_SIZE={frame_size}, FRAME_STEP={frame_stride}, N_FFT={n_fft}, N_MELBANKS={n_melbanks}, SIZE_MEL_COEFF={size_mel_coeff}, N_DCT={n_dct}, PREEMP_FACTOR={preemp_factor}, NO_WIN={no_window}, LIFTER_COEFF={lift_coeff}, SPECT_MAGSQUARED={magsquared}, DTYPE={data_type}, LOG_TYPE={log_type}, OUT_FFT={out_fft}
MFCC_Generator("{cname}", {gen_ctrl}, {n_frames}, {frame_size}, {frame_stride}, {n_fft}, {n_melbanks}, {size_mel_coeff}, {n_dct}, {preemp_factor}, {no_window}, {lift_coeff}, {magsquared}, {data_type}, {log_type}, {out_fft});
'''
def __init__(self, cname, params, qrec, gen_ctrl=None):
if gen_ctrl is None:
gen_ctrl = GenCtrl(None, cname=cname)
else:
gen_ctrl.cname = cname
if qrec.out_qs[0].is_floating:
gen_ctrl.float_dump = 1
if params.log_type is not None and params.log_offset:
gen_ctrl.mfcc_log_offset = int(np.round(params.log_offset * 2**(30)))
attrs = {
'n_frames': params.n_frames,
'frame_size': params.frame_size,
'frame_stride': params.frame_step,
'n_fft': params.n_fft,
'n_melbanks': params.n_fbanks,
'size_mel_coeff': params.get_melfilter_size()[0],
'n_dct': params.n_dct,
'preemp_factor': params.preemp_factor,
'no_window': int(params.win_fn is None),
'lift_coeff': 0,
'magsquared': int(params.magsquared),
'data_type': DSP_DTYPE[qrec.out_qs[0].dtype],
'log_type': 0 if not params.log_type else (2 if params.log_type == "db" else 1),
'out_fft': 0,
}
# other attributes
extra_attrs = {
'cname': cname,
'node_name': params.name
}
super().__init__(attrs, extra_attrs, gen_ctrl=gen_ctrl)
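# The CALL_TEMPLATE strings above are rendered by the NewAutoTilerKernel base
# class from the attrs/extra_attrs dictionaries. A minimal sketch of that
# substitution pattern (hypothetical values; not the actual base-class code):
template = 'RFFT_2D_Generator("{cname}", {gen_ctrl}, {n_frames}, {frame_size});'
attrs = {'cname': 'S1_RFFT', 'gen_ctrl': '0', 'n_frames': 49, 'frame_size': 400}
print(template.format(**attrs))
# -> RFFT_2D_Generator("S1_RFFT", 0, 49, 400);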
|
1fb111a14ed6df0c561f10e8027972d7e1ade7a0
|
df87814cb32990ad8c27d0b13a821aabce012819
|
/kolibri/plugins/setup_wizard/test/test_api.py
|
21ff4d6135b6324740d511c9a09bf0734f292183
|
[
"MIT"
] |
permissive
|
learningequality/kolibri
|
26812d4ae771f3b389d3317a586bc032fc84866b
|
cc9da2a6acd139acac3cd71c4cb05c15d4465712
|
refs/heads/release-v0.16.x
| 2023-09-01T18:07:29.720772
| 2023-08-31T15:43:47
| 2023-08-31T15:43:47
| 49,976,939
| 689
| 682
|
MIT
| 2023-09-14T20:02:29
| 2016-01-19T19:22:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,099
|
py
|
test_api.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from django.urls import reverse
from rest_framework.test import APITestCase
from kolibri.core.auth.models import FacilityUser
from kolibri.core.auth.test.helpers import clear_process_cache
from kolibri.core.auth.test.helpers import create_dummy_facility_data
from kolibri.core.auth.test.helpers import provision_device
class GetFacilityAdminsTest(APITestCase):
def setUp(self):
clear_process_cache()
create_dummy_facility_data(classroom_count=1)
def _make_request(self):
return self.client.get(
reverse(
"kolibri:kolibri.plugins.setup_wizard:facilityimport-facilityadmins"
),
format="json",
)
def test_fails_if_device_provisioned(self):
provision_device()
response = self._make_request()
self.assertEqual(response.status_code, 403)
def test_only_returns_admins(self):
response = self._make_request()
sorted_admins = sorted(response.data, key=lambda x: x["username"])
self.assertEqual(sorted_admins[0]["username"], "facadmin")
class GrantSuperuserPermissionsTest(APITestCase):
def setUp(self):
clear_process_cache()
facility_data = create_dummy_facility_data(classroom_count=1)
self.admin = facility_data["facility_admin"]
self.admin.set_password("password")
self.admin.save()
self.coach = facility_data["classroom_coaches"][0]
self.coach.set_password("password")
self.coach.save()
def _make_request(self, data):
return self.client.post(
reverse(
"kolibri:kolibri.plugins.setup_wizard:facilityimport-grantsuperuserpermissions"
),
data,
format="json",
)
def test_fails_if_device_provisioned(self):
provision_device()
response = self._make_request(
{"user_id": self.admin.id, "password": "password"}
)
self.assertEqual(response.status_code, 403)
def test_fails_if_user_not_found(self):
id_copy = self.admin.id
self.admin.delete()
response = self._make_request({"user_id": id_copy, "password": "password"})
self.assertEqual(response.status_code, 404)
def test_fails_if_password_invalid(self):
response = self._make_request(
{"user_id": self.admin.id, "password": "passward"}
)
self.assertEqual(response.status_code, 403)
def test_fails_if_user_not_admin(self):
response = self._make_request(
{"user_id": self.coach.id, "password": "password"}
)
self.assertEqual(response.status_code, 403)
def test_successfully_adds_device_permissions(self):
response = self._make_request(
{"user_id": self.admin.id, "password": "password"}
)
self.assertEqual(response.status_code, 200)
class CreateSuperuserTest(APITestCase):
def setUp(self):
clear_process_cache()
facility_data = create_dummy_facility_data(classroom_count=1)
self.admin = facility_data["facility_admin"]
self.admin.set_password("password")
self.admin.save()
self.coach = facility_data["classroom_coaches"][0]
self.coach.set_password("password")
self.coach.save()
def _make_request(self, data):
return self.client.post(
reverse(
"kolibri:kolibri.plugins.setup_wizard:facilityimport-createsuperuser"
),
data,
format="json",
)
def test_successfully_adds_device_permissions(self):
response = self._make_request(
{
"username": "new_superuser",
"password": "password",
"full_name": "Super User",
}
)
self.assertEqual(response.status_code, 200)
superuser = FacilityUser.objects.get(username="new_superuser")
self.assertTrue(superuser.is_superuser)
|
0c23e46a8a3acfc399c65d659cc2328ad9887349
|
1e8f44db6e7bfcc6d6899db033d4fe35a11cb5b4
|
/morph_net/network_regularizers/resource_function_test.py
|
4f4db0c928184ee49495ea145caa101f776fbd5c
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
google-research/morph-net
|
abb3ab4b655d8e5aea00df975b479181e69584d6
|
e575a05fb18327312cbf67e870e28af4ccd4a0e4
|
refs/heads/master
| 2023-09-01T07:37:36.349243
| 2023-07-27T11:42:41
| 2023-07-27T11:43:29
| 175,291,625
| 1,101
| 176
|
Apache-2.0
| 2023-07-27T11:43:36
| 2019-03-12T20:30:12
|
Python
|
UTF-8
|
Python
| false
| false
| 57,168
|
py
|
resource_function_test.py
|
"""Tests for network_regularizers.resource_function."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from morph_net.network_regularizers import resource_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.contrib import layers as contrib_layers
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
layers = contrib_layers
class ResourceFunctionTest(parameterized.TestCase, tf.test.TestCase):
def assertNearRelatively(self, expected, actual):
self.assertNear(expected, actual, expected * 1e-6)
def setUp(self):
super(ResourceFunctionTest, self).setUp()
self.image_shape = (1, 11, 13, 17)
self.image = tf.placeholder(tf.float32, shape=[1, None, None, 17])
net = layers.conv2d(
self.image, 19, [7, 5], stride=2, padding='SAME', scope='conv1')
layers.conv2d_transpose(
self.image, 29, [7, 5], stride=2, padding='SAME', scope='convt2')
net = tf.reduce_mean(net, axis=(1, 2))
layers.fully_connected(net, 23, scope='FC')
net = layers.conv2d(
self.image, 10, [7, 5], stride=2, padding='SAME', scope='conv2')
layers.separable_conv2d(
net, None, [3, 2], depth_multiplier=1, padding='SAME', scope='dw1')
self.video_shape = (1, 11, 9, 13, 17)
self.video = tf.placeholder(tf.float32, shape=[1, None, None, None, 17])
net = layers.conv3d(
self.video, 19, [7, 3, 5], stride=2, padding='SAME', scope='vconv1')
g = tf.get_default_graph()
self.conv_op = g.get_operation_by_name('conv1/Conv2D')
self.convt_op = g.get_operation_by_name(
'convt2/conv2d_transpose')
self.matmul_op = g.get_operation_by_name('FC/MatMul')
self.dw_op = g.get_operation_by_name('dw1/depthwise')
self.conv3d_op = g.get_operation_by_name(
'vconv1/Conv3D')
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testConvFlopFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
flop_cost_tensor = resource_function.flop_function(
self.conv_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
flop_cost, _ = sess.run(
[flop_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected FLOP cost =
# 2 * batch_size * feature_map_width * feature_map_height
# * kernel_width * kernel_height * input_depth * output_depth
expected_flop_cost = (
2 * batch_size * 6 * 7 * 7 * 5 * num_alive_inputs * num_alive_outputs)
self.assertEqual(expected_flop_cost, flop_cost)
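# Worked example for the first named case above (batch_size=1, 17 alive
# inputs, 19 alive outputs): conv1's output feature map is 6x7 and its kernel
# is 7x5, so the cost is 2 * 1 * 6 * 7 * 7 * 5 * 17 * 19 = 949,620 FLOPs.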
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testConvFlopFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
flop_loss_tensor = resource_function.flop_function(
self.conv_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
flop_loss, _ = sess.run(
[flop_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected FLOP regularization loss =
# 2 * batch_size * feature_map_width * feature_map_height
# * kernel_width * kernel_height
# * (num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
expected_flop_loss = (
2 * batch_size * 6 * 7 * 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs))
self.assertEqual(expected_flop_loss, flop_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testConvMemoryFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
memory_cost_tensor = resource_function.memory_function(
self.conv_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_cost, _ = sess.run(
[memory_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory cost = input_feature + weights + output_feature =
# (batch_size * feature_map_width * feature_map_height * num_alive_inputs
# + kernel_width * kernel_height * num_alive_inputs * num_alive_outputs
# + batch_size * feature_map_width * feature_map_height * num_alive_outputs)
# * dtype.size
expected_memory_cost = (
batch_size * 11 * 13 * num_alive_inputs
+ 7 * 5 * num_alive_inputs * num_alive_outputs
+ batch_size * 6 * 7 * num_alive_outputs) * self.image.dtype.size
self.assertEqual(expected_memory_cost, memory_cost)
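# Worked example for the first named case above (batch_size=1, float32 so
# dtype.size == 4): (1*11*13*17 + 7*5*17*19 + 1*6*7*19) * 4
# = (2431 + 11305 + 798) * 4 = 58,136 bytes.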
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testConvMemoryFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
memory_loss_tensor = resource_function.memory_function(
self.conv_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_loss, _ = sess.run(
[memory_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory loss = input_feature + weights + output_feature =
# (batch_size * feature_map_width * feature_map_height * num_alive_inputs
# + kernel_width * kernel_height * (
# num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
# + batch_size * feature_map_width * feature_map_height * num_alive_outputs)
# * dtype.size
expected_memory_loss = (
batch_size * 11 * 13 * reg_inputs
+ 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
+ batch_size * 6 * 7 * reg_outputs) * self.image.dtype.size
self.assertEqual(expected_memory_loss, memory_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19, 1, 1),
('_BatchSize32_AliveIn4_AliveOut9_ComputeBound', 32, 4, 9, 1000, 2000),
('_BatchSize32_AliveIn4_AliveOut9_MemoryBound', 32, 4, 9, 1000, 20))
def testConvLatencyFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs, peak_compute,
memory_bandwidth):
latency_cost_tensor = resource_function.latency_function(
self.conv_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
peak_compute, memory_bandwidth, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
latency_cost, _ = sess.run(
[latency_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected latency cost = max(compute_cost, memory_cost)
expected_compute_cost = (
2 * batch_size * 6 * 7 * 7 * 5 * num_alive_inputs * num_alive_outputs
/ peak_compute)
expected_memory_cost = (
(batch_size * 11 * 13 * num_alive_inputs
+ 7 * 5 * num_alive_inputs * num_alive_outputs
+ batch_size * 6 * 7 * num_alive_outputs)
* self.image.dtype.size / memory_bandwidth)
expected_latency_cost = max(expected_compute_cost, expected_memory_cost)
self.assertNearRelatively(expected_latency_cost, latency_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19, 1, 1),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7_ComputeBound',
32, 4, 9, 3, 7, 1000, 2000),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7_MemoryBound',
32, 4, 9, 3, 7, 1000, 20))
def testConvLatencyFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, peak_compute, memory_bandwidth):
latency_loss_tensor = resource_function.latency_function(
self.conv_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, peak_compute, memory_bandwidth, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
latency_loss, _ = sess.run(
[latency_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected latency loss = max(compute_loss, memory_loss)
expected_compute_cost = (
2 * batch_size * 6 * 7 * 7 * 5 * num_alive_inputs * num_alive_outputs
/ peak_compute)
expected_memory_cost = (
(batch_size * 11 * 13 * num_alive_inputs
+ 7 * 5 * num_alive_inputs * num_alive_outputs
+ batch_size * 6 * 7 * num_alive_outputs)
* self.image.dtype.size / memory_bandwidth)
expected_compute_loss = (
2 * batch_size * 6 * 7 * 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
/ peak_compute)
expected_memory_loss = (
(batch_size * 11 * 13 * reg_inputs
+ 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
+ batch_size * 6 * 7 * reg_outputs)
* self.image.dtype.size / memory_bandwidth)
if expected_memory_cost > expected_compute_cost:
expected_latency_loss = expected_memory_loss
else:
expected_latency_loss = expected_compute_loss
self.assertNearRelatively(expected_latency_loss, latency_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testConvModelSizeFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
model_size_cost = resource_function.model_size_function(
self.conv_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
batch_size)
# Expected model size cost =
# kernel_width * kernel_height * input_depth * output_depth
expected_model_size_cost = 7 * 5 * num_alive_inputs * num_alive_outputs
self.assertEqual(expected_model_size_cost, model_size_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testConvModelSizeFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
model_size_loss = resource_function.model_size_function(
self.conv_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
# Expected model size regularization loss =
# kernel_width * kernel_height
# * (num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
expected_model_size_loss = (
7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs))
self.assertEqual(expected_model_size_loss, model_size_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testConvActivationCountFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
activation_count_cost = resource_function.activation_count_function(
self.conv_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
batch_size)
# Expected activation count cost = output_depth
expected_activation_count_cost = num_alive_outputs
self.assertEqual(expected_activation_count_cost, activation_count_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testConvActivationCountFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
activation_count_loss = resource_function.activation_count_function(
self.conv_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
# Expected activation count regularization loss = reg_outputs
expected_activation_count_loss = reg_outputs
self.assertEqual(expected_activation_count_loss, activation_count_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut29', 1, 17, 29),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testConvTransposeFlopFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
flop_cost_tensor = resource_function.flop_function(
self.convt_op, False, num_alive_inputs, num_alive_outputs, 17, 29,
batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
flop_cost, _ = sess.run(
[flop_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected FLOP cost =
# 2 * batch_size * feature_map_width * feature_map_height
# * kernel_width * kernel_height * input_depth * output_depth
expected_flop_cost = (
2 * batch_size * 11 * 13 * 7 * 5 * num_alive_inputs * num_alive_outputs)
self.assertEqual(expected_flop_cost, flop_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut29',
1, 17, 19, 17, 29),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testConvTransposeFlopFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
flop_loss_tensor = resource_function.flop_function(
self.convt_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
flop_loss, _ = sess.run(
[flop_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected FLOP regularization loss =
# 2 * batch_size * feature_map_width * feature_map_height
# * kernel_width * kernel_height
# * (num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
expected_flop_loss = (
2 * batch_size * 11 * 13 * 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs))
self.assertEqual(expected_flop_loss, flop_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut29', 1, 17, 29),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testConvTransposeMemoryFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
memory_cost_tensor = resource_function.memory_function(
self.convt_op, False, num_alive_inputs, num_alive_outputs, 17, 29,
batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_cost, _ = sess.run(
[memory_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory cost = input_feature + weights + output_feature =
# (batch_size * feature_map_width * feature_map_height * num_alive_inputs
# + kernel_width * kernel_height * num_alive_inputs * num_alive_outputs
# + batch_size * feature_map_width * feature_map_height * num_alive_outputs)
# * dtype.size
expected_memory_cost = (
batch_size * 11 * 13 * num_alive_inputs
+ 7 * 5 * num_alive_inputs * num_alive_outputs
+ batch_size * 22 * 26 * num_alive_outputs) * self.image.dtype.size
self.assertEqual(expected_memory_cost, memory_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut29',
1, 17, 29, 17, 29),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testConvTransposeMemoryFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
memory_loss_tensor = resource_function.memory_function(
self.convt_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_loss, _ = sess.run(
[memory_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory loss = input_feature + weights + output_feature =
# (batch_size * feature_map_width * feature_map_height * num_alive_inputs
# + kernel_width * kernel_height * (
# num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
# + batch_size * feature_map_width * feature_map_height * num_alive_outputs)
# * dtype.size
expected_memory_loss = (
batch_size * 11 * 13 * reg_inputs
+ 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
+ batch_size * 22 * 26 * reg_outputs) * self.image.dtype.size
self.assertEqual(expected_memory_loss, memory_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut29', 1, 17, 29, 1, 1),
('_BatchSize32_AliveIn4_AliveOut9_ComputeBound', 32, 4, 9, 1000, 2000),
('_BatchSize32_AliveIn4_AliveOut9_MemoryBound', 32, 4, 9, 1000, 20))
def testConvTransposeLatencyFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs, peak_compute,
memory_bandwidth):
latency_cost_tensor = resource_function.latency_function(
self.convt_op, False, num_alive_inputs, num_alive_outputs, 17, 29,
peak_compute, memory_bandwidth, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
latency_cost, _ = sess.run(
[latency_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected latency cost = max(compute_cost, memory_cost)
expected_compute_cost = (
2 * batch_size * 11 * 13 * 7 * 5 * num_alive_inputs * num_alive_outputs
/ peak_compute)
expected_memory_cost = (
(batch_size * 11 * 13 * num_alive_inputs
+ 7 * 5 * num_alive_inputs * num_alive_outputs
+ batch_size * 22 * 26 * num_alive_outputs)
* self.image.dtype.size / memory_bandwidth)
expected_latency_cost = max(expected_compute_cost, expected_memory_cost)
self.assertNearRelatively(expected_latency_cost, latency_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut29',
1, 17, 19, 17, 29, 1, 1),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7_ComputeBound',
32, 4, 9, 3, 7, 1000, 2000),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7_MemoryBound',
32, 4, 9, 3, 7, 1000, 20))
def testConvTransposeLatencyFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, peak_compute, memory_bandwidth):
latency_loss_tensor = resource_function.latency_function(
self.convt_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, peak_compute, memory_bandwidth, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
latency_loss, _ = sess.run(
[latency_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected latency loss = max(compute_loss, memory_loss)
expected_compute_cost = (
2 * batch_size * 11 * 13 * 7 * 5 * num_alive_inputs * num_alive_outputs
/ peak_compute)
expected_memory_cost = (
(batch_size * 11 * 13 * num_alive_inputs
+ 7 * 5 * num_alive_inputs * num_alive_outputs
+ batch_size * 22 * 26 * num_alive_outputs)
* self.image.dtype.size / memory_bandwidth)
expected_compute_loss = (
2 * batch_size * 11 * 13 * 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
/ peak_compute)
expected_memory_loss = (
(batch_size * 11 * 13 * reg_inputs
+ 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
+ batch_size * 22 * 26 * reg_outputs)
* self.image.dtype.size / memory_bandwidth)
if expected_memory_cost > expected_compute_cost:
expected_latency_loss = expected_memory_loss
else:
expected_latency_loss = expected_compute_loss
self.assertNearRelatively(expected_latency_loss, latency_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut29', 1, 17, 29),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testConvTransposeModelSizeFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
model_size_cost = resource_function.model_size_function(
self.convt_op, False, num_alive_inputs, num_alive_outputs, 17, 29,
batch_size)
# Expected model size cost =
# kernel_width * kernel_height * input_depth * output_depth
expected_model_size_cost = 7 * 5 * num_alive_inputs * num_alive_outputs
self.assertEqual(expected_model_size_cost, model_size_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut29',
1, 17, 19, 17, 29),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testConvTransposeModelSizeFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
model_size_loss = resource_function.model_size_function(
self.convt_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
# Expected model size regularization loss =
# kernel_width * kernel_height
# * (num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
expected_model_size_loss = (
7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs))
self.assertEqual(expected_model_size_loss, model_size_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut29', 1, 17, 29),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testConvTransposeActivationCountFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
activation_count_cost = resource_function.activation_count_function(
self.convt_op, False, num_alive_inputs, num_alive_outputs, 17, 29,
batch_size)
# Expected activation count cost = output_depth
expected_activation_count_cost = num_alive_outputs
self.assertEqual(expected_activation_count_cost, activation_count_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut29',
1, 17, 19, 17, 29),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testConvTransposeActivationCountFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
activation_count_loss = resource_function.activation_count_function(
self.convt_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
# Expected activation count regularization loss = reg_outputs
expected_activation_count_loss = reg_outputs
self.assertEqual(expected_activation_count_loss, activation_count_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testMatMulFlopFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
flop_cost = resource_function.flop_function(
self.matmul_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
batch_size)
# Expected FLOP cost =
# 2 * batch_size * input_depth * output_depth
expected_flop_cost = 2 * batch_size * num_alive_inputs * num_alive_outputs
self.assertEqual(expected_flop_cost, flop_cost)
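# Worked example for the first named case above: 2 * 1 * 17 * 19 = 646 FLOPs;
# a fully connected layer is a batched matrix multiply, so no spatial terms.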
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testMatMulFlopFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
flop_loss = resource_function.flop_function(
self.matmul_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
# Expected FLOP regularization loss =
# 2 * batch_size
# * (num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
expected_flop_loss = (
2 * batch_size * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs))
self.assertEqual(expected_flop_loss, flop_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testMatMulMemoryFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
memory_cost_tensor = resource_function.memory_function(
self.matmul_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_cost, _ = sess.run(
[memory_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory cost = input_feature + weights + output_feature =
# (batch_size * num_alive_inputs
# + num_alive_inputs * num_alive_outputs
# + batch_size * num_alive_outputs) * dtype.size
expected_memory_cost = (
batch_size * num_alive_inputs
+ num_alive_inputs * num_alive_outputs
+ batch_size * num_alive_outputs) * self.image.dtype.size
self.assertEqual(expected_memory_cost, memory_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testMatMulMemoryFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
memory_loss_tensor = resource_function.memory_function(
self.matmul_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_loss, _ = sess.run(
[memory_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory loss = input_feature + weights + output_feature =
# (batch_size * reg_inputs
# + (num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
# + batch_size * reg_outputs) * dtype.size
expected_memory_loss = (
batch_size * reg_inputs
+ (num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
+ batch_size * reg_outputs) * self.image.dtype.size
self.assertEqual(expected_memory_loss, memory_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19, 1, 1),
('_BatchSize32_AliveIn4_AliveOut9_ComputeBound', 32, 4, 9, 1000, 2000),
('_BatchSize32_AliveIn4_AliveOut9_MemoryBound', 32, 4, 9, 1000, 20))
def testMatMulLatencyFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs, peak_compute,
memory_bandwidth):
latency_cost_tensor = resource_function.latency_function(
self.matmul_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
peak_compute, memory_bandwidth, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
latency_cost, _ = sess.run(
[latency_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected latency cost = max(compute_cost, memory_cost)
expected_compute_cost = (
2 * batch_size * num_alive_inputs * num_alive_outputs / peak_compute)
expected_memory_cost = (
(batch_size * num_alive_inputs
+ num_alive_inputs * num_alive_outputs
+ batch_size * num_alive_outputs)
* self.image.dtype.size / memory_bandwidth)
expected_latency_cost = max(expected_compute_cost, expected_memory_cost)
self.assertNearRelatively(expected_latency_cost, latency_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19, 1, 1),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7_ComputeBound',
32, 4, 9, 3, 7, 1000, 2000),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7_MemoryBound',
32, 4, 9, 3, 7, 1000, 20))
def testMatMulLatencyFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, peak_compute, memory_bandwidth):
latency_loss_tensor = resource_function.latency_function(
self.matmul_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, peak_compute, memory_bandwidth, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
latency_loss, _ = sess.run(
[latency_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected latency loss = max(compute_loss, memory_loss)
expected_compute_cost = (
2 * batch_size * num_alive_inputs * num_alive_outputs / peak_compute)
expected_memory_cost = (
(batch_size * num_alive_inputs
+ num_alive_inputs * num_alive_outputs
+ batch_size * num_alive_outputs)
* self.image.dtype.size / memory_bandwidth)
expected_compute_loss = (
2 * batch_size * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
/ peak_compute)
expected_memory_loss = (
(batch_size * reg_inputs
+ (num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
+ batch_size * reg_outputs)
* self.image.dtype.size / memory_bandwidth)
if expected_memory_cost > expected_compute_cost:
expected_latency_loss = expected_memory_loss
else:
expected_latency_loss = expected_compute_loss
self.assertNearRelatively(expected_latency_loss, latency_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testMatMulModelSizeFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
model_size_cost = resource_function.model_size_function(
self.matmul_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
batch_size)
# Expected model size cost = input_depth * output_depth
expected_model_size_cost = num_alive_inputs * num_alive_outputs
self.assertEqual(expected_model_size_cost, model_size_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testMatMulModelSizeFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
model_size_loss = resource_function.model_size_function(
self.matmul_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
# Expected model size regularization loss =
# num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs
expected_model_size_loss = (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
self.assertEqual(expected_model_size_loss, model_size_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19', 1, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testMatMulActivationCountFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
activation_count_cost = resource_function.activation_count_function(
self.matmul_op, False, num_alive_inputs, num_alive_outputs, 17, 19,
batch_size)
# Expected activation count cost = output_depth
expected_activation_count_cost = num_alive_outputs
self.assertEqual(expected_activation_count_cost, activation_count_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn17_AliveOut19_RegIn17_RegOut19',
1, 17, 19, 17, 19),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testMatMulActivationCountFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
activation_count_loss = resource_function.activation_count_function(
self.matmul_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
# Expected activation count regularization loss = reg_outputs
expected_activation_count_loss = reg_outputs
self.assertEqual(expected_activation_count_loss, activation_count_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10', 1, 10, 10),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testDepthwiseConvFlopFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
flop_cost_tensor = resource_function.flop_function(
self.dw_op, False, num_alive_inputs, num_alive_outputs, 10, 10,
batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
flop_cost, _ = sess.run(
[flop_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected FLOP cost =
# 2 * batch_size * feature_map_width * feature_map_height
# * kernel_width * kernel_height * output_depth
expected_flop_cost = (
2 * batch_size * 6 * 7 * 3 * 2 * num_alive_outputs)
self.assertEqual(expected_flop_cost, flop_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10_RegIn10_RegOut10',
1, 10, 10, 10, 10),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testDepthwiseConvFlopFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
flop_loss_tensor = resource_function.flop_function(
self.dw_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
flop_loss, _ = sess.run(
[flop_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected FLOP regularization loss =
# 2 * batch_size * feature_map_width * feature_map_height
# * kernel_width * kernel_height * (reg_inputs + reg_outputs)
expected_flop_loss = (
2 * batch_size * 6 * 7 * 3 * 2 * (reg_inputs + reg_outputs))
self.assertEqual(expected_flop_loss, flop_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10', 1, 10, 10),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testDepthwiseConvMemoryFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
memory_cost_tensor = resource_function.memory_function(
self.conv_op, False, num_alive_inputs, num_alive_outputs, 10, 10,
batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_cost, _ = sess.run(
[memory_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory cost = input_feature + weights + output_feature =
# (batch_size * feature_map_width * feature_map_height * num_alive_inputs
# + kernel_width * kernel_height * num_alive_inputs * num_alive_outputs
# + batch_size * feature_map_width * feature_map_height * num_alive_outputs)
# * dtype.size
expected_memory_cost = (
batch_size * 11 * 13 * num_alive_inputs
+ 7 * 5 * num_alive_inputs * num_alive_outputs
+ batch_size * 6 * 7 * num_alive_outputs) * self.image.dtype.size
self.assertEqual(expected_memory_cost, memory_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10_RegIn10_RegOut10',
1, 10, 10, 10, 10),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testDepthwiseConvMemoryFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
memory_loss_tensor = resource_function.memory_function(
self.conv_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_loss, _ = sess.run(
[memory_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory loss = input_feature + weights + output_feature =
# (batch_size * feature_map_width * feature_map_height * num_alive_inputs
# + kernel_width * kernel_height * (
# num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
# + batch_size * feature_map_width * feature_map_height * num_alive_outputs)
# * dtype.size
expected_memory_loss = (
batch_size * 11 * 13 * reg_inputs
+ 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
+ batch_size * 6 * 7 * reg_outputs) * self.image.dtype.size
self.assertEqual(expected_memory_loss, memory_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10', 1, 10, 10, 1, 1),
('_BatchSize32_AliveIn4_AliveOut9_ComputeBound', 32, 4, 9, 1000, 2000),
('_BatchSize32_AliveIn4_AliveOut9_MemoryBound', 32, 4, 9, 1000, 20))
def testDepthwiseConvLatencyFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs, peak_compute,
memory_bandwidth):
latency_cost_tensor = resource_function.latency_function(
self.conv_op, False, num_alive_inputs, num_alive_outputs, 10, 10,
peak_compute, memory_bandwidth, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
latency_cost, _ = sess.run(
[latency_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected latency cost = max(compute_cost, memory_cost)
expected_compute_cost = (
2 * batch_size * 6 * 7 * 7 * 5 * num_alive_inputs * num_alive_outputs
/ peak_compute)
expected_memory_cost = (
(batch_size * 11 * 13 * num_alive_inputs
+ 7 * 5 * num_alive_inputs * num_alive_outputs
+ batch_size * 6 * 7 * num_alive_outputs)
* self.image.dtype.size / memory_bandwidth)
expected_latency_cost = max(expected_compute_cost, expected_memory_cost)
self.assertNearRelatively(expected_latency_cost, latency_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10_RegIn10_RegOut10',
1, 10, 10, 10, 10, 1, 1),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7_ComputeBound',
32, 4, 9, 3, 7, 1000, 2000),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7_MemoryBound',
32, 4, 9, 3, 7, 1000, 20))
def testDepthwiseConvLatencyFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, peak_compute, memory_bandwidth):
latency_loss_tensor = resource_function.latency_function(
self.conv_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, peak_compute, memory_bandwidth, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
latency_loss, _ = sess.run(
[latency_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected latency loss = max(compute_loss, memory_loss)
expected_compute_cost = (
2 * batch_size * 6 * 7 * 7 * 5 * num_alive_inputs * num_alive_outputs
/ peak_compute)
expected_memory_cost = (
(batch_size * 11 * 13 * num_alive_inputs
+ 7 * 5 * num_alive_inputs * num_alive_outputs
+ batch_size * 6 * 7 * num_alive_outputs)
* self.image.dtype.size / memory_bandwidth)
expected_compute_loss = (
2 * batch_size * 6 * 7 * 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
/ peak_compute)
expected_memory_loss = (
(batch_size * 11 * 13 * reg_inputs
+ 7 * 5 * (
num_alive_inputs * reg_outputs + num_alive_outputs * reg_inputs)
+ batch_size * 6 * 7 * reg_outputs)
* self.image.dtype.size / memory_bandwidth)
if expected_memory_cost > expected_compute_cost:
expected_latency_loss = expected_memory_loss
else:
expected_latency_loss = expected_compute_loss
self.assertNearRelatively(expected_latency_loss, latency_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10', 1, 10, 10),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testDepthwiseConvModelSizeFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
model_size_cost = resource_function.model_size_function(
self.dw_op, False, num_alive_inputs, num_alive_outputs, 10, 10,
batch_size)
# Expected model size cost =
# kernel_width * kernel_height * output_depth
expected_model_size_cost = 3 * 2 * num_alive_outputs
self.assertEqual(expected_model_size_cost, model_size_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10_RegIn10_RegOut10',
1, 10, 10, 10, 10),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testDepthwiseConvModelSizeFlopFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
model_size_loss = resource_function.model_size_function(
self.dw_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
# Expected model size regularization loss =
# kernel_width * kernel_height * (reg_inputs + reg_outputs)
expected_model_size_loss = 3 * 2 * (reg_inputs + reg_outputs)
self.assertEqual(expected_model_size_loss, model_size_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10', 1, 10, 10),
('_BatchSize32_AliveIn4_AliveOut9', 32, 4, 9))
def testDepthwiseConvActivationCountFunction_Cost(
self, batch_size, num_alive_inputs, num_alive_outputs):
activation_count_cost = resource_function.activation_count_function(
self.dw_op, False, num_alive_inputs, num_alive_outputs, 10, 10,
batch_size)
    # Expected activation count cost = output_depth
expected_activation_count_cost = num_alive_outputs
self.assertEqual(expected_activation_count_cost, activation_count_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn10_AliveOut10_RegIn10_RegOut10',
1, 10, 10, 10, 10),
('_BatchSize32_AliveIn4_AliveOut9_RegIn3_RegOut7',
32, 4, 9, 3, 7))
def testDepthwiseConvActivationCountFlopFunction_Regularization(
self, batch_size, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs):
activation_count_loss = resource_function.activation_count_function(
self.dw_op, True, num_alive_inputs, num_alive_outputs, reg_inputs,
reg_outputs, batch_size)
    # Expected activation count regularization loss = reg_outputs
expected_activation_count_loss = reg_outputs
self.assertEqual(expected_activation_count_loss, activation_count_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn19_AliveIn11', 1, 19, 11),
('_BatchSize32_AliveIn15_AliveIn7', 32, 15, 7))
def testConcatFlopFunction_Cost(
self, batch_size, num_alive_inputs3, num_alive_inputs4):
conv3 = layers.conv2d(
self.image, 19, [7, 5], stride=2, padding='SAME', scope='conv3')
conv4 = layers.conv2d(
self.image, 11, [3, 7], stride=2, padding='SAME', scope='conv4')
concat = tf.concat([conv3, conv4], axis=3)
flop_cost = resource_function.flop_function(
concat.op, False, num_alive_inputs3 + num_alive_inputs4,
num_alive_inputs3 + num_alive_inputs4, 19, 11, batch_size)
expected_flop_cost = 0
self.assertEqual(expected_flop_cost, flop_cost)
@parameterized.named_parameters(
      ('_BatchSize1_AliveIn19_AliveIn11_RegIn19_RegIn11',
       1, 19, 11, 19, 11),
      ('_BatchSize32_AliveIn15_AliveIn7_RegIn12_RegIn6',
       32, 15, 7, 12, 6))
def testConcatFlopFunction_Regularization(
self, batch_size, num_alive_inputs3, num_alive_inputs4, reg_inputs3,
reg_inputs4):
conv3 = layers.conv2d(
self.image, 19, [7, 5], stride=2, padding='SAME', scope='conv3')
conv4 = layers.conv2d(
self.image, 11, [3, 7], stride=2, padding='SAME', scope='conv4')
concat = tf.concat([conv3, conv4], axis=3)
flop_loss = resource_function.flop_function(
concat.op, True, num_alive_inputs3 + num_alive_inputs4,
num_alive_inputs3 + num_alive_inputs4, reg_inputs3 + reg_inputs4,
reg_inputs3 + reg_inputs4, batch_size)
expected_flop_loss = 0
self.assertEqual(expected_flop_loss, flop_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn19_AliveIn11', 1, 19, 11),
('_BatchSize32_AliveIn15_AliveIn7', 32, 15, 7))
def testConcatMemoryFunction_Cost(
self, batch_size, num_alive_inputs3, num_alive_inputs4):
conv3 = layers.conv2d(
self.image, 19, [7, 5], stride=2, padding='SAME', scope='conv3')
conv4 = layers.conv2d(
self.image, 11, [3, 7], stride=2, padding='SAME', scope='conv4')
concat = tf.concat([conv3, conv4], axis=3)
memory_cost_tensor = resource_function.memory_function(
concat.op, False, num_alive_inputs3 + num_alive_inputs4,
num_alive_inputs3 + num_alive_inputs4, 19, 11, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_cost, _ = sess.run(
[memory_cost_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory cost = input_feature3 + input_feature4 + output_feature =
# (batch_size * feature_map_width * feature_map_height * num_alive_inputs3
# + batch_size * feature_map_width * feature_map_height * num_alive_inputs4
# + batch_size * feature_map_width * feature_map_height * num_alive_outputs)
# * dtype.size
expected_memory_cost = (
(batch_size * 6 * 7 * num_alive_inputs3
+ batch_size * 6 * 7 * num_alive_inputs4
+ batch_size * 6 * 7 * (num_alive_inputs3 + num_alive_inputs4))
* self.image.dtype.size)
self.assertEqual(expected_memory_cost, memory_cost)
@parameterized.named_parameters(
('_BatchSize1_AliveIn19_AliveIn11_RegIn19_RegIn11', 1, 19, 11, 19, 11),
('_BatchSize32_AliveIn15_AliveIn7_RegIn12_RegIn6', 32, 15, 7, 12, 6))
def testConcatMemoryFunction_Regularization(
self, batch_size, num_alive_inputs3, num_alive_inputs4, reg_inputs3,
reg_inputs4):
conv3 = layers.conv2d(
self.image, 19, [7, 5], stride=2, padding='SAME', scope='conv3')
conv4 = layers.conv2d(
self.image, 11, [3, 7], stride=2, padding='SAME', scope='conv4')
concat = tf.concat([conv3, conv4], axis=3)
memory_loss_tensor = resource_function.memory_function(
concat.op, True, num_alive_inputs3 + num_alive_inputs4,
num_alive_inputs3 + num_alive_inputs4, reg_inputs3 + reg_inputs4,
reg_inputs3 + reg_inputs4, batch_size)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
memory_loss, _ = sess.run(
[memory_loss_tensor, self.image],
feed_dict={self.image: np.zeros(self.image_shape)})
# Expected memory cost = input_feature3 + input_feature4 + output_feature =
# (batch_size * feature_map_width * feature_map_height * reg_inputs3
# + batch_size * feature_map_width * feature_map_height * reg_inputs4
# + batch_size * feature_map_width * feature_map_height *
# (reg_inputs3 + reg_inputs4)) * dtype.size
expected_memory_loss = (
(batch_size * 6 * 7 * reg_inputs3
+ batch_size * 6 * 7 * reg_inputs4
+ batch_size * 6 * 7 * (reg_inputs3 + reg_inputs4))
* self.image.dtype.size)
self.assertEqual(expected_memory_loss, memory_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn19_AliveIn11', 1, 19, 11),
('_BatchSize32_AliveIn15_AliveIn7', 32, 15, 7))
def testConcatModelSizeFunction_Cost(
self, batch_size, num_alive_inputs3, num_alive_inputs4):
conv3 = layers.conv2d(
self.image, 19, [7, 5], stride=2, padding='SAME', scope='conv3')
conv4 = layers.conv2d(
self.image, 11, [3, 7], stride=2, padding='SAME', scope='conv4')
concat = tf.concat([conv3, conv4], axis=3)
model_size_cost = resource_function.model_size_function(
concat.op, False, num_alive_inputs3 + num_alive_inputs4,
num_alive_inputs3 + num_alive_inputs4, 19, 11, batch_size)
expected_model_size_cost = 0
self.assertEqual(expected_model_size_cost, model_size_cost)
@parameterized.named_parameters(
      ('_BatchSize1_AliveIn19_AliveIn11_RegIn19_RegIn11',
       1, 19, 11, 19, 11),
      ('_BatchSize32_AliveIn15_AliveIn7_RegIn12_RegIn6',
       32, 15, 7, 12, 6))
def testConcatModelSizeFunction_Regularization(
self, batch_size, num_alive_inputs3, num_alive_inputs4, reg_inputs3,
reg_inputs4):
conv3 = layers.conv2d(
self.image, 19, [7, 5], stride=2, padding='SAME', scope='conv3')
conv4 = layers.conv2d(
self.image, 11, [3, 7], stride=2, padding='SAME', scope='conv4')
concat = tf.concat([conv3, conv4], axis=3)
model_size_loss = resource_function.model_size_function(
concat.op, True, num_alive_inputs3 + num_alive_inputs4,
num_alive_inputs3 + num_alive_inputs4, reg_inputs3 + reg_inputs4,
reg_inputs3 + reg_inputs4, batch_size)
expected_model_size_loss = 0
self.assertEqual(expected_model_size_loss, model_size_loss)
@parameterized.named_parameters(
('_BatchSize1_AliveIn19_AliveIn11', 1, 19, 11),
('_BatchSize32_AliveIn15_AliveIn7', 32, 15, 7))
def testConcatActivationCountFunction_Cost(
self, batch_size, num_alive_inputs3, num_alive_inputs4):
conv3 = layers.conv2d(
self.image, 19, [7, 5], stride=2, padding='SAME', scope='conv3')
conv4 = layers.conv2d(
self.image, 11, [3, 7], stride=2, padding='SAME', scope='conv4')
concat = tf.concat([conv3, conv4], axis=3)
activation_count_cost = resource_function.activation_count_function(
concat.op, False, num_alive_inputs3 + num_alive_inputs4,
num_alive_inputs3 + num_alive_inputs4, 19, 11, batch_size)
expected_activation_count_cost = 0
self.assertEqual(expected_activation_count_cost, activation_count_cost)
@parameterized.named_parameters(
      ('_BatchSize1_AliveIn19_AliveIn11_RegIn19_RegIn11',
       1, 19, 11, 19, 11),
      ('_BatchSize32_AliveIn15_AliveIn7_RegIn12_RegIn6',
       32, 15, 7, 12, 6))
def testConcatActivationCountFunction_Regularization(
self, batch_size, num_alive_inputs3, num_alive_inputs4, reg_inputs3,
reg_inputs4):
conv3 = layers.conv2d(
self.image, 19, [7, 5], stride=2, padding='SAME', scope='conv3')
conv4 = layers.conv2d(
self.image, 11, [3, 7], stride=2, padding='SAME', scope='conv4')
concat = tf.concat([conv3, conv4], axis=3)
activation_count_loss = resource_function.activation_count_function(
concat.op, True, num_alive_inputs3 + num_alive_inputs4,
num_alive_inputs3 + num_alive_inputs4, reg_inputs3 + reg_inputs4,
reg_inputs3 + reg_inputs4, batch_size)
expected_activation_count_loss = 0
self.assertEqual(expected_activation_count_loss, activation_count_loss)
def testBadHardware(self):
with self.assertRaises(ValueError):
_ = resource_function.latency_function_factory('apple', 66)
with self.assertRaises(ValueError):
_ = resource_function.latency_function_factory(None, 11)
def testConvFlopsCoeff(self):
tf.compat.v1.reset_default_graph()
image = tf.constant(0.0, shape=[1, 11, 13, 17])
layers.conv2d(image, 19, [7, 5], stride=2, padding='SAME', scope='conv1')
conv_op = tf.get_default_graph().get_operation_by_name('conv1/Conv2D')
# Divide by the input depth and the output depth to get the coefficient.
expected_coeff = _flops(conv_op) / (17.0 * 19.0)
actual_coeff = resource_function.flop_coeff(conv_op)
self.assertNearRelatively(expected_coeff, actual_coeff)
def testConvFlopsCoeffUnknownShape(self):
tf.compat.v1.reset_default_graph()
image = tf.placeholder(tf.float32, shape=[1, None, None, 17])
net = layers.conv2d(
image, 19, [7, 5], stride=2, padding='SAME', scope='conv1')
self.conv_op = tf.get_default_graph().get_operation_by_name(
'conv1/Conv2D')
actual_coeff_tensor = resource_function.flop_coeff(self.conv_op)
with self.cached_session() as sess:
sess.run(tf.global_variables_initializer())
actual_coeff, _ = sess.run([actual_coeff_tensor, net],
feed_dict={image: np.zeros((1, 11, 13, 17))})
    # We cannot use the _flops helper used in the other tests because the
    # shapes are not all known in the graph.
expected_coeff = 2940.0
self.assertNearRelatively(expected_coeff, actual_coeff)
def testConvTransposeFlopsCoeff(self):
tf.compat.v1.reset_default_graph()
image = tf.constant(0.0, shape=[1, 11, 13, 17])
layers.conv2d_transpose(
image, 29, [7, 5], stride=2, padding='SAME', scope='convt2')
convt_op = tf.get_default_graph().get_operation_by_name(
'convt2/conv2d_transpose')
# Divide by the input depth and the output depth to get the coefficient.
expected_coeff = _flops(convt_op) / (17.0 * 29.0)
actual_coeff = resource_function.flop_coeff(convt_op)
self.assertNearRelatively(expected_coeff, actual_coeff)
def testFcFlopsCoeff(self):
expected_coeff = _flops(self.matmul_op) / (19.0 * 23.0)
actual_coeff = resource_function.flop_coeff(self.matmul_op)
self.assertNearRelatively(expected_coeff, actual_coeff)
def testConvNumWeightsCoeff(self):
actual_coeff = resource_function.num_weights_coeff(self.conv_op)
# The coefficient is just the filter size - 7 * 5 = 35:
self.assertNearRelatively(35, actual_coeff)
def testFcNumWeightsCoeff(self):
actual_coeff = resource_function.num_weights_coeff(self.matmul_op)
# The coefficient is 1.0, the number of weights is just inputs x outputs.
self.assertNearRelatively(1.0, actual_coeff)
def testDepthwiseConvFlopsCoeff(self):
tf.compat.v1.reset_default_graph()
image = tf.constant(0.0, shape=[1, 11, 13, 17])
net = layers.conv2d(
image, 10, [7, 5], stride=2, padding='SAME', scope='conv2')
layers.separable_conv2d(
net, None, [3, 2], depth_multiplier=1, padding='SAME', scope='dw1')
dw_op = tf.get_default_graph().get_operation_by_name('dw1/depthwise')
# Divide by the input depth (which is also the output depth) to get the
# coefficient.
expected_coeff = _flops(dw_op) / (10.0)
actual_coeff = resource_function.flop_coeff(dw_op)
self.assertNearRelatively(expected_coeff, actual_coeff)
def test_conv3d_flops_coeff(self):
tf.compat.v1.reset_default_graph()
input_depth = 17
output_depth = 10
video = tf.zeros([1, 15, 12, 13, input_depth])
_ = layers.conv3d(
video, output_depth, [7, 5, 3], stride=2, padding='SAME', scope='conv')
conv_op = tf.get_default_graph().get_operation_by_name('conv/Conv3D')
# Divide by the input depth and the output depth to get the coefficient.
expected_coeff = _flops(conv_op) / (input_depth * output_depth)
actual_coeff = resource_function.flop_coeff(conv_op)
self.assertNearRelatively(expected_coeff, actual_coeff)
def _flops(op):
"""Get the number of flops of a convolution, from the ops stats registry.
Args:
op: A tf.Operation object.
Returns:
    The number of flops needed to evaluate the op.
"""
return ops.get_stats_for_node_def(
tf.get_default_graph(), op.node_def, 'flops').value
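# Illustration of the coefficient idea (a worked example using the shapes from
# testConvFlopsCoeffUnknownShape above, not additional library behavior): for
# conv1 with a 7x5 kernel, stride 2 and an 11x13 input, the output map is 6x7,
# so each (input channel, output channel) pair costs 2 * 6 * 7 * 7 * 5 = 2940
# multiply-adds, and total flops = flop_coeff * input_depth * output_depth.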
if __name__ == '__main__':
tf.test.main()
# ==== /archivebox/parsers/medium_rss.py (ArchiveBox/ArchiveBox, MIT) ====
__package__ = 'archivebox.parsers'
from typing import IO, Iterable
from datetime import datetime
from xml.etree import ElementTree
from ..index.schema import Link
from ..util import (
htmldecode,
enforce_types,
)
@enforce_types
def parse_medium_rss_export(rss_file: IO[str], **_kwargs) -> Iterable[Link]:
"""Parse Medium RSS feed files into links"""
rss_file.seek(0)
root = ElementTree.parse(rss_file).getroot()
items = root.find("channel").findall("item") # type: ignore
for item in items:
url = item.find("link").text # type: ignore
title = item.find("title").text.strip() # type: ignore
ts_str = item.find("pubDate").text # type: ignore
time = datetime.strptime(ts_str, "%a, %d %b %Y %H:%M:%S %Z") # type: ignore
yield Link(
url=htmldecode(url),
timestamp=str(time.timestamp()),
title=htmldecode(title) or None,
tags=None,
sources=[rss_file.name],
)
KEY = 'medium_rss'
NAME = 'Medium RSS'
PARSER = parse_medium_rss_export
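# A minimal usage sketch (illustrative only; the file name below is a
# hypothetical placeholder, not part of ArchiveBox):
#
#   with open('medium_export.xml') as rss_file:
#       for link in parse_medium_rss_export(rss_file):
#           print(link.timestamp, link.url, link.title)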
# ==== /pontoon/base/tests/test_utils.py (mozilla/pontoon, BSD-3-Clause) ====
import pytest
from django.contrib.auth import get_user_model
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from pontoon.base.models import Project
from pontoon.base.utils import (
aware_datetime,
extension_in,
get_m2m_changes,
get_object_or_none,
latest_datetime,
get_search_phrases,
is_email,
)
from pontoon.test.factories import (
ProjectFactory,
ResourceFactory,
LocaleFactory,
ProjectSlugHistoryFactory,
ProjectLocaleFactory,
)
@pytest.fixture
def project_d():
"""
Fixture that sets up and returns a Project with associated Locale and Resource.
"""
locale = LocaleFactory.create()
project = ProjectFactory.create(
name="Project D", slug="project-d", disabled=False, system_project=False
)
ResourceFactory.create(project=project, path="resource_d.po", format="po")
ProjectLocaleFactory.create(project=project, locale=locale)
return project
def create_slug_history_and_change_slug(project, new_slug):
"""
This function is a helper for tests that need to simulate changing a project's slug.
It records the project's current slug in the history, then updates the project's slug
to a new value.
"""
# Record the old slug in the history
ProjectSlugHistoryFactory.create(project=project, old_slug=project.slug)
# Change the slug of the project to the new_slug
project.slug = new_slug
project.save()
project.refresh_from_db()
return project
@pytest.mark.django_db
def test_project_view_redirects_old_slug(client, project_d):
"""
Test to ensure that accessing a project view with an old slug redirects to the new slug URL.
"""
old_slug = project_d.slug
new_slug = "project-d-new-1"
project_d = create_slug_history_and_change_slug(project_d, new_slug)
# First access the URL with the new slug and ensure it's working
response = client.get(
reverse("pontoon.projects.project", kwargs={"slug": new_slug})
)
assert response.status_code == 200
# Now access the URL with the old slug
response = client.get(
reverse("pontoon.projects.project", kwargs={"slug": old_slug})
)
# The old slug should cause a redirect to the new slug URL
assert response.status_code == 302
assert response.url == reverse(
"pontoon.projects.project", kwargs={"slug": new_slug}
)
@pytest.mark.django_db
def test_handle_old_slug_redirect_no_loop(client, project_d):
"""
Test that there is no redirect loop when a project's slug is renamed from 'cc' to 'dd' and then back to 'cc'.
"""
# Rename project from 'cc' to 'dd' and then back to 'cc'
create_slug_history_and_change_slug(project_d, "cc")
create_slug_history_and_change_slug(project_d, "dd")
create_slug_history_and_change_slug(project_d, "cc")
# Request the project detail view with slug 'cc'
response = client.get(reverse("pontoon.projects.project", kwargs={"slug": "cc"}))
# Assert that the response is not a redirect (status code is not 302)
assert response.status_code != 302
@pytest.mark.django_db
def test_handle_old_slug_redirect_no_redirect_to_different_project(client, project_d):
"""
Test that a request for a slug that was changed and then reused by a different project does not redirect to the original project.
"""
# Rename project from 'ee' to 'ff'
create_slug_history_and_change_slug(project_d, "ee")
create_slug_history_and_change_slug(project_d, "ff")
# Create a new project with slug 'ee'
project = ProjectFactory.create(
name="Project E", slug="ee", disabled=False, system_project=False
)
ResourceFactory.create(project=project, path="resource_e.po", format="po")
# Request the project detail view with slug 'ee'
response = client.get(reverse("pontoon.projects.project", kwargs={"slug": "ee"}))
# Assert that the response is successful (status code is 200)
assert response.status_code == 200
@pytest.mark.django_db
def test_handle_no_slug_redirect_project(client):
"""
Test to ensure that an attempt to access a project view without a slug raises a NoReverseMatch exception.
"""
with pytest.raises(NoReverseMatch):
# Try to access the URL without a slug
client.get(reverse("pontoon.projects.project", kwargs={}))
@pytest.mark.django_db
def test_handle_nonexistent_slug_redirect_project(client):
"""
Test to ensure that an attempt to access a project view with a non-existent slug returns a 404 error.
"""
slug = "nonexistent-slug"
response = client.get(reverse("pontoon.projects.project", kwargs={"slug": slug}))
# The expectation here is that the server should return a 404 error
assert response.status_code == 404
@pytest.mark.django_db
def test_translation_view_redirects_old_slug(client, project_d):
"""
Test to ensure that accessing a translation view with an old slug redirects to the new slug URL.
"""
# Add resource to project
resource_path = "resource_d.po"
old_slug = project_d.slug
new_slug = "project-d-new-2"
locale = project_d.locales.first().code
project_d = create_slug_history_and_change_slug(project_d, new_slug)
# First access the URL with the new slug and ensure it's working
response = client.get(
reverse(
"pontoon.translate",
kwargs={"project": new_slug, "locale": locale, "resource": resource_path},
)
)
assert response.status_code == 200
# Now access the URL with the old slug
response = client.get(
reverse(
"pontoon.translate",
kwargs={"project": old_slug, "locale": locale, "resource": resource_path},
)
)
# The old slug should cause a redirect to the new slug URL
assert response.status_code == 302
assert response.url == reverse(
"pontoon.translate",
kwargs={"project": new_slug, "locale": locale, "resource": resource_path},
)
@pytest.mark.django_db
def test_handle_no_slug_redirect_translate(client, project_d):
"""
Test to ensure that an attempt to access a translate view without a slug raises a NoReverseMatch exception.
"""
locale = project_d.locales.first().code
resource_path = "resource_d.po"
with pytest.raises(NoReverseMatch):
client.get(
reverse(
"pontoon.translate",
kwargs={"locale": locale, "resource": resource_path},
)
)
@pytest.mark.django_db
def test_handle_nonexistent_slug_redirect_translate(client, project_d):
"""
Test to ensure that an attempt to access a translate view with a non-existent slug returns a 404 error.
"""
locale = project_d.locales.first().code
resource_path = "resource_d.po"
slug = "nonexistent-slug"
response = client.get(
reverse(
"pontoon.translate",
kwargs={"project": slug, "locale": locale, "resource": resource_path},
)
)
assert response.status_code == 404
@pytest.mark.django_db
def test_localization_view_redirects_old_slug(client, project_d):
"""
Test to ensure that accessing a localization view with an old slug redirects to the new slug URL.
"""
old_slug = project_d.slug
new_slug = "project-d-new-3"
locale = project_d.locales.first().code
project_d = create_slug_history_and_change_slug(project_d, new_slug)
# First access the URL with the new slug and ensure it's working
response = client.get(
reverse(
"pontoon.localizations.localization",
kwargs={"slug": new_slug, "code": locale},
)
)
assert response.status_code == 200
# Now access the URL with the old slug
response = client.get(
reverse(
"pontoon.localizations.localization",
kwargs={"slug": old_slug, "code": locale},
)
)
# The old slug should cause a redirect to the new slug URL
assert response.status_code == 302
assert response.url == reverse(
"pontoon.localizations.localization",
kwargs={"slug": new_slug, "code": locale},
)
@pytest.mark.django_db
def test_handle_no_slug_redirect_localization(client, project_d):
"""
Test to ensure that an attempt to access a localization view without a slug raises a NoReverseMatch exception.
"""
locale = project_d.locales.first().code
with pytest.raises(NoReverseMatch):
client.get(
reverse(
"pontoon.localizations.localization",
kwargs={"code": locale},
)
)
@pytest.mark.django_db
def test_handle_nonexistent_slug_redirect_localization(client, project_d):
"""
Test to ensure that an attempt to access a localization view with a non-existent slug returns a 404 error.
"""
locale = project_d.locales.first().code
slug = "nonexistent-slug"
response = client.get(
reverse(
"pontoon.localizations.localization",
kwargs={"slug": slug, "code": locale},
)
)
assert response.status_code == 404
@pytest.mark.django_db
def test_get_m2m_changes_no_change(user_a):
assert get_m2m_changes(
get_user_model().objects.none(), get_user_model().objects.none()
) == ([], [])
assert get_m2m_changes(
get_user_model().objects.filter(pk=user_a.pk),
get_user_model().objects.filter(pk=user_a.pk),
) == ([], [])
@pytest.mark.django_db
def test_get_m2m_added(user_a, user_b):
assert get_m2m_changes(
get_user_model().objects.none(), get_user_model().objects.filter(pk=user_b.pk)
) == ([user_b], [])
assert get_m2m_changes(
get_user_model().objects.filter(pk=user_a.pk),
get_user_model().objects.filter(pk__in=[user_a.pk, user_b.pk]),
) == ([user_b], [])
@pytest.mark.django_db
def test_get_m2m_removed(user_a, user_b):
assert get_m2m_changes(
get_user_model().objects.filter(pk=user_b.pk),
get_user_model().objects.none(),
) == ([], [user_b])
assert get_m2m_changes(
get_user_model().objects.filter(pk__in=[user_a.pk, user_b.pk]),
get_user_model().objects.filter(pk=user_a.pk),
) == ([], [user_b])
@pytest.mark.django_db
def test_get_m2m_mixed(user_a, user_b, user_c):
assert get_m2m_changes(
get_user_model().objects.filter(pk__in=[user_b.pk, user_c.pk]),
get_user_model().objects.filter(pk__in=[user_a.pk, user_b.pk]),
) == ([user_a], [user_c])
assert get_m2m_changes(
get_user_model().objects.filter(pk__in=[user_a.pk, user_b.pk]),
get_user_model().objects.filter(pk__in=[user_c.pk]),
) == ([user_c], [user_a, user_b])
assert get_m2m_changes(
get_user_model().objects.filter(pk__in=[user_b.pk]),
get_user_model().objects.filter(pk__in=[user_c.pk, user_a.pk]),
) == ([user_a, user_c], [user_b])
def test_util_base_extension_in():
assert extension_in("filename.txt", ["bat", "txt"])
assert extension_in("filename.biff", ["biff"])
assert extension_in("filename.tar.gz", ["gz"])
assert not extension_in("filename.txt", ["png", "jpg"])
assert not extension_in(".dotfile", ["bat", "txt"])
# Unintuitive, but that's how splitext works.
assert not extension_in("filename.tar.gz", ["tar.gz"])
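# For reference (a hedged aside, not part of the test suite): os.path.splitext
# only splits off the final suffix, e.g.
# os.path.splitext('filename.tar.gz') == ('filename.tar', '.gz'),
# so a compound extension like 'tar.gz' can never match.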
@pytest.mark.django_db
def test_util_base_get_object_or_none(project_a):
assert get_object_or_none(Project, slug="does-not-exist") is None
assert get_object_or_none(Project, slug=project_a.slug) == project_a
def test_util_base_latest_datetime():
larger = aware_datetime(2015, 1, 1)
smaller = aware_datetime(2014, 1, 1)
assert latest_datetime([None, None, None]) is None
assert latest_datetime([None, larger]) == larger
assert latest_datetime([None, smaller, larger]) == larger
@pytest.mark.parametrize(
"search_query,expected_results",
(
("", []),
("lorem ipsum dolor", ["lorem", "ipsum", "dolor"]),
('"lorem ipsum dolor"', ["lorem ipsum dolor"]),
('"lorem ipsum" dolor', ["lorem ipsum", "dolor"]),
('"lorem ipsum" "dolor dolor"', ["lorem ipsum", "dolor dolor"]),
),
)
def test_get_search_phrases(search_query, expected_results):
assert get_search_phrases(search_query) == expected_results
def test_is_email():
assert is_email("jane@doe.com") is True
assert is_email("john@doe") is False
# ==== /autox/autox_video/mmaction2/mmaction/utils/precise_bn.py (4paradigm/AutoX, Apache-2.0) ====
# Adapted from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/precise_bn.py # noqa: E501
# Original licence: Copyright (c) 2019 Facebook, Inc under the Apache License 2.0 # noqa: E501
import logging
import time
import mmcv
import torch
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import Hook
from mmcv.utils import print_log
from torch.nn import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from torch.nn.modules.instancenorm import _InstanceNorm
from torch.nn.parallel import DataParallel, DistributedDataParallel
from torch.utils.data import DataLoader
def is_parallel_module(module):
"""Check if a module is a parallel module.
The following 3 modules (and their subclasses) are regarded as parallel
modules: DataParallel, DistributedDataParallel,
MMDistributedDataParallel (the deprecated version).
Args:
module (nn.Module): The module to be checked.
Returns:
bool: True if the input module is a parallel module.
"""
parallels = (DataParallel, DistributedDataParallel,
MMDistributedDataParallel)
return bool(isinstance(module, parallels))
@torch.no_grad()
def update_bn_stats(model, data_loader, num_iters=200, logger=None):
"""Recompute and update the batch norm stats to make them more precise.
    During training both the BN stats and the weights change after every
    iteration, so the running average cannot precisely reflect the actual
    stats of the current model.
In this function, the BN stats are recomputed with fixed weights, to make
the running average more precise. Specifically, it computes the true
average of per-batch mean/variance instead of the running average.
Args:
model (nn.Module): The model whose bn stats will be recomputed.
data_loader (iterator): The DataLoader iterator.
num_iters (int): number of iterations to compute the stats.
logger (:obj:`logging.Logger` | None): Logger for logging.
Default: None.
"""
model.train()
assert len(data_loader) >= num_iters, (
        f'length of dataloader {len(data_loader)} must be at least the '
        f'iteration number {num_iters}')
if is_parallel_module(model):
parallel_module = model
model = model.module
else:
parallel_module = model
# Finds all the bn layers with training=True.
bn_layers = [
m for m in model.modules() if m.training and isinstance(m, _BatchNorm)
]
if len(bn_layers) == 0:
print_log('No BN found in model', logger=logger, level=logging.WARNING)
return
print_log(f'{len(bn_layers)} BN found', logger=logger)
# Finds all the other norm layers with training=True.
for m in model.modules():
if m.training and isinstance(m, (_InstanceNorm, GroupNorm)):
print_log(
'IN/GN stats will be updated like training.',
logger=logger,
level=logging.WARNING)
# In order to make the running stats only reflect the current batch, the
# momentum is disabled.
# bn.running_mean = (1 - momentum) * bn.running_mean + momentum *
# batch_mean
# Setting the momentum to 1.0 to compute the stats without momentum.
momentum_actual = [bn.momentum for bn in bn_layers] # pyre-ignore
for bn in bn_layers:
bn.momentum = 1.0
# Note that running_var actually means "running average of variance"
running_mean = [torch.zeros_like(bn.running_mean) for bn in bn_layers]
running_var = [torch.zeros_like(bn.running_var) for bn in bn_layers]
finish_before_loader = False
prog_bar = mmcv.ProgressBar(len(data_loader))
for ind, data in enumerate(data_loader):
with torch.no_grad():
parallel_module(**data, return_loss=False)
prog_bar.update()
for i, bn in enumerate(bn_layers):
# Accumulates the bn stats.
running_mean[i] += (bn.running_mean - running_mean[i]) / (ind + 1)
            # Accumulates the running average of the per-batch variance.
running_var[i] += (bn.running_var - running_var[i]) / (ind + 1)
if (ind + 1) >= num_iters:
finish_before_loader = True
break
assert finish_before_loader, 'Dataloader stopped before ' \
f'iteration {num_iters}'
for i, bn in enumerate(bn_layers):
# Sets the precise bn stats.
bn.running_mean = running_mean[i]
bn.running_var = running_var[i]
bn.momentum = momentum_actual[i]
class PreciseBNHook(Hook):
"""Precise BN hook.
Attributes:
dataloader (DataLoader): A PyTorch dataloader.
num_iters (int): Number of iterations to update the bn stats.
Default: 200.
interval (int): Perform precise bn interval (by epochs). Default: 1.
"""
def __init__(self, dataloader, num_iters=200, interval=1):
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, but got'
f' {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.num_iters = num_iters
def after_train_epoch(self, runner):
if self.every_n_epochs(runner, self.interval):
# sleep to avoid possible deadlock
time.sleep(2.)
print_log(
f'Running Precise BN for {self.num_iters} iterations',
logger=runner.logger)
update_bn_stats(
runner.model,
self.dataloader,
self.num_iters,
logger=runner.logger)
print_log('BN stats updated', logger=runner.logger)
# sleep to avoid possible deadlock
time.sleep(2.)
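# A minimal usage sketch (illustrative; ``runner``, ``model`` and
# ``train_loader`` are hypothetical objects, not defined in this module):
#
#   hook = PreciseBNHook(train_loader, num_iters=200, interval=1)
#   runner.register_hook(hook)
#
# or, to recompute the BN stats directly for a plain model:
#
#   update_bn_stats(model, train_loader, num_iters=200)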
# ==== /code/examples/dis_channels.py (dsuch/pymqi, Python-2.0) ====
# More examples are at https://dsuch.github.io/pymqi/examples.html
# or in code/examples in the source distribution.
import logging
import pymqi
logging.basicConfig(level=logging.INFO)
queue_manager = 'QM1'
channel = 'DEV.APP.SVRCONN'
host = '127.0.0.1'
port = '1414'
conn_info = '%s(%s)' % (host, port)
user = 'app'
password = 'password'
prefix = 'SYSTEM.*'
args = {pymqi.CMQCFC.MQCACH_CHANNEL_NAME: prefix}
qmgr = pymqi.connect(queue_manager, channel, conn_info, user, password)
pcf = pymqi.PCFExecute(qmgr)
try:
response = pcf.MQCMD_INQUIRE_CHANNEL(args)
except pymqi.MQMIError as e:
if e.comp == pymqi.CMQC.MQCC_FAILED and e.reason == pymqi.CMQC.MQRC_UNKNOWN_OBJECT_NAME:
logging.info('No channels matched prefix `%s`' % prefix)
else:
raise
else:
for channel_info in response:
channel_name = channel_info[pymqi.CMQCFC.MQCACH_CHANNEL_NAME]
logging.info('Found channel `%s`' % channel_name)
qmgr.disconnect()
# ==== /pyp2rpm/virtualenv.py (fedora-python/pyp2rpm, MIT) ====
import os
import glob
import logging
import pprint
from virtualenvapi.manage import VirtualEnvironment
import virtualenvapi.exceptions as ve
from pyp2rpm.exceptions import VirtualenvFailException
from pyp2rpm.settings import DEFAULT_PYTHON_VERSION, MODULE_SUFFIXES
logger = logging.getLogger(__name__)
def site_packages_filter(site_packages_list):
    '''Removes package metadata files (.egg-info, .dist-info, .pth) and bytecode caches'''
return set([x for x in site_packages_list if not x.endswith(
('.egg-info', '.dist-info', '.pth', '__pycache__', '.pyc'))])
def scripts_filter(scripts):
'''
Removes .pyc files and __pycache__ from scripts
'''
return [x for x in scripts if not x.split('.')[-1] == 'pyc' and
not x == '__pycache__']
class DirsContent(object):
'''
Object to store and compare directory content before and
    after installation of a package.
'''
def __init__(self, bindir=None, lib_sitepackages=None):
self.bindir = bindir
self.lib_sitepackages = lib_sitepackages
def fill(self, path):
'''
Scans content of directories
'''
self.bindir = set(os.listdir(path + 'bin/'))
self.lib_sitepackages = set(os.listdir(glob.glob(
path + 'lib/python*.*/site-packages/')[0]))
def __sub__(self, other):
'''
        Computes the difference of the DirsContent objects' attributes
'''
if any([self.bindir is None, self.lib_sitepackages is None,
other.bindir is None, other.lib_sitepackages is None]):
raise ValueError("Some of the attributes is uninicialized")
result = DirsContent(
self.bindir - other.bindir,
self.lib_sitepackages - other.lib_sitepackages)
return result
class VirtualEnv(object):
def __init__(self, name, temp_dir, name_convertor,
base_python_version):
self.name = name
self.temp_dir = temp_dir
self.name_convertor = name_convertor
if not base_python_version:
base_python_version = DEFAULT_PYTHON_VERSION
python_version = 'python' + base_python_version
self.env = VirtualEnvironment(temp_dir + '/venv',
python=python_version)
try:
self.env.open_or_create()
except (ve.VirtualenvCreationException,
ve.VirtualenvReadonlyException):
raise VirtualenvFailException('Failed to create virtualenv')
self.dirs_before_install = DirsContent()
self.dirs_after_install = DirsContent()
self.dirs_before_install.fill(temp_dir + '/venv/')
self.data = {}
def install_package_to_venv(self):
'''
Installs package given as first argument to virtualenv without
dependencies
'''
try:
self.env.install((self.name,), force=True,
options=["--no-deps"])
except (ve.PackageInstallationException,
ve.VirtualenvReadonlyException):
raise VirtualenvFailException(
'Failed to install package to virtualenv')
self.dirs_after_install.fill(self.temp_dir + '/venv/')
def get_dirs_differance(self):
'''
        Builds the final site_packages and scripts lists using the DirsContent
        subtraction and the filter helpers
'''
try:
diff = self.dirs_after_install - self.dirs_before_install
except ValueError:
raise VirtualenvFailException(
"Some of the DirsContent attributes is uninicialized")
self.data['has_pth'] = \
any([x for x in diff.lib_sitepackages if x.endswith('.pth')])
site_packages = site_packages_filter(diff.lib_sitepackages)
self.data['packages'] = sorted(
[p for p in site_packages if not p.endswith(MODULE_SUFFIXES)])
self.data['py_modules'] = sorted(set(
[os.path.splitext(m)[0] for m in site_packages - set(
self.data['packages'])]))
self.data['scripts'] = scripts_filter(sorted(diff.bindir))
        logger.debug('Data from files difference in virtualenv:')
logger.debug(pprint.pformat(self.data))
@property
def get_venv_data(self):
self.install_package_to_venv()
self.get_dirs_differance()
return self.data
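# A minimal sketch of the DirsContent difference (hypothetical values, for
# illustration only):
#
#   before = DirsContent(bindir={'pip'}, lib_sitepackages={'setuptools'})
#   after = DirsContent(bindir={'pip', 'mytool'},
#                       lib_sitepackages={'setuptools', 'mypkg'})
#   diff = after - before
#   # diff.bindir == {'mytool'}, diff.lib_sitepackages == {'mypkg'}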
# ==== /src/probnum/randprocs/covfuncs/_covariance_linear_operator.py (probabilistic-numerics/probnum, MIT) ====
"""LinearOperator that represents pairwise covariances of evaluations."""
from typing import Callable, Optional
import warnings
import numpy as np
from probnum import linops
from probnum.typing import ShapeType
_USE_KEOPS = True
try:
from pykeops.numpy import LazyTensor
except ImportError: # pragma: no cover
_USE_KEOPS = False
warnings.warn(
"KeOps is not installed and currently unavailable for Windows."
"This may prevent scaling to large datasets."
)
class CovarianceLinearOperator(linops.LinearOperator):
""":class:`~probnum.linops.LinearOperator` representing the pairwise
covariances of evaluations of :math:`f_0` and :math:`f_1` at the given input
points.
Supports both KeOps-based and standard implementations, but will prefer KeOps-based
implementations by default.
Parameters
----------
x0
*shape=* ``(prod(batch_shape_0),) +`` :attr:`input_shape_0` -- (Batch of)
input(s) for the first argument of the :class:`CovarianceFunction`.
x1
*shape=* ``(prod(batch_shape_1),) +`` :attr:`input_shape_1` -- (Batch of)
input(s) for the second argument of the :class:`CovarianceFunction`.
Can also be set to :data:`None`, in which case the function will behave as
if ``x1 == x0`` (potentially using a more efficient implementation for this
particular case).
shape
Shape of the linear operator.
evaluate_dense_matrix
Callable for the standard implementation that evaluates k(x0, x1) densely.
keops_lazy_tensor
:class:`~pykeops.numpy.LazyTensor` representing the covariance matrix
corresponding to the given batches of input points.
"""
def __init__(
self,
x0: np.ndarray,
x1: Optional[np.ndarray],
shape: ShapeType,
evaluate_dense_matrix: Callable[[np.ndarray, Optional[np.ndarray]], np.ndarray],
keops_lazy_tensor: Optional["LazyTensor"] = None,
):
self._x0 = x0
self._x1 = x1
self._evaluate_dense_matrix = evaluate_dense_matrix
self._keops_lazy_tensor = keops_lazy_tensor
self._use_keops = _USE_KEOPS and self._keops_lazy_tensor is not None
dtype = np.promote_types(x0.dtype, x1.dtype) if x1 is not None else x0.dtype
super().__init__(shape, dtype)
@property
def keops_lazy_tensor(self) -> Optional["LazyTensor"]:
""":class:`~pykeops.numpy.LazyTensor` representing the covariance matrix
corresponding to the given batches of input points.
When not using KeOps, this is set to :data:`None`.
"""
return self._keops_lazy_tensor
def _todense(self) -> np.ndarray:
return self._evaluate_dense_matrix(self._x0, self._x1)
def _matmul(self, x: np.ndarray) -> np.ndarray:
if self._use_keops:
return self.keops_lazy_tensor @ x
return self.todense() @ x
def _transpose(self) -> linops.LinearOperator:
return CovarianceLinearOperator(
self._x0,
self._x1,
(self.shape[1], self.shape[0]),
lambda x0, x1: self._evaluate_dense_matrix(x0, x1).T,
self._keops_lazy_tensor.T if self._keops_lazy_tensor is not None else None,
)
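# A minimal construction sketch (illustrative; the inputs and the dense
# evaluation callable below are hypothetical):
#
#   x0 = np.random.rand(5, 2)
#   K = CovarianceLinearOperator(
#       x0, None, shape=(5, 5),
#       evaluate_dense_matrix=lambda a, b: a @ (a if b is None else b).T,
#   )
#   y = K @ np.ones(5)  # falls back to the dense path without a KeOps tensor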
# ==== /qf_lib/backtesting/events/event_manager.py (quarkfin/qf-lib, Apache-2.0) ====
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import queue
import warnings
from typing import Dict, Type, Sequence
from qf_lib.backtesting.events.empty_queue_event.empty_queue_event import EmptyQueueEvent
from qf_lib.backtesting.events.end_trading_event.end_trading_event import EndTradingEvent
from qf_lib.backtesting.events.event_base import Event, EventNotifier, EventListener
from qf_lib.backtesting.events.time_event.time_event import TimeEvent
from qf_lib.common.utils.dateutils.timer import Timer
from qf_lib.common.utils.logging.qf_parent_logger import qf_logger
_EventType = Type[Event]
class EventManager:
"""
Class which takes the event and passes it to all handlers which are "interested" in this type of event.
Handlers can subscribe for the given event type, so that they will be notified each time when the event
of this type occurs.
"""
def __init__(self, timer: Timer) -> None:
self.logger = qf_logger.getChild(self.__class__.__name__)
self.events_queue = queue.Queue() # type: queue.Queue
self.timer = timer
self.continue_trading = True
"""
True if trading shall be continued (e.g. the backtest shall go on). False otherwise (e.g. no more data available
for the backtest or the user terminated the backtest).
"""
self._events_to_notifiers = {} # type: Dict[_EventType, EventNotifier]
"""
Mapping: event type to a corresponding notifier.
"""
def register_notifiers(self, notifiers_list: Sequence[EventNotifier]):
"""
Registers every notifier from the list of notifiers and associates them with certain types of events (defined
        by EventNotifier.events_type()). After being registered, a notifier can have listeners added to it.
        Then, whenever an event of a type associated with the notifier occurs, the event manager will tell
        the notifier to notify all of its listeners.
"""
for notifier in notifiers_list:
self._events_to_notifiers[notifier.events_type()] = notifier
def subscribe(self, event_type: _EventType, listener: EventListener):
"""
Whenever the event of type event_type occurs, the listener should be notified.
DEPRECATED
"""
notifier = self._events_to_notifiers[event_type]
notifier_type_name = str(type(notifier))
warnings.warn(
"EventManager.subscribe() is deprecated. Please use the {:s}.subscribe().".format(notifier_type_name),
DeprecationWarning
)
notifier.subscribe(listener)
def unsubscribe(self, event_type: _EventType, listener: EventListener):
"""
Stop notifications of events' occurrences of type event_type.
DEPRECATED
"""
notifier = self._events_to_notifiers[event_type]
notifier_type_name = str(type(notifier))
warnings.warn(
"EventManager.unsubscribe() is deprecated. Please use the {:s}.unsubscribe().".format(notifier_type_name),
DeprecationWarning
)
notifier.unsubscribe(listener)
def publish(self, event: Event):
"""
Puts a new event in the event queue.
"""
self.events_queue.put(event)
def dispatch_next_event(self) -> None:
"""
        Takes the next event from the events' queue and notifies the proper handlers.
"""
event = self._get_next_event()
self._dispatch_event(event)
def _get_next_event(self):
try:
# this assumes that all components of a backtester are running in the same thread (no multi-threading)
# Checking if queue is empty in this manner is not thread-safe.
event = self.events_queue.get(block=False)
except queue.Empty:
event = EmptyQueueEvent()
return event
def _dispatch_event(self, event: Event):
str_template = 'Dispatching event: {}'.format(event)
self.logger.debug(str_template)
event_type = type(event)
if isinstance(event, TimeEvent):
event_type = TimeEvent
elif event_type == EndTradingEvent:
self.continue_trading = False
notifier = self._events_to_notifiers[event_type]
notifier.notify_all(event)
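# A minimal usage sketch (illustrative; ``timer``, ``notifier`` and ``event``
# are hypothetical objects satisfying the interfaces above):
#
#   event_manager = EventManager(timer)
#   event_manager.register_notifiers([notifier])
#   event_manager.publish(event)
#   event_manager.dispatch_next_event()  # notifier.notify_all(event) is called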
# ==== /src/ralph/dc_view/urls/ui.py (allegro/ralph, Apache-2.0) ====
from django.conf.urls import url
from ralph.dc_view.views.ui import ServerRoomView, SettingsForAngularView
urlpatterns = [
url(
r'^dc_view/?$',
ServerRoomView.as_view(),
name='dc_view'
),
url(
r'^settings.js$', SettingsForAngularView.as_view(), name='settings-js',
),
]
# ==== /bostonkeyparty-2016/reverse/Alewife/sudhackar/05223a3cae8b71d81592d5977fb1c3622bcbf793.py (ByteBandits/writeups, no license) ====
primes=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29 , 31, 37, 41, 43, 47, 53, 59, 61, 67, 71 , 73, 79, 83, 89, 97,101,103,107,109,113 ,127,131,137,139,149,151,157,163,167,173 ,179,181,191,193,197,199,211,223,227,229 ,233,239,241,251,257,263,269,271,277,281 ,283,293,307,311,313,317,331,337,347,349 ,353,359,367,373,379,383,389,397,401,409 ,419,421,431,433,439,443,449,457,461,463 ,467,479,487,491,499,503,509,521,523,541 ,547,557,563,569,571,577,587,593,599,601 ,607,613,617,619,631,641,643,647,653,659 ,661,673,677,683,691,701,709,719,727,733 ,739,743,751,757,761,769,773,787,797,809 ,811,821,823,827,829,839,853,857,859,863 ,877,881,883,887,907,911,919,929,937,941 ,947,953,967,971,977,983,991,997,1009,1013,1019,1021]
program=[1009,1994,1009,437683,1009,2991,1009,441671,1009,4985,1009,447653,1009,6979,1009,455629,1009,10967,1009,459617,1009,12961,1009,461611,1009,16949,1009,465599,1009,18943,1009,477563,1009,22931,1009,485539,1009,28913,1009,489527,1009,30907,1009,497503,1009,36889,1009,501491,1009,40877,1009,507473,1009,42871,1009,519437,1009,46859,1009,521431,1009,52841,1009,539377,1009,58823,1009,545359,1009,60817,1009,555329,1009,66799,1009,561311,1009,70787,1009,567293,1009,72781,1009,569287,1009,78763,1009,575269,1009,82751,1009,585239,1009,88733,1009,591221,1009,96709,1009,597203,1009,100697,1009,599197,1009,102691,1009,605179,1009,106679,1009,611161,1009,108673,1009,615149,1009,112661,1009,617143,1009,126619,1009,629107,1009,130607,1009,639077,1009,136589,1009,641071,1009,138583,1009,645059,1009,148553,1009,651041,1009,150547,1009,657023,1009,156529,1009,659017,1009,162511,1009,670981,1009,166499,1009,674969,1009,172481,1009,680951,1009,178463,1009,688927,1009,180457,1009,698897,1009,190427,1009,706873,1009,192421,1009,716843,1009,196409,1009,724819,1009,198403,1009,730801,1009,210367,1009,736783,1009,222331,1009,740771,1009,226319,1009,748747,1009,228313,1009,754729,1009,232301,1009,758717,1009,238283,1009,766693,1009,240277,1009,770681,1009,250247,1009,784639,1009,256229,1009,794609,1009,262211,1009,806573,1009,268193,1009,808567,1009,270187,1009,818537,1009,276169,1009,820531,1009,280157,1009,824519,1009,282151,1009,826513,1009,292121,1009,836483,1009,306079,1009,850441,1009,310067,1009,854429,1009,312061,1009,856423,1009,316049,1009,860411,1009,330007,1009,874369,1009,335989,1009,878357,1009,345959,1009,880351,1009,347953,1009,884339,1009,351941,1009,904279,1009,357923,1009,908267,1009,365899,1009,916243,1009,371881,1009,926213,1009,377863,1009,934189,1009,381851,1009,938177,1009,387833,1009,944159,1009,395809,1009,950141,1009,399797,1009,964099,1009,407773,1009,968087,1009,417743,1009,974069,1009,419737,1009,980051,1009,429707,1009,988027,1009,431701,997,1009,62834218602027903020847719665223425146654617212722992027144214911641827026680511412932380187949248213789852545761003476131824368570161299975330144649940218401904733436386895249199660671466781368279571049672914435662773584512367616610560399812363622348764050356142602292498317063701266180178066736698649650603077653556752689667109866263212336516874366480304733353326665137872465113474755872037367112512891819260654627548482231530253783301734751585287396912386319699159112890579779795364184982390120059125375898966111618580944938711475411069386227087879068552149403285517455944821767870381933210992460261504808419511759011253639928637182674041206958441557843617560490548738838687511055741816584398619116893099424109505188735964039110409165865351862190153466324805393003378012101583503160760404763395768750917001677879946114789116765474963805754982799293288912299515553990094279801601151379966591331802681178101834760939339949269308947312815434666824980040698885758627830754602346494290097907931919389723195301433131574598354068840921752378128476707178674883533490411425115176806694706697465563870074235377260398777091501211423038063298206050667799539059827483981318658948788032776988235540636932870466714410478553221007986653978703320076582935993112721903155381718672583872951729184256558534528791385903762224831808308461091304991591435811040920910085250259314868528970730321972937285145421328275554947503795420958298694988066893004715362158648849427054661724370258610542508017004163481843827832052985211284786373459394464402289835354544688202964641710368994391919
269129407962305830893740474150354320490587049898056339874786507594109595432629579359534696157287452021382603440180975960067138559261804435878822076417903764855172368888346265596452924940210963761770823197391951420915300213102618241791219841137040149538465113177450972203852349638280892194237449260461268712906348533231103034870344636720124282553339354091542470505157441257212016287799756269241218524799961177383331163290077147342975591544118254026961787721483062752803250957426345504577775831279020364740434750316172809795889256090311297491562757065945452766576843805640972506841076014658076057356182574304136056232750702301103609334329448832939722616019588546336667878701463392802087522648361966485889068767637922572509456139871905693104604965299747433265522519888037191115057211922229380481686996493893956992927341516142269806644923138894478515417199052144847171505309079994054287224812537762321757181599326591618747053716226843632577382382302576499798008839889056984189846216185326716922888295019746007518283728991994098293153963578181916783332705336607446616909666311075252806383139241966465662899620916138553806251286077034921864415566292402224246426061885272435922786258600975751198783715478068134758533955712806467432466663314872190036645727409687374637303626753991514601525825520916640998380412311966956989299729608266590604789452970675797498830736408164065275942062060627959146003631176425511415683749651108844962730512987029365048934653447574664021852098073989058155654000827901922966376063648583724520876459735767841973883326527527773103870178968374783744002438463310395716428350889165062344551939445925227195944968627917944413637441570660370156104151560008428601459277096276654692488135315302883958550359857355096486256889978334090508712176718984831902882376412656463515218274574658897606461458551388217832038466263041949198355691974303591897927235712057142529754800451166167527361855712665454877834345919397069148772230479066460758119406170959095917387931495133069784387005282831602435416656708218751854765869857769679553113056551542108318025226445488359687004494972593014917996684741367996110116274716598456972819683092057713470266430636636419759551742113820960052510404720230033940008887228831607722165624010223114378625734302080211892292626920504721973672312344553050769013769223573000936855377623109432076635504083016800909201781328839685066447691618201854681294445388851293130216012701970491350172581330784124531273195394819672238016360156849278014496210905760236907455533499162198739037435876971061755239132645378502052640514764002614631299793878880413899823228970702565709875021840874492660149227413633976921111254791635489973007142554737258810540390068573120037052195157934984101397363546258681170299438544894195209961545776920094064836013812894653093294835859014167829454026346198560381719189809506175283875675816943569555889494675255872470722518680202497861685572531767089817800874212897501425878292581734972328631772728313213949527181513272648166855788267688705088537844209853893351277873177436116480110368707516267023352597244602207343714163419099591108995863893670212587725873385419065895521148689679580779924705621368569623716801063656045317829543063226292217919618433801038702604208124873515706033218308738038490887057934226802879150214447551780980852096685961655410294610061141793759597770339516136723952515040689949610176714667752236246724417594463977727655364741485179578469977843811134911365247727834346702732381939726945504162583281311234702003382245537672902816511627935852822908498752117091822804109690459980437354581165
2965962629163714889427307592378384787948608392935161760512373031943294253135653945376467534645062128214176859177671917924470733170543663095164294078188577644263829357684653992808048115290898706559292435665031983147770618047696164278180114571883909560849919923071788171847093498306550580036114960340160679659003480666374351890273587413582104233093778316851445659944947034432138454639433320133679779403332875832288448336528056715331939264128997610951123085318980941191657450923186786281447594211168439296167068599875424211947381124503574287428653464197183586766917064698969607222676938892700337244499023740021533322407975632591913985176741704511237708175586215484912007730703539347900914569981861379239832893109915816827892646998809604355655684309972455745534857625387871413240766757912260561276872396077870424036155192914575494839379065148798577492895218102356645374808828351190560242930341412752377938339484272418298446253783645567893675139638014454360329933950740656010948440208956227962611495754650589391649847173952559561706238119700416870351183225680846641395695009002342404433610812356666485300887852999615689594544734258696785715335953720841080346734273745462225722854716944440612200907986986761974645226700321349237318729650027840661849263165071729001132412715683532938282372956492101123225224688540919293418074389951077117059370386174729181602053802620109858124680047124580359410149534093848987322604588465223269320290130009628419324527856018953957600035078765304742008540572117322791116024555796432461153651126300748828389259598990206877366444955852954604969428779178082528034059004222296475937635225715400197410351141529356047922229074568209997829979346838932511651507196184691420815697818582222639999405831514378102612043068259534775279462337493896484375000000000000000000000000000000000000000000000000000000000000000000000000000000,997,502097355804369079547246643661761291298118634748836363649491889758034509461935483159915078315445864376882477408668485573664923456734987123772079931487719241392406285007864963685000205505394482918667267170816371632261520904935471959605294141721353207631258408603468285416573764609267614853884567619974501008117100646972261405940225024230878568991824128413164885534148880413522755356927539583979720848122829796891116207543984559446232076323735707116678471344677873626492121663521912027935344105138144568186465702627682192251453656954472532762293833197057531203805449852749549762798706078764716437825981949867224254407638814752388534528034283180688087092339133509423129006350589488362274074376306821034155853226673009205747180571097023983069823209255301900930225753452026035618886921203334355245322092133034947815183828275901165307607801671421121587448219898774123527384075141061022705074629905502865288363058496093675739564661684771340708722524276725041195240867384586138777423777820503741662320601130649037697623865447284082315189158507501658539092289109830804539359357141525363479988179563216647859526169447114022679553897563734810853060458464127857040934485802170818516759124345523562366832157065444331928687841734067693335406464272358625382199881350450809846036291858790491294241872830354753970669669055098677820700856182033592440622634460501370081665953421962897593101028585293130356259478480704734857073739680046723053275314894844763546056399738665137423875466802245899024165439206602638940305659507015861358208734973709473765880888631564806267455187206347262176510904579743220571573376713201179330431227493355407778474896453852388992030665284383063920549891439289605299123002028336618906775475711942752
914935132710107803230790826060362312612580008892714773950616352718389525793871553101695410269924529403106625359076342535001199116949143327865339861739544074476201047226380258621894124105505574301591472082324226268946726250654260142580602450035798370202822585771086705786234533623722251752304190732418846920230366477665717617762326557364923280758312130690991642681601675531749138561540478084549943005209861288146043194482216195427617934969444274081907571363394441272680257557918895212163564878775612339504472458132942270725912980036916953565264263980115998764903133240646693781807455710391426668590477013734813468989732154217525469372018557674644328160758698741538842855399323270409640766657021941058986387331249453946070564097559919944811461263426710910240731773090413256738833590439447779732831585619620832712893737094736950464658435634346280143520669733687618302409567455144833513187963040343871390712463847841385326914818556778495463944438436556971771230931095514783016604344323800582263574705754566436028868459469732887744879902597380407909943938068297284787889904553601822318336578276597497409113142393794353168173572382740465680006661566856780538649216233582376565641406528452326475229218572509081426469933492132685558489908337109878416200330973990631077250625539373387427401731411253290457703919159100799787838062279875553462769292575841744881663781837397322155160696709645108631643782227977107727237614812157116329073663893151423491031802862429416502514525906367469809004691194807595228443157194385398244265136318667268921276526777028590870983556448515335023061868262907409117079731032855542950255536240986795470138747013975926743616772820444156032444748470520493715684386915781367595772578624149577789602483983839841478526534339864192074871162556841204268891412896819020105996553090870673275419384906793136467027027404764848144068137009681464298881504377484883883862662493892212109671054468002328859731964657040640835498289720146567676957529462809669493097724882401107732148541881892440643185225817505751339321635034404908254246836141042435072006046959850663445546747748497978874774979635460931572368821137937647546027777053026452251675375368791334861061762718004479260019590627919493308746493377378290771333040444733307666672833998844981521543431800762328298606683178889158178357961472618092954427600954196596223946177644754162707435118227549032382820862819748343934648250824844696336948979632638573106405372406280861287277641844516716462007820571418480211848514575929448662220525431413296284049328859120348057166281951105127478527370502014884587977522799740704897138800215810093621313175683874687976353225976334580845911628802976608502211991316424848055750006831459323108898716027956953068867731500498262228944179435340642972579721687871206302566863214290349092319036034914000608071228182739966505731220893550200082573977574838240954776711330397033683621066913392689523596250162955204058681634565081401661402578213078891660903274500189978599260149410568094266518530781246681421337765343128995231811468558272057258608881298936457952155055553817128927118167841075835015791103336222219269677516576069067706209852093675578822559878265954826110348265970182735931363084636388911091061670512007361701481761456996840889808669811505725573933277391266192981819536302192132490330492013757043991211341936115559965679284643012929755718019724373775476434225027782053683387385051295238183221872373056331651490964680203526289817981952447795753695922131116848949833091000472599025558948047357523517354125280993270138918384617796423528703383092585781900116058618
4156930429524672516539501165080375924833291345560537459192125694725922302068041992414208055805065909773572571976130403681164189313519334650691350894602614796418772828939871059035015115580766571764509536576557130464695399374994550899074089674240089589432866719236464202233403294885582612550745275632442296172192623564576641486142828317213702576891904379598321939228709403256536312047969623685194330246456663079749586499084220375579316652507872292946875012466482824470432774707250524489619450216896375750616444251664515756466570884810083576309586519141251515118124071845062312157097256288192961806792066900270500521144047770243562264487965415262662776051613168034656779642758114200039168472440692230333077279334828068865916779206390832407497613723856853959754467657674563538356200113237531415811715639082774172579453150559544048837197565500009080784265163136751719074454322189938394735894637025740187989314178929192969601790353384766140549815636197188798439691133630864251539048445431466504628211935886080852752935630094058548912025946009818396075472885206608300233436748206662616807944144796532094594620433469104447555629020079624892194386879122958004893129494842096402682811580537753137281234266545777016470955138841847355634423380012680312487848379430425081882125872830213363022475922799999654579438370854572957242484172090786677950121432476319475502042779427036319762004097420400844853692174042251171285933661004386621015926998839941599937383368984020896260270592470934269224540700618758089646022331731630333468362387499863220719689333769720594951447656934220607389756444636956576624383813057790571838503297531863499031792359610404317936111291681774481991887238261842808901836334223213707711107364485905291532705821130807123125454420222217016446133202332841807820589616482431348754917145101606467632094604680353363557871712551081392926909985237301152861238448196740016300218475973366025567832749173016062387042861194087419952458165039784986962997512974528829545248332349853718708838814455644705761832485931700173130071897488979237327246760914209146081519385108482784142154660288373455889906583829801152124158506717738674619195960806284400917112864093439783694796630478012378864676928266821333294750308467294460965175361532830951966724506606586746876954737693583126883851147396217944080036427834332965580301642572700340135310646747719129141401437460780643498914539005330191345728255970110381137385266964222518724087095866095862614718716057244839384375250846048030470719467935727917893539564429681478585454577297831321858756129328359401791440618517519405327157366260531819490852031046823010869408062657846696435219976461406268554894567770971764444584971135801033696477178443244408796389792415991584094216896934420766505635365939320304110028135204359699782309572808211604994257856468898963160670508388545426450891965355149110111299200922903858711721574257678665591805489566917051248264400361657973441314647767689458266932904273127096255063678875054002411113587425151644025362144946685722667666911552246977486100064975984657109331523076430943145383842843621837003905509584456012817099690437316894531250000000000000000000000000000000000000000000000000000000000000000000,36245651685809845304492628113575794772175397649419099113249877307900325591212664159525112257976186262177576925165788428184727502868295212091525088936946409650133956767734324927766246011976989507374907377126249628035718622112298251507273187,997,1013,1019,1021,997,76993463136706695564099584,448219,38496731568353347782049792,997,2038,997,2550715459588113191740567853821480131,452303,850238
486529371063913522617940493377,997,3057,997,823141644718296094427700641815714988069885293953120708465576171875,458429,164628328943659218885540128363142997613977058790624141693115234375,997,5095,997,631438627444550736415080642521598404068584545931170399811149426440245178848956151040482280551968904085414619,466597,90205518206364390916440091788799772009797792275881485687307060920035025549850878720068897221709843440773517,997,7133,997,5073109956636842437922137917951595815016016809,470681,461191814239712948902012537995599619546910619,997,11209,997,438913347985031337591501224029935452069274872286254190655683643165087965696023,472723,33762565229617795199346248002302727082251913252788783896591049474237535822771,997,13247,997,1327219005770146547008686156809262336252304098712358296185474497662899493331386682893480798625505521976657389795886656059621285172228153327439765792208091,476807,78071706221773326294628597459368372720723770512491664481498499862523499607728628405498870507382677763332787635052156238801252068954597254555280340718123,997,17323,997,1610805172267842357617095639683797092166770721,489059,84779219593044334611426086299147215377198459,997,19361,997,2139592160996833904444519196048266285572585258708462244562014613878578784718122611950631792346783335492062143279594557904625677746393365345309654855942839875030957,497227,93025746130297126280196486784707229807503706900367923676609331038199077596440113563070947493338405890959223620851937300201116423756233275883028471997514777175259,997,23437,997,3630479287668869549750268730991097182488777024603224804293163934532866119354925487660843942045312796152147554107969601026775451735159578840647282686001322831,501311,125188940954098949991388576930727489051337138779421544975626342570098831701893982333132549725700441246625777727861020725061222473626192373815423540896597339,997,29551,997,133988324106187784280288196620247367470528903233752384280195808358192193530364030141478616319286544825474620032432885193545948964662015331837905297972451259,509479,4322204003425412396138328923233786047436416233346851105812768011554586888076259036821890849009243381466923226852673715920837063376194042962513074128143589,997,31589,997,5738931991401785278290295421593851207075494906229631943,513563,155106270037886088602440416799833816407445808276476539,997,37703,997,34852745127404453271355467204505037583904157560232023595243138842012775445916534653451148210482054873983793399474308536689963560530019612055489851743503416053101848051770672331302987621294126588219,519689,850066954326937884667206517183049697168394086834927404762027776634457937705281333011003614889806216438629107304251427724145452695854136879402191505939107708612240196384650544665926527348637233859,997,41779,997,12248497864385846776394397892390056468417217564487186546243452016977586379266627254227299054926635541031409485825456387995220131626108414165879951336719480556031331,531941,284848787543856901776613904474187359730632966615981082470777953883199683238758773354123233835503152117009522926173404371981863526188567771299533752016732105954217,997,43817,997,1009515176242195759436333727151660084423170233448167305610109739888867033605693921607825857499212362037761794631882229647666556510526199774621660504072978457006363045937600675597892161507028576725666537473371,533983,21479046303025441690134760152162980519641919860599304374683185955082277310759445140592039521259837490165144566635792120163118223628217016481311925618574009723539639275268099480806216202277203760120564627093,997,47893,997,3986468309779631833224477665931404346
83576908048526864634254825288673604304004967,552361,7521638320338927987215995596096989333652394491481638955363298590352332156679339,997,54007,997,27944128401974460395330123754686275388009856688602196578125401,558487,473629294948719667717459724655699582847624689637325365730939,997,60121,997,77940803786893016647539322934964966025718379100488200464157090690974981385586170948362089960785846708669816939830504157041168608009049605052660003972843137251140280478249968574051330621847204291870411456652836799,568697,1277718094867098633566218408769917475831448837712921319084542470343852153862068376202657212471899126371636343275909904213789649311623764017256721376603985856576070171774589648754939846259790234292957564863161259,997,62159,997,557222273128694516674162426702913777225589943389658106186243857297684547349274454742677626685673955962922402597754341425208664507439159276967833017800342899319139904259010271667792380324047918033,574823,8316750345204395771256155622431548913814775274472509047555878467129620109690663503622054129636924715864513471608273752615054694140882974283101985340303625362972237377000153308474513139164894299,997,68273,997,17440508702575850052896911458212242773207097595049949521684725971965638090857668453673550447629042635774807276483992681915852975891558766105215085808976020372544754224990747638780047860989301211703499,580949,245640967641913381026717062791721729200099966127464077770207408055854057617713640192585217572240037123588834880056234956561309519599419240918522335337690427782320482042123206180000674098440862136669,997,72349,997,1424380298289275705894559600755711170503199000303179135694877603277659422923208851946781214320169642370322910276576643316546509039955592318643318520069355214503644382280559886430362113741588407066504267684585657137741659,582991,19512058880675009669788487681585084527441082195933960762943528812022731820865874684202482387947529347538670003788721141322554918355556059159497513973552811157584169620281642279867974160843676809130195447734050097777283,997,74387,997,426450076438797242652822176973974841347834224968515749390409652341,589117,5398102233402496742440787050303479004402964873019186701144425979,997,80501,997,824106655669782210425565987496491762126702214637601168400484229048639977197249286980373617053059742599416312068799210836902298656937345312575927639796580262327402995651567079800656463080089983019,599327,9928995851443159161753807078270985085863882104067483956632340109019758761412642011811730325940478826498991711672279648637377092252257172440673826985500967015992807176524904575911523651567349193,997,84577,997,4944528468545255770865574380114221190998471661157926844550406264799390854466449317327746143102114005846689827694830766828943828956818930311295208977848091145573604349436839941159175394504050467760661318694531,605453,55556499646575907537815442473193496528072715293909290388206811964038099488387070981210630821372067481423481210054278278976896954570999216980845044694922372422175329768953257765833431398921915368097318187579,997,90691,997,514996339419582002586289324658434030355366668863920078962167376692296652132613308797333141596921224150313108804994586497792307478786701439693069492499380684727657941859765639397142855552266467039291,611579,5309240612573010335941127058334371446962542977978557515073890481363883011676425863890032387597126022168176379432933881420539252358625788038072881365972996749769669503708924117496318098476973887003,997,98843,997,30144211365525471290590990192668901245148113012425479216232344857120761450249538419612686911345426641326384
1134002419787734946904753889329810749588333759276172364193230338694508431109559638625045463746364341506749635448119,613621,2984575382725294187187226751749396162885951783408463288735875728427798163391043407882444248648062043695681595386162572155791553512414745839710391963700586892795685081488501925826050589699392327182809369943975314352826219,997,102919,997,2555239879563593045696761114418609456507282255603548227099728781334221193057686271515208071736549649604470137028762071704870151737556338573583097780176553033728339957624547890696818131074248336445137980275782255127376514077,619747,24808154170520320832007389460374849092303711219451924534948823119749720320948410403060272541131549996159904242997690016552137395510255714306632017283267505181828543277908231948512797389070372198496485245395944224537636059,997,104957,997,1012323941142663752943575066252780797696120427731616910828998531998548452296595951381134724249072458579321607158097952811936582645824027310322752213386853835911703333296464759502534737503217216491270980521356531,625873,9460971412548259373304439871521315866318882502164644026439238616808864040155102349356399292047406154946930908019607035625575538746018946825446282368101437718800965731742661303762006892553431929824962434779033,997,109033,997,14538361229487745257682170457856734814405959216469425962052011400576054111276050364279603615018009986387137829571146551766463482651444282213513369961768211142997430772394358145784688162016585046608655019700504896800057220809286471,629957,133379460820988488602588719796850778113816139600636935431669829363083065241064682241097280871724862260432457152028867447398747547260956717555168531759341386633003952040315212346648515247858578409253715777068852264220708447791619,997,111071,997,3375454873554339821014112396646130304474852067498624799546619899489477269929546975713496262833655453356344306236948923050310411808869829632508321207505306583328796000277316597491734113305075324587336522411370280436859,631999,29871282066852564787735507934921507119246478473439157518111680526455551061323424563836250113572172153595967311831406398675313378839556014446976293871728376843617663719268288473378177993850224111392358605410356464043,997,115147,997,39459694765875470141576638079098465499479668623064100064810566131731733407973558410719836295273087266359541647498772639126368333953423358659625851571582603515211711056260787965851889797806599055825412860621841289331451,644251,310706257999019449933674315583452484247871406480819685549689497100249869354122507171022333033646356428027886988179312119105262472074199674485242925760492941064659142175281795006707793683516527998625298115132608577413,997,129413,997,7553129670700810117236177443049339501407393353024497112491720588260921329,654461,57657478402296260436917385061445339705399949259728985591539851818785659,997,133489,997,25980185209260680155983489091150957266592864263389645865369284413013694711280605231725376148215553402565525344770150494341675231231995828510711703056761109302213135579423282202157355856501247492062574715366490096512678476350758824550709012111849700772341340219,656503,189636388388764088729806489716430345011626746448099604856710105204479523440004417749820263855587981040624272589563142294464782709722597288399355496764679629943161573572432716804068290923366770015055289893186059098632689608399699449275248263590143801257965987,997,139603,997,10612844780931341204905743261389626433835761703485911204511787776938513393589012345661041342841070209445403080659510317229562992322121067383009158102301607574299187163698357439894
17390909386076858457295662168989239554402068894161149423576941499,660587,7635140130166432521514923209632824772543713455745259859360998400675189491790656363784921829382064898881584950114755623906160426131022350635258387123957991060646897240070760748125304970571122855096815076706251721147873396179094684528227172241,997,141641,997,27806982057853581864459841488908693336336627810182075117867298413541027939991858305143598359129694257228778010677915055505909918139828064754213739020059405463623859463575550295673176217514418739572901818140347404036512846794190316979114076091957059089918247219,666713,186624040656735448754764036838313378096218978591826007502465090023765288187864820839889921873353652733079047051529631245006106833153208488283313684698385271567945365527352686548142122265197441205187260524431861772057133199961008838786000510684275564361867431,997,151831,997,3285953693565530256115800482073279437127404312775462128879985648281136509133852277946366476820598526610405012654942447679756925982034824013822476121812295179872880312347210182423656346523480923472854342890714431110955178199116118028229426669,672839,21761282738844571232554970079955492961108637832950080323708514227027394100224187271167989912719195540466258361953261242912297523059833271614718384912664206489224372929451723062408320175652191546177843330402082325238113762908053761776353819,997,153869,997,622356281582461391123389336535578068400307622361011899125957703826390742157096885215871433085702703260992789059584333983894288210865658524955265536811354609574720641602347188123108545087591929076283949532906136151478757549983,674881,3964052748932875102696747366468650117199411607394980249209921680422870969153483345324021866787915307394858529041938433018434956757106105254492137177142386048246628290460810115433812389092942223415821334604497682493495271019,997,159983,997,10241536787881076649864784083320725924155487294468200096074282642623762885457,687133,62831514036080224845796221370065803215677836162381595681437316825912655739,997,166097,997,11405250480124509891101363855667497861474483259204554342932963047962397866264996748378991721576010916172344867162584304709430613352445633564921189986187763396984414357275984363672100783488186693142363961454827603244723320557,691217,68294913054637783779050082968068849469907085384458409239119539209355675845898184122029890548359346803427214773428648531194195289535602596197132874168789002377152181780095714752527549601725668821211760248232500618231876171,997,170173,997,1944807869360994770207438536559919056634540443061560281988106382931336667032429874652507850233703229371452968943082618826443877218595974482072630868786603739049970890829171640243310329078072773142632259951108763745080055495578703651,697343,11241663984745634509869586916531324026789251115962776196463042675903680156256820084696577168980943522378340860942674097262681371205757077931055669761772275948265727692654171330886186873283657648223307860989067998526474309222998287,997,176287,997,225073418047842319520715088364130442319988885603897716155330438594055372663041,705511,1257393396915320220786117812090114202904965841362557073493466137396957389179,997,182401,997,225329171368509161580472750897026209271177545132054535919092795048175517709558661017404496471571909222764775628194943731479366517164081876622381021735832173151443162628171041881190685097728786249713457178472422049206541511338483211278808636889900059219,715721,1244912548997288185527473761862023255641864890232345502315429806895997335411926303963560753986585133827429699603286981941874953133503214787
968955921192442945588083771426359347409893287832755725136538437450123878724898019399660128239109439982817127399,997,184439,997,147948824613896094021559256468208240621247063714817440181805623939619560948705390107983487027464637347879398154756846108483531621445912903031841905046959331289859397842582645928423887704550435180423126450785960262310310693874091438507922975428859476159809606172410845400699,723889,774601175988984785453189824440880840948937506360300733936155099160311837427776911560122968730181347371096325417575110515620584405475983785507025680874132624554237685039699716902742867563091283667136787700450053729373354418188960411036245944653714534868113121321522750789,997,194629,997,3663574983339472396374498535089718440151822588170139197923643104403964824873940016494197681580897911686389150772108064161475879219326417780345224634357375369523896053565948898451794062203069501481019903036711514600073580767056801019,734099,18982253799686385473442997591138437513740013410207975118775352872559403237688808375617604567776673117546057776021285306536144451913608382281581474789416452691833658308631859577470435555456318660523419186718712510881210263041745083,997,196667,997,8455278772776224374271023592698437808269362247194077422320886658621746174281418149174663922086515928205781678321990770649841595335371231946470738240060062353185284927152119985585357359688807996283753901135009827258161244214785267280127016797383,742267,42920196816122966366857987780195115778017067244639986915334450043765209006504660655708953919220893036577571971177618125126099468707468182469394610355634834280128349884020913632412981521262984752709410665659948361716554539161346534416888410139,997,200743,997,7418664063326667174171516407096672529836721739118262523809114802763367797444381,748393,37279718911189282282268926668827500149933275070946042833211632174690290439419,997,202781,997,1216142943931601900840499346326153294372963200662871598602878771751067301282935383481935570679941736286303494921026416345368409318863091781019560429709350064711390142526964314902108955296323216588439774090112698978970772874445246117374500899,754519,5763710634746928440002366570266129357217835074231618950724543941948186262004433097070784695165600645906651634696807660404589617624943562943220665543646208837494740011976134193848857608039446524115828313223282933549624515992631498186609009,997,215009,997,138691792998900981393067649792787848641037326193951782941236989320550933465674492476050781517256816980352563136501783747517774908582709149868844034074488483067697567425768441388229379485083067254522425925225280069810787466572004302011,758603,621936291474892293242455828667210083592095633156734452651286947625788939307957365363456419359896040270639296576241182724294954747007664349187641408405777950976222275451876418781297665852390436118934645404597668474487836172968629157,997,227237,997,2028493490644134181545071400466908110867796623181315854487633734557528690094386218346576853932530189093546742060337625157893636505419736098290811151374445048600018545156783393583253452964962427130653747772754254546889532223197256645401908955067327736231726068875643791579593259,766771,8936094672441119742489301323642767008228178956745884821531426143425236520239586864962893629658723299971571550926597467655919103548104564309651150446583458363876733679104772658957063669449173687800236774329313896682332741071353553503973167202939769763135357131610765601672217,997,231313,997,1414931163881871027250876821568303414153814939990715817456946245387817311732209775357364938583890403887112675621769920
3940141028311501017698570123066261061120171424198367746633234728713058889449330070324521632921517787014310284981552853646663319503779299,772897,61787387069077337434536105745340760443398032314005057530871015082437437193546278399884931815890410650092256577369865519389262132364633265059258179328650921922145957198112430712815409227331394975240481766470012757719593948953209526431675312940259841831,997,233351,997,30093876104292118234789101271657244880699998710604456683832260298422030702485065967861524326546606571039352053995299041938053973700300262936244544083334881309585916203495310045228392711906230431409629365533390067925197505085591927635071892148491,776981,129158266542026258518408159964194184037339050260104964308292962654171805590064660806272636594620629060254729845473386446086068556653649197151264137696716228796506078126589313498834303484576096272144332040915837201395697446719278659377990953427,997,237427,997,3341433274322410860909358920965655465914609433427753011787584445887163322575233606213353128070580943023650547154816689267633028226655069995528795004468493200031965955545943936872867057651796177238218640454146294226353627831975242231915884119383355513992675163415937324785211381,785149,13980892361181635401294388790651278100061127336517795028399934920029972060984241030181393841299501853655441619894630499027753256178473096215601652738361896234443372198937003919970155052936385678820998495624043072076793421891109800133539264097838307589927511143999737760607579,997,243541,997,4118476469007064308947375538559785902711241611772556924176911678360484298072250699,789233,17089113979282424518453840408961767231166977642209779768368928125977113270009339,997,245579,997,931152388751175201699068624778090289867417543528220592117152773927749269913068388840365617398521258477932276771590394237676949552026467097846301304616397612088831084369673137110207936130665169120295304026669889420982367047795994272488560887275037507805158319176442153344967337522071321821019,803527,3709770473112251799597882967243387609033536029992910725566345712859558844275172863905839113141518958079411461241395992978792627697316601983451399619985647856927613881950888992470947952711813422790021131580358125183196681465322686344575939789940388477311387725802558379860427639530164628769,997,255769,997,2891247412588934131535766310701430184913777993027195144625265514460073835877768281374678458180366942013537958473137407582426122601856395302375298667160312868283179007377451488414685852848881981660077537670642989489234007367886409503052947081175934785149006331,813737,11249989932252661990411542064986109668925206198549397449903756865603400139602211211574624350896369424177190499895476294095043278606445117908075092090117948903825599250495920188383991645326389033696799757473319025249937771859480192618883062572669006946105083,997,261883,997,13899422530678692053654718384395654516426347025655826879567494777451559730600568674546707009339019292739476335367327016017867581118288482087649526209799357787918091759795268071323583783939990378975910915464265365184472642331075137172120703394599859293071808179295648708579999684135452477,825989,52849515325774494500588282830401728199339722531010748591511386986507831675287333363295463913836575257564548803678049490562234148738739475618439263155130637976874873611388851982218949748821256193824756332563746635682405484148574666053690887431938628490767331480211592047832698418765979,997,267997,997,13556999631911699746722047184714660045622538127335688908563664307555436074354484947561425414154718367281135759794348551834982860764
76238071951618737682287160425884197494461103908314040707400572892652361779504788601631816417329651376067771467758410992920761631,828031,5039776814837063102870649511046342024394995586370144575674224649648860994183823400580455544295434337279232624458865632652409985414409806958927950697703669741360164302953387003376632121588849713355584988027898842385248388168511715152668295419176249044315099,997,274111,997,197770168183920893490702355213946062548862275593686785036823325489830493680361725109,838241,729779218390852005500746698206443035235654153482239059176469835755832080001334779,997,276149,997,235066939145603730138455797181895332556660643522557763976711397934654296027286248141866352067440614645980785211857345634122543197521067658886043737467928065242170439144153675267262025034856560884161819395341422586089798939047835618577282359011,840283,848617108828894332629804321956300839554731565063385429518813710955430671578650715313596938871626767675020885241362258606940589160725876024859363673169415397986174870556511463058707671605980364202750250524698276484078696530858612341434232343,997,282263,997,653856944817121418564574769158558214241652061694177451283815899696349734329343253379,844367,2326893042053812877454002737219068378084171038057571001010020995360675211136452859,997,286339,997,153750924705191331202642487855432301214999247349347135664932925672884382532572106143629862520851000964392448797347295129760339890447171549018979033727220079019470962832403597856104509630053226079444303822073464068832801004488749074363606794131843019,846409,543289486590782089055273808676439226908124548937622387508596910504891811069159385666536616681452300227535154760944505758870458976845129148476957716350600985934526370432521547194715581731636841270121214918987505543578802136002646905878469237214993,997,288377,997,9247489933034770345008842513605671240539329503796940360077790769034919055860192075961498268156080222753546521642413997960863602513249430650096399945058850657850730509948718278723816846696059730196008820021372108447626975263432296338773003684247622175988091632568335607339591109011826268182783373997524131,856619,31561399088855871484671817452579082732216141651184096792074371225375150361297583877001700573911536596428486421987761085190660759430885428839919453737402220675258465904261837128750228145720340376095593242393761462278590359260861079654515370935998710498252872466103534496039560099016471905060694109206567,997,298567,997,1717407504335870232912715929360339133649245648531977431437710884144242413898244908199676348333630782469611821101441951565524681310844200869852683280497944626558680801715390769378642397671865997499800973267974091300091032739882947247896538134151189417,870913,5594161251908372094178227782932700761072461395869633327158667375062678872632719570682984847992282679054110166454208311288354010784508797621669978112371155135370295771059904786249649503817153086318569945498286942345573396546849991035493609557495731,997,312833,997,186094640094728709905582884915209478720162295193640169865982338425202130855964448777365803096561599373266189003786443004976861963671620462507490499001087000062228617512324435299563374854468112230954656065794720439794736047301775255274615166365555906615082699,874997,598375048536105176545282588151798966945859470076013407929203660531196562237827809573523482625599997984778742777448369790922385735278522387483892279746260450360863721904580177812100883776424798170272205999339937105449312049201849695416769023683459506800909,997,316909,997,990314189636971246748539883243258278525408833719198530
44916122582908820976031775150461062479829273919254632432784471230762209126862746456311631012017430778342731130579275835965526802369447102695762351319328603240840216102895901102277717945225511617318692885468391820057855359602494115072883,877039,316394309788169727395699643208708715183836688089200808450211254258494635706171805592527356165588734566308729817202783484863287945248391234222463297180290026654093068943373277845133553895997133213298247026608956041023054641839939623890472668452113793350456503093903578459601787867393338891,997,318947,997,42242760426214852365008492021688249001199142271915160025206075397415289834494590552596465606988123248212551380300285932126774225020692469707080058167842164648746139668051737622910245137506042977114350839236678790426338968454103079402971043834421080175047132836133633496964302426327,881123,133257919325598903359648239816051258678861647545473690931249449203202807048878834550777494028353701098462307193376296315857331940128367412325173685072057301731060377501740497233155347436927580369445901701062078203237662361053952931870571116196911924842419977401052471599256474531,997,323023,997,107307369572290541384475737454740253494018594946523953948261811827034605510928874078489851241855815524617369122814813937716249011703046172889308428655342212025001891566435646387451504158097825535109820621633632481094109611807638752054352547330203174484844484659,895417,324191448858883810829231835210695629891294848781039135795352905821856814232413516853443659340954125452016220914848380476484135987018266383351384980831849583157105412587418871261182792018422433640815168041189221997263171032651476592309222197372215028655119289,997,337289,997,55064612321770155133157362474829199989035428581948598172981940137568403555187622908028564874669701059511946421406182521933816203496055859680958661046865172951343822363678488724509509061180572829585917059572354958143854719027588481590624599078397653942081134186048842079801519289417050168490082593323,899501,163396475732255653214116802595932344181114031400440944133477567173793482359607189638066958085073296912498357333549503032444558467347346764631924810228086566621198285945633497698841273178577367446842483856297789193305206881387502912731823736137678498344454404112904575904455546852869585069703509179,997,343403,997,191910400529505153557224617029107865694106741205348818904526992376067891765008424641623636027152487218384283069648741467693297373871543851103342180022015891721139900840652679154541010152081242584577176398651308152433498225132989871250359174352180752175700889727556239511651,901543,553055909306931278262895149939792120155927208084578728831489891573682685201753385134362063478825611580358164465846517197963393008275342510384271412167192771530662538445684954335853055193317701972844888756920196404707487680498529888329565343954411389555333976160104436633,997,353593,997,195306582131271451009334326954770025184144851578630324452422872021506820732014956379715234978255404349682123138757786675633123282560391857045894094521929176524667838378563095623212129113112952618142325576737520038648102942539205523023679173996788382216711019,905627,559617713843184673379181452592464255541962325440201502729005363958472265707779244641017865267207462320006083492142655231040467858339231682079925772269138041617959422288146405797169424392873789736797494489219255125066197543092279435597934595979336338729831,997,355631,997,1215456615647736981075511688343015609651098104001912463828764256884376773285956483375387,926047,344321987435619541381164784233148897918158103116689083237610
2710720614088628771907579,997,359707,997,152371024479309333613791270766813059934644240963510757940065726092042020167346294014239858246488261844181541553866797374541435361748272964733304606225836878830766560242205779764084434055945144673618075979996122754129399688334667194097420399639375481262924139926860301,930131,424431823062142990567663706871345570848591200455461721281520128390089192666702768841893755561248640234488973687651246168639095715176247812627589432383946737690157549421186016055945498763078397419548958161549088451613926708453111961274151531028901062013716267205739,997,365821,997,8805855872789019240710264044493313902829667579523595450483405927948456557973767381593768592699990307656047373848464298739897608488173415985344555327733960422040018571984480783688585363881034183770028635322562755061525965264076157536693656742309428857200200534740552445744340920479346348313632819317,938299,23994157691523213189946223554477694558118985230309524388238163291412688168865851176004819053678447704784870228469929969318522093973224566717560096260855477989209859869167522571358543225833880609727598461369380803982359578376229312089083533357791359283924252138257636091946433025829281603034421851,997,373973,997,1095376316278855322873293779067055170364191817021514439349116525031244403693815779672168678363649208457797649035029957775057822028226826207868697627085868397348111959279609148252080232892263125008811953482298559446432650497666173228960391046465035759461531583942148845722771586537518927520699,948509,2936665727289156361590599943879504478188181815071084287799240013488590894621490025930747126980292784069162597949141977949216681040822590369621173262964794630960085681714769834455979176654860924956600411480693188864430698385164003294799975995884814368529575292070104144028878248089863076463,997,380087,997,15369226844695333062849381477909582790193081087319522766401784914789199098254937701754942361481513813591507178592892064416009512989526432006888411812015707106697003660843933262218400518290304215112593252662042221838402637726323711885254485048250408355850463886893101619739,956677,40552049722151274572161956406093885989955359069444651098685448323982055668218832986160797787550168373592367225838765341467043569893209583131631693435397644080994732614363940005853299520554892388159876656100375255510297197167081033998032942079816380886148981231907919841,997,386201,997,17936335753803465319644606369717202510207061261553117383696431100566559448104053392344197,960761,46831163848050823288889311670279902115423136453141298651948906267797805347530165515259,997,390277,997,25893139746956375074671910466488819057973682757474498176744179853848279269471981476034178206434626829662341282305098421017615074690674938772214414369410376488767243876576772000943323646306806499600311513657727872637967224802501913807127372752863262895742486671,966887,66563341251815874227948355954984110688878361844407450325820513763106116373963962663326936263328089536407046998213620619582558032623843030262761990666864721050815536957780904886743762586906957582520081011973593502925365616458873814414209184454661344204993539,997,396391,997,73974572111317406360276225631919880970967230930546543997873687812509688512760842513788622380060283819337381239289168279398371925148558197982252822440463431991798888852483797550848552471579198228007110388157478970670518327902466795935796129943875075370218141183995374789284599233523734628628605149143767,973013,1863339347892126104792852031030727480377008335782028816067347300063216335334026259793164291689175914844770308294437488146
05470844202917375270158242923081692674556395094417626072666379021610071103292469491580551563401809390182536009913844156029912028640347962680089105262681610160009407125009080980211,997,404543,997,1307141793874862084461913041302240551365792174026016528279865697359005336719217669177196976468676813204191353984957820150881355004620906305582162121381248966040062853839635204011545101328676009151433346953408816476579132602933740590965014866256568699705065819,987307,3259705221633072529830207085541747010887262279366624758802657599399015802292313389469319143313408511731150508690667880675514601009029691535117611275264960015062500882393105246911583793837097279679384905120720240589972899259186385513628466000639822193778219,997,408619,997,7093242912463185348072070110212638351742462073193796871217834643050267179901187029507018490754437695656607535423037831588564687811978689937562105842348187968983989171429748478114974959055204956755248726089665726321535007236838047456328883300447788907493192538880498815296925888779,991391,17342892206511455618758117628881756361228513626390701396620622599144907530320750683391243253678331774221534316437745309507493124234666723563721530176890435131990193573177869139645415547812237058081292728825588572913288526251437768841879910270043493661352548994817845514173412931,997,416771,997,1185579641965561589160299697265721932824027132750467864460376877428924099444308658448594136390967744065181760700818974092571770178341715224937792646992702816801853703682587972668759794434610387225171807457005618044826070319744092967744160173564727037441446431202779572868249757967428377671066761,997517,2829545684882008566015035077006496259723215113962930464105911401978339139485223528516931113104934950036233319095033351056257208062868055429445805840078049682104662777285412822598472063089762260680600972451087393901732864724926236199866730724498155220623977162775130245508949303024888729525219,997,426961,997,169052738021865017945085217436498678340551974017269410534135929153837234291820715450799841864356228195340732263779,1003643,401550446607755387042957761131825839288722028544582922883933323405789155087460131712113638632675126354728580199,997,428999,997,89600456898162554710940038315160351182309734414129170687610033541862560707475499371198298316876962891405560250165152771675263407396177488042619118232855720276625778667788670057647578107276749358158513983871052535363437572582097937450349496907401800441822855338949217171751568948262972788043043591458773382280560737161315249005363622939,1011811,207889691179031449445336515812437009703734882631390187210232096384831927395534801325286074981153046151753040023585041233585297928993451248358745053904537634052496006189764895725400413241941413824033675136591769223581061653322733033527493032267753597312814049510323009679237978998289960065065066337491353555175314935409084104420797269,997,439189,997,433,1013,1019]
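# NOTE (editorial inference, not stated in the source): `program` appears to
# be a flat list of (numerator, denominator) pairs -- 1009/1994, 1009/437683,
# ... -- i.e. the fraction table of a FRACTRAN-style register machine, with
# registers encoded in the exponents of the small primes that recur in the
# data (997, 1009, 1013, 1019, ...).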
from sympy.ntheory import factorint
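# sympy's factorint maps an integer to its prime factorization as a
# {prime: exponent} dict, e.g. factorint(1994) == {2: 1, 997: 1}. Presumably
# it is meant to read the register contents encoded in the large constants
# below; that usage is an inference -- the calling code is not shown in this
# excerpt.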
fail=6283421860202790302084771966522342514665461721272299202714421491164182702668051141293238018794924821378985254576100347613182436857016129997533014464994021840190473343638689524919966067146678136827957104967291443566277358451236761661056039981236362234876405035614260229249831706370126618017806673669864965060307765355675268966710986626321233651687436648030473335332666513787246511347475587203736711251289181926065462754848223153025378330173475158528739691238631969915911289057977979536418498239012005912537589896611161858094493871147541106938622708787906855214940328551745594482176787038193321099246026150480841951175901125363992863718267404120695844155784361756049054873883868751105574181658439861911689309942410950518873596403911040916586535186219015346632480539300337801210158350316076040476339576875091700167787994611478911676547496380575498279929328891229951555399009427980160115137996659133180268117810183476093933994926930894731281543466682498004069888575862783075460234649429009790793191938972319530143313157459835406884092175237812847670717867488353349041142511517680669470669746556387007423537726039877709150121142303806329820605066779953905982748398131865894878803277698823554063693287046671441047855322100798665397870332007658293599311272190315538171867258387295172918425655853452879138590376222483180830846109130499159143581104092091008525025931486852897073032197293728514542132827555494750379542095829869498806689300471536215864884942705466172437025861054250801700416348184382783205298521128478637345939446440228983535454468820296464171036899439191926912940796230583089374047415035432049058704989805633987478650759410959543262957935953469615728745202138260344018097596006713855926180443587882207641790376485517236888834626559645292494021096376177082319739195142091530021310261824179121984113704014953846511317745097220385234963828089219423744926046126871290634853323110303487034463672012428255333935409154247050515744125721201628779975626924121852479996117738333116329007714734297559154411825402696178772148306275280325095742634550457777583127902036474043475031617280979588925609031129749156275706594545276657684380564097250684107601465807605735618257430413605623275070230110360933432944883293972261601958854633666787870146339280208752264836196648588906876763792257250945613987190569310460496529974743326552251988803719111505721192222938048168699649389395699292734151614226980664492313889447851541719905214484717150530907999405428722481253776232175718159932659161874705371622684363257738238230257649979800883988905698418984621618532671692288829501974600751828372899199409829315396357818191678333270533660744661690966631107525280638313924196646566289962091613855380625128607703492186441556629240222424642606188527243592278625860097575119878371547806813475853395571280646743246666331487219003664572740968737463730362675399151460152582552091664099838041231196695698929972960826659060478945297067579749883073640816406527594206206062795914600363117642551141568374965110884496273051298702936504893465344757466402185209807398905815565400082790192296637606364858372452087645973576784197388332652752777310387017896837478374400243846331039571642835088916506234455193944592522719594496862791794441363744157066037015610415156000842860145927709627665469248813531530288395855035985735509648625688997833409050871217671898483190288237641265646351521827457465889760646145855138821783203846626304194919835569197430359189792723571205714252975480045116616752736185571266545487783434591939706914877223047906646075811940617095909591738793149513306978438700528
28316024354166567082187518547658698577696795531130565515421083180252264454883596870044949725930149179966847413679961101162747165984569728196830920577134702664306366364197595517421138209600525104047202300339400088872288316077221656240102231143786257343020802118922926269205047219736723123445530507690137692235730009368553776231094320766355040830168009092017813288396850664476916182018546812944453888512931302160127019704913501725813307841245312731953948196722380163601568492780144962109057602369074555334991621987390374358769710617552391326453785020526405147640026146312997938788804138998232289707025657098750218408744926601492274136339769211112547916354899730071425547372588105403900685731200370521951579349841013973635462586811702994385448941952099615457769200940648360138128946530932948358590141678294540263461985603817191898095061752838756758169435695558894946752558724707225186802024978616855725317670898178008742128975014258782925817349723286317727283132139495271815132726481668557882676887050885378442098538933512778731774361164801103687075162670233525972446022073437141634190995911089958638936702125877258733854190658955211486896795807799247056213685696237168010636560453178295430632262922179196184338010387026042081248735157060332183087380384908870579342268028791502144475517809808520966859616554102946100611417937595977703395161367239525150406899496101767146677522362467244175944639777276553647414851795784699778438111349113652477278343467027323819397269455041625832813112347020033822455376729028165116279358528229084987521170918228041096904599804373545811652965962629163714889427307592378384787948608392935161760512373031943294253135653945376467534645062128214176859177671917924470733170543663095164294078188577644263829357684653992808048115290898706559292435665031983147770618047696164278180114571883909560849919923071788171847093498306550580036114960340160679659003480666374351890273587413582104233093778316851445659944947034432138454639433320133679779403332875832288448336528056715331939264128997610951123085318980941191657450923186786281447594211168439296167068599875424211947381124503574287428653464197183586766917064698969607222676938892700337244499023740021533322407975632591913985176741704511237708175586215484912007730703539347900914569981861379239832893109915816827892646998809604355655684309972455745534857625387871413240766757912260561276872396077870424036155192914575494839379065148798577492895218102356645374808828351190560242930341412752377938339484272418298446253783645567893675139638014454360329933950740656010948440208956227962611495754650589391649847173952559561706238119700416870351183225680846641395695009002342404433610812356666485300887852999615689594544734258696785715335953720841080346734273745462225722854716944440612200907986986761974645226700321349237318729650027840661849263165071729001132412715683532938282372956492101123225224688540919293418074389951077117059370386174729181602053802620109858124680047124580359410149534093848987322604588465223269320290130009628419324527856018953957600035078765304742008540572117322791116024555796432461153651126300748828389259598990206877366444955852954604969428779178082528034059004222296475937635225715400197410351141529356047922229074568209997829979346838932511651507196184691420815697818582222639999405831514378102612043068259534775279462337493896484375000000000000000000000000000000000000000000000000000000000000000000000000000000
success=5020973558043690795472466436617612912981186347488363636494918897580345094619354831599150783154458643768824774086684855736649234567349871237720799314877192413924062850078649636850002055053944829186672671708163716322615209049354719596052941417213532076312584086034682854165737646092676148538845676199745010081171006469722614059402250242308785689918241284131648855341488804135227553569275395839797208481228297968911162075439845594462320763237357071166784713446778736264921216635219120279353441051381445681864657026276821922514536569544725327622938331970575312038054498527495497627987060787647164378259819498672242544076388147523885345280342831806880870923391335094231290063505894883622740743763068210341558532266730092057471805710970239830698232092553019009302257534520260356188869212033343552453220921330349478151838282759011653076078016714211215874482198987741235273840751410610227050746299055028652883630584960936757395646616847713407087225242767250411952408673845861387774237778205037416623206011306490376976238654472840823151891585075016585390922891098308045393593571415253634799881795632166478595261694471140226795538975637348108530604584641278570409344858021708185167591243455235623668321570654443319286878417340676933354064642723586253821998813504508098460362918587904912942418728303547539706696690550986778207008561820335924406226344605013700816659534219628975931010285852931303562594784807047348570737396800467230532753148948447635460563997386651374238754668022458990241654392066026389403056595070158613582087349737094737658808886315648062674551872063472621765109045797432205715733767132011793304312274933554077784748964538523889920306652843830639205498914392896052991230020283366189067754757119427529149351327101078032307908260603623126125800088927147739506163527183895257938715531016954102699245294031066253590763425350011991169491433278653398617395440744762010472263802586218941241055055743015914720823242262689467262506542601425806024500357983702028225857710867057862345336237222517523041907324188469202303664776657176177623265573649232807583121306909916426816016755317491385615404780845499430052098612881460431944822161954276179349694442740819075713633944412726802575579188952121635648787756123395044724581329422707259129800369169535652642639801159987649031332406466937818074557103914266685904770137348134689897321542175254693720185576746443281607586987415388428553993232704096407666570219410589863873312494539460705640975599199448114612634267109102407317730904132567388335904394477797328315856196208327128937370947369504646584356343462801435206697336876183024095674551448335131879630403438713907124638478413853269148185567784954639444384365569717712309310955147830166043443238005822635747057545664360288684594697328877448799025973804079099439380682972847878899045536018223183365782765974974091131423937943531681735723827404656800066615668567805386492162335823765656414065284523264752292185725090814264699334921326855584899083371098784162003309739906310772506255393733874274017314112532904577039191591007997878380622798755534627692925758417448816637818373973221551606967096451086316437822279771077272376148121571163290736638931514234910318028624294165025145259063674698090046911948075952284431571943853982442651363186672689212765267770285908709835564485153350230618682629074091170797310328555429502555362409867954701387470139759267436167728204441560324447484705204937156843869157813675957725786241495777896024839838398414785265343398641920748711625568412042688914128968190201059965530908706732754193849067931364670270274047648481440681
370096814642988815043774848838838626624938922121096710544680023288597319646570406408354982897201465676769575294628096694930977248824011077321485418818924406431852258175057513393216350344049082542468361410424350720060469598506634455467477484979788747749796354609315723688211379376475460277770530264522516753753687913348610617627180044792600195906279194933087464933773782907713330404447333076666728339988449815215434318007623282986066831788891581783579614726180929544276009541965962239461776447541627074351182275490323828208628197483439346482508248446963369489796326385731064053724062808612872776418445167164620078205714184802118485145759294486622205254314132962840493288591203480571662819511051274785273705020148845879775227997407048971388002158100936213131756838746879763532259763345808459116288029766085022119913164248480557500068314593231088987160279569530688677315004982622289441794353406429725797216878712063025668632142903490923190360349140006080712281827399665057312208935502000825739775748382409547767113303970336836210669133926895235962501629552040586816345650814016614025782130788916609032745001899785992601494105680942665185307812466814213377653431289952318114685582720572586088812989364579521550555538171289271181678410758350157911033362222192696775165760690677062098520936755788225598782659548261103482659701827359313630846363889110910616705120073617014817614569968408898086698115057255739332773912661929818195363021921324903304920137570439912113419361155599656792846430129297557180197243737754764342250277820536833873850512952381832218723730563316514909646802035262898179819524477957536959221311168489498330910004725990255589480473575235173541252809932701389183846177964235287033830925857819001160586184156930429524672516539501165080375924833291345560537459192125694725922302068041992414208055805065909773572571976130403681164189313519334650691350894602614796418772828939871059035015115580766571764509536576557130464695399374994550899074089674240089589432866719236464202233403294885582612550745275632442296172192623564576641486142828317213702576891904379598321939228709403256536312047969623685194330246456663079749586499084220375579316652507872292946875012466482824470432774707250524489619450216896375750616444251664515756466570884810083576309586519141251515118124071845062312157097256288192961806792066900270500521144047770243562264487965415262662776051613168034656779642758114200039168472440692230333077279334828068865916779206390832407497613723856853959754467657674563538356200113237531415811715639082774172579453150559544048837197565500009080784265163136751719074454322189938394735894637025740187989314178929192969601790353384766140549815636197188798439691133630864251539048445431466504628211935886080852752935630094058548912025946009818396075472885206608300233436748206662616807944144796532094594620433469104447555629020079624892194386879122958004893129494842096402682811580537753137281234266545777016470955138841847355634423380012680312487848379430425081882125872830213363022475922799999654579438370854572957242484172090786677950121432476319475502042779427036319762004097420400844853692174042251171285933661004386621015926998839941599937383368984020896260270592470934269224540700618758089646022331731630333468362387499863220719689333769720594951447656934220607389756444636956576624383813057790571838503297531863499031792359610404317936111291681774481991887238261842808901836334223213707711107364485905291532705821130807123125454420222217016446133202332841807820589616482431348754917145101606467632094604680353363557871712551081392926909
985237301152861238448196740016300218475973366025567832749173016062387042861194087419952458165039784986962997512974528829545248332349853718708838814455644705761832485931700173130071897488979237327246760914209146081519385108482784142154660288373455889906583829801152124158506717738674619195960806284400917112864093439783694796630478012378864676928266821333294750308467294460965175361532830951966724506606586746876954737693583126883851147396217944080036427834332965580301642572700340135310646747719129141401437460780643498914539005330191345728255970110381137385266964222518724087095866095862614718716057244839384375250846048030470719467935727917893539564429681478585454577297831321858756129328359401791440618517519405327157366260531819490852031046823010869408062657846696435219976461406268554894567770971764444584971135801033696477178443244408796389792415991584094216896934420766505635365939320304110028135204359699782309572808211604994257856468898963160670508388545426450891965355149110111299200922903858711721574257678665591805489566917051248264400361657973441314647767689458266932904273127096255063678875054002411113587425151644025362144946685722667666911552246977486100064975984657109331523076430943145383842843621837003905509584456012817099690437316894531250000000000000000000000000000000000000000000000000000000000000000000
v11=1019
password=raw_input()
print password
# Goedel-style encoding: the i-th prime's exponent carries the i-th
# character code (primes, program and fail are defined earlier in the
# script; factorint comes from sympy).
for i,j in enumerate(password):
    v11*=pow(primes[i],ord(j))
print factorint(v11)
v10,v7,v6,v14=0,0,0,0
# FRACTRAN-style interpreter: program holds 0x1a7 (numerator, denominator)
# pairs; fire the first fraction that keeps the state v11 an integer.
j=0
while j<0x1a7:
    v7=program[2*j]
    v6=program[(2*j)+1]
    j+=1
    v10=v11
    v10*=v7
    if v10%v6==0:
        v11=v10/v6
        v14=1
        break
print j-1,factorint(v11)
# Repeat until no instruction fires (do-while emulation of the pass above).
while v14==1:
    v14=0
    j=0
    while j<0x1a7:
        v7=program[2*j]
        v6=program[(2*j)+1]
        j+=1
        v10=v11
        v10*=v7
        if v10%v6==0:
            v11=v10/v6
            v14=1
            break
    print j-1,factorint(v11)
print v11==fail
import math
import gmpy2
def factor_print(x):
    # Decode an encoded integer back to text: the exponent of the i-th
    # prime is the i-th character code.
    v4=x
    v2=int(gmpy2.mpz(x))  # unused; leftover gmpy2 round-trip
    message=""
    for i in xrange(0xAB):
        c=0
        while v4%primes[i]==0:
            v4/=primes[i]
            c+=1
        message+=chr(c)
    print message
factor_print(fail)
factor_print(success)
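For clarity, here is a minimal round-trip of the encoding used above. The 1019 seed and the prime-exponent scheme come from the script itself; the encode helper and its names are illustrative, with sympy supplying prime and factorint:

from sympy import prime, factorint

def encode(s, seed=1019):
    n = seed
    for i, ch in enumerate(s):
        n *= prime(i + 1) ** ord(ch)  # exponent of the i-th prime = char code
    return n

print(factorint(encode("AB")))  # -> {2: 65, 3: 66, 1019: 1}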
|
5d7364b78f811c343a920a42b7cefd1f6a7bde49
|
c83318a170bcf9728282820190918bd4c76ffaa6
|
/synology_api/docker_api.py
|
12cb733143933d7d5f3019398b82ea7abde054da
|
[
"MIT"
] |
permissive
|
N4S4/synology-api
|
9ae3ffb50f4a0d6aacff448a7fec000888076091
|
e355c443a05289c366f85915982eb5006b874a25
|
refs/heads/master
| 2023-05-25T21:59:50.721096
| 2023-05-05T19:42:35
| 2023-05-05T19:42:35
| 138,232,891
| 418
| 119
|
MIT
| 2023-09-13T07:58:39
| 2018-06-21T23:43:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,664
|
py
|
docker_api.py
|
from __future__ import annotations
from typing import Optional
from . import base_api_core
class Docker(base_api_core.Core):
def __init__(self,
ip_address: str,
port: str,
username: str,
password: str,
secure: bool = False,
cert_verify: bool = False,
dsm_version: int = 7,
debug: bool = True,
otp_code: Optional[int] = None
) -> None:
super(Docker, self).__init__(ip_address, port, username, password, secure, cert_verify, dsm_version, debug,
otp_code)
return
def containers(self) -> dict[str, object] | str:
api_name = 'SYNO.Docker.Container'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'list', 'limit': '-1', 'offset': '0', 'type': 'all'}
return self.request_data(api_name, api_path, req_param)
def container_resources(self) -> dict[str, object] | str:
api_name = 'SYNO.Docker.Container.Resource'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'get'}
return self.request_data(api_name, api_path, req_param)
def system_resources(self) -> dict[str, object] | str:
api_name = 'SYNO.Core.System.Utilization'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'get'}
return self.request_data(api_name, api_path, req_param)
def downloaded_images(self) -> dict[str, object] | str:
api_name = 'SYNO.Docker.Image'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'list', 'limit': '-1', 'offset': '0',
"show_dsm": 'false'}
return self.request_data(api_name, api_path, req_param)
def images_registry_resources(self) -> dict[str, object] | str:
api_name = 'SYNO.Docker.Registry'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'get'}
return self.request_data(api_name, api_path, req_param)
def network(self) -> dict[str, object] | str:
api_name = 'SYNO.Docker.Network'
info = self.gen_list[api_name]
api_path = info['path']
req_param = {'version': info['maxVersion'], 'method': 'list'}
return self.request_data(api_name, api_path, req_param)
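A minimal usage sketch for the wrapper above; the host, port and credentials are placeholders, and a reachable DSM with the Docker/Container Manager package is assumed:

from synology_api.docker_api import Docker

docker = Docker('192.168.1.10', '5000', 'admin', 'secret', secure=False)

print(docker.containers())        # every container, regardless of state
print(docker.system_resources())  # host-level CPU/RAM utilization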
|
bdf97fb676de2a5a805de6a375fea50ed4006875
|
b095173b2dbc77c8ad61c42403258c76169b7a63
|
/src/sagemaker/jumpstart/accessors.py
|
8117606299a2f294b20ec3faedf2e2db1b5b2f38
|
[
"Apache-2.0"
] |
permissive
|
aws/sagemaker-python-sdk
|
666665e717cfb76698ba3ea7563b45344634264d
|
8d5d7fd8ae1a917ed3e2b988d5e533bce244fd85
|
refs/heads/master
| 2023-09-04T01:00:20.663626
| 2023-08-31T15:29:19
| 2023-08-31T15:29:19
| 110,621,895
| 2,050
| 1,255
|
Apache-2.0
| 2023-09-14T17:37:15
| 2017-11-14T01:03:33
|
Python
|
UTF-8
|
Python
| false
| false
| 9,415
|
py
|
accessors.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains accessors related to SageMaker JumpStart."""
from __future__ import absolute_import
from typing import Any, Dict, List, Optional
import boto3
from sagemaker.deprecations import deprecated
from sagemaker.jumpstart.types import JumpStartModelHeader, JumpStartModelSpecs
from sagemaker.jumpstart import cache
from sagemaker.jumpstart.constants import JUMPSTART_DEFAULT_REGION_NAME
class SageMakerSettings(object):
"""Static class for storing the SageMaker settings."""
_parsed_sagemaker_version = ""
@staticmethod
def set_sagemaker_version(version: str) -> None:
"""Set SageMaker version."""
SageMakerSettings._parsed_sagemaker_version = version
@staticmethod
def get_sagemaker_version() -> str:
"""Return SageMaker version."""
return SageMakerSettings._parsed_sagemaker_version
class JumpStartModelsAccessor(object):
"""Static class for storing the JumpStart models cache."""
_cache: Optional[cache.JumpStartModelsCache] = None
_curr_region = JUMPSTART_DEFAULT_REGION_NAME
_content_bucket: Optional[str] = None
_cache_kwargs: Dict[str, Any] = {}
@staticmethod
def set_jumpstart_content_bucket(content_bucket: str) -> None:
"""Sets JumpStart content bucket."""
JumpStartModelsAccessor._content_bucket = content_bucket
@staticmethod
def get_jumpstart_content_bucket() -> Optional[str]:
"""Returns JumpStart content bucket."""
return JumpStartModelsAccessor._content_bucket
@staticmethod
def _validate_and_mutate_region_cache_kwargs(
cache_kwargs: Optional[Dict[str, Any]] = None, region: Optional[str] = None
) -> Dict[str, Any]:
"""Returns cache_kwargs with region argument removed if present.
Raises:
ValueError: If region in `cache_kwargs` is inconsistent with `region` argument.
Args:
cache_kwargs (Optional[Dict[str, Any]]): cache kwargs to validate.
region (str): The region to validate along with the kwargs.
"""
cache_kwargs_dict = {} if cache_kwargs is None else cache_kwargs
if region is not None and "region" in cache_kwargs_dict:
if region != cache_kwargs_dict["region"]:
raise ValueError(
f"Inconsistent region definitions: {region}, {cache_kwargs_dict['region']}"
)
del cache_kwargs_dict["region"]
return cache_kwargs_dict
@staticmethod
def _set_cache_and_region(region: str, cache_kwargs: dict) -> None:
"""Sets ``JumpStartModelsAccessor._cache`` and ``JumpStartModelsAccessor._curr_region``.
Args:
region (str): region for which to retrieve header/spec.
cache_kwargs (dict): kwargs to pass to ``JumpStartModelsCache``.
"""
new_cache_kwargs = JumpStartModelsAccessor._validate_and_mutate_region_cache_kwargs(
cache_kwargs, region
)
if (
JumpStartModelsAccessor._cache is None
or region != JumpStartModelsAccessor._curr_region
or new_cache_kwargs != JumpStartModelsAccessor._cache_kwargs
):
JumpStartModelsAccessor._cache = cache.JumpStartModelsCache(
region=region, **cache_kwargs
)
JumpStartModelsAccessor._curr_region = region
JumpStartModelsAccessor._cache_kwargs = new_cache_kwargs
@staticmethod
def _get_manifest(
region: str = JUMPSTART_DEFAULT_REGION_NAME, s3_client: Optional[boto3.client] = None
) -> List[JumpStartModelHeader]:
"""Return entire JumpStart models manifest.
Raises:
ValueError: If region in `cache_kwargs` is inconsistent with `region` argument.
Args:
region (str): Optional. The region to use for the cache.
s3_client (boto3.client): Optional. Boto3 client to use for accessing JumpStart models
s3 cache. If not set, a default client will be made.
"""
additional_kwargs = {}
if s3_client is not None:
additional_kwargs.update({"s3_client": s3_client})
cache_kwargs = JumpStartModelsAccessor._validate_and_mutate_region_cache_kwargs(
{**JumpStartModelsAccessor._cache_kwargs, **additional_kwargs}, region
)
JumpStartModelsAccessor._set_cache_and_region(region, cache_kwargs)
return JumpStartModelsAccessor._cache.get_manifest() # type: ignore
@staticmethod
def get_model_header(region: str, model_id: str, version: str) -> JumpStartModelHeader:
"""Returns model header from JumpStart models cache.
Args:
region (str): region for which to retrieve header.
model_id (str): model ID to retrieve.
version (str): semantic version to retrieve for the model ID.
"""
cache_kwargs = JumpStartModelsAccessor._validate_and_mutate_region_cache_kwargs(
JumpStartModelsAccessor._cache_kwargs, region
)
JumpStartModelsAccessor._set_cache_and_region(region, cache_kwargs)
return JumpStartModelsAccessor._cache.get_header( # type: ignore
model_id=model_id, semantic_version_str=version
)
@staticmethod
def get_model_specs(
region: str, model_id: str, version: str, s3_client: Optional[boto3.client] = None
) -> JumpStartModelSpecs:
"""Returns model specs from JumpStart models cache.
Args:
region (str): region for which to retrieve header.
model_id (str): model ID to retrieve.
version (str): semantic version to retrieve for the model ID.
s3_client (boto3.client): boto3 client to use for accessing JumpStart models s3 cache.
If not set, a default client will be made.
"""
additional_kwargs = {}
if s3_client is not None:
additional_kwargs.update({"s3_client": s3_client})
cache_kwargs = JumpStartModelsAccessor._validate_and_mutate_region_cache_kwargs(
{**JumpStartModelsAccessor._cache_kwargs, **additional_kwargs}
)
JumpStartModelsAccessor._set_cache_and_region(region, cache_kwargs)
return JumpStartModelsAccessor._cache.get_specs( # type: ignore
model_id=model_id, semantic_version_str=version
)
@staticmethod
    def set_cache_kwargs(cache_kwargs: Dict[str, Any], region: Optional[str] = None) -> None:
"""Sets cache kwargs, clears the cache.
Raises:
ValueError: If region in `cache_kwargs` is inconsistent with `region` argument.
Args:
            cache_kwargs (Dict[str, Any]): cache kwargs to validate.
region (str): Optional. The region to validate along with the kwargs.
"""
cache_kwargs = JumpStartModelsAccessor._validate_and_mutate_region_cache_kwargs(
cache_kwargs, region
)
JumpStartModelsAccessor._cache_kwargs = cache_kwargs
if region is None:
JumpStartModelsAccessor._cache = cache.JumpStartModelsCache(
**JumpStartModelsAccessor._cache_kwargs
)
else:
JumpStartModelsAccessor._curr_region = region
JumpStartModelsAccessor._cache = cache.JumpStartModelsCache(
region=region, **JumpStartModelsAccessor._cache_kwargs
)
@staticmethod
    def reset_cache(cache_kwargs: Optional[Dict[str, Any]] = None, region: Optional[str] = None) -> None:
"""Resets cache, optionally allowing cache kwargs to be passed to the new cache.
Raises:
ValueError: If region in `cache_kwargs` is inconsistent with `region` argument.
Args:
            cache_kwargs (Dict[str, Any]): cache kwargs to validate.
region (str): The region to validate along with the kwargs.
"""
cache_kwargs_dict = {} if cache_kwargs is None else cache_kwargs
JumpStartModelsAccessor.set_cache_kwargs(cache_kwargs_dict, region)
@staticmethod
@deprecated()
def get_manifest(
cache_kwargs: Optional[Dict[str, Any]] = None, region: Optional[str] = None
) -> List[JumpStartModelHeader]:
"""Return entire JumpStart models manifest.
Raises:
ValueError: If region in `cache_kwargs` is inconsistent with `region` argument.
Args:
cache_kwargs (Dict[str, Any]): Optional. Cache kwargs to use.
(Default: None).
region (str): Optional. The region to use for the cache.
(Default: None).
"""
cache_kwargs_dict: Dict[str, Any] = {} if cache_kwargs is None else cache_kwargs
JumpStartModelsAccessor.set_cache_kwargs(cache_kwargs_dict, region)
return JumpStartModelsAccessor._cache.get_manifest() # type: ignore
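A short sketch of how these accessors are typically driven; the model ID and version are placeholders, and AWS credentials with access to the regional JumpStart bucket are assumed:

from sagemaker.jumpstart.accessors import JumpStartModelsAccessor

# Resolve a model's header, then its full specs, for one region.
header = JumpStartModelsAccessor.get_model_header(
    region='us-west-2', model_id='pytorch-ic-mobilenet-v2', version='1.0.0')
specs = JumpStartModelsAccessor.get_model_specs(
    region='us-west-2', model_id='pytorch-ic-mobilenet-v2', version='1.0.0')
print(header.model_id, specs.version)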
|
14771233d7f15cebbe39f3573dcd22bdabd7663f
|
562e99134dbce70c96a7d5c1d746c175bcdddd5e
|
/example/core/migrations/0001_initial.py
|
5105d7cda4e871ff81a54f0c3b5b47c017b0375f
|
[
"BSD-2-Clause"
] |
permissive
|
mlavin/django-selectable
|
5d1e7627df1d5200e1b339df90ee2fcb6ff8443d
|
2281dc1b3b27f74ca5870109b5b49b8e03bf3f4a
|
refs/heads/master
| 2023-07-06T08:23:06.412877
| 2023-06-22T11:33:40
| 2023-06-22T11:33:40
| 4,803,005
| 114
| 63
|
BSD-2-Clause
| 2023-06-22T09:44:06
| 2012-06-27T01:53:04
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,329
|
py
|
0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.12 on 2017-02-25 01:13
from __future__ import unicode_literals
import django.db.models.deletion
import localflavor.us.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="City",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=200)),
("state", localflavor.us.models.USStateField(max_length=2)),
],
),
migrations.CreateModel(
name="Farm",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=200)),
],
),
migrations.CreateModel(
name="Fruit",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("name", models.CharField(max_length=200)),
],
),
migrations.AddField(
model_name="farm",
name="fruit",
field=models.ManyToManyField(to="core.Fruit"),
),
migrations.AddField(
model_name="farm",
name="owner",
field=models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="farms",
to=settings.AUTH_USER_MODEL,
),
),
]
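For reference, the models this initial migration materializes look roughly like this; the class bodies are reconstructed from the operations above, and nothing beyond what the migration records is implied:

from django.conf import settings
from django.db import models
from localflavor.us.models import USStateField

class City(models.Model):
    name = models.CharField(max_length=200)
    state = USStateField(max_length=2)

class Fruit(models.Model):
    name = models.CharField(max_length=200)

class Farm(models.Model):
    name = models.CharField(max_length=200)
    fruit = models.ManyToManyField(Fruit)
    owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='farms',
                              on_delete=models.CASCADE)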
|
383d8b97a9ef1dfe45346b16b0274dc7dc19d225
|
364774e29ef2474552ea3839de0951e63cbae0a6
|
/wouso/core/user/models.py
|
0398c7e6ea68ebf9ee9f6dba7c617263d68d1f82
|
[
"Apache-2.0"
] |
permissive
|
rosedu/wouso
|
66c50ef750cf79d6959768f7df93cc08607cc266
|
ed34c62ac925db719388f27fe5acb40376d8d0c1
|
refs/heads/master
| 2022-10-29T14:28:51.818073
| 2022-09-24T18:54:04
| 2022-09-24T18:54:04
| 2,965,476
| 121
| 97
|
NOASSERTION
| 2019-11-15T09:33:50
| 2011-12-12T16:15:01
|
Python
|
UTF-8
|
Python
| false
| false
| 12,436
|
py
|
models.py
|
# coding=utf-8
import logging
from md5 import md5
from datetime import datetime, timedelta
from random import shuffle
from django.db import models
from django.db.models import Sum, Q
from django.contrib.auth.models import User, Permission
from django.conf import settings
from wouso.core.decorators import cached_method, drop_cache
from wouso.core.game.models import Game
from wouso.core.magic.manager import MagicManager
from wouso.core.god import God
from wouso.core.magic.models import Spell
from .. import deprecated
class Race(models.Model):
""" Groups a large set of players together and it's used extensively for 'can_play' checks.
"""
name = models.CharField(max_length=100)
title = models.CharField(max_length=100, default='', blank=True)
artifacts = models.ManyToManyField('magic.Artifact', blank=True, through='magic.RaceArtifactAmount')
can_play = models.BooleanField(default=True, blank=True)
logo = models.ImageField(upload_to=settings.MEDIA_ARTIFACTS_DIR, null=True, blank=True)
@property
def points(self):
""" Sum of race members points
"""
return self.player_set.aggregate(points=Sum('points'))['points'] or 0
@property
def children(self):
return self.playergroup_set.all()
@property
def sisters(self):
return Race.objects.filter(can_play=self.can_play).exclude(pk=self.pk)
def __unicode__(self):
return self.name if not self.title else self.title
class PlayerGroup(models.Model):
""" Group players together """
# The group owner. If null, it belongs to the core game.
owner = models.ForeignKey(Game, blank=True, null=True)
name = models.CharField(max_length=100)
title = models.CharField(max_length=100, default='', blank=True)
parent = models.ForeignKey('Race', default=None, null=True, blank=True)
artifacts = models.ManyToManyField('magic.Artifact', blank=True, through='magic.GroupArtifactAmount')
players = models.ManyToManyField('user.Player', blank=True)
# used only for sorting and position
points = models.FloatField(default=0, editable=False)
@property
def live_points(self):
""" Calculate sum of user points dynamically """
p = self.players.aggregate(total=models.Sum('points'))
if p['total'] is not None:
return int(p['total'])
else:
return 0
@property
@deprecated('Please get rid of me')
def children(self):
""" All groups with parent set to this group, cached """
return []
@property
@deprecated('Please get rid of me')
def sisters(self):
""" All groups with the same parent as this group or of the same
class, if parent is not set.
"""
if self.parent:
return list(self.parent.children.exclude(id=self.id))
else:
return []
@property
def online_players(self):
oldest = datetime.now() - timedelta(minutes=10)
res = self.players.filter(last_seen__gte=oldest)
return res
def destroy(self):
"""
Delete the group and free its members
"""
for p in self.members:
p.group = None
p.save()
self.delete()
def __unicode__(self):
return self.name if self.title == '' else self.title
class Player(models.Model):
""" Base class for the game user. This is extended by game specific
player models.
"""
user = models.ForeignKey(User, unique=True, related_name="%(class)s_related")
full_name = models.CharField(max_length=200)
# Unique differentiator for ladder
# Do not modify it manually, use scoring.score instead
points = models.FloatField(default=0, blank=True, null=True, editable=False)
level_no = models.IntegerField(default=1, blank=True, null=True)
# The maximum reached level by the user
max_level = models.IntegerField(default=0, blank=False, null=False)
last_seen = models.DateTimeField(null=True, blank=True)
# artifacts available for using
artifacts = models.ManyToManyField('magic.Artifact', blank=True, through='magic.PlayerArtifactAmount')
# spells available for casting
spells_collection = models.ManyToManyField(Spell, blank=True, through='magic.PlayerSpellAmount', related_name='spell_collection')
nickname = models.CharField(max_length=20, null=True, blank=False, default="admin")
# race
race = models.ForeignKey(Race, blank=False, default=None, null=True)
description = models.TextField(max_length=600, blank=True)
EXTENSIONS = {}
def get_neighbours_from_top(self, count, user_race=None, spell_type=None):
""" Returns an array of neighbouring players from top: count up and count down
user_race and spell_type are used by mass spells for neighbours list.
"""
base_query = Player.objects.exclude(user__is_superuser=True).exclude(race__can_play=False)
allUsers = list(base_query.order_by('-points'))
try:
pos = allUsers.index(self)
except ValueError:
return []
if (spell_type is not None) and (user_race is not None) and (spell_type != 'o'):
if spell_type == 'p':
allUsers = [user for user in allUsers if user.race.name == user_race.name]
else:
allUsers = [user for user in allUsers if user.race.name != user_race.name]
if len(allUsers) <= 2*count+1:
return allUsers
start = max(pos-count, 0)
if pos + count >= len(allUsers):
start = len(allUsers)-2*count-1
players = allUsers[start:start+2*count+1]
return players
def get_division(self, count):
from wouso.interface.top.models import TopUser
all_users = list(TopUser.objects.all())
try:
curr_user = TopUser.objects.get(id=self.id)
except Exception:
return []
curr_user_pos = curr_user.position
division = [user for user in all_users if abs(curr_user.position - user.position) < 20]
shuffle(division)
return division
def user_name(self):
return self.user.username
def in_staff_group(self):
return self.user.has_perm('config.change_setting')
# Magic manager
@property
def magic(self):
return MagicManager(self)
# Other stuff
@property
def level(self):
""" Return an artifact object for the current level_no.
Ask God about the right artifact object, given the player instance.
In the future, God may check players race and give specific artifacts.
"""
return God.get_user_level(self.level_no, player=self)
@property
def coins(self):
# TODO check usage and deprecate this function
from wouso.core.scoring.models import History
return History.user_coins(self.user)
@property
def group(self):
return self._group()
@cached_method
def _group(self):
""" Return the core game group, if any
"""
try:
group = self.playergroup_set.filter(owner=None).get()
except (PlayerGroup.DoesNotExist, PlayerGroup.MultipleObjectsReturned):
group = None
return group
def set_group(self, group):
"""
Set the core group, which is unique
"""
for g in self.playergroup_set.filter(owner=None):
g.players.remove(self)
group.players.add(self)
drop_cache(self._group, self)
return group
def level_progress(self):
""" Return a dictionary with: points_gained, points_left, next_level """
return God.get_level_progress(self)
@property
def avatar(self):
return self._avatar()
@cached_method
def _avatar(self):
avatar = "http://www.gravatar.com/avatar/%s.jpg?d=%s" % (md5(self.user.email).hexdigest(), settings.AVATAR_DEFAULT)
return avatar
# special:
#@cached_method
def get_extension(self, cls):
if self.__class__ is cls:
return self
if cls == Player:
return self.user.get_profile()
if self.__class__ != Player:
obj = self.user.get_profile()
else:
obj = self
return obj._get_extension(cls)
def _get_extension(self, cls):
""" Search for an extension of this object, with the type cls
Create instance if there isn't any.
Using an workaround, while: http://code.djangoproject.com/ticket/7623 gets fixed.
Also see: http://code.djangoproject.com/ticket/11618
"""
try:
extension = cls.objects.get(user=self.user)
except cls.DoesNotExist:
extension = cls(player_ptr=self)
for f in self._meta.local_fields:
setattr(extension, f.name, getattr(self, f.name))
extension.save()
return extension
@classmethod
def register_extension(cls, attr, ext_cls):
"""
Register new attribute with an ext_cls
"""
cls.EXTENSIONS[attr] = ext_cls
@classmethod
def get_quest_gods(cls):
from wouso.core.scoring.models import History
from wouso.games.quest.models import QuestGame, QuestResult
def quest_points(user):
return int(History.objects.filter(game=QuestGame.get_instance(),
user=user).aggregate(points=Sum('amount'))['points'] or 0)
users = list(cls.objects.exclude(race__can_play=False).filter(
id__in=QuestResult.objects.values_list('user')))
        users.sort(lambda b, a: quest_points(a) - quest_points(b))  # descending by quest points
gods = users[:10]
return gods
@classmethod
def get_by_permission(cls, permission):
perm = Permission.objects.get(codename=permission)
users = User.objects.filter(Q(groups__permissions=perm) |
Q(user_permissions=perm)).distinct()
return Player.objects.filter(user__in=users)
@property
def race_name(self):
return self._race_name()
@cached_method
def _race_name(self):
if self.race:
return self.race.name
return ''
def save(self, **kwargs):
""" Clear cache for extensions
"""
#for k, v in self.EXTENSIONS.iteritems():
# drop_cache(self.get_extension, self, v)
#drop_cache(self.get_extension, self, self.__class__)
drop_cache(self._race_name, self)
drop_cache(self._group, self)
update_display_name(self, save=False)
return super(Player, self).save(**kwargs)
def __getitem__(self, item):
if item in self.__class__.EXTENSIONS:
return self.get_extension(self.__class__.EXTENSIONS[item])
return super(Player, self).__getitem__(item)
def __unicode__(self):
return self.full_name or self.user.__unicode__()
# Hack for having user and user's profile always in sync
def user_post_save(sender, instance, **kwargs):
profile, new = Player.objects.get_or_create(user=instance)
if new:
# add in default group
from wouso.core.config.models import ChoicesSetting
try:
default_group = PlayerGroup.objects.get(pk=int(ChoicesSetting.get('default_group').get_value()))
except (PlayerGroup.DoesNotExist, ValueError):
pass
else:
default_group.players.add(profile)
try:
default_race = Race.objects.get(pk=int(ChoicesSetting.get('default_race').get_value()))
except (Race.DoesNotExist, ValueError):
pass
else:
profile.race = default_race
profile.save()
profile.nickname = profile.user.username
profile.save()
update_display_name(profile)
models.signals.post_save.connect(user_post_save, User)
def update_display_name(player, save=True):
display_name = unicode(settings.DISPLAY_NAME).format(first_name=player.user.first_name,
last_name=player.user.last_name,
nickname=player.nickname).strip()
player.full_name = display_name
if save:
player.save()
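A brief sketch of the extension mechanism defined above; TopUser here stands in for any concrete Player subclass (the real one lives elsewhere in wouso), and a configured Django project is assumed:

# Expose a Player subclass through item lookup on any player.
class TopUser(Player):
    pass

Player.register_extension('top', TopUser)

player = Player.objects.get(user__username='admin')
top = player['top']   # lazily creates and saves the TopUser row if missing
print(top.points)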
|
85a9e24173a33ce0f2af55ca541799fd4b1374b5
|
afbae26b958b5ef20548402a65002dcc8e55b66a
|
/release/stubs.min/Tekla/Structures/Drawing/UI.py
|
0c9db90e5075ee484cd997e7978ec762352f1bd3
|
[
"MIT"
] |
permissive
|
gtalarico/ironpython-stubs
|
d875cb8932c7644f807dc6fde9dd513d159e4f5c
|
c7f6a6cb197e3949e40a4880a0b2a44e72d0a940
|
refs/heads/master
| 2023-07-12T01:43:47.295560
| 2022-05-23T18:12:06
| 2022-05-23T18:12:06
| 95,340,553
| 235
| 88
|
NOASSERTION
| 2023-07-05T06:36:28
| 2017-06-25T05:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 4,329
|
py
|
UI.py
|
# encoding: utf-8
# module Tekla.Structures.Drawing.UI calls itself UI
# from Tekla.Structures.Drawing,Version=2017.0.0.0,Culture=neutral,PublicKeyToken=2f04dbe497b71114
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class DrawingObjectSelector(object):
# no doc
def GetSelected(self):
""" GetSelected(self: DrawingObjectSelector) -> DrawingObjectEnumerator """
pass
def SelectObject(self,DrawingObject):
""" SelectObject(self: DrawingObjectSelector,DrawingObject: DrawingObject) -> bool """
pass
def SelectObjects(self,DrawingObjects,ExtendSelection):
""" SelectObjects(self: DrawingObjectSelector,DrawingObjects: ArrayList,ExtendSelection: bool) -> bool """
pass
def UnselectAllObjects(self):
""" UnselectAllObjects(self: DrawingObjectSelector) -> bool """
pass
def UnselectObject(self,DrawingObject):
""" UnselectObject(self: DrawingObjectSelector,DrawingObject: DrawingObject) -> bool """
pass
def UnselectObjects(self,DrawingObjects):
""" UnselectObjects(self: DrawingObjectSelector,DrawingObjects: ArrayList) -> bool """
pass
class DrawingSelector(object):
# no doc
def GetSelected(self):
""" GetSelected(self: DrawingSelector) -> DrawingEnumerator """
pass
class Events(MarshalByRefObject):
""" Events() """
def InitializeLifetimeService(self):
""" InitializeLifetimeService(self: Events) -> object """
pass
def OnDrawingEditorClose(self,EventName,Parameters):
""" OnDrawingEditorClose(self: Events,EventName: str,*Parameters: Array[object]) """
pass
def OnDrawingEditorOpen(self,EventName,Parameters):
""" OnDrawingEditorOpen(self: Events,EventName: str,*Parameters: Array[object]) """
pass
def OnDrawingListSelectionChanged(self,EventName,Parameters):
""" OnDrawingListSelectionChanged(self: Events,EventName: str,*Parameters: Array[object]) """
pass
def OnDrawingLoaded(self,EventName,Parameters):
""" OnDrawingLoaded(self: Events,EventName: str,*Parameters: Array[object]) """
pass
def OnSelectionChange(self,EventName,Parameters):
""" OnSelectionChange(self: Events,EventName: str,*Parameters: Array[object]) """
pass
def Register(self):
""" Register(self: Events) """
pass
def UnRegister(self):
""" UnRegister(self: Events) """
pass
DrawingEditorClosed=None
DrawingEditorClosedDelegate=None
DrawingEditorOpened=None
DrawingEditorOpenedDelegate=None
DrawingListSelectionChanged=None
DrawingListSelectionChangedDelegate=None
DrawingLoaded=None
DrawingLoadedDelegate=None
SelectionChange=None
SelectionChangeDelegate=None
class Picker(object):
# no doc
def IsInteractive(self):
""" IsInteractive(self: Picker) -> bool """
pass
def PickObject(self,prompt,*__args):
"""
PickObject(self: Picker,prompt: str) -> (DrawingObject,ViewBase,Point)
PickObject(self: Picker,prompt: str,typeFilter: Array[Type]) -> (DrawingObject,ViewBase,Point)
PickObject(self: Picker,prompt: str) -> (DrawingObject,ViewBase)
PickObject(self: Picker,prompt: str) -> Tuple[DrawingObject,ViewBase]
"""
pass
def PickObjectAndPoint(self,prompt):
""" PickObjectAndPoint(self: Picker,prompt: str) -> Tuple[DrawingObject,ViewBase,Point] """
pass
def PickPoint(self,prompt,pickedPoint=None,pickedView=None):
"""
PickPoint(self: Picker,prompt: str) -> Tuple[Point,ViewBase]
PickPoint(self: Picker,prompt: str) -> (Point,ViewBase)
"""
pass
def PickPoints(self,*__args):
"""
PickPoints(self: Picker,prompts: StringList) -> (PointList,ViewBase)
PickPoints(self: Picker,prompts: StringList) -> Tuple[PointList,ViewBase]
PickPoints(self: Picker,numberOfPicks: int,prompts: StringList) -> (PointList,ViewBase)
PickPoints(self: Picker,numberOfPicks: int,prompts: StringList) -> Tuple[PointList,ViewBase]
"""
pass
def PickThreePoints(self,firstPrompt,secondPrompt,thirdPrompt,firstPickedPoint,secondPickedPoint,thirdPickedPoint,pickedView):
""" PickThreePoints(self: Picker,firstPrompt: str,secondPrompt: str,thirdPrompt: str) -> (Point,Point,Point,ViewBase) """
pass
def PickTwoPoints(self,firstPrompt,secondPrompt,firstPickedPoint,secondPickedPoint,pickedView):
""" PickTwoPoints(self: Picker,firstPrompt: str,secondPrompt: str) -> (Point,Point,ViewBase) """
pass
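These are generated IronPython stubs, so the classes above carry signatures only. Inside a live Tekla Structures drawing session the Picker would be used roughly as follows; obtaining it via DrawingHandler.GetPicker() is an assumption about the surrounding Drawing API:

from Tekla.Structures.Drawing import DrawingHandler

picker = DrawingHandler().GetPicker()  # assumed accessor on DrawingHandler
obj, view, point = picker.PickObjectAndPoint('Pick any drawing object')
print(obj, view, point)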
|
947d7b229ffd0f1f6f739f2c36bc7ba8f34e42de
|
0bdabe35aefc31cfcc7aa46e3d2463b67b46ee96
|
/pyFileFixity/lib/profilers/pyinstrument/__main__.py
|
95fee11c5ac945ba3d5446cee78df6ccbf3a8158
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
lrq3000/pyFileFixity
|
1bed7958a786110a06df33d846a2e844576686e8
|
dd71ba67e08a5fe2ce50fb5403f93103284ad83e
|
refs/heads/master
| 2023-08-18T11:36:37.905055
| 2023-08-15T18:04:15
| 2023-08-15T18:04:15
| 31,501,322
| 107
| 10
|
MIT
| 2022-12-09T01:36:05
| 2015-03-01T15:53:56
|
Python
|
UTF-8
|
Python
| false
| false
| 4,960
|
py
|
__main__.py
|
from optparse import OptionParser
import sys
import os
import codecs
from pyinstrument import Profiler
from pyinstrument.profiler import SignalUnavailableError
# Python 3 compatibility. Mostly borrowed from SymPy
PY3 = sys.version_info[0] > 2
if PY3:
import builtins
exec_ = getattr(builtins, "exec")
else:
def exec_(_code_, _globs_=None, _locs_=None):
"""Execute code in a namespace."""
if _globs_ is None:
frame = sys._getframe(1)
_globs_ = frame.f_globals
if _locs_ is None:
_locs_ = frame.f_locals
del frame
elif _locs_ is None:
_locs_ = _globs_
exec("exec _code_ in _globs_, _locs_")
def main():
usage = ("usage: pyinstrument [options] scriptfile [arg] ...")
parser = OptionParser(usage=usage)
parser.allow_interspersed_args = False
parser.add_option('', '--setprofile',
dest='setprofile', action='store_true',
help='run in setprofile mode, instead of signal mode', default=False)
parser.add_option('', '--html',
dest="output_html", action='store_true',
help="output HTML instead of text", default=False)
parser.add_option('-o', '--outfile',
dest="outfile", action='store',
help="save report to <outfile>", default=None)
parser.add_option('', '--unicode',
dest='unicode', action='store_true',
help='force unicode text output')
parser.add_option('', '--no-unicode',
dest='unicode', action='store_false',
help='force ascii text output')
parser.add_option('', '--color',
dest='color', action='store_true',
help='force ansi color text output')
parser.add_option('', '--no-color',
dest='color', action='store_false',
help='force no color text output')
if not sys.argv[1:]:
parser.print_help()
sys.exit(2)
(options, args) = parser.parse_args()
sys.argv[:] = args
if len(args) > 0:
progname = args[0]
sys.path.insert(0, os.path.dirname(progname))
with open(progname, 'rb') as fp:
code = compile(fp.read(), progname, 'exec')
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
}
try:
profiler = Profiler(use_signal=not options.setprofile)
except SignalUnavailableError:
profiler = Profiler(use_signal=False)
profiler.start()
try:
exec_(code, globs, None)
except IOError as e:
import errno
if e.errno == errno.EINTR:
print(
                    'Failed to run program due to interrupted system call.\n'
'This happens because pyinstrument is sending OS signals to the running\n'
'process to interrupt it. If your program has long-running syscalls this\n'
'can cause a problem.\n'
'\n'
'You can avoid this error by running in \'setprofile\' mode. Do this by\n'
'passing \'--setprofile\' when calling pyinstrument at the command-line.\n'
'\n'
'For more information, see\n'
'https://github.com/joerick/pyinstrument/issues/16\n'
)
raise
except (SystemExit, KeyboardInterrupt):
pass
profiler.stop()
if options.outfile:
f = codecs.open(options.outfile, 'w', 'utf-8')
else:
f = sys.stdout
        unicode_override = options.unicode is not None
        color_override = options.color is not None
unicode = options.unicode if unicode_override else file_supports_unicode(f)
color = options.color if color_override else file_supports_color(f)
if options.output_html:
f.write(profiler.output_html())
else:
f.write(profiler.output_text(unicode=unicode, color=color))
        if f is not sys.stdout:
            f.close()
else:
parser.print_usage()
return parser
def file_supports_color(file_obj):
"""
Returns True if the running system's terminal supports color, and False
otherwise.
Borrowed from Django
https://github.com/django/django/blob/master/django/core/management/color.py
"""
plat = sys.platform
supported_platform = plat != 'Pocket PC' and (plat != 'win32' or
'ANSICON' in os.environ)
is_a_tty = hasattr(file_obj, 'isatty') and file_obj.isatty()
if not supported_platform or not is_a_tty:
return False
return True
def file_supports_unicode(file_obj):
encoding = getattr(file_obj, 'encoding', None)
if not encoding:
return False
codec_info = codecs.lookup(encoding)
if 'utf' in codec_info.name:
return True
return False
if __name__ == '__main__':
main()
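For reference, the same profiler can be driven from Python directly; a minimal sketch mirroring what this CLI wrapper does, with setprofile mode forced so it works where signals are unavailable:

from pyinstrument import Profiler

profiler = Profiler(use_signal=False)     # same effect as passing --setprofile
profiler.start()
sum(i * i for i in range(10 ** 6))        # code under test
profiler.stop()
print(profiler.output_text(unicode=True, color=False))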
|
eeec088528a8b8ff6c4751c349983c137c8f9538
|
338fb36acc32ff4b127385a4c0946eed90324862
|
/setup.py
|
8b8595ccd6b832674d4baec332d5ea946438d79d
|
[
"BSD-3-Clause"
] |
permissive
|
skorokithakis/django-loginas
|
43a6202eecb899b4f0cdd2d306a6fbd65a1b4d4c
|
b1d252c04f15af05362ba58ddf3c9db1ee80a908
|
refs/heads/master
| 2023-09-04T10:29:46.426123
| 2023-07-29T17:09:52
| 2023-07-29T17:09:52
| 6,147,933
| 288
| 57
|
BSD-3-Clause
| 2023-07-24T23:06:23
| 2012-10-09T21:15:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,362
|
py
|
setup.py
|
#!/usr/bin/env python
from loginas import __version__
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(
name="django-loginas",
version=__version__,
author="Stochastic Technologies",
author_email="info@stochastictechnologies.com",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/stochastic-technologies/django-loginas/",
description="""An app to add a "Log in as user" button in the Django user admin page.""",
license="BSD",
keywords="django",
zip_safe=False,
include_package_data=True,
packages=["loginas"],
package_dir={"loginas": "loginas"},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Framework :: Django :: 2.2",
"Framework :: Django :: 3.2",
"Framework :: Django :: 4.0",
"Intended Audience :: Developers",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
],
)
|
64145e9c7ab6a6b50fffa198fe7207ed2a51ce14
|
1ed25da5d1e27cd49fb4a02acfe99aadcf2fae57
|
/pygeoapi/provider/filesystem.py
|
db2a824bea68c34991364452516a77dd89abb1db
|
[
"MIT"
] |
permissive
|
geopython/pygeoapi
|
6d2a7b0e8fe75d0c454a0b2fc3599a0b88c7567f
|
2d3ec88320cf5e1ed47b4b794f40b453bad487e2
|
refs/heads/master
| 2023-09-04T04:30:59.768950
| 2023-09-03T02:00:23
| 2023-09-03T02:00:23
| 121,585,259
| 391
| 245
|
MIT
| 2023-09-13T18:13:00
| 2018-02-15T02:46:27
|
Python
|
UTF-8
|
Python
| false
| false
| 13,113
|
py
|
filesystem.py
|
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2023 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from datetime import datetime
import io
from json import loads
import logging
import os
from pygeoapi.provider.base import (BaseProvider, ProviderConnectionError,
ProviderNotFoundError)
from pygeoapi.util import file_modified_iso8601, get_path_basename, url_join
LOGGER = logging.getLogger(__name__)
class FileSystemProvider(BaseProvider):
"""filesystem Provider"""
def __init__(self, provider_def):
"""
Initialize object
:param provider_def: provider definition
:returns: pygeoapi.provider.filesystem.FileSystemProvider
"""
super().__init__(provider_def)
if not os.path.exists(self.data):
msg = f'Directory does not exist: {self.data}'
LOGGER.error(msg)
raise ProviderConnectionError(msg)
def get_data_path(self, baseurl, urlpath, dirpath):
"""
Gets directory listing or file description or raw file dump
:param baseurl: base URL of endpoint
:param urlpath: base path of URL
:param dirpath: directory basepath (equivalent of URL)
:returns: `dict` of file listing or `dict` of GeoJSON item or raw file
"""
thispath = os.path.join(baseurl, urlpath)
resource_type = None
root_link = None
child_links = []
        # NOTE: plain string concatenation, not os.path.join - a leading '/'
        # in the URL-derived dirpath would otherwise reset the joined path
        data_path = self.data + dirpath
if '/' not in dirpath: # root
root_link = baseurl
else:
parentpath = url_join(thispath, '.')
child_links.append({
'rel': 'parent',
'href': f'{parentpath}?f=json',
'type': 'application/json'
})
child_links.append({
'rel': 'parent',
'href': parentpath,
'type': 'text/html'
})
depth = dirpath.count('/')
root_path = '/'.replace('/', '../' * depth, 1)
root_link = url_join(thispath, root_path)
content = {
'links': [{
'rel': 'root',
'href': f'{root_link}?f=json',
'type': 'application/json'
}, {
'rel': 'root',
'href': root_link,
'type': 'text/html'
}, {
'rel': 'self',
'href': f'{thispath}?f=json',
'type': 'application/json',
}, {
'rel': 'self',
'href': thispath,
'type': 'text/html'
}
]
}
LOGGER.debug('Checking if path exists as raw file or directory')
if data_path.endswith(tuple(self.file_types)):
resource_type = 'raw_file'
elif os.path.exists(data_path):
resource_type = 'directory'
else:
LOGGER.debug('Checking if path exists as file via file_types')
for ft in self.file_types:
tmp_path = f'{data_path}{ft}'
if os.path.exists(tmp_path):
resource_type = 'file'
data_path = tmp_path
break
if resource_type is None:
msg = f'Resource does not exist: {data_path}'
LOGGER.error(msg)
raise ProviderNotFoundError(msg)
if resource_type == 'raw_file':
with io.open(data_path, 'rb') as fh:
return fh.read()
elif resource_type == 'directory':
content['type'] = 'Catalog'
dirpath2 = os.listdir(data_path)
dirpath2.sort()
for dc in dirpath2:
# TODO: handle a generic directory for tiles
if dc == "tiles":
continue
fullpath = os.path.join(data_path, dc)
filectime = file_modified_iso8601(fullpath)
filesize = os.path.getsize(fullpath)
if os.path.isdir(fullpath):
newpath = os.path.join(baseurl, urlpath, dc)
child_links.append({
'rel': 'child',
'href': newpath,
'type': 'text/html',
'created': filectime,
'entry:type': 'Catalog'
})
elif os.path.isfile(fullpath):
basename, extension = os.path.splitext(dc)
newpath = os.path.join(baseurl, urlpath, basename)
newpath2 = f'{newpath}{extension}'
if extension in self.file_types:
fullpath = os.path.join(data_path, dc)
child_links.append({
'rel': 'item',
'href': newpath,
'title': get_path_basename(newpath2),
'created': filectime,
'file:size': filesize,
'entry:type': 'Item'
})
elif resource_type == 'file':
filename = os.path.basename(data_path)
id_ = os.path.splitext(filename)[0]
if urlpath:
filename = filename.replace(id_, '')
url = f'{baseurl}/{urlpath}{filename}'
filectime = file_modified_iso8601(data_path)
filesize = os.path.getsize(data_path)
content = {
'id': id_,
'type': 'Feature',
'properties': {},
'links': [],
'assets': {}
}
content.update(_describe_file(data_path))
content['assets']['default'] = {
'href': url,
'created': filectime,
'file:size': filesize
}
content['links'].extend(child_links)
return content
def __repr__(self):
return f'<FileSystemProvider> {self.data}'
def _describe_file(filepath):
"""
Helper function to describe a geospatial data
First checks if a sidecar mcf file is available, if so uses that
if not, script will parse the file to retrieve some info from the file
:param filepath: path to file
:returns: `dict` of GeoJSON item
"""
content = {
'bbox': None,
'geometry': None,
'properties': {}
}
mcf_file = f'{os.path.splitext(filepath)[0]}.yml'
if os.path.isfile(mcf_file):
try:
from pygeometa.core import read_mcf, MCFReadError
from pygeometa.schemas.stac import STACItemOutputSchema
md = read_mcf(mcf_file)
stacjson = STACItemOutputSchema.write(STACItemOutputSchema, md)
stacdata = loads(stacjson)
for k, v in stacdata.items():
content[k] = v
except ImportError:
LOGGER.debug('pygeometa not found')
except MCFReadError as err:
LOGGER.warning(f'MCF error: {err}')
else:
LOGGER.debug(f'No mcf found at: {mcf_file}')
if content['geometry'] is None and content['bbox'] is None:
try:
import rasterio
from rasterio.crs import CRS
from rasterio.warp import transform_bounds
except ImportError as err:
LOGGER.warning('rasterio not found')
LOGGER.warning(err)
return content
try:
import fiona
except ImportError as err:
LOGGER.warning('fiona not found')
LOGGER.warning(err)
return content
try: # raster
LOGGER.debug('Testing raster data detection')
d = rasterio.open(filepath)
scrs = CRS(d.crs)
LOGGER.debug(f'CRS: {d.crs}')
LOGGER.debug(f'bounds: {d.bounds}')
LOGGER.debug(f'Is geographic: {scrs.is_geographic}')
if not scrs.is_geographic:
LOGGER.debug('Reprojecting coordinates')
tcrs = CRS.from_epsg(4326)
bnds = transform_bounds(scrs, tcrs,
d.bounds[0], d.bounds[1],
d.bounds[2], d.bounds[3])
content['properties']['projection'] = scrs.to_epsg()
else:
bnds = [d.bounds.left, d.bounds.bottom,
d.bounds.right, d.bounds.top]
content['bbox'] = bnds
content['geometry'] = {
'type': 'Polygon',
'coordinates': [[
[bnds[0], bnds[1]],
[bnds[0], bnds[3]],
[bnds[2], bnds[3]],
[bnds[2], bnds[1]],
[bnds[0], bnds[1]]
]]
}
for k, v in d.tags(d.count).items():
content['properties'][k] = v
if k in ['GRIB_REF_TIME']:
value = int(v.split()[0])
datetime_ = datetime.fromtimestamp(value)
content['properties']['datetime'] = datetime_.isoformat() + 'Z' # noqa
except rasterio.errors.RasterioIOError:
try:
LOGGER.debug('Testing vector data detection')
d = fiona.open(filepath)
                scrs = CRS(d.crs)
LOGGER.debug(f'CRS: {d.crs}')
LOGGER.debug(f'bounds: {d.bounds}')
LOGGER.debug(f'Is geographic: {scrs.is_geographic}')
if not scrs.is_geographic:
LOGGER.debug('Reprojecting coordinates')
tcrs = CRS.from_epsg(4326)
bnds = transform_bounds(scrs, tcrs,
d.bounds[0], d.bounds[1],
d.bounds[2], d.bounds[3])
content['properties']['projection'] = scrs.to_epsg()
else:
bnds = d.bounds
if d.schema['geometry'] not in [None, 'None']:
content['bbox'] = [
bnds[0],
bnds[1],
bnds[2],
bnds[3]
]
content['geometry'] = {
'type': 'Polygon',
'coordinates': [[
[bnds[0], bnds[1]],
[bnds[0], bnds[3]],
[bnds[2], bnds[3]],
[bnds[2], bnds[1]],
[bnds[0], bnds[1]]
]]
}
for k, v in d.schema['properties'].items():
content['properties'][k] = v
if d.driver == 'ESRI Shapefile':
id_ = os.path.splitext(os.path.basename(filepath))[0]
content['assets'] = {}
for suffix in ['shx', 'dbf', 'prj']:
fullpath = f'{os.path.splitext(filepath)[0]}.{suffix}'
if os.path.exists(fullpath):
filectime = file_modified_iso8601(fullpath)
filesize = os.path.getsize(fullpath)
content['assets'][suffix] = {
'href': f'./{id_}.{suffix}',
'created': filectime,
'file:size': filesize
}
except fiona.errors.DriverError:
LOGGER.debug('Could not detect raster or vector data')
return content
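A minimal sketch of exercising this provider outside a full pygeoapi deployment; the provider_def keys shown are the usual minimum consumed by BaseProvider in this version, and the paths and URL are placeholders:

from pygeoapi.provider.filesystem import FileSystemProvider

provider_def = {
    'name': 'FileSystem',
    'type': 'stac',
    'data': '/data/stac',            # must be an existing directory
    'file_types': ['.tif', '.gpkg'],
}
provider = FileSystemProvider(provider_def)
root = provider.get_data_path('http://localhost:5000/stac', '', '')
print(root['links'])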
|
5bdbcc3d2b23de1f3fa00999291982f07a0e2822
|
8e6bb9c1a620a162b7d017c2373dd01be54ea86d
|
/bio/deseq2/deseqdataset/test/Snakefile
|
93b960589408a0996eb71e5f6ecf5be1b7486cf5
|
[] |
no_license
|
snakemake/snakemake-wrappers
|
5d0963502c26eb709513567e25422871fe477cf2
|
996bdcf2a96535b967dfa483c363a5496f4b3906
|
refs/heads/master
| 2023-08-19T05:18:44.337503
| 2023-08-18T12:03:38
| 2023-08-18T12:03:38
| 213,319,194
| 184
| 189
| null | 2023-09-12T11:38:35
| 2019-10-07T07:20:59
|
CAP CDS
|
UTF-8
|
Python
| false
| false
| 3,896
|
Snakefile
|
rule test_DESeqDataSet_filtering:
input:
dds="dataset/dds.RDS",
output:
"dds_minimal.RDS",
threads: 1
log:
"logs/DESeqDataSet/txi.log",
params:
formula="~condition", # Required R statistical formula
factor="condition", # Optionally used for relevel
reference_level="A", # Optionally used for relevel
tested_level="B", # Optionally used for relevel
min_counts=0, # Optionally used to filter low counts
extra="", # Optional parameters provided to import function
wrapper:
"master/bio/deseq2/deseqdataset"
rule test_DESeqDataSet_from_tximport:
input:
txi="dataset/txi.RDS",
colData="coldata.tsv",
output:
"dds_txi.RDS",
threads: 1
log:
"logs/DESeqDataSet/txi.log",
params:
formula="~condition", # Required R statistical formula
# factor="condition", # Optionally used for relevel
# reference_level="A", # Optionally used for relevel
# tested_level="B", # Optionally used for relevel
# min_counts=0, # Optionally used to filter low counts
# extra="", # Optional parameters provided to import function
wrapper:
"master/bio/deseq2/deseqdataset"
rule test_DESeqDataSet_from_ranged_se:
input:
se="dataset/se.RDS",
output:
"dds_se.RDS",
threads: 1
log:
"logs/DESeqDataSet/se.log",
params:
formula="~condition", # Required R statistical formula
# factor="condition", # Optionally used for relevel
# reference_level="A", # Optionally used for relevel
# tested_level="B", # Optionally used for relevel
# min_counts=0, # Optionally used to filter low counts
# extra="", # Optional parameters provided to import function
wrapper:
"master/bio/deseq2/deseqdataset"
rule test_DESeqDataSet_from_r_matrix:
input:
matrix="dataset/matrix.RDS",
colData="coldata.tsv",
output:
"dds_rmatrix.RDS",
threads: 1
log:
"logs/DESeqDataSet/r_matrix.log",
params:
formula="~condition", # Required R statistical formula
# factor="condition", # Optionally used for relevel
# reference_level="A", # Optionally used for relevel
# tested_level="B", # Optionally used for relevel
# min_counts=0, # Optionally used to filter low counts
# extra="", # Optional parameters provided to import function
wrapper:
"master/bio/deseq2/deseqdataset"
rule test_DESeqDataSet_from_tsv_matrix:
input:
counts="dataset/counts.tsv",
colData="coldata.tsv",
output:
"dds_matrix.RDS",
threads: 1
log:
"logs/DESeqDataSet/txt_matrix.log",
params:
formula="~condition", # Required R statistical formula
# factor="condition", # Optionally used for relevel
# reference_level="A", # Optionally used for relevel
# tested_level="B", # Optionally used for relevel
# min_counts=0, # Optionally used to filter low counts
# extra="", # Optional parameters provided to import function
wrapper:
"master/bio/deseq2/deseqdataset"
rule test_DESeqDataSet_from_htseqcount:
input:
htseq_dir="dataset/htseq_dir",
sample_table="sample_table.tsv",
output:
"dds_htseq.RDS",
threads: 1
log:
"logs/DESeqDataSet/txt_matrix.log",
params:
formula="~condition", # Required R statistical formula
# factor="condition", # Optionally used for relevel
# reference_level="A", # Optionally used for relevel
# tested_level="B", # Optionally used for relevel
# min_counts=0, # Optionally used to filter low counts
# extra="", # Optional parameters provided to import function
wrapper:
"master/bio/deseq2/deseqdataset"
|
|
078b664107681f834a8e112489d03b54cce5973e
|
312839d6a1fe98ebf6f55a11ff3136c0d1fc4895
|
/support/gadget_upgrade/download_and_checksum_gadget.py
|
77c3481dd60b9a3f9f4b4be9cbf5f2bc5cd82e1c
|
[
"Apache-2.0"
] |
permissive
|
puremourning/vimspector
|
e6e6e647919b68d740f42fcb9a4c8df635afebff
|
4e49a1782e34433410f96602640a05c9ec00a65f
|
refs/heads/master
| 2023-09-01T13:25:41.271070
| 2023-08-18T18:54:11
| 2023-08-18T18:54:11
| 134,156,823
| 4,156
| 235
|
Apache-2.0
| 2023-09-10T18:57:07
| 2018-05-20T14:19:41
|
Vim Script
|
UTF-8
|
Python
| false
| false
| 2,717
|
py
|
download_and_checksum_gadget.py
|
#!/usr/bin/env python3
import sys
import os
import string
import fnmatch
if '--help' in sys.argv:
print( f"Usage: { os.path.basename( __file__ ) } [-v] gadget [gadget2 ...]" )
print( "" )
print( "Each gadget is a glob (fnmatch), so use * to do all " )
exit(0)
VERBOSE = 0
if '-v' in sys.argv:
VERBOSE = 1
sys.argv = list( filter( lambda x: x != "-v", sys.argv ) )
# Gain access to vimspector libs
sys.path.insert(
1,
os.path.abspath( os.path.join( os.path.dirname( __file__ ),
'..',
'..',
'python3' ) )
)
from vimspector import install, installer, gadgets
gadgets_to_sum = sys.argv[ 1: ]
results = []
for gadget_name in gadgets.GADGETS.keys():
include = False
for requested_gadget in gadgets_to_sum:
if fnmatch.fnmatch( gadget_name, requested_gadget ):
include = True
break
if not include:
if VERBOSE:
print( f"Skipping { gadget_name } (not in { gadgets_to_sum })" )
continue
if VERBOSE:
print( f"Processing { gadget_name }..." )
gadget = gadgets.GADGETS[ gadget_name ]
if 'download' not in gadget:
print(
f"WARNING: Gadget not downloadable (probably a git clone?) {gadget_name}",
file=sys.stderr )
continue
root = os.path.join( os.path.abspath( os.path.dirname( __file__ ) ),
'download' )
last_url = ''
seen_checksums = set()
for OS in 'linux', 'macos', 'windows':
for PLATFORM in 'x86_64', 'arm64', 'x86', 'armv7':
spec = {}
spec.update( gadget.get( 'all', {} ) )
spec.update( gadget.get( OS, {} ) )
spec.update( gadget.get( OS + '_' + PLATFORM, {} ) )
if spec.get( 'checksum', None ):
print( f"WARNING: { PLATFORM } for { OS } for { gadget_name } "
"has a checksum configured already. Probably you forgot to "
"clear it." )
url = string.Template( gadget[ 'download' ][ 'url' ] ).substitute( spec )
if url == last_url:
# Probably not different for this arch
continue
version = spec.get( 'version', 'vUnknown' )
destination = os.path.join( root, gadget_name, OS, PLATFORM, version )
file_path = installer.DownloadFileTo(
url,
destination,
file_name = gadget[ 'download' ].get( 'target' ),
checksum = spec.get( 'checksum' ) )
checksum = installer.GetChecksumSHA254( file_path )
if checksum in seen_checksums:
continue
seen_checksums.add( checksum )
last_url = url
results.append(
f"{ gadget_name } { version } { OS }_{ PLATFORM }: { checksum }" )
for result in results:
print( result )
|
55417e8454ed47e4150e36545eb96ed0ebcd9d6c
|
7c593f4cc70ee56106cc9cce105e6b9e7839431e
|
/objax/module.py
|
d2eade1a8bad9c534b2cc41257f95631196c0079
|
[
"Apache-2.0"
] |
permissive
|
google/objax
|
84e397cafb70813a1e89467f745facf828ed24b8
|
a2d025d9e1da8660a1883404207c41d4327d8c48
|
refs/heads/master
| 2023-09-02T07:04:26.801269
| 2023-06-12T22:12:53
| 2023-06-12T22:12:53
| 288,923,752
| 801
| 80
|
Apache-2.0
| 2023-06-12T22:12:54
| 2020-08-20T06:20:40
|
Python
|
UTF-8
|
Python
| false
| false
| 16,998
|
py
|
module.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ['ForceArgs', 'Function', 'Jit', 'Module', 'ModuleList', 'Parallel', 'Vectorize']
from collections import namedtuple
from typing import Optional, List, Union, Callable, Tuple
import jax
import jax.numpy as jn
import numpy as np
from objax.typing import JaxArray
from objax.util import class_name, override_args_kwargs, positional_args_names, repr_function
from objax.variable import BaseVar, RandomState, VarCollection
class Module:
"""A module is a container to associate variables and functions."""
def vars(self, scope: str = '') -> VarCollection:
"""Collect all the variables (and their names) contained in the module and its submodules.
Important: Variables and modules stored in Python structures such as dict or list are not collected. See ModuleList
if you need such a feature.
Args:
scope: string to prefix to the variable names.
Returns:
A VarCollection of all the variables.
"""
vc = VarCollection()
scope += f'({self.__class__.__name__}).'
for k, v in self.__dict__.items():
if isinstance(v, BaseVar):
vc[scope + k] = v
elif isinstance(v, Module):
if k == '__wrapped__':
vc.update(v.vars(scope=scope[:-1]))
else:
vc.update(v.vars(scope=scope + k))
return vc
def __call__(self, *args, **kwargs):
"""Optional module __call__ method, typically a forward pass computation for standard primitives."""
raise NotImplementedError
class ForceArgs(Module):
"""Forces override of arguments of given module."""
ANY = namedtuple('ANY', ())
"""Token used in `ForceArgs.undo` to indicate undo of all values of specific argument."""
@staticmethod
def undo(module: Module, **kwargs):
"""Undo ForceArgs on each submodule of the module. Modifications are done in-place.
Args:
module: module for which to undo ForceArgs.
**kwargs: dictionary of argument overrides to undo.
`name=val` remove override for value `val` of argument `name`.
`name=ForceArgs.ANY` remove all overrides of argument `name`.
If `**kwargs` is empty then all overrides will be undone.
"""
if isinstance(module, ForceArgs):
if not kwargs:
module.forced_kwargs = {}
else:
module.forced_kwargs = {k: v for k, v in module.forced_kwargs.items()
if (k not in kwargs) or (kwargs[k] not in (v, ForceArgs.ANY))}
ForceArgs.undo(module.__wrapped__, **kwargs)
elif isinstance(module, ModuleList):
for idx, v in enumerate(module):
if isinstance(v, Module):
ForceArgs.undo(v, **kwargs)
if isinstance(v, ForceArgs) and not v.forced_kwargs:
module[idx] = v.__wrapped__
else:
for k, v in module.__dict__.items():
if isinstance(v, Module):
ForceArgs.undo(v, **kwargs)
if isinstance(v, ForceArgs) and not v.forced_kwargs:
setattr(module, k, v.__wrapped__)
def __init__(self, module: Module, **kwargs):
"""Initializes ForceArgs by wrapping another module.
Args:
module: module which argument will be overridden.
kwargs: values of keyword arguments which will be forced to use.
"""
self.__wrapped__ = module
self.forced_kwargs = kwargs
def vars(self, scope: str = '') -> VarCollection:
"""Returns the VarCollection of the wrapped module.
Args:
scope: string to prefix to the variable names.
Returns:
A VarCollection of all the variables of wrapped module.
"""
return self.__wrapped__.vars(scope=scope)
def __call__(self, *args, **kwargs):
"""Calls wrapped module using forced args to override wrapped module arguments."""
args, kwargs = override_args_kwargs(self.__wrapped__, args, kwargs, self.forced_kwargs)
return self.__wrapped__(*args, **kwargs)
def __repr__(self):
args = ', '.join(f'{k}={repr(v)}' for k, v in self.forced_kwargs.items())
return f'{class_name(self)}(module={repr_function(self.__wrapped__)}, {args})'
class ModuleList(Module, list):
"""This is a replacement for Python's list that provides a vars() method to return all the variables that it
contains, including the ones contained in the modules and sub-modules in it."""
def vars(self, scope: str = '') -> VarCollection:
"""Collect all the variables (and their names) contained in the list and its submodules.
Args:
scope: string to prefix to the variable names.
Returns:
A VarCollection of all the variables.
"""
vc = VarCollection()
scope += f'({self.__class__.__name__})'
for p, v in enumerate(self):
if isinstance(v, BaseVar):
vc[f'{scope}[{p}]'] = v
elif isinstance(v, Module):
vc.update(v.vars(scope=f'{scope}[{p}]'))
return vc
def __getitem__(self, key: Union[int, slice]):
value = list.__getitem__(self, key)
if isinstance(key, slice):
return ModuleList(value)
return value
def __repr__(self):
def f(x):
if not isinstance(x, Module) and callable(x):
return repr_function(x)
x = repr(x).split('\n')
x = [x[0]] + [' ' + y for y in x[1:]]
return '\n'.join(x)
entries = '\n'.join(f' [{i}] {f(x)}' for i, x in enumerate(self))
return f'{class_name(self)}(\n{entries}\n)'
class Function(Module):
"""Turn a function into a Module by keeping the vars it uses."""
def __init__(self, f: Callable, vc: VarCollection):
"""Function constructor.
Args:
f: the function or the module to represent.
vc: the VarCollection of variables used by the function.
"""
if hasattr(f, '__name__'):
self.vc = VarCollection((f'{{{f.__name__}}}{k}', v) for k, v in vc.items())
else:
self.vc = VarCollection(vc)
self.__wrapped__ = f
def __call__(self, *args, **kwargs):
"""Call the the function."""
return self.__wrapped__(*args, **kwargs)
def vars(self, scope: str = '') -> VarCollection:
"""Return the VarCollection of the variables used by the function."""
if scope:
return VarCollection((scope + k, v) for k, v in self.vc.items())
return VarCollection(self.vc)
@staticmethod
def with_vars(vc: VarCollection):
"""Decorator which turns a function into a module using provided variable collection.
Args:
vc: the VarCollection of variables used by the function.
"""
def from_function(f: Callable):
return Function(f, vc)
return from_function
@staticmethod
def auto_vars(f: Callable):
"""Turns a function into a module by auto detecting used Objax variables. Could be used as a decorator.
WARNING: This is an experimental feature.
It can detect variables used by function in many common cases, but not all cases.
This feature may be removed in the future version of Objax if it appear to be too unreliable.
Args:
f: function which will be converted into a module.
"""
from objax.util.tracing import find_used_variables
return Function(f, find_used_variables(f))
def __repr__(self):
return f'{class_name(self)}(f={repr_function(self.__wrapped__)})'
class Jit(Module):
"""JIT (Just-In-Time) module takes a function or a module and compiles it for faster execution."""
def __init__(self,
f: Union[Module, Callable],
vc: Optional[VarCollection] = None,
static_argnums: Optional[Tuple[int, ...]] = None):
"""Jit constructor.
Args:
f: the function or the module to compile.
vc: the VarCollection of variables used by the function or module. This argument is required for functions.
static_argnums: tuple of indexes of f's input arguments to treat as static (constants).
A new graph is compiled for each different combination of values for such inputs.
"""
self.static_argnums = static_argnums
if not isinstance(f, Module):
if vc is None:
raise ValueError('You must supply the VarCollection used by the function f.')
f = Function(f, vc)
def jit(tensor_list: List[JaxArray], kwargs, *args):
original_values = self.vc.tensors()
try:
self.vc.assign(tensor_list)
return f(*args, **kwargs), self.vc.tensors()
finally:
self.vc.assign(original_values)
self.vc = f.vars() if vc is None else vc
self._call = jax.jit(jit, static_argnums=tuple(x + 2 for x in sorted(static_argnums or ())))
self.__wrapped__ = f
def __call__(self, *args, **kwargs):
"""Call the compiled version of the function or module."""
output, changes = self._call(self.vc.tensors(), kwargs, *args)
self.vc.assign(changes)
return output
def __repr__(self):
return f'{class_name(self)}(f={self.__wrapped__}, static_argnums={self.static_argnums or None})'
class Parallel(Module):
"""Parallel module takes a function or a module and compiles it for running on multiple devices in parallel."""
def __init__(self,
f: Union[Module, Callable],
vc: Optional[VarCollection] = None,
reduce: Callable[[JaxArray], JaxArray] = jn.concatenate,
axis_name: str = 'device',
static_argnums: Optional[Tuple[int, ...]] = None):
"""Parallel constructor.
Args:
f: the function or the module to compile for parallelism.
vc: the VarCollection of variables used by the function or module. This argument is required for functions.
reduce: the function used to reduce the outputs from many devices to a single device value.
axis_name: what name to give to the device dimension, used in conjunction with objax.functional.parallel.
static_argnums: tuple of indexes of f's input arguments to treat as static (constants).
A new graph is compiled for each different combination of values for such inputs.
"""
if not isinstance(f, Module):
if vc is None:
raise ValueError('You must supply the VarCollection used by the function f.')
f = Function(f, vc)
def pmap(tensor_list: List[jax.Array], random_list: List[jax.Array], *args):
original_values = self.vc.tensors()
try:
self.vc.assign(tensor_list)
self.vc.subset(RandomState).assign(random_list)
return f(*args), self.vc.tensors()
finally:
self.vc.assign(original_values)
static_argnums = sorted(static_argnums or ())
self.axis_name = axis_name
self.ndevices = jax.local_device_count()
self.reduce = reduce
self.static_argnums = frozenset(static_argnums)
self.vc = vc or f.vars()
self._call = jax.pmap(pmap, axis_name=axis_name, static_broadcasted_argnums=[x + 2 for x in static_argnums])
self.__wrapped__ = f
def device_reshape(self, x: JaxArray) -> JaxArray:
"""Utility to reshape an input array in order to broadcast to multiple devices."""
assert hasattr(x, 'ndim'), f'Expected JaxArray, got {type(x)}. If you are trying to pass a scalar to ' \
f'parallel, first convert it to a JaxArray, for example np.float(0.5)'
if x.ndim == 0:
return np.broadcast_to(x, [self.ndevices])
assert x.shape[0] % self.ndevices == 0, f'Must be able to equally divide batch {x.shape} among ' \
f'{self.ndevices} devices, but does not go equally.'
return x.reshape((self.ndevices, x.shape[0] // self.ndevices) + x.shape[1:])
def __call__(self, *args):
"""Call the compiled function or module on multiple devices in parallel.
Important: Make sure you call this function within the scope of VarCollection.replicate() statement.
"""
unreplicated = [k for k, v in self.vc.items()
if not isinstance(v.value, (jax.Array,
jax.interpreters.partial_eval.JaxprTracer,
jax.interpreters.partial_eval.DynamicJaxprTracer))]
assert not unreplicated, \
f'Some variables were not replicated: {unreplicated}. ' \
'Did you forget to call VarCollection.replicate on them?'
args = [x if i in self.static_argnums
else jax.tree_map(self.device_reshape, [x])[0] for i, x in enumerate(args)]
output, changes = self._call(self.vc.tensors(), self.vc.subset(RandomState).tensors(), *args)
self.vc.assign(changes)
return jax.tree_map(self.reduce, output)
def __repr__(self):
args = dict(f=self.__wrapped__, reduce=repr_function(self.reduce), axis_name=repr(self.axis_name),
static_argnums=tuple(sorted(self.static_argnums)) or None)
args = ', '.join(f'{k}={v}' for k, v in args.items())
return f'{class_name(self)}({args})'
class Vectorize(Module):
"""Vectorize module takes a function or a module and compiles it for running in parallel on a single device."""
def __init__(self,
f: Union[Module, Callable],
vc: Optional[VarCollection] = None,
batch_axis: Tuple[Optional[int], ...] = (0,)):
"""Vectorize constructor.
Args:
f: the function or the module to compile for vectorization.
vc: the VarCollection of variables used by the function or module. This argument is required for functions.
batch_axis: tuple of int or None for each of f's input arguments: the axis to use as batch during
vectorization. Use None to automatically broadcast.
"""
if not isinstance(f, Module):
if vc is None:
raise ValueError('You must supply the VarCollection used by the function f.')
f = Function(f, vc)
def vmap(tensor_list: List[JaxArray], random_list: List[JaxArray], *args):
original_values = self.vc.tensors()
try:
self.vc.assign(tensor_list)
self.vc.subset(RandomState).assign(random_list)
return f(*args), self.vc.tensors()
finally:
self.vc.assign(original_values)
fargs = positional_args_names(f)
assert len(batch_axis) >= len(fargs), f'The batched argument must be specified for all of {f} arguments {fargs}'
self.batch_axis = batch_axis
self.batch_axis_argnums = [(x, v) for x, v in enumerate(batch_axis) if v is not None]
assert self.batch_axis_argnums, f'No arguments to function {f} are vectorizable'
self.vc = vc or f.vars()
self._call = jax.vmap(vmap, (None, 0) + batch_axis)
self.__wrapped__ = f
def __call__(self, *args):
"""Call the vectorized version of the function or module."""
assert len(args) == len(self.batch_axis), f'Number of arguments passed {len(args)} must match ' \
f'batched {len(self.batch_axis)}'
nsplits = args[self.batch_axis_argnums[0][0]].shape[self.batch_axis_argnums[0][1]]
output, changes = self._call(self.vc.tensors(), [v.split(nsplits) for v in self.vc.subset(RandomState)], *args)
for v, u in zip(self.vc, changes):
v.reduce(u)
return output
def __repr__(self):
return f'{class_name(self)}(f={self.__wrapped__}, batch_axis={self.batch_axis})'
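# --- Added usage sketch (not part of the original module) ---
# A minimal illustration of how these pieces compose, assuming the full objax
# package (e.g. objax.nn.Linear) is installed:
#
#   import objax
#   import jax.numpy as jn
#   model = objax.nn.Linear(3, 4)   # a Module holding trainable variables
#   print(model.vars())             # VarCollection keyed by scoped names
#   fast = Jit(model)               # compile the forward pass
#   y = fast(jn.zeros((1, 3)))      # same call signature as model itself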
|
d9c64c6e586719a4be3b1b39e6d02f8a0f8f93e3
|
0f85c7bfd4f29bcd856adc316cecc097fda744dc
|
/tests/plugins/tcp_mockserver/test_example.py
|
faa83d079beb3d8c2af5d812da25712f5ccaa8a9
|
[
"MIT"
] |
permissive
|
yandex/yandex-taxi-testsuite
|
260f46731c9888a9efcc3372c3d92329f2fb4d56
|
8befda8c13ef58d83b2ea7d0444e34de0f67ac7f
|
refs/heads/develop
| 2023-08-31T23:28:31.874786
| 2023-08-14T16:00:53
| 2023-08-14T16:00:53
| 244,937,107
| 150
| 41
|
MIT
| 2023-09-13T16:34:07
| 2020-03-04T15:35:09
|
Python
|
UTF-8
|
Python
| false
| false
| 985
|
py
|
test_example.py
|
import contextlib
import pytest
@pytest.fixture(scope='session')
async def _tcp_mockserver(create_tcp_mockserver):
"""
Returns base per-session server instance bound to random port.
"""
async with create_tcp_mockserver(host='localhost', port=0) as mockserver:
yield mockserver
@pytest.fixture
def tcp_mockserver(_tcp_mockserver):
"""
Returns per-test mockserver interface.
"""
async def handle_client(reader, writer):
writer.write(b'Hello, world!')
await writer.drain()
writer.close()
with _tcp_mockserver.client_handler(handle_client):
yield
@pytest.fixture
async def tcp_mockserver_connect(_tcp_mockserver):
"""Create connection to the tcp mockserver."""
return _tcp_mockserver.open_connection
async def test_server(tcp_mockserver, tcp_mockserver_connect):
async with tcp_mockserver_connect() as (reader, _):
data = await reader.read()
assert data == b'Hello, world!'
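# Added usage note (a hedged sketch): with the testsuite package installed and
# its pytest plugins enabled, this example should run with plain pytest, e.g.
#   $ pytest tests/plugins/tcp_mockserver/test_example.py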
|
9438bda06e5fee4a4e18b146cb49fab2726e0aaa
|
d87964cb5b12f542cd21f1ad7c6ed85b5325dc77
|
/cibuildwheel/bashlex_eval.py
|
c5f8053723faa6b0cadb62870a34e94ddfa5818b
|
[
"BSD-2-Clause"
] |
permissive
|
pypa/cibuildwheel
|
65ddc98e9933ad4c7d408966c06cf4156941b68c
|
ce71f445deee7ac0dabd1ee900d6672370e60478
|
refs/heads/main
| 2023-08-17T02:56:29.658243
| 2023-08-14T16:15:53
| 2023-08-14T16:15:53
| 85,508,223
| 884
| 136
|
NOASSERTION
| 2023-09-13T20:10:43
| 2017-03-19T20:59:00
|
Python
|
UTF-8
|
Python
| false
| false
| 3,996
|
py
|
bashlex_eval.py
|
from __future__ import annotations
import subprocess
from collections.abc import Iterable, Mapping, Sequence
from dataclasses import dataclass
from typing import Callable, Dict, List # noqa: TID251
import bashlex
# a function that takes a command and the environment, and returns the result
EnvironmentExecutor = Callable[[List[str], Dict[str, str]], str]
def local_environment_executor(command: Sequence[str], env: Mapping[str, str]) -> str:
return subprocess.run(command, env=env, text=True, stdout=subprocess.PIPE, check=True).stdout
@dataclass(frozen=True)
class NodeExecutionContext:
environment: dict[str, str]
input: str
executor: EnvironmentExecutor
def evaluate(
value: str, environment: Mapping[str, str], executor: EnvironmentExecutor | None = None
) -> str:
if not value:
# empty string evaluates to empty string
# (but trips up bashlex)
return ""
command_node = bashlex.parsesingle(value)
if len(command_node.parts) != 1:
msg = f"{value!r} has too many parts"
raise ValueError(msg)
value_word_node = command_node.parts[0]
return evaluate_node(
value_word_node,
context=NodeExecutionContext(
environment=dict(environment),
input=value,
executor=executor or local_environment_executor,
),
)
def evaluate_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
if node.kind == "word":
return evaluate_word_node(node, context=context)
elif node.kind == "commandsubstitution":
node_result = evaluate_command_node(node.command, context=context)
# bash removes trailing newlines in command substitution
return node_result.rstrip()
elif node.kind == "parameter":
return evaluate_parameter_node(node, context=context)
else:
msg = f"Unsupported bash construct: {node.kind!r}"
raise ValueError(msg)
def evaluate_word_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
value: str = node.word
for part in node.parts:
part_string = context.input[part.pos[0] : part.pos[1]]
part_value = evaluate_node(part, context=context)
if part_string not in value:
msg = f"bash parse failed. part {part_string!r} not found in {value!r}. Word was {node.word!r}. Full input was {context.input!r}"
raise RuntimeError(msg)
value = value.replace(part_string, part_value, 1)
return value
def evaluate_command_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
if any(n.kind == "operator" for n in node.parts):
return evaluate_nodes_as_compound_command(node.parts, context=context)
else:
return evaluate_nodes_as_simple_command(node.parts, context=context)
def evaluate_nodes_as_compound_command(
nodes: Sequence[bashlex.ast.node], context: NodeExecutionContext
) -> str:
# bashlex doesn't support any operators besides ';' inside command
# substitutions, so we only need to handle that case. We do so assuming
# that `set -o errexit` is on, because it's easier to code!
result = ""
for node in nodes:
if node.kind == "command":
result += evaluate_command_node(node, context=context)
elif node.kind == "operator":
if node.op != ";":
msg = f"Unsupported bash operator: {node.op!r}"
raise ValueError(msg)
else:
msg = f"Unsupported bash node in compound command: {node.kind!r}"
raise ValueError(msg)
return result
def evaluate_nodes_as_simple_command(
nodes: Iterable[bashlex.ast.node], context: NodeExecutionContext
) -> str:
command = [evaluate_node(part, context=context) for part in nodes]
return context.executor(command, context.environment)
def evaluate_parameter_node(node: bashlex.ast.node, context: NodeExecutionContext) -> str:
return context.environment.get(node.value, "")
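# --- Added usage sketch (not part of the original module) ---
# Parameter expansion pulls from the supplied environment, and command
# substitution runs through the executor (subprocess by default):
#
#   >>> evaluate("$FOO-suffix", environment={"FOO": "bar"})
#   'bar-suffix'
#   >>> evaluate("$(echo hi)", environment={"PATH": "/bin:/usr/bin"})
#   'hi'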
|
fb8effbc46a4905fd0b806022b44616c69bc8d43
|
63483feac7b6dcb4182eb7f66a731071f810224e
|
/object_detection/utils.py
|
8634d1091bceccde35f39d630e07284ac5943c9a
|
[
"Apache-2.0"
] |
permissive
|
Cartucho/OpenLabeling
|
5a0788627a7af736eada4ff2ca84aba67831afd9
|
d46c62a319b5d2be7374a90b937840e011945739
|
refs/heads/master
| 2022-08-08T15:10:12.025697
| 2020-05-07T10:22:27
| 2020-05-07T10:22:27
| 118,135,147
| 742
| 245
|
Apache-2.0
| 2022-07-06T19:58:46
| 2018-01-19T14:30:09
|
Python
|
UTF-8
|
Python
| false
| false
| 330
|
py
|
utils.py
|
def format_results(boxes, scores, image_id, cat_id):
results = []
for box, score in zip(boxes, scores):
r = {
"image_id": image_id,
"category_id": cat_id,
"bbox": [float(i) for i in box],
"score": float(score),
}
results.append(r)
return results
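# Added usage sketch: builds COCO-style detection records, e.g.
#   format_results([[10, 20, 30, 40]], [0.9], image_id=1, cat_id=3)
#   -> [{'image_id': 1, 'category_id': 3, 'bbox': [10.0, 20.0, 30.0, 40.0], 'score': 0.9}]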
|
5dc6cd62ff5dafbe0e08a546b532296ddbff131e
|
9907672fcd81ab73ac63b2a83422a82bf31eadde
|
/yukicoder/tyama_yukicoder472.py
|
76f5a81331ed9189edf2b8ec5b37c4e74c254756
|
[
"0BSD"
] |
permissive
|
cielavenir/procon
|
bbe1974b9bddb51b76d58722a0686a5b477c4456
|
746e1a91f574f20647e8aaaac0d9e6173f741176
|
refs/heads/master
| 2023-06-21T23:11:24.562546
| 2023-06-11T13:15:15
| 2023-06-11T13:15:15
| 7,557,464
| 137
| 136
| null | 2020-10-20T09:35:52
| 2013-01-11T09:40:26
|
C++
|
UTF-8
|
Python
| false
| false
| 405
|
py
|
tyama_yukicoder472.py
|
#!/usr/bin/python
# Python 3: `reduce` lives in functools (the original
# `from functools import sys` was a broken import).
from functools import reduce
import sys
sys.setrecursionlimit(1000000)
memo={}
# memoized DFS: minimal total cost over rows d..n-1 with budget r left,
# taking 0..min(r,3) units from each row
def dfs(v,d,r):
    if d==len(v): return 0
    if (d,r) not in memo: memo[(d,r)]=reduce(lambda s,i: min(s,dfs(v,d+1,r-i)+v[d][i]), range(min(r,3)+1), 1<<30)
    return memo[(d,r)]
n,k=map(int,sys.stdin.readline().split())
v=[list(map(int,sys.stdin.readline().split()))+[1] for _ in range(n)]
print(dfs(v,0,k)/float(n))
|
95d17844c682dd0fa41424564b25b21b02b9c9bb
|
8d34a1f5cd773e7eaf21ed3dd36f5b5180794db5
|
/tests/plot.py
|
fcac0bc3c22e2e5cb10fbb10bbaa6969f8f568a9
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
grantjenks/python-diskcache
|
ff89f7deab24c70a09ff90a8fdbfe4ecac981c1b
|
323787f507a6456c56cce213156a78b17073fe00
|
refs/heads/master
| 2023-09-01T05:46:40.781039
| 2023-08-31T06:10:27
| 2023-08-31T06:10:27
| 51,039,722
| 1,895
| 140
|
NOASSERTION
| 2023-08-31T05:56:39
| 2016-02-03T23:59:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,892
|
py
|
plot.py
|
"""Plot Benchmarks for docs
$ export PYTHONPATH=/Users/grantj/repos/python-diskcache
$ python tests/plot.py --show tests/timings_core_p1.txt
"""
import argparse
import collections as co
import re
import sys
import matplotlib.pyplot as plt
def parse_timing(timing, limit):
"""Parse timing."""
if timing.endswith('ms'):
value = float(timing[:-2]) * 1e-3
elif timing.endswith('us'):
value = float(timing[:-2]) * 1e-6
else:
assert timing.endswith('s')
value = float(timing[:-1])
return 0.0 if value > limit else value * 1e6
def parse_row(row, line):
"""Parse row."""
return [val.strip() for val in row.match(line).groups()]
def parse_data(infile):
"""Parse data from `infile`."""
blocks = re.compile(' '.join(['=' * 9] * 8))
dashes = re.compile('^-{79}$')
title = re.compile('^Timings for (.*)$')
row = re.compile(' '.join(['(.{9})'] * 7) + ' (.{8,9})')
lines = infile.readlines()
data = co.OrderedDict()
index = 0
while index < len(lines):
line = lines[index]
if blocks.match(line):
try:
name = title.match(lines[index + 1]).group(1)
except Exception:
index += 1
continue
data[name] = {}
assert dashes.match(lines[index + 2])
cols = parse_row(row, lines[index + 3])
assert blocks.match(lines[index + 4])
get_row = parse_row(row, lines[index + 5])
assert get_row[0] == 'get'
set_row = parse_row(row, lines[index + 6])
assert set_row[0] == 'set'
delete_row = parse_row(row, lines[index + 7])
assert delete_row[0] == 'delete'
assert blocks.match(lines[index + 9])
data[name]['get'] = dict(zip(cols, get_row))
data[name]['set'] = dict(zip(cols, set_row))
data[name]['delete'] = dict(zip(cols, delete_row))
index += 10
else:
index += 1
return data
def make_plot(data, action, save=False, show=False, limit=0.005):
"""Make plot."""
fig, ax = plt.subplots(figsize=(8, 10))
colors = ['#ff7f00', '#377eb8', '#4daf4a', '#984ea3', '#e41a1c']
width = 0.15
ticks = ('Median', 'P90', 'P99')
index = (0, 1, 2)
names = list(data)
bars = []
for pos, (name, color) in enumerate(zip(names, colors)):
bars.append(
ax.bar(
[val + pos * width for val in index],
[
parse_timing(data[name][action][tick], limit)
for tick in ticks
],
width,
color=color,
)
)
ax.set_ylabel('Time (microseconds)')
ax.set_title('"%s" Time vs Percentile' % action)
ax.set_xticks([val + width * (len(data) / 2) for val in index])
ax.set_xticklabels(ticks)
box = ax.get_position()
ax.set_position(
[box.x0, box.y0 + box.height * 0.2, box.width, box.height * 0.8]
)
ax.legend(
[bar[0] for bar in bars],
names,
loc='lower center',
bbox_to_anchor=(0.5, -0.25),
)
if show:
plt.show()
if save:
plt.savefig('%s-%s.png' % (save, action), dpi=120, bbox_inches='tight')
plt.close()
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'infile',
type=argparse.FileType('r'),
default=sys.stdin,
)
parser.add_argument('-l', '--limit', type=float, default=0.005)
parser.add_argument('-s', '--save')
parser.add_argument('--show', action='store_true')
args = parser.parse_args()
data = parse_data(args.infile)
for action in ['get', 'set', 'delete']:
make_plot(data, action, args.save, args.show, args.limit)
if __name__ == '__main__':
main()
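# Added usage sketch: parse_timing converts the report's human-readable timings
# to microseconds, zeroing outliers above `limit` (in seconds), e.g.
#   parse_timing('12.3us', limit=0.005) -> ~12.3
#   parse_timing('1.2ms', limit=0.005)  -> 1200.0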
|
4e95797ee7b9894af9684b5ef980a87410b3b0c6
|
14d02788a5e9fbf0f98a6d0f76cdfb376f22fa74
|
/src/retype/__main__.py
|
c0d81676f39fed8f84641a0e1c7c600179a7b537
|
[
"MIT"
] |
permissive
|
ambv/retype
|
cab384828bdff77883dcfe8872403e1159ee088f
|
8d534bf80345b1d4b7df512eaf13c19bbcccc7c5
|
refs/heads/main
| 2023-08-25T12:22:56.628291
| 2022-08-14T07:12:06
| 2022-08-14T07:12:06
| 84,633,594
| 139
| 22
|
MIT
| 2022-08-14T07:01:24
| 2017-03-11T07:54:58
|
Python
|
UTF-8
|
Python
| false
| false
| 2,494
|
py
|
__main__.py
|
from __future__ import annotations
import sys
from functools import partial
from pathlib import Path
import click
from . import retype_path
from .config import ReApplyFlags
from .version import __version__
Directory = partial(
click.Path,
exists=True,
file_okay=False,
dir_okay=True,
readable=True,
writable=False,
)
@click.command()
@click.option(
"-p",
"--pyi-dir",
type=Directory(),
default="types",
help="Where to find .pyi stubs.",
show_default=True,
)
@click.option(
"-t",
"--target-dir",
type=Directory(exists=False, writable=True),
default="typed-src",
help="Where to write annotated sources.",
show_default=True,
)
@click.option(
"-i",
"--incremental",
is_flag=True,
help="Allow for missing type annotations in both stubs and the source.",
)
@click.option("-q", "--quiet", is_flag=True, help="Don't emit warnings, just errors.")
@click.option(
"-a", "--replace-any", is_flag=True, help="Allow replacing Any annotations."
)
@click.option(
"--hg", is_flag=True, help="Post-process files to preserve implicit byte literals."
)
@click.option("--traceback", is_flag=True, help="Show a Python traceback on error.")
@click.argument("src", nargs=-1, type=Directory(file_okay=True))
@click.version_option(version=__version__)
def main(src, pyi_dir, target_dir, incremental, quiet, replace_any, hg, traceback):
"""Re-apply type annotations from .pyi stubs to your codebase."""
exit_code = 0
for src_entry in src:
for file, error, exc_type, tb in retype_path(
src=Path(src_entry),
pyi_dir=Path(pyi_dir),
targets=Path(target_dir),
src_explicitly_given=True,
quiet=quiet,
hg=hg,
flags=ReApplyFlags(replace_any=replace_any, incremental=incremental),
):
print(f"error: {file}: {error}", file=sys.stderr)
if traceback:
print("Traceback (most recent call last):", file=sys.stderr)
for line in tb:
print(line, file=sys.stderr, end="")
print(f"{exc_type.__name__}: {error}", file=sys.stderr)
exit_code += 1
if not src and not quiet:
print("warning: no sources given", file=sys.stderr)
# According to http://tldp.org/LDP/abs/html/index.html starting with 126
# we have special return codes.
sys.exit(min(exit_code, 125))
if __name__ == "__main__":
main()
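# Added usage sketch (CLI options as defined above):
#   $ python -m retype -p types -t typed-src src/
# re-applies annotations from .pyi stubs in ./types onto src/, writing the
# annotated sources into ./typed-src.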
|
fc5dc6dd33ea7e4c2e41d03d825043c5db974b09
|
c46754b9600a12df4f9d7a6320dfc19aa96b1e1d
|
/src/transformers/models/convbert/tokenization_convbert_fast.py
|
07447bb6a17caa7d92f5c604830f3db5ed2dfb8b
|
[
"Apache-2.0"
] |
permissive
|
huggingface/transformers
|
ccd52a0d7c59e5f13205f32fd96f55743ebc8814
|
4fa0aff21ee083d0197a898cdf17ff476fae2ac3
|
refs/heads/main
| 2023-09-05T19:47:38.981127
| 2023-09-05T19:21:33
| 2023-09-05T19:21:33
| 155,220,641
| 102,193
| 22,284
|
Apache-2.0
| 2023-09-14T20:44:49
| 2018-10-29T13:56:00
|
Python
|
UTF-8
|
Python
| false
| false
| 8,765
|
py
|
tokenization_convbert_fast.py
|
# coding=utf-8
# Copyright The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for ConvBERT."""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
# Copied from transformers.models.bert.tokenization_bert_fast.BertTokenizerFast with bert-base-cased->YituTech/conv-bert-base, Bert->ConvBert, BERT->ConvBERT
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
r"""
Construct a "fast" ConvBERT tokenizer (backed by HuggingFace's *tokenizers* library). Based on WordPiece.
This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`):
File containing the vocabulary.
do_lower_case (`bool`, *optional*, defaults to `True`):
Whether or not to lowercase the input when tokenizing.
unk_token (`str`, *optional*, defaults to `"[UNK]"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
sep_token (`str`, *optional*, defaults to `"[SEP]"`):
The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
sequence classification or for a text and a question for question answering. It is also used as the last
token of a sequence built with special tokens.
pad_token (`str`, *optional*, defaults to `"[PAD]"`):
The token used for padding, for example when batching sequences of different lengths.
cls_token (`str`, *optional*, defaults to `"[CLS]"`):
The classifier token which is used when doing sequence classification (classification of the whole sequence
instead of per-token classification). It is the first token of the sequence when built with special tokens.
mask_token (`str`, *optional*, defaults to `"[MASK]"`):
The token used for masking values. This is the token used when training this model with masked language
modeling. This is the token which the model will try to predict.
clean_text (`bool`, *optional*, defaults to `True`):
Whether or not to clean the text before tokenization by removing any control characters and replacing all
whitespaces by the classic one.
tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
Whether or not to tokenize Chinese characters. This should likely be deactivated for Japanese (see [this
issue](https://github.com/huggingface/transformers/issues/328)).
strip_accents (`bool`, *optional*):
Whether or not to strip all accents. If this option is not specified, then it will be determined by the
value for `lowercase` (as in the original ConvBERT).
wordpieces_prefix (`str`, *optional*, defaults to `"##"`):
The prefix for subwords.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
slow_tokenizer_class = ConvBertTokenizer
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
do_lower_case=True,
unk_token="[UNK]",
sep_token="[SEP]",
pad_token="[PAD]",
cls_token="[CLS]",
mask_token="[MASK]",
tokenize_chinese_chars=True,
strip_accents=None,
**kwargs,
):
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
do_lower_case=do_lower_case,
unk_token=unk_token,
sep_token=sep_token,
pad_token=pad_token,
cls_token=cls_token,
mask_token=mask_token,
tokenize_chinese_chars=tokenize_chinese_chars,
strip_accents=strip_accents,
**kwargs,
)
normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
if (
normalizer_state.get("lowercase", do_lower_case) != do_lower_case
or normalizer_state.get("strip_accents", strip_accents) != strip_accents
or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
):
normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
normalizer_state["lowercase"] = do_lower_case
normalizer_state["strip_accents"] = strip_accents
normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
self.do_lower_case = do_lower_case
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
adding special tokens. A ConvBERT sequence has the following format:
- single sequence: `[CLS] X [SEP]`
- pair of sequences: `[CLS] A [SEP] B [SEP]`
Args:
token_ids_0 (`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
"""
output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
if token_ids_1:
output += token_ids_1 + [self.sep_token_id]
return output
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. A ConvBERT
sequence pair mask has the following format:
```
0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1
| first sequence | second sequence |
```
If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s).
Args:
token_ids_0 (`List[int]`):
List of IDs.
token_ids_1 (`List[int]`, *optional*):
Optional second list of IDs for sequence pairs.
Returns:
`List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
files = self._tokenizer.model.save(save_directory, name=filename_prefix)
return tuple(files)
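# --- Added usage sketch (not part of the original module) ---
# Assuming the transformers package and access to the hosted vocab files:
#   tok = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
#   tok.build_inputs_with_special_tokens([5, 6])
#   # -> [cls_id, 5, 6, sep_id]
#   tok.create_token_type_ids_from_sequences([5, 6], [7])
#   # -> [0, 0, 0, 0, 1, 1]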
|
fc262b474009f4ba608156b2a54ed6d3fcceb5ee
|
9940f6579e010bb7c1fa13885c49bbaf6164723b
|
/lbry/schema/mime_types.py
|
62505be04fb9a4b61664aec912d44b2544093659
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
lbryio/lbry-sdk
|
feaf1143b178b496a9d81c99faf51fac60e6fed1
|
eb5da9511e162ef1080cb34af2ee087383cfa94a
|
refs/heads/master
| 2023-08-18T13:06:16.106204
| 2023-02-07T18:50:25
| 2023-04-03T17:34:36
| 41,103,286
| 5,272
| 291
|
MIT
| 2023-06-28T16:36:20
| 2015-08-20T15:24:10
|
Python
|
UTF-8
|
Python
| false
| false
| 8,694
|
py
|
mime_types.py
|
import os
import filetype
import logging
types_map = {
# http://www.iana.org/assignments/media-types
# Type mapping for automated metadata extraction (video, audio, image, document, binary, model)
'.a': ('application/octet-stream', 'binary'),
'.ai': ('application/postscript', 'image'),
'.aif': ('audio/x-aiff', 'audio'),
'.aifc': ('audio/x-aiff', 'audio'),
'.aiff': ('audio/x-aiff', 'audio'),
'.au': ('audio/basic', 'audio'),
'.avi': ('video/x-msvideo', 'video'),
'.bat': ('text/plain', 'document'),
'.bcpio': ('application/x-bcpio', 'binary'),
'.bin': ('application/octet-stream', 'binary'),
'.bmp': ('image/bmp', 'image'),
'.c': ('text/plain', 'document'),
'.cdf': ('application/x-netcdf', 'binary'),
'.cpio': ('application/x-cpio', 'binary'),
'.csh': ('application/x-csh', 'binary'),
'.css': ('text/css', 'document'),
'.csv': ('text/csv', 'document'),
'.dll': ('application/octet-stream', 'binary'),
'.doc': ('application/msword', 'document'),
'.dot': ('application/msword', 'document'),
'.dvi': ('application/x-dvi', 'binary'),
'.eml': ('message/rfc822', 'document'),
'.eps': ('application/postscript', 'document'),
'.epub': ('application/epub+zip', 'document'),
'.etx': ('text/x-setext', 'document'),
'.exe': ('application/octet-stream', 'binary'),
'.gif': ('image/gif', 'image'),
'.gtar': ('application/x-gtar', 'binary'),
'.h': ('text/plain', 'document'),
'.hdf': ('application/x-hdf', 'binary'),
'.htm': ('text/html', 'document'),
'.html': ('text/html', 'document'),
'.ico': ('image/vnd.microsoft.icon', 'image'),
'.ief': ('image/ief', 'image'),
'.iges': ('model/iges', 'model'),
'.jpe': ('image/jpeg', 'image'),
'.jpeg': ('image/jpeg', 'image'),
'.jpg': ('image/jpeg', 'image'),
'.js': ('application/javascript', 'document'),
'.json': ('application/json', 'document'),
'.ksh': ('text/plain', 'document'),
'.latex': ('application/x-latex', 'binary'),
'.m1v': ('video/mpeg', 'video'),
'.m3u': ('application/x-mpegurl', 'audio'),
'.m3u8': ('application/x-mpegurl', 'video'),
'.man': ('application/x-troff-man', 'document'),
'.markdown': ('text/markdown', 'document'),
'.md': ('text/markdown', 'document'),
'.me': ('application/x-troff-me', 'binary'),
'.mht': ('message/rfc822', 'document'),
'.mhtml': ('message/rfc822', 'document'),
'.mif': ('application/x-mif', 'binary'),
'.mov': ('video/quicktime', 'video'),
'.movie': ('video/x-sgi-movie', 'video'),
'.mp2': ('audio/mpeg', 'audio'),
'.mp3': ('audio/mpeg', 'audio'),
'.mp4': ('video/mp4', 'video'),
'.mpa': ('video/mpeg', 'video'),
'.mpd': ('application/dash+xml', 'video'),
'.mpe': ('video/mpeg', 'video'),
'.mpeg': ('video/mpeg', 'video'),
'.mpg': ('video/mpeg', 'video'),
'.ms': ('application/x-troff-ms', 'binary'),
'.m4s': ('video/iso.segment', 'binary'),
'.nc': ('application/x-netcdf', 'binary'),
'.nws': ('message/rfc822', 'document'),
'.o': ('application/octet-stream', 'binary'),
'.obj': ('application/octet-stream', 'model'),
'.oda': ('application/oda', 'binary'),
'.p12': ('application/x-pkcs12', 'binary'),
'.p7c': ('application/pkcs7-mime', 'binary'),
'.pbm': ('image/x-portable-bitmap', 'image'),
'.pdf': ('application/pdf', 'document'),
'.pfx': ('application/x-pkcs12', 'binary'),
'.pgm': ('image/x-portable-graymap', 'image'),
'.pl': ('text/plain', 'document'),
'.png': ('image/png', 'image'),
'.pnm': ('image/x-portable-anymap', 'image'),
'.pot': ('application/vnd.ms-powerpoint', 'document'),
'.ppa': ('application/vnd.ms-powerpoint', 'document'),
'.ppm': ('image/x-portable-pixmap', 'image'),
'.pps': ('application/vnd.ms-powerpoint', 'document'),
'.ppt': ('application/vnd.ms-powerpoint', 'document'),
'.ps': ('application/postscript', 'document'),
'.pwz': ('application/vnd.ms-powerpoint', 'document'),
'.py': ('text/x-python', 'document'),
'.pyc': ('application/x-python-code', 'binary'),
'.pyo': ('application/x-python-code', 'binary'),
'.qt': ('video/quicktime', 'video'),
'.ra': ('audio/x-pn-realaudio', 'audio'),
'.ram': ('application/x-pn-realaudio', 'audio'),
'.ras': ('image/x-cmu-raster', 'image'),
'.rdf': ('application/xml', 'binary'),
'.rgb': ('image/x-rgb', 'image'),
'.roff': ('application/x-troff', 'binary'),
'.rtx': ('text/richtext', 'document'),
'.sgm': ('text/x-sgml', 'document'),
'.sgml': ('text/x-sgml', 'document'),
'.sh': ('application/x-sh', 'document'),
'.shar': ('application/x-shar', 'binary'),
'.snd': ('audio/basic', 'audio'),
'.so': ('application/octet-stream', 'binary'),
'.src': ('application/x-wais-source', 'binary'),
'.stl': ('model/stl', 'model'),
'.sv4cpio': ('application/x-sv4cpio', 'binary'),
'.sv4crc': ('application/x-sv4crc', 'binary'),
'.svg': ('image/svg+xml', 'image'),
'.swf': ('application/x-shockwave-flash', 'binary'),
'.t': ('application/x-troff', 'binary'),
'.tar': ('application/x-tar', 'binary'),
'.tcl': ('application/x-tcl', 'binary'),
'.tex': ('application/x-tex', 'binary'),
'.texi': ('application/x-texinfo', 'binary'),
'.texinfo': ('application/x-texinfo', 'binary'),
'.tif': ('image/tiff', 'image'),
'.tiff': ('image/tiff', 'image'),
'.tr': ('application/x-troff', 'binary'),
'.ts': ('video/mp2t', 'video'),
'.tsv': ('text/tab-separated-values', 'document'),
'.txt': ('text/plain', 'document'),
'.ustar': ('application/x-ustar', 'binary'),
'.vcf': ('text/x-vcard', 'document'),
'.vtt': ('text/vtt', 'document'),
'.wav': ('audio/x-wav', 'audio'),
'.webm': ('video/webm', 'video'),
'.wiz': ('application/msword', 'document'),
'.wsdl': ('application/xml', 'document'),
'.xbm': ('image/x-xbitmap', 'image'),
'.xlb': ('application/vnd.ms-excel', 'document'),
'.xls': ('application/vnd.ms-excel', 'document'),
'.xml': ('text/xml', 'document'),
'.xpdl': ('application/xml', 'document'),
'.xpm': ('image/x-xpixmap', 'image'),
'.xsl': ('application/xml', 'document'),
'.xwd': ('image/x-xwindowdump', 'image'),
'.zip': ('application/zip', 'binary'),
# These are non-standard types, commonly found in the wild.
'.cbr': ('application/vnd.comicbook-rar', 'document'),
'.cbz': ('application/vnd.comicbook+zip', 'document'),
'.flac': ('audio/flac', 'audio'),
'.lbry': ('application/x-ext-lbry', 'document'),
'.m4a': ('audio/mp4', 'audio'),
'.m4v': ('video/m4v', 'video'),
'.mid': ('audio/midi', 'audio'),
'.midi': ('audio/midi', 'audio'),
'.mkv': ('video/x-matroska', 'video'),
'.mobi': ('application/x-mobipocket-ebook', 'document'),
'.oga': ('audio/ogg', 'audio'),
'.ogv': ('video/ogg', 'video'),
'.ogg': ('video/ogg', 'video'),
'.pct': ('image/pict', 'image'),
'.pic': ('image/pict', 'image'),
'.pict': ('image/pict', 'image'),
'.prc': ('application/x-mobipocket-ebook', 'document'),
'.rtf': ('application/rtf', 'document'),
'.xul': ('text/xul', 'document'),
# microsoft is special and has its own 'standard'
# https://docs.microsoft.com/en-us/windows/desktop/wmp/file-name-extensions
'.wmv': ('video/x-ms-wmv', 'video')
}
# maps detected extensions to the possible analogs
# i.e. .cbz file is actually a .zip
synonyms_map = {
'.zip': ['.cbz'],
'.rar': ['.cbr'],
'.ar': ['.a']
}
log = logging.getLogger(__name__)
def guess_media_type(path):
_, ext = os.path.splitext(path)
extension = ext.strip().lower()
try:
kind = filetype.guess(path)
if kind:
real_extension = f".{kind.extension}"
if extension != real_extension:
if extension:
log.warning(f"file extension does not match it's contents: {path}, identified as {real_extension}")
else:
log.debug(f"file {path} does not have extension, identified by it's contents as {real_extension}")
if extension not in synonyms_map.get(real_extension, []):
extension = real_extension
except OSError:
pass
if extension[1:]:
if extension in types_map:
return types_map[extension]
return f'application/x-ext-{extension[1:]}', 'binary'
return 'application/octet-stream', 'binary'
def guess_stream_type(media_type):
for media, stream in types_map.values():
if media == media_type:
return stream
return 'binary'
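# Added usage sketch: extension lookup falls back to content sniffing and a
# generic type; for a path that does not exist on disk, sniffing is skipped:
#   guess_media_type('movie.mkv')   -> ('video/x-matroska', 'video')
#   guess_media_type('notes.xyz')   -> ('application/x-ext-xyz', 'binary')
#   guess_stream_type('audio/flac') -> 'audio'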
|
54902c33c39722a318fbaf56d21789404b8e438f
|
7c045b4aae3ba8979422822b7b811afcde14710b
|
/zfit/util/temporary.py
|
77134925a45084bbec38f913fa24942a313c0b15
|
[
"BSD-3-Clause"
] |
permissive
|
zfit/zfit
|
204d27cfdde030b5ab6420d8cfe4dc12bbc040aa
|
fc1cc3859db4160e1c5713e490e842a545cc14c2
|
refs/heads/develop
| 2023-08-08T01:51:15.468478
| 2023-08-06T11:17:29
| 2023-08-06T11:17:45
| 126,311,570
| 164
| 55
|
BSD-3-Clause
| 2023-08-02T11:38:02
| 2018-03-22T09:31:20
|
Python
|
UTF-8
|
Python
| false
| false
| 2,040
|
py
|
temporary.py
|
# Copyright (c) 2022 zfit
from __future__ import annotations
from collections.abc import Callable
from typing import Any
class TemporarilySet:
def __init__(self, value: Any, setter: Callable, getter: Callable):
"""Temporarily set `value` with `setter` and reset to the old value after leaving the context.
This class can be used to have a setter that can permanently set a value *as well as* just
for the time inside a context manager. The usage is as follows:
>>> class SimpleX:
>>> def __init__(self):
>>> self.x = None
>>> def _set_x(self, x):
>>> self.x = x
>>> def get_x(self):
>>> return self.x
>>> def set_x(self, x):
>>> return TemporarilySet(value=x, setter=self._set_x, getter=self.get_x)
>>> simple_x = SimpleX()
Now we can either set x permanently
>>> simple_x.set_x(42)
>>> print(simple_x.x)
42
or temporarily
>>> with simple_x.set_x(13) as value:
>>>     print("Value from contextmanager:", value)
>>>     print("simple_x.get_x():", simple_x.get_x())
Value from contextmanager: 13
simple_x.get_x(): 13
and is afterwards unset again
>>> print(simple_x.x)
42
Args:
value: The value to be (temporarily) set (and returned if a context manager is applied).
setter: The setter function with a signature that is compatible to the call:
`setter(value, *setter_args, **setter_kwargs)`
getter: The getter function with a signature that is compatible to the call:
`getter(*getter_args, **getter_kwargs)`
"""
self.setter = setter
self.getter = getter
self.value = value
self.old_value = self.getter()
self.setter(self.value)
def __enter__(self):
return self.value
def __exit__(self, exc_type, exc_val, exc_tb):
self.setter(self.old_value)
|
d8162bd37efb122a80e5196b94c5162aa03305dd
|
93f5395aff38d77d8717c604c027db97c59990ad
|
/pylib/algorithm.py
|
80e3e744276cef1e5a3c8a855d12012da3638439
|
[] |
no_license
|
lilydjwg/winterpy
|
690d3e8ed4f73855838886945626896c9ec54a61
|
2294a258074b8262d6f7482023f6d126189f9bc0
|
refs/heads/master
| 2023-08-22T23:14:54.067358
| 2023-06-05T03:25:25
| 2023-06-05T03:25:25
| 1,648,844
| 198
| 69
| null | 2023-06-05T03:25:26
| 2011-04-22T07:41:15
|
Python
|
UTF-8
|
Python
| false
| false
| 2,821
|
py
|
algorithm.py
|
'''Assorted algorithms'''
import bisect
def LevenshteinDistance(s, t):
'''String similarity (the Levenshtein distance algorithm)
One string can be turned into another by inserting a character, deleting a
character, or replacing a character; the minimum number of these three
operations needed to turn string A into string B is called the similarity
(edit distance) of A and B.
The algorithm was proposed by the Russian scientist Levenshtein.
Step Description
1 Set n to be the length of s.
Set m to be the length of t.
If n = 0, return m and exit.
If m = 0, return n and exit.
Construct a matrix containing 0..m rows and 0..n columns.
2 Initialize the first row to 0..n.
Initialize the first column to 0..m.
3 Examine each character of s (i from 1 to n).
4 Examine each character of t (j from 1 to m).
5 If s[i] equals t[j], the cost is 0.
If s[i] doesn't equal t[j], the cost is 1.
6 Set cell d[i,j] of the matrix equal to the minimum of:
a. The cell immediately above plus 1: d[i-1,j] + 1.
b. The cell immediately to the left plus 1: d[i,j-1] + 1.
c. The cell diagonally above and to the left plus the cost:
d[i-1,j-1] + cost.
7 After the iteration steps (3, 4, 5, 6) are complete, the distance is
found in cell d[n,m]. '''
m, n = len(s), len(t)
if not (m and n):
return m or n
# build the (m+1) x (n+1) distance matrix
matrix = [[0 for i in range(n+1)] for j in range(m+1)]
matrix[0] = list(range(n+1))
for i in range(m+1):
matrix[i][0] = i
for i in range(m):
for j in range(n):
cost = int(s[i] != t[j])
# Python string indices start at 0
matrix[i+1][j+1] = min(
matrix[i][j+1] + 1, # a.
matrix[i+1][j] + 1, # b.
matrix[i][j] + cost # c.
)
return matrix[m][n]
difference = LevenshteinDistance
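# Added example: with this implementation,
#   LevenshteinDistance('kitten', 'sitting') == 3
# (substitute k->s, substitute e->i, insert g)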
def mprint(matrix, width=3):
'''Print a matrix'''
for i in matrix:
for j in i:
print('{0:>{1}}'.format(j, width), end='')
print()
def nmin(s, howmany):
'''Pick the `howmany` smallest items.
From the Python 2.6 documentation
(tutorial/stdlib2.html#tools-for-working-with-lists)'''
from heapq import heapify, heappop
heapify(s) # rearrange the list into heap order
# fetch the smallest entries
return [heappop(s) for i in range(howmany)]
def between(seq, start, end):
'''Return the items of seq that fall between start and end.
seq must already be sorted in ascending order.'''
l = bisect.bisect_left(seq, start)
if l < 0:
l = 0
r = bisect.bisect_right(seq, end)
return seq[l:r]
# spherical coordinates to Cartesian coordinates
def 球面坐标到直角坐标(r, alpha, beta):
from math import cos, sin
x = r * cos(beta) * cos(alpha)
y = r * cos(beta) * sin(alpha)
z = r * sin(beta)
return (x, y, z)
def md5(string):
'''Return the md5 of string (encoded as UTF-8) as a hex digest'''
import hashlib
m = hashlib.md5()
m.update(string.encode('utf-8'))
return m.hexdigest()
|
729d1d15e0a9eaad35bc3ca0061f62aba6f5fa94
|
10da9e96fdc5fc227222b7523155bde02fb58150
|
/qlib_server/config.py
|
75bda57d959f39ae69cf12692b47a14a6383d047
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
microsoft/qlib-server
|
fa82365db5f5b553b181d3cf6d426c641b432d3d
|
6e7f326e23262062c81361a5d5642da88050f7e3
|
refs/heads/main
| 2023-07-06T02:31:11.539686
| 2022-07-08T02:15:09
| 2022-07-08T02:15:09
| 292,248,698
| 216
| 68
|
MIT
| 2022-07-08T02:15:10
| 2020-09-02T10:11:49
|
Python
|
UTF-8
|
Python
| false
| false
| 3,629
|
py
|
config.py
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import qlib
# TODO: fix the default config to common settings while releasing.
_server_config = {
# flask port
"flask_server": "172.23.233.89",
"flask_port": 9710,
"flask_ping_interval": 1.0,
# rabitmq server
"queue_host": "10.150.144.154",
"queue_user": "guest",
"queue_pwd": "guest",
"task_queue": "my_task_queue",
"message_queue": "my_message_queue",
"max_process": 10,
"max_concurrency": 10,
"inactivity_timeout": 5,
# cache update
"auto_update": False,
"update_time": "23:45",
# support qlib version
"client_version": ">=0.4.0",
# logging
"logging_level": "DEBUG",
# provider_uri
"provider_uri": "/data1/csdesign",
# cache dir name
"dataset_cache_dir_name": "dataset_cache",
"features_cache_dir_name": "features_cache",
# redis
"redis_host": "10.150.144.154",
"redis_port": 6379,
"redis_task_db": 1,
}
LoggingConfig = {
"logging_config": {
"version": 1,
"formatters": {
"logger_format": {
"format": "[%(process)s:%(threadName)s](%(asctime)s) %(levelname)s - "
"%(name)s - [%(filename)s:%(lineno)d] - %(message)s"
}
},
"filters": {"log_filter": {"()": "qlib_server.log.LogFilter", "param": [".*?WARN: data not found for.*?"]}},
"handlers": {
"console": {"class": "logging.StreamHandler", "level": "DEBUG", "formatter": "logger_format"},
"file": {
"class": "logging.FileHandler",
"level": "DEBUG",
"mode": "w",
"filename": "qlib_server.log",
"formatter": "logger_format",
},
"others": {"class": "logging.StreamHandler", "level": "WARNING", "formatter": "logger_format"},
"other_file": {
"class": "logging.FileHandler",
"level": "WARNING",
"mode": "w",
"filename": "qlib_server_other_module.log",
"formatter": "logger_format",
},
},
"loggers": {
"qlib": {
"level": "DEBUG",
"handlers": [
"console",
# 'file'
],
}
},
"root": {
"handlers": [
"others",
# 'other_file'
]
},
}
}
_default_config = dict(_server_config, **LoggingConfig)
class Config:
def __getitem__(self, key):
return _default_config[key]
def __getattr__(self, attr):
try:
return _default_config[attr]
except KeyError:
raise AttributeError(f"No attr name {attr}")
def __setitem__(self, key, value):
_default_config[key] = value
def __setattr__(self, attr, value):
_default_config[attr] = value
# global config
C = Config()
def init(conf, logging_config=None):
"""set_config
:param conf: A dict-like object
:param logging_config: logging config
"""
# config the files
for key, val in conf.items():
C[key] = val
qlib.init(
"server",
provider_uri=C["provider_uri"],
logging_level=C["logging_level"],
logging_config=logging_config,
dataset_cache_dir_name=C["dataset_cache_dir_name"],
features_cache_dir_name=C["features_cache_dir_name"],
redis_task_db=C["redis_task_db"],
redis_port=C["redis_port"],
redis_host=C["redis_host"],
)
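# Added usage sketch (not part of the original module): the Config wrapper
# exposes one shared dict through both item and attribute access, e.g.
#   from qlib_server.config import C, init
#   print(C["max_process"], C.max_process)   # same underlying value
#   init({"flask_port": 9711})               # override defaults, then qlib.init(...)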
|
194dc3ba2a628b49573e3a8e91c14b379de3c116
|
f11912913e0e4ff9ad032e52fe9452187f6f54d5
|
/yys.py
|
95f6e79420de17cc430828f959f167c99bc8cb43
|
[] |
no_license
|
lisai9093/YYS
|
daeaba90bb6f3b5abb218359c3e944cd2f4fd5d4
|
15f14f36fd4dd4138cceaac9ac44b678b6d8bbdc
|
refs/heads/master
| 2023-08-05T08:55:49.649209
| 2023-08-03T07:11:20
| 2023-08-03T07:11:20
| 207,671,389
| 273
| 76
| null | 2023-03-20T05:35:21
| 2019-09-10T22:04:50
|
Python
|
UTF-8
|
Python
| false
| false
| 44,842
|
py
|
yys.py
|
import cv2, time, random, os, datetime
import sys, pyautogui, traceback
import numpy as np
import mss
import action
# detect the operating system
print('Operating system:', sys.platform)
if sys.platform=='darwin':
    scalar=True
else:
    scalar=False
# load the template image files (match precision, display names)
imgs = action.load_imgs()
#pyautogui.PAUSE = 0.05
pyautogui.FAILSAFE=False
start_time = time.time()
#print('Program started, current time', time.ctime())
# screenshot, cropped to speed things up
upleft = (0, 0)
if scalar==True:
    downright = (1136,700)
else:
    downright = (1200, 700)
a,b = upleft
c,d = downright
monitor = {"top": b, "left": a, "width": c, "height": d}
start = time.time()
#constants
last_click=None
# startup and settings are loaded above
##########################################################
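# --- Added illustration (not part of the original script) ---
# `action` is a local helper module that is not shown here, so the exact
# signature of `action.locate` is an assumption. A minimal template matcher
# along these lines would behave the way this script uses it (returning a
# list of top-left match coordinates):
#
#   def locate(target, want, pad, threshold=0.8):   # hypothetical sketch
#       template = want[0]                          # want = (image, ...) assumed
#       res = cv2.matchTemplate(target, template, cv2.TM_CCOEFF_NORMED)
#       ys, xs = np.where(res >= threshold)
#       return list(zip(xs, ys))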
def select_mode():
    global start
    end = time.time()
    hours, rem = divmod(end-start, 3600)
    minutes, seconds = divmod(rem, 60)
    print("Elapsed time: {:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
    print(datetime.datetime.now())
    print('''\nMenu: move the mouse to the far right edge of the screen to stop and return to this menu, 0 to quit
    1  Realm raid (kekkai breakthrough)
    2  Soul dungeon (driver)
    3  Soul dungeon (hitter)
    4  Soul dungeon (solo)
    5  Exploration (driver)
    6  Exploration (hitter)
    7  Exploration (solo)
    8  Hyakki Yakou
    9  Auto duel
    10 Current event
    11 Auto card feeding in the realm (Taiyin and umbrella, indoors)
    12 Amulet summoning
    13 Blue daruma leveling
    14 Secret zone summoning
    15 Demon seals and secret tales
    16 Spirit pact (solo)
    17 Debug mode
    ''')
    action.alarm(1)
    raw = input("Select a mode: ")
    try:
        index = int(raw)
    except:
        print('Please enter a number')
        select_mode()
    mode = [0, tupo, yuhun, yuhun2, yuhundanren,\
            gouliang, gouliang2, gouliang3,\
            baigui, douji, huodong,\
            card, chouka, shengxing, mijing, yaoqi,\
            qilingdanren, debug]
    try:
        command = mode[index]
    except:
        print('Number out of range')
        select_mode()
    if index==0:
        quit()
    else:
        start = time.time()
        command()
##########################################################
# Realm raid (kekkai breakthrough)
def tupo():
    global last_click
    count=0  # total attack count
    cishu = 0
    refresh=0
    liaotu=None
    while True :  # until cancelled, or an error occurs
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # screenshot
        #im = np.array(mss.mss().grab(monitor))
        #screen = cv2.cvtColor(im, cv2.COLOR_BGRA2BGR)
        #print(scalar)
        screen=action.screenshot(monitor)
        #cv2.imshow("Image", screen)
        #cv2.waitKey(0)
        # guild raid vs. personal raid detection
        if liaotu==None:
            want = imgs['liaotupo']
            size = want[0].shape
            h, w , ___ = size
            pts = action.locate(screen,want,0)
            if not len(pts) == 0:
                liaotu=True
                print('Guild raid')
            want = imgs['gerentupo']
            size = want[0].shape
            h, w , ___ = size
            pts = action.locate(screen,want,0)
            if not len(pts) == 0:
                liaotu=False
                print('Personal raid')
        if liaotu==False:
            if cishu >= 31:
                print('Attack count limit reached')
                select_mode()
        want = imgs['jingonghuise']
        size = want[0].shape
        h, w , ___ = size
        target = screen
        pts = action.locate(target,want,0)
        if not len(pts) == 0 and liaotu==True:
            refresh=refresh+1
            print('Attack on cooldown, pausing for 5 minutes')
            t=60*5
            time.sleep(t)
        # rewards
        for i in ['jujue','queding',\
                  'shibai','ying','jiangli','jixu',\
                  'jingong','jingong2',\
                  'lingxunzhang','lingxunzhang2','lingxunzhang4',\
                  'shuaxin','zhunbei']:
            #print(i)
            want=imgs[i]
            size = want[0].shape
            h, w , ___ = size
            target=screen
            pts=action.locate(target,want,0)
            if not len(pts)==0:
                if last_click==i:
                    if i=='jingong' or i=='jingong2':
                        refresh=refresh+7
                    else:
                        refresh=refresh+1
                else:
                    refresh=0
                    last_click=i
                #print('repeat count:',refresh)
                if refresh>6:
                    print('Attack count limit reached')
                    select_mode()
                xy = action.cheat(pts[0], w, h-10 )
                pyautogui.click(xy)
                t = random.randint(15,50) / 100
                if i == 'shibai':
                    if cishu>0:
                        cishu = cishu - 1
                    if count>0:
                        count = count - 1
                    print('Total attacks:',count)
                    t = random.randint(50,100) / 100
                elif i=='jingong' or i=='jingong2':
                    if refresh==0:
                        cishu = cishu + 1
                        count=count+1
                        print('Total attacks:',count)
                    t = random.randint(500,800) / 100
                elif i=='lingxunzhang' or i=='lingxunzhang2' or i=='lingxunzhang4':
                    print('Selecting realm...',i)
                    t = random.randint(50,150) / 100
                else:
                    print('Raiding...',i)
                time.sleep(t)
                break
########################################################
# Soul dungeon (driver)
def yuhun():
    global last_click
    cishu=0
    refresh=0
    while True :
        # move the mouse to the far right edge to stop
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # screenshot
        screen=action.screenshot(monitor)
        #print('screen shot ok',time.ctime())
        # out of stamina
        want = imgs['notili']
        size = want[0].shape
        h, w , ___ = size
        target = screen
        pts = action.locate(target,want,0)
        if not len(pts) == 0:
            print('Out of stamina')
            select_mode()
        # auto-click the post-battle screens
        for i in ['jujue','tiaozhan','tiaozhan2',\
                  'moren','queding','querenyuhun','ying','jiangli',\
                  'jixu','shibai']:
            want = imgs[i]
            size = want[0].shape
            h, w , ___ = size
            target = screen
            pts = action.locate(target,want,0)
            if not len(pts) == 0:
                if last_click==i:
                    refresh=refresh+1
                elif i=='querenyuhun':
                    refresh=refresh+2
                else:
                    refresh=0
                    last_click=i
                #print('repeat count:',refresh)
                if refresh>6:
                    print('Attack count limit reached')
                    select_mode()
                if i == 'tiaozhan' or i=='tiaozhan2':
                    if refresh==0:
                        cishu=cishu+1
                        print('Challenges:',cishu)
                    t = random.randint(50,150) / 100
                else:
                    print('Challenging...',i)
                    t = random.randint(50,100) / 100
                xy = action.cheat(pts[0], w, h-10 )
                pyautogui.click(xy)
                time.sleep(t)
                break
########################################################
# Souls dungeon damage dealer
def yuhun2():
    global last_click
    cishu = 0
    refresh = 0
    while True:
        # moving the mouse to the far right aborts back to the menu
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
        # out of stamina check
want = imgs['notili']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Out of stamina')
            select_mode()
        # if the teammate quits, quit as well
want = imgs['tiaozhanhuise']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Teammate has left')
want = imgs['likaiduiwu']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
t = random.randint(15,30) / 100
time.sleep(t)
        # auto-click through the post-battle screens
for i in ['jujue','moren','queding','querenyuhun',\
'ying','jiangli','jixu',\
'jieshou2','jieshou','shibai']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
elif i=='querenyuhun':
refresh=refresh+2
else:
refresh=0
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                elif refresh==0 and i=='jiangli' and not last_click=='querenyuhun':
                    #print('last', last_click)
                    cishu = cishu + 1
                    print('Challenge count:', cishu)
                print('Challenging...', i)
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
last_click=i
t = random.randint(15,30) / 100
time.sleep(t)
break
########################################################
# Souls dungeon solo
def yuhundanren():
    global last_click
    cishu = 0
    refresh = 0
    while True:  # loop until cancelled or an error occurs
if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
        # out of stamina check
want = imgs['notili']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Out of stamina')
            select_mode()
for i in ['jujue','querenyuhun','ying','jiangli','jixu',\
'tiaozhan','tiaozhan2','tiaozhan3','queding','tancha','shibai']:
want=imgs[i]
size = want[0].shape
h, w , ___ = size
target=screen
pts=action.locate(target,want,0)
if not len(pts)==0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                print('Challenging...', i)
if i == 'tiaozhan' or i=='tiaozhan2' or i=='tiaozhan3' or i=='tancha':
if refresh==0:
                        cishu = cishu + 1
                        print('Challenge count:', cishu)
t = random.randint(150,300) / 100
else:
t = random.randint(15,30) / 100
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
time.sleep(t)
break
########################################################
# Exploration driver
def gouliang():
    global last_click
    count = 0
    refresh = 0
    while True:  # loop until cancelled or an error occurs
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
screen=action.screenshot(monitor)
        # out of stamina check
want = imgs['notili']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Out of stamina')
select_mode()
want = imgs['queren']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Confirming exit')
try:
queding = pts[1]
except:
queding = pts[0]
xy = action.cheat(queding, w, h)
pyautogui.click(xy)
pyautogui.moveTo(xy)
t = random.randint(15,30) / 100
time.sleep(t)
        # set the target and start searching
        # once inside the map
        want = imgs['guding']
        target = screen
        pts = action.locate(target,want,0)
        if not len(pts) == 0:
            print('Inside the map')
want = imgs['left']
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if scalar:
right=(854/2, 528/2)
else:
right = (854, 527)
right = action.cheat(right, 10, 10)
pyautogui.click(right)
t = random.randint(50,80) / 100
time.sleep(t)
continue
for i in ['boss', 'jian']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                    #print('repeat count:', refresh)
                    if refresh > 6:
                        print('Retry limit reached')
                        select_mode()
                    if refresh == 0:
                        count = count + 1
                    print('Clicking mob', i)
                    print('Exploration count:', count)
xx = action.cheat(pts[0], w, h)
pyautogui.click(xx)
time.sleep(0.5)
break
if i=='jian' and len(pts)==0:
for i in ['queren', 'tuichu']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
                    target = screen
                    pts = action.locate(target,want,0)
                    if not len(pts) == 0:
                        print('Exiting', i)
try:
queding = pts[1]
except:
queding = pts[0]
queding = action.cheat(queding, w, h)
pyautogui.click(queding)
t = random.randint(50,80) / 100
time.sleep(t)
break
continue
for i in ['jujue','queding','ying','querenyuhun',\
'jiangli','jixu',\
'tiaozhan','ditu']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                print('Collecting rewards', i)
xy = action.cheat(pts[0], w, h )
pyautogui.click(xy)
if i=='queding':
t = random.randint(150,200) / 100
else:
t = random.randint(15,30) / 100
time.sleep(t)
break
########################################################
# Exploration damage dealer
def gouliang2():
    global last_click
    refresh = 0
    while True:  # loop until cancelled or an error occurs
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
        # out of stamina check
want = imgs['notili']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Out of stamina')
            select_mode()
        # once inside the map
want = imgs['guding']
pts = action.locate(screen,want,0)
if not len(pts) == 0:
            print('Inside the map')
            want = imgs['xiao']
            pts = action.locate(screen,want,0)
            if not len(pts) == 0:
                print('In a party')
            else:
                print('Leaving to re-form the party')
for i in ['queren', 'queren2','tuichu']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
pts = action.locate(screen,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                    #print('repeat count:', refresh)
                    if refresh > 6:
                        print('Retry limit reached')
                        select_mode()
                    print('Exiting', i)
try:
queding = pts[1]
except:
queding = pts[0]
queding = action.cheat(queding, w, h)
pyautogui.click(queding)
t = random.randint(50,80) / 100
time.sleep(t)
break
continue
for i in ['jujue','jieshou','querenyuhun','ying',\
'jiangli','jixu']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if i=='jieshou':
a,b=pts[0]
if a<50:
break
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                print('Collecting rewards', i)
                xy = action.cheat(pts[0], w, h-10 )
                pyautogui.click(xy)
                # both branches of the original if/else used the same delay
                t = random.randint(15,30) / 100
time.sleep(t)
break
########################################################
# Exploration solo
def gouliang3():
    global last_click
    count = 0
    refresh = 0
    while True:  # loop until cancelled or an error occurs
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
screen=action.screenshot(monitor)
        # out of stamina check
want = imgs['notili']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Out of stamina')
select_mode()
want = imgs['queren']
size = want[0].shape
h, w , ___ = size
target = screen
        pts = action.locate(target,want,0)
        if not len(pts) == 0:
            print('Confirming exit')
try:
queding = pts[1]
except:
queding = pts[0]
xy = action.cheat(queding, w, h)
pyautogui.click(xy)
pyautogui.moveTo(xy)
t = random.randint(15,30) / 100
time.sleep(t)
        # set the target and start searching
        # once inside the map
        want = imgs['guding']
        pts = action.locate(screen,want,0)
        if not len(pts) == 0:
            print('Inside the map')
want = imgs['left']
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if scalar:
right=(854/2, 528/2)
else:
right = (854, 527)
right = action.cheat(right, 10, 10)
pyautogui.click(right)
t = random.randint(50,80) / 100
time.sleep(t)
continue
for i in ['boss', 'jian']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                    #print('repeat count:', refresh)
                    if refresh > 6:
                        print('Retry limit reached')
                        select_mode()
                    if refresh == 0:
                        count = count + 1
                    print('Clicking mob', i)
                    print('Exploration count:', count)
                    if count > 500:
                        print('Count limit reached')
                        select_mode()
xx = action.cheat(pts[0], w, h)
pyautogui.click(xx)
time.sleep(0.5)
break
if len(pts)==0:
for i in ['queren','queren2','tuichu']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
pts = action.locate(screen,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                        #print('repeat count:', refresh)
                        if refresh > 6:
                            print('Retry limit reached')
                            select_mode()
                        print('Exiting', i)
try:
queding = pts[1]
except:
queding = pts[0]
queding = action.cheat(queding, w, h)
pyautogui.click(queding)
t = random.randint(50,80) / 100
time.sleep(t)
break
continue
for i in ['jujue','querenyuhun',\
'tansuo','ying','jiangli','jixu','c28','ditu']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                print('Collecting rewards', i)
xy = action.cheat(pts[0], w, h )
pyautogui.click(xy)
t = random.randint(15,30) / 100
time.sleep(t)
break
########################################################
# Hyakki Yagyou (demon parade)
def baigui():
    global last_click
    refresh = 0
    cishu = 0
    while True:  # loop until cancelled or an error occurs
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
        # set the target and start searching
        # once inside
for i in ['baigui','gailv','douzihuoqu','miaozhun','baiguijieshu']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
                refresh = 0
                print('Clicking', i)
xy = action.cheat(pts[0], w, h )
pyautogui.click(xy)
t = random.randint(15,30) / 100
time.sleep(t)
continue
want=imgs['inbaigui']
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            #print('inside Hyakki Yagyou')
            want = imgs['blank']
            target = screen
            pts = action.locate(target,want,0)
            if len(pts) == 0:
                refresh = 0
                # a mob appeared!
                print('Clicking mob')
pts2 = (640, 450)
xx = action.cheat(pts2, 100, 80)
pyautogui.click(xx)
time.sleep(0.5)
continue
i='jinru'
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
            print('repeat count:', refresh)
            if refresh > 6:
                print('Retry limit reached')
                select_mode()
            cishu = cishu + 1
            print('Entering Hyakki:', cishu)
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
pyautogui.moveTo(xy)
t = random.randint(10,20) / 100
time.sleep(t)
i='kaishi'
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            refresh = 0
            print('Bet selection screen')
i='ya'
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts2 = action.locate(target,want,0)
if not len(pts2) == 0:
                print('Clicking start: ', pts[0])
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
pyautogui.moveTo(xy)
t = random.randint(15,30) / 100
time.sleep(t)
            else:
                # choose a bet
                index = random.randint(0,2)
                pts2 = (300+index*340, 500)
                print('Choosing bet: ', index)
xy = action.cheat(pts2, w, h-10 )
pyautogui.click(xy)
pyautogui.moveTo(xy)
t = random.randint(50,100) / 100
time.sleep(t)
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
pyautogui.moveTo(xy)
t = random.randint(15,30) / 100
time.sleep(t)
########################################################
# Duel (arena)
def douji():
    global last_click
    doujipaidui = 0
    refresh = 0
    while True:  # loop until cancelled or an error occurs
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
for i in ['jujue','shoudong','zidong','queren',\
'douji','douji3','douji4',\
'doujiqueren','doujiend','ying','jixu',\
'zhunbei','zhunbei2',\
'doujiquxiao']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                if i=='douji' or i=='douji4':
                    doujipaidui = 0
                    print('Duel started', i)
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
t = random.randint(15,30) / 100
time.sleep(t)
break
elif i=='doujiquxiao':
                    refresh = 0
                    doujipaidui = doujipaidui + 1
                    print('Duel matchmaking:', doujipaidui)
                    if doujipaidui > 5:
                        doujipaidui = 0
                        print('Cancelling search')
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
t = random.randint(15,30) / 100
time.sleep(t)
break
else:
                    print('Dueling...', i)
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
t = random.randint(50,100) / 100
time.sleep(t)
break
########################################################
# Current event
def huodong():
    global last_click
    count = 0
    refresh = 0
    while True:  # loop until cancelled or an error occurs
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
        # out of stamina check
want = imgs['notili']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Out of stamina')
select_mode()
for i in ['jujue','querenyuhun','queding',\
'hdtiaozhan','ying','hdqueding',\
'shibai','jixu','hdend','liaotianguanbi']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if i=='hdjiacheng':
refresh=0
elif last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                print('In event', i)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                t = 0
                if i=='hdtiaozhan':
                    if refresh == 0:
                        count = count + 1
                        print('Challenge count:', count)
t=1
if i=='hdfaxian' or i=='hdfaxian2':
t=5
                #if i=='hdend':
                #    if refresh==0:
                #        print('Fatigue full, resting 10 minutes')
                #t = 10*60
                #time.sleep(t)
xy = action.cheat(pts[0], w, h)
pyautogui.click(xy)
time.sleep(t)
##########################################################
# Auto realm-card fusion
def card():
    global last_click
    refresh = 0
    while True:
        # moving the mouse to the right aborts back to the menu
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
for i in ['taiyin2','sanshinei','taiyin3']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                print('Realm card *', i)
xy = action.cheat(pts[0], w/2, h-10)
pyautogui.click(xy)
break
        if len(pts) == 0:
            print('Not enough realm cards')
            select_mode()
        for i in range(2):
            # take a screenshot
            im = np.array(mss.mss().grab(monitor))
            screen = cv2.cvtColor(im, cv2.COLOR_BGRA2BGR)
want = imgs['taiyin']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if len(pts) == 0:
                print('Not enough realm cards')
                select_mode()
else:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click='taiyin'
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                print('Realm card', i)
xy = action.cheat(pts[0], w/2, h-10 )
pyautogui.click(xy)
pyautogui.moveTo(xy)
            # take a screenshot
screen=action.screenshot(monitor)
want = imgs['hecheng']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click='hecheng'
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                print('Fusing...')
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
pyautogui.moveTo(xy)
time.sleep(1)
##########################################################
# Gacha summoning
def chouka():
    global last_click
    count = 0
    while True:
        # moving the mouse to the right aborts back to the menu
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
want = imgs['zaicizhaohuan']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            if count > 200:
                print('Count limit reached')
                select_mode()
            count = count + 1
            print('Summoning...', count)
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
#t = random.randint(1,3) / 100
#time.sleep(t)
##########################################################
# Blue-egg shikigami leveling
def shengxing():
    global last_click
    count = 0
    refresh = 0
    while True:
        # moving the mouse to the right aborts back to the menu
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
for i in ['jineng','jixushengxing',\
'jixuyucheng','querenshengxing']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                print('Leveling up...', i)
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
if i=='querenshengxing':
                    if refresh == 0:
                        count = count + 1
                        print('Shikigami leveled:', count)
t = random.randint(250,350) / 100
else:
t = random.randint(20,100) / 100
time.sleep(t)
##########################################################
# Secret zone summon
def mijing():
    global last_click
    refresh = 0
    while True:
        # moving the mouse to the right aborts back to the menu
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
        # check for the chat panel
want = imgs['liaotianguanbi']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            #print('searching for a secret-zone party...')
for i in ['jujue','mijingzhaohuan','mijingzhaohuan2']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                    #print('repeat count:', refresh)
                    if refresh > 6:
                        print('Retry limit reached')
                        select_mode()
                    print('Secret zone summon...', i)
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
#t = random.randint(10,100) / 100
#time.sleep(t)
break
else:
for i in ['jujue','canjia','liaotian']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                    #print('repeat count:', refresh)
                    if refresh > 6:
                        print('Retry limit reached')
                        select_mode()
                    if i=='canjia':
                        print('Joined a secret zone summon!', i)
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
t = random.randint(10,30) / 100
time.sleep(t)
break
########################################################
# Demon seal and secret tales
def yaoqi():
    global last_click
    count = 0
    refresh = 0
    while True:  # loop until cancelled or an error occurs
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
        # delegated tasks
for i in ['jujue','jiangli','jixu','zhunbei',\
'shibai','zidongpipei','zudui2',\
'ying','tiaozhan3','tiaozhan4']:
want = imgs[i]
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                if i=='zidongpipei' or i=='tiaozhan3' or i=='tiaozhan4':
                    if refresh == 0:
                        count = count + 1
                        print('Count:', count)
                    t = 100/100
                elif i=='shibai':
                    print('Auto-run finished')
                    select_mode()
                else:
                    print('In battle...', i)
t = random.randint(30,80) / 100
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
time.sleep(t)
break
        # out of stamina check
want = imgs['notili']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Out of stamina')
            select_mode()
########################################################
# Spirit pact solo
def qilingdanren():
    global last_click
    cishu = 0
    refresh = 0
    while True:  # loop until cancelled or an error occurs
        if pyautogui.position()[0] >= pyautogui.size()[0] * 0.98:
            select_mode()
        # take a screenshot
        screen = action.screenshot(monitor)
        # out of stamina check
want = imgs['notili']
size = want[0].shape
h, w , ___ = size
target = screen
pts = action.locate(target,want,0)
if not len(pts) == 0:
            print('Out of stamina')
            select_mode()
for i in ['jujue','ying','jiangli','jixu','queding',\
'qiling1','mingqi','queren3',\
'tiaozhan5','shibai','xiaozhiren']:
want=imgs[i]
size = want[0].shape
h, w , ___ = size
target=screen
pts=action.locate(target,want,0)
if not len(pts)==0:
if last_click==i:
refresh=refresh+1
else:
refresh=0
last_click=i
                #print('repeat count:', refresh)
                if refresh > 6:
                    print('Retry limit reached')
                    select_mode()
                print('Challenging...', i)
                if i=='tancha' or i=='tiaozhan5':
                    if refresh == 0:
                        cishu = cishu + 1
                        print('Challenge count:', cishu)
t = random.randint(50,150) / 100
elif i=='queren3':
t = random.randint(350,450) / 100
else:
t = random.randint(15,30) / 100
xy = action.cheat(pts[0], w, h-10 )
pyautogui.click(xy)
time.sleep(t)
break
##################################################################
def debug():
    # take a screenshot
    screen = action.screenshot(monitor)
    cv2.imshow("Image", screen)
    print('Screenshot captured; press any key to return')
cv2.waitKey(0)
cv2.destroyAllWindows()
cv2.waitKey(1)
select_mode()
####################################################
if __name__ == '__main__':
select_mode()
|
d549d3a0695aab9dcf9fedc6c532d45961f0f9b7
|
e17660bcf07fe3221a18dc3da68f06c85c40cbf9
|
/src/py2app/__main__.py
|
ceeecc8eb8e782c4fd024cc2e2a021c939d19df5
|
[
"MIT",
"Python-2.0"
] |
permissive
|
ronaldoussoren/py2app
|
bca832cab41b9a365342d400aed4ebbbe80bed0c
|
e9c7a88f34d79c41a3a344ccc14cd97c24904b9f
|
refs/heads/master
| 2023-09-01T05:14:53.388393
| 2023-04-16T08:34:06
| 2023-04-16T08:34:06
| 233,826,136
| 292
| 34
|
NOASSERTION
| 2023-08-30T17:54:44
| 2020-01-14T11:27:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,291
|
py
|
__main__.py
|
import argparse
import pathlib
import sys
try:
import tomllib
except ImportError:
import tomli as tomllib
from . import _builder, _config, _progress
def parse_arguments(argv):
parser = argparse.ArgumentParser(
prog=f"{sys.executable} -mpy2app", description="Build macOS executable bundles"
)
parser.add_argument(
"--pyproject-toml",
dest="pyproject",
default="pyproject.toml",
metavar="FILE",
type=pathlib.Path,
help="pyproject.toml path",
)
parser.add_argument(
"--semi-standalone",
dest="build_type",
default=None,
action="store_const",
const=_config.BuildType.SEMI_STANDALONE,
help="build a semi-standalone bundle",
)
    parser.add_argument(
        "--alias",
        dest="build_type",
        default=None,
        action="store_const",
        const=_config.BuildType.ALIAS,
        help="build an alias bundle",
    )
args = parser.parse_args(argv)
try:
with open(args.pyproject, "rb") as stream:
contents = tomllib.load(stream)
except OSError as exc:
print(f"Cannot open {str(args.pyproject)!r}: {exc}", file=sys.stderr)
sys.exit(1)
try:
config = _config.parse_pyproject(contents, args.pyproject.parent)
except _config.ConfigurationError as exc:
print(f"{args.pyproject}: {exc}", file=sys.stderr)
sys.exit(1)
    # XXX: I don't particularly like poking directly in '_local'
if args.build_type is not None:
config._local["build-type"] = args.build_type
return config
def main():
config = parse_arguments(sys.argv[1:])
progress = _progress.Progress()
task_id = progress.add_task("Processing bundles", len(config.bundles))
ok = True
for bundle in config.bundles:
progress.update(
task_id,
current=f"{bundle.build_type.value} {'plugin' if bundle.plugin else 'application'} {bundle.name!r}",
)
ok = _builder.build_bundle(config, bundle, progress) and ok
progress.step_task(task_id)
progress.update(task_id, current="")
progress._progress.stop()
if not ok:
raise SystemExit(1)
if __name__ == "__main__":
main()
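# Example invocation (a sketch; assumes a pyproject.toml carrying py2app
# configuration in the working directory):
#
#   python -m py2app --pyproject-toml pyproject.toml --semi-standalone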
|
8aac95e21d67c74f324c2020f5939640dbd469f6
|
ab7098a9f2f1363bae0b325fcdb0877cbb730699
|
/examples/src/main/python/python_example.py
|
dcbe975027f4df9e2601fd2aaf93136cc3a06d31
|
[
"Apache-2.0"
] |
permissive
|
AbsaOSS/spline-spark-agent
|
06ca449b0d8337324011812dd8dc6e905f688a42
|
0ed969ad4aee0518f4d1f83aab88b6130e431b16
|
refs/heads/develop
| 2023-08-31T09:19:40.314900
| 2023-08-28T12:35:30
| 2023-08-29T09:37:52
| 231,394,159
| 141
| 88
|
Apache-2.0
| 2023-09-14T10:09:44
| 2020-01-02T14:06:46
|
Scala
|
UTF-8
|
Python
| false
| false
| 1,181
|
py
|
python_example.py
|
#
# Copyright 2017 ABSA Group Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Enable Spline tracking.
# For Spark 2.3+ we recommend the codeless approach to enable Spline - by setting spark.sql.queryExecutionListeners
# (See: examples/README.md)
# Otherwise execute the following method to enable Spline manually.
sc._jvm.za.co.absa.spline.harvester \
.SparkLineageInitializer.enableLineageTracking(spark._jsparkSession)
# Execute a Spark job as usual:
spark.read \
.option("header", "true") \
.option("inferschema", "true") \
.csv("data/input/batch/wikidata.csv") \
.write \
.mode('overwrite') \
.csv("data/output/batch/python-sample.csv")
|
afb756ba55e6d5532e5d80afdd2b72471e080db5
|
818322f58e5243fbffb016a8f3b440394e1e0115
|
/tests/plugin/resolver_test.py
|
f30c76ff34178991ec2920c99697f206af6377e4
|
[
"Apache-2.0"
] |
permissive
|
aws-cloudformation/cloudformation-cli-python-plugin
|
8a4c92d0f2d82014632d16f0309e5e4eba2feb9c
|
a672fb3f42de5ab751ae0e437b739b0db1a7ab36
|
refs/heads/master
| 2023-08-17T13:22:11.525308
| 2023-08-15T21:02:33
| 2023-08-15T21:02:33
| 162,644,504
| 101
| 40
|
Apache-2.0
| 2023-08-15T21:02:35
| 2018-12-21T00:13:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,524
|
py
|
resolver_test.py
|
import pytest
from rpdk.core.jsonutils.resolver import ContainerType, ResolvedType
from rpdk.python.resolver import PRIMITIVE_TYPES, contains_model, translate_type
RESOLVED_TYPES = [
(ResolvedType(ContainerType.PRIMITIVE, item_type), native_type)
for item_type, native_type in PRIMITIVE_TYPES.items()
]
def test_translate_type_model_typevar_not_resource_model():
    translated = translate_type(ResolvedType(ContainerType.MODEL, "Foo"))
    assert translated == '"_Foo"'
def test_translate_type_model_typevar_main_resource_model():
    translated = translate_type(ResolvedType(ContainerType.MODEL, "ResourceModel"))
    assert translated == '"_ResourceModel"'
@pytest.mark.parametrize("resolved_type,native_type", RESOLVED_TYPES)
def test_translate_type_primitive(resolved_type, native_type):
assert translate_type(resolved_type) == native_type
@pytest.mark.parametrize("resolved_type,native_type", RESOLVED_TYPES)
def test_translate_type_dict(resolved_type, native_type):
    translated = translate_type(ResolvedType(ContainerType.DICT, resolved_type))
    assert translated == f"MutableMapping[str, {native_type}]"
@pytest.mark.parametrize("resolved_type,native_type", RESOLVED_TYPES)
def test_translate_type_list(resolved_type, native_type):
    translated = translate_type(ResolvedType(ContainerType.LIST, resolved_type))
    assert translated == f"Sequence[{native_type}]"
@pytest.mark.parametrize("resolved_type,native_type", RESOLVED_TYPES)
def test_translate_type_set(resolved_type, native_type):
    translated = translate_type(ResolvedType(ContainerType.SET, resolved_type))
    assert translated == f"AbstractSet[{native_type}]"
@pytest.mark.parametrize("resolved_type,_native_type", RESOLVED_TYPES)
def test_translate_type_unknown(resolved_type, _native_type):
with pytest.raises(ValueError):
translate_type(ResolvedType("foo", resolved_type))
@pytest.mark.parametrize("resolved_type,_native_type", RESOLVED_TYPES)
def test_contains_model_list_containing_primitive(resolved_type, _native_type):
assert contains_model(ResolvedType(ContainerType.LIST, resolved_type)) is False
def test_contains_model_list_containing_model():
resolved_type = ResolvedType(
ContainerType.LIST,
ResolvedType(ContainerType.LIST, ResolvedType(ContainerType.MODEL, "Foo")),
)
assert contains_model(resolved_type) is True
def test_translate_type_multiple():
    translated = translate_type(ResolvedType(ContainerType.MULTIPLE, "multiple"))
    assert translated == "Any"
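# Illustrative behavior implied by the tests above (a sketch, not an
# additional test; it assumes "string" maps to "str" in PRIMITIVE_TYPES):
#
#   translate_type(ResolvedType(ContainerType.MODEL, "Foo")) == '"_Foo"'
#   translate_type(ResolvedType(ContainerType.LIST,
#       ResolvedType(ContainerType.PRIMITIVE, "string"))) == "Sequence[str]"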
|
62770fb24570eee4c00c52667ff22e7c36e93704
|
9935c1a1142a19d72dd8ca5b8a8ad2a70e2a7edb
|
/Plugins/Aspose_Words_Java_for_Jython/asposewords/quickstart/FindAndReplace.py
|
1e14bb0299980640061630ea9d4cb2c0fd58228f
|
[
"MIT"
] |
permissive
|
aspose-words/Aspose.Words-for-Java
|
705ad9a8047b8d9b7986dd5569859af24632afc0
|
2dceb8acb5547bbc0a62c49587b97fd4f3159b36
|
refs/heads/master
| 2023-08-18T08:43:51.900921
| 2023-05-15T13:48:56
| 2023-05-15T13:48:56
| 2,849,872
| 347
| 194
|
MIT
| 2023-02-01T13:45:06
| 2011-11-25T13:43:55
|
Java
|
UTF-8
|
Python
| false
| false
| 726
|
py
|
FindAndReplace.py
|
from asposewords import Settings
from com.aspose.words import Document
class FindAndReplace:
def __init__(self):
dataDir = Settings.dataDir + 'quickstart/'
doc = Document(dataDir + 'ReplaceSimple.doc')
# Check the text of the document.
print "Original document text: " + doc.getRange().getText()
# Replace the text in the document.
doc.getRange().replace("_CustomerName_", "James Bond", False, False)
# Check the replacement was made.
print "Document text after replace: " + doc.getRange().getText()
doc.save(dataDir + 'ReplaceSimple_Out.doc')
if __name__ == '__main__':
FindAndReplace()
|
90da3e1425a9404cbea0d3bfe5a6288301480c01
|
c5ddaaa915829e946762f83a610a3ef43ad1f190
|
/src/pip/_vendor/distlib.pyi
|
ea94b159a69cb34c0121d1253ded7f0d62747e95
|
[
"MIT",
"LicenseRef-scancode-python-cwi",
"LicenseRef-scancode-other-copyleft",
"Python-2.0",
"BSD-3-Clause",
"0BSD",
"GPL-1.0-or-later",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
pypa/pip
|
4cebef8b5e8f69921a4cbd37d5b2021fd3200a16
|
0778c1c153da7da457b56df55fb77cbba08dfb0c
|
refs/heads/main
| 2023-08-15T01:51:24.039937
| 2023-08-14T06:22:21
| 2023-08-14T06:22:21
| 1,446,467
| 8,612
| 3,321
|
MIT
| 2023-09-12T07:08:11
| 2011-03-06T14:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 21
|
pyi
|
distlib.pyi
|
from distlib import *
|
d829478538b13b384478deee6eb4b47e266360e1
|
c9ff14ff176600169b6e9f6490ab32f5c3af60e0
|
/jcvi/graphics/assembly.py
|
448c3b57ab3f6333dc89a7e5a4491add69a9bcdd
|
[
"BSD-3-Clause",
"BSD-2-Clause"
] |
permissive
|
tanghaibao/jcvi
|
c7a070692d53784a34378e19e435cb9a86d2cd2e
|
695bd2eee98b14118b54fc37e38cd0222ce6a5e9
|
refs/heads/main
| 2023-09-01T01:22:04.353148
| 2023-08-30T01:59:11
| 2023-08-30T01:59:11
| 1,130,393
| 641
| 193
|
BSD-2-Clause
| 2023-09-01T03:17:24
| 2010-12-01T23:18:02
|
Python
|
UTF-8
|
Python
| false
| false
| 15,265
|
py
|
assembly.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Assembly QC plots, including general statistics, base and mate coverages, and
scaffolding consistencies.
"""
import sys
import logging
import os.path as op
from jcvi.formats.fasta import Fasta
from jcvi.formats.bed import Bed, BedLine
from jcvi.formats.sizes import Sizes
from jcvi.assembly.base import calculate_A50
from jcvi.assembly.coverage import Coverage
from jcvi.graphics.base import plt, Rectangle, set_human_base_axis, savefig
from jcvi.utils.cbook import thousands
from jcvi.apps.base import OptionParser, ActionDispatcher, need_update
def main():
actions = (
("A50", "compare A50 graphics for a set of FASTA files"),
("coverage", "plot coverage from a set of BED files"),
("qc", "performs QC graphics on given contig/scaffold"),
("scaffold", "plot the alignment of the scaffold to other evidences"),
("covlen", "plot coverage vs length"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def covlen(args):
"""
%prog covlen covfile fastafile
Plot coverage vs length. `covfile` is two-column listing contig id and
depth of coverage.
"""
import numpy as np
import pandas as pd
import seaborn as sns
from jcvi.formats.base import DictFile
p = OptionParser(covlen.__doc__)
p.add_option("--maxsize", default=1000000, type="int", help="Max contig size")
p.add_option("--maxcov", default=100, type="int", help="Max contig size")
p.add_option("--color", default="m", help="Color of the data points")
p.add_option(
"--kind",
default="scatter",
choices=("scatter", "reg", "resid", "kde", "hex"),
help="Kind of plot to draw",
)
opts, args, iopts = p.set_image_options(args, figsize="8x8")
if len(args) != 2:
sys.exit(not p.print_help())
covfile, fastafile = args
cov = DictFile(covfile, cast=float)
s = Sizes(fastafile)
data = []
maxsize, maxcov = opts.maxsize, opts.maxcov
for ctg, size in s.iter_sizes():
c = cov.get(ctg, 0)
if size > maxsize:
continue
if c > maxcov:
continue
data.append((size, c))
x, y = zip(*data)
x = np.array(x)
y = np.array(y)
logging.debug("X size {0}, Y size {1}".format(x.size, y.size))
df = pd.DataFrame()
xlab, ylab = "Length", "Coverage of depth (X)"
df[xlab] = x
df[ylab] = y
sns.jointplot(
xlab,
ylab,
kind=opts.kind,
data=df,
xlim=(0, maxsize),
ylim=(0, maxcov),
stat_func=None,
edgecolor="w",
color=opts.color,
)
figname = covfile + ".pdf"
savefig(figname, dpi=iopts.dpi, iopts=iopts)
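# Example `covfile` layout for covlen (two tab-separated columns:
# contig id and depth of coverage; the values are placeholders):
#
#   ctg1    35.2
#   ctg2    18.7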
def coverage(args):
"""
%prog coverage fastafile ctg bedfile1 bedfile2 ..
Plot coverage from a set of BED files that contain the read mappings. The
paired read span will be converted to a new bedfile that contain the happy
mates. ctg is the chr/scf/ctg that you want to plot the histogram on.
If the bedfiles already contain the clone spans, turn on --spans.
"""
from jcvi.formats.bed import mates, bedpe
p = OptionParser(coverage.__doc__)
p.add_option("--ymax", default=None, type="int", help="Limit ymax")
p.add_option(
"--spans",
default=False,
action="store_true",
help="BED files already contain clone spans",
)
opts, args, iopts = p.set_image_options(args, figsize="8x5")
if len(args) < 3:
sys.exit(not p.print_help())
fastafile, ctg = args[0:2]
bedfiles = args[2:]
sizes = Sizes(fastafile)
size = sizes.mapping[ctg]
plt.figure(1, (iopts.w, iopts.h))
ax = plt.gca()
bins = 100 # smooth the curve
lines = []
legends = []
not_covered = []
yy = 0.9
for bedfile, c in zip(bedfiles, "rgbcky"):
if not opts.spans:
pf = bedfile.rsplit(".", 1)[0]
matesfile = pf + ".mates"
if need_update(bedfile, matesfile):
matesfile, matesbedfile = mates([bedfile, "--lib"])
bedspanfile = pf + ".spans.bed"
if need_update(matesfile, bedspanfile):
bedpefile, bedspanfile = bedpe(
[bedfile, "--span", "--mates={0}".format(matesfile)]
)
bedfile = bedspanfile
bedsum = Bed(bedfile).sum(seqid=ctg)
notcoveredbases = size - bedsum
legend = bedfile.split(".")[0]
msg = "{0}: {1} bp not covered".format(legend, thousands(notcoveredbases))
not_covered.append(msg)
print(msg, file=sys.stderr)
ax.text(0.1, yy, msg, color=c, size=9, transform=ax.transAxes)
yy -= 0.08
cov = Coverage(bedfile, sizes.filename)
x, y = cov.get_plot_data(ctg, bins=bins)
(line,) = ax.plot(x, y, "-", color=c, lw=2, alpha=0.5)
lines.append(line)
legends.append(legend)
leg = ax.legend(lines, legends, shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
ylabel = "Average depth per {0}Kb".format(size / bins / 1000)
ax.set_xlim(0, size)
ax.set_ylim(0, opts.ymax)
ax.set_xlabel(ctg)
ax.set_ylabel(ylabel)
set_human_base_axis(ax)
figname = "{0}.{1}.pdf".format(fastafile, ctg)
savefig(figname, dpi=iopts.dpi, iopts=iopts)
def scaffolding(ax, scaffoldID, blastf, qsizes, ssizes, qbed, sbed, highlights=None):
from jcvi.graphics.blastplot import blastplot
# qsizes, qbed are properties for the evidences
# ssizes, sbed are properties for the current scaffoldID
blastplot(
ax,
blastf,
qsizes,
ssizes,
qbed,
sbed,
style="circle",
insetLabels=True,
stripNames=True,
highlights=highlights,
)
# FPC_scf.bed => FPC
fname = qbed.filename.split(".")[0].split("_")[0]
xtitle = fname
if xtitle == "FPC":
ax.set_xticklabels([""] * len(ax.get_xticklabels()))
ax.set_xlabel(xtitle, color="g")
for x in ax.get_xticklines():
x.set_visible(False)
def plot_one_scaffold(
scaffoldID, ssizes, sbed, trios, imagename, iopts, highlights=None
):
ntrios = len(trios)
fig = plt.figure(1, (14, 8))
plt.cla()
plt.clf()
root = fig.add_axes([0, 0, 1, 1])
axes = [fig.add_subplot(1, ntrios, x) for x in range(1, ntrios + 1)]
scafsize = ssizes.get_size(scaffoldID)
for trio, ax in zip(trios, axes):
blastf, qsizes, qbed = trio
scaffolding(
ax, scaffoldID, blastf, qsizes, ssizes, qbed, sbed, highlights=highlights
)
root.text(
0.5,
0.95,
"{0} (size={1})".format(scaffoldID, thousands(scafsize)),
size=18,
ha="center",
color="b",
)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
savefig(imagename, dpi=iopts.dpi, iopts=iopts)
def scaffold(args):
"""
%prog scaffold scaffold.fasta synteny.blast synteny.sizes synteny.bed
physicalmap.blast physicalmap.sizes physicalmap.bed
As evaluation of scaffolding, visualize external line of evidences:
* Plot synteny to an external genome
* Plot alignments to physical map
* Plot alignments to genetic map (TODO)
Each trio defines one panel to be plotted. blastfile defines the matchings
between the evidences vs scaffolds. Then the evidence sizes, and evidence
bed to plot dot plots.
This script will plot a dot in the dot plot in the corresponding location
the plots are one contig/scaffold per plot.
"""
from more_itertools import grouper
p = OptionParser(scaffold.__doc__)
p.add_option(
"--cutoff",
type="int",
default=1000000,
help="Plot scaffolds with size larger than",
)
p.add_option(
"--highlights",
help="A set of regions in BED format to highlight",
)
opts, args, iopts = p.set_image_options(args, figsize="14x8", dpi=150)
if len(args) < 4 or len(args) % 3 != 1:
sys.exit(not p.print_help())
highlights = opts.highlights
scafsizes = Sizes(args[0])
trios = list(grouper(args[1:], 3))
trios = [(a, Sizes(b), Bed(c)) for a, b, c in trios]
if highlights:
hlbed = Bed(highlights)
for scaffoldID, scafsize in scafsizes.iter_sizes():
if scafsize < opts.cutoff:
continue
logging.debug("Loading {0} (size={1})".format(scaffoldID, thousands(scafsize)))
tmpname = scaffoldID + ".sizes"
tmp = open(tmpname, "w")
tmp.write("{0}\t{1}".format(scaffoldID, scafsize))
tmp.close()
tmpsizes = Sizes(tmpname)
tmpsizes.close(clean=True)
if highlights:
subhighlights = list(hlbed.sub_bed(scaffoldID))
imagename = ".".join((scaffoldID, opts.format))
plot_one_scaffold(
scaffoldID,
tmpsizes,
None,
trios,
imagename,
iopts,
highlights=subhighlights,
)
def qc(args):
"""
%prog qc prefix
Expects data files including:
1. `prefix.bedpe` draws Bezier curve between paired reads
2. `prefix.sizes` draws length of the contig/scaffold
3. `prefix.gaps.bed` mark the position of the gaps in sequence
4. `prefix.bed.coverage` plots the base coverage
5. `prefix.pairs.bed.coverage` plots the clone coverage
See assembly.coverage.posmap() for the generation of these files.
"""
from jcvi.graphics.glyph import Bezier
p = OptionParser(qc.__doc__)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(p.print_help())
(prefix,) = args
scf = prefix
# All these files *must* be present in the current folder
fastafile = prefix + ".fasta"
sizesfile = prefix + ".sizes"
gapsbedfile = prefix + ".gaps.bed"
bedfile = prefix + ".bed"
bedpefile = prefix + ".bedpe"
pairsbedfile = prefix + ".pairs.bed"
sizes = Sizes(fastafile).mapping
size = sizes[scf]
fig = plt.figure(1, (8, 5))
root = fig.add_axes([0, 0, 1, 1])
# the scaffold
root.add_patch(Rectangle((0.1, 0.15), 0.8, 0.03, fc="k"))
# basecoverage and matecoverage
ax = fig.add_axes([0.1, 0.45, 0.8, 0.45])
bins = 200 # Smooth the curve
basecoverage = Coverage(bedfile, sizesfile)
matecoverage = Coverage(pairsbedfile, sizesfile)
x, y = basecoverage.get_plot_data(scf, bins=bins)
(baseline,) = ax.plot(x, y, "g-")
x, y = matecoverage.get_plot_data(scf, bins=bins)
(mateline,) = ax.plot(x, y, "r-")
legends = ("Base coverage", "Mate coverage")
leg = ax.legend((baseline, mateline), legends, shadow=True, fancybox=True)
leg.get_frame().set_alpha(0.5)
ax.set_xlim(0, size)
# draw the read pairs
fp = open(bedpefile)
pairs = []
for row in fp:
scf, astart, aend, scf, bstart, bend, clonename = row.split()
astart, bstart = int(astart), int(bstart)
aend, bend = int(aend), int(bend)
start = min(astart, bstart) + 1
end = max(aend, bend)
pairs.append((start, end))
bpratio = 0.8 / size
cutoff = 1000 # inserts smaller than this are not plotted
# this convert from base => x-coordinate
pos = lambda x: (0.1 + x * bpratio)
ypos = 0.15 + 0.03
for start, end in pairs:
dist = end - start
if dist < cutoff:
continue
dist = min(dist, 10000)
# 10Kb == .25 canvas height
height = 0.25 * dist / 10000
xstart = pos(start)
xend = pos(end)
p0 = (xstart, ypos)
p1 = (xstart, ypos + height)
p2 = (xend, ypos + height)
p3 = (xend, ypos)
Bezier(root, p0, p1, p2, p3)
# gaps on the scaffold
fp = open(gapsbedfile)
for row in fp:
b = BedLine(row)
start, end = b.start, b.end
xstart = pos(start)
xend = pos(end)
root.add_patch(Rectangle((xstart, 0.15), xend - xstart, 0.03, fc="w"))
    root.text(0.5, 0.1, scf, color="b", ha="center")
    warn_msg = "Only the inserts > {0}bp are shown".format(cutoff)
    root.text(0.5, 0.05, warn_msg, color="gray", ha="center")
# clean up and output
set_human_base_axis(ax)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
figname = prefix + ".pdf"
savefig(figname, dpi=300)
def generate_plot(filename, rplot="A50.rplot", rpdf="A50.pdf"):
from jcvi.apps.r import RTemplate
rplot_template = """
library(ggplot2)
data <- read.table("$rplot", header=T, sep="\t")
g <- ggplot(data, aes(x=index, y=cumsize, group=fasta))
g + geom_line(aes(colour=fasta)) +
xlab("Contigs") + ylab("Cumulative size (Mb)") +
opts(title="A50 plot", legend.position="top")
ggsave(file="$rpdf")
"""
rtemplate = RTemplate(rplot_template, locals())
rtemplate.run()
def A50(args):
"""
%prog A50 contigs_A.fasta contigs_B.fasta ...
Plots A50 graphics, see blog post (http://blog.malde.org/index.php/a50/)
"""
p = OptionParser(A50.__doc__)
p.add_option(
"--overwrite",
default=False,
action="store_true",
help="overwrite .rplot file if exists",
)
p.add_option(
"--cutoff",
default=0,
type="int",
dest="cutoff",
help="use contigs above certain size",
)
p.add_option(
"--stepsize",
default=10,
type="int",
dest="stepsize",
help="stepsize for the distribution",
)
opts, args = p.parse_args(args)
if not args:
sys.exit(p.print_help())
import numpy as np
from jcvi.utils.table import loadtable
stepsize = opts.stepsize # use stepsize to speed up drawing
rplot = "A50.rplot"
if not op.exists(rplot) or opts.overwrite:
fw = open(rplot, "w")
header = "\t".join(("index", "cumsize", "fasta"))
statsheader = ("Fasta", "L50", "N50", "Min", "Max", "Average", "Sum", "Counts")
statsrows = []
print(header, file=fw)
for fastafile in args:
f = Fasta(fastafile, index=False)
ctgsizes = [length for k, length in f.itersizes()]
ctgsizes = np.array(ctgsizes)
a50, l50, n50 = calculate_A50(ctgsizes, cutoff=opts.cutoff)
cmin, cmax, cmean = min(ctgsizes), max(ctgsizes), np.mean(ctgsizes)
csum, counts = np.sum(ctgsizes), len(ctgsizes)
cmean = int(round(cmean))
statsrows.append((fastafile, l50, n50, cmin, cmax, cmean, csum, counts))
logging.debug("`{0}` ctgsizes: {1}".format(fastafile, ctgsizes))
tag = "{0} (L50={1})".format(op.basename(fastafile).rsplit(".", 1)[0], l50)
logging.debug(tag)
for i, s in zip(range(0, len(a50), stepsize), a50[::stepsize]):
print("\t".join((str(i), str(s / 1000000.0), tag)), file=fw)
fw.close()
table = loadtable(statsheader, statsrows)
print(table, file=sys.stderr)
generate_plot(rplot)
if __name__ == "__main__":
main()
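# Example command lines (a sketch based on the docstrings above; file and
# contig names are placeholders):
#
#   python -m jcvi.graphics.assembly A50 contigsA.fasta contigsB.fasta
#   python -m jcvi.graphics.assembly coverage scaffolds.fasta scf001 lib1.bed lib2.bed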
|
e9c266b538da600bf18350bc9fa356d925dd03f3
|
2ad93a1cf25a580fe980482d2d17a657de3b2523
|
/django-stubs/db/backends/oracle/client.pyi
|
a8e42a9e9fc26f6852f72e4ca278ca6fa3cf8e8b
|
[
"MIT"
] |
permissive
|
typeddjango/django-stubs
|
f35dfcb001e54694a0a1e8c0afcc6e6a3d130c32
|
0117348c3c7713f25f96b46e53ebdeed7bdba544
|
refs/heads/master
| 2023-08-25T19:42:52.707151
| 2023-08-23T15:13:25
| 2023-08-23T15:13:25
| 142,779,680
| 1,133
| 376
|
MIT
| 2023-09-13T19:05:06
| 2018-07-29T17:08:50
|
Python
|
UTF-8
|
Python
| false
| false
| 546
|
pyi
|
client.pyi
|
from collections.abc import Iterable
from typing import Any
from django.db.backends.base.client import BaseDatabaseClient
from django.db.backends.oracle.base import DatabaseWrapper
class DatabaseClient(BaseDatabaseClient):
connection: DatabaseWrapper
executable_name: str
wrapper_name: str
@staticmethod
def connect_string(settings_dict: dict[str, Any]) -> str: ...
@classmethod
def settings_to_cmd_args_env(
cls, settings_dict: dict[str, Any], parameters: Iterable[str]
) -> tuple[list[str], None]: ...
|
4c6408e06a77912e25613df567dad0785232e0b3
|
79131be69e6eb23e900dcce9162eb625804bb747
|
/apprise/plugins/NotifyBark.py
|
596a9a87d0021f55a0eea2625c230290bbcc8ba9
|
[
"BSD-3-Clause"
] |
permissive
|
caronc/apprise
|
c806c05636466c1f895b6c2d5221df8e34ab250e
|
be3baed7e3d33bae973f1714df4ebbf65aa33f85
|
refs/heads/master
| 2023-08-28T06:40:03.718237
| 2023-08-27T20:54:21
| 2023-08-27T20:54:21
| 112,024,290
| 8,426
| 370
|
BSD-3-Clause
| 2023-09-10T20:17:34
| 2017-11-25T18:06:38
|
Python
|
UTF-8
|
Python
| false
| false
| 15,903
|
py
|
NotifyBark.py
|
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Apprise - Push Notification Library.
# Copyright (c) 2023, Chris Caron <lead2gold@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# API: https://github.com/Finb/bark-server/blob/master/docs/API_V2.md#python
#
import requests
import json
from .NotifyBase import NotifyBase
from ..URLBase import PrivacyMode
from ..common import NotifyImageSize
from ..common import NotifyType
from ..utils import parse_list
from ..utils import parse_bool
from ..AppriseLocale import gettext_lazy as _
# Sounds generated off of: https://github.com/Finb/Bark/tree/master/Sounds
BARK_SOUNDS = (
"alarm.caf",
"anticipate.caf",
"bell.caf",
"birdsong.caf",
"bloom.caf",
"calypso.caf",
"chime.caf",
"choo.caf",
"descent.caf",
"electronic.caf",
"fanfare.caf",
"glass.caf",
"gotosleep.caf",
"healthnotification.caf",
"horn.caf",
"ladder.caf",
"mailsent.caf",
"minuet.caf",
"multiwayinvitation.caf",
"newmail.caf",
"newsflash.caf",
"noir.caf",
"paymentsuccess.caf",
"shake.caf",
"sherwoodforest.caf",
"silence.caf",
"spell.caf",
"suspense.caf",
"telegraph.caf",
"tiptoes.caf",
"typewriters.caf",
"update.caf",
)
# Supported Level Entries
class NotifyBarkLevel:
"""
Defines the Bark Level options
"""
ACTIVE = 'active'
TIME_SENSITIVE = 'timeSensitive'
PASSIVE = 'passive'
BARK_LEVELS = (
NotifyBarkLevel.ACTIVE,
NotifyBarkLevel.TIME_SENSITIVE,
NotifyBarkLevel.PASSIVE,
)
class NotifyBark(NotifyBase):
"""
A wrapper for Notify Bark Notifications
"""
# The default descriptive name associated with the Notification
service_name = 'Bark'
# The services URL
service_url = 'https://github.com/Finb/Bark'
# The default protocol
protocol = 'bark'
# The default secure protocol
secure_protocol = 'barks'
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_bark'
# Allows the user to specify the NotifyImageSize object; this is supported
# through the webhook
image_size = NotifyImageSize.XY_128
# Define object templates
templates = (
'{schema}://{host}/{targets}',
'{schema}://{host}:{port}/{targets}',
'{schema}://{user}:{password}@{host}/{targets}',
'{schema}://{user}:{password}@{host}:{port}/{targets}',
)
# Define our template arguments
template_tokens = dict(NotifyBase.template_tokens, **{
'host': {
'name': _('Hostname'),
'type': 'string',
'required': True,
},
'port': {
'name': _('Port'),
'type': 'int',
'min': 1,
'max': 65535,
},
'user': {
'name': _('Username'),
'type': 'string',
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
},
'target_device': {
'name': _('Target Device'),
'type': 'string',
'map_to': 'targets',
},
'targets': {
'name': _('Targets'),
'type': 'list:string',
'required': True,
},
})
# Define our template arguments
template_args = dict(NotifyBase.template_args, **{
'to': {
'alias_of': 'targets',
},
'sound': {
'name': _('Sound'),
'type': 'choice:string',
'values': BARK_SOUNDS,
},
'level': {
'name': _('Level'),
'type': 'choice:string',
'values': BARK_LEVELS,
},
'click': {
'name': _('Click'),
'type': 'string',
},
'badge': {
'name': _('Badge'),
'type': 'int',
'min': 0,
},
'category': {
'name': _('Category'),
'type': 'string',
},
'group': {
'name': _('Group'),
'type': 'string',
},
'image': {
'name': _('Include Image'),
'type': 'bool',
'default': True,
'map_to': 'include_image',
},
})
def __init__(self, targets=None, include_image=True, sound=None,
category=None, group=None, level=None, click=None,
badge=None, **kwargs):
"""
Initialize Notify Bark Object
"""
super().__init__(**kwargs)
# Prepare our URL
self.notify_url = '%s://%s%s/push' % (
'https' if self.secure else 'http',
self.host,
':{}'.format(self.port)
if (self.port and isinstance(self.port, int)) else '',
)
# Assign our category
self.category = \
category if isinstance(category, str) else None
# Assign our group
self.group = group if isinstance(group, str) else None
# Initialize device list
self.targets = parse_list(targets)
# Place an image inline with the message body
self.include_image = include_image
# A clickthrough option for notifications
self.click = click
# Badge
try:
# Acquire our badge count if we can:
# - We accept both the integer form as well as a string
# representation
self.badge = int(badge)
if self.badge < 0:
raise ValueError()
except TypeError:
# NoneType means use Default; this is an okay exception
self.badge = None
except ValueError:
self.badge = None
            self.logger.warning(
                'The specified Bark badge ({}) is not valid.'.format(badge))
# Sound (easy-lookup)
self.sound = None if not sound else next(
(f for f in BARK_SOUNDS if f.startswith(sound.lower())), None)
if sound and not self.sound:
            self.logger.warning(
                'The specified Bark sound ({}) was not found.'.format(sound))
# Level
self.level = None if not level else next(
(f for f in BARK_LEVELS if f[0] == level[0]), None)
if level and not self.level:
            self.logger.warning(
                'The specified Bark level ({}) is not valid.'.format(level))
return
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Bark Notification
"""
# error tracking (used for function return)
has_error = False
if not self.targets:
# We have nothing to notify; we're done
self.logger.warning('There are no Bark devices to notify')
return False
# Prepare our headers
headers = {
'User-Agent': self.app_id,
'Content-Type': 'application/json; charset=utf-8',
}
# Prepare our payload (sample below)
# {
# "body": "Test Bark Server",
# "device_key": "nysrshcqielvoxsa",
# "title": "bleem",
# "category": "category",
# "sound": "minuet.caf",
# "badge": 1,
# "icon": "https://day.app/assets/images/avatar.jpg",
# "group": "test",
# "url": "https://mritd.com"
# }
payload = {
'title': title if title else self.app_desc,
'body': body,
}
# Acquire our image url if configured to do so
image_url = None if not self.include_image else \
self.image_url(notify_type)
if image_url:
payload['icon'] = image_url
if self.sound:
payload['sound'] = self.sound
if self.click:
payload['url'] = self.click
if self.badge:
payload['badge'] = self.badge
if self.level:
payload['level'] = self.level
if self.category:
payload['category'] = self.category
if self.group:
payload['group'] = self.group
auth = None
if self.user:
auth = (self.user, self.password)
# Create a copy of the targets
targets = list(self.targets)
while len(targets) > 0:
# Retrieve our device key
target = targets.pop()
payload['device_key'] = target
self.logger.debug('Bark POST URL: %s (cert_verify=%r)' % (
self.notify_url, self.verify_certificate,
))
self.logger.debug('Bark Payload: %s' % str(payload))
# Always call throttle before any remote server i/o is made
self.throttle()
try:
r = requests.post(
self.notify_url,
data=json.dumps(payload),
headers=headers,
auth=auth,
verify=self.verify_certificate,
timeout=self.request_timeout,
)
if r.status_code != requests.codes.ok:
# We had a problem
status_str = \
NotifyBark.http_response_code_lookup(
r.status_code)
self.logger.warning(
'Failed to send Bark notification to {}: '
'{}{}error={}.'.format(
target,
status_str,
', ' if status_str else '',
r.status_code))
self.logger.debug(
'Response Details:\r\n{}'.format(r.content))
# Mark our failure
has_error = True
continue
else:
self.logger.info(
'Sent Bark notification to {}.'.format(target))
except requests.RequestException as e:
self.logger.warning(
'A Connection error occurred sending Bark '
'notification to {}.'.format(target))
self.logger.debug('Socket Exception: %s' % str(e))
# Mark our failure
has_error = True
continue
return not has_error
def url(self, privacy=False, *args, **kwargs):
"""
Returns the URL built dynamically based on specified arguments.
"""
# Define any URL parameters
params = {
'image': 'yes' if self.include_image else 'no',
}
if self.sound:
params['sound'] = self.sound
if self.click:
params['click'] = self.click
if self.badge:
params['badge'] = str(self.badge)
if self.level:
params['level'] = self.level
if self.category:
params['category'] = self.category
if self.group:
params['group'] = self.group
# Extend our parameters
params.update(self.url_parameters(privacy=privacy, *args, **kwargs))
# Determine Authentication
auth = ''
if self.user and self.password:
auth = '{user}:{password}@'.format(
user=NotifyBark.quote(self.user, safe=''),
password=self.pprint(
self.password, privacy, mode=PrivacyMode.Secret, safe=''),
)
elif self.user:
auth = '{user}@'.format(
user=NotifyBark.quote(self.user, safe=''),
)
default_port = 443 if self.secure else 80
return '{schema}://{auth}{hostname}{port}/{targets}?{params}'.format(
schema=self.secure_protocol if self.secure else self.protocol,
auth=auth,
# never encode hostname since we're expecting it to be a valid one
hostname=self.host,
port='' if self.port is None or self.port == default_port
else ':{}'.format(self.port),
targets='/'.join(
[NotifyBark.quote('{}'.format(x)) for x in self.targets]),
params=NotifyBark.urlencode(params),
)
def __len__(self):
"""
Returns the number of targets associated with this notification
"""
return len(self.targets)
@staticmethod
def parse_url(url):
"""
Parses the URL and returns enough arguments that can allow
us to re-instantiate this object.
"""
results = NotifyBase.parse_url(url)
if not results:
# We're done early as we couldn't load the results
return results
# Apply our targets
results['targets'] = NotifyBark.split_path(results['fullpath'])
# Category
if 'category' in results['qsd'] and results['qsd']['category']:
results['category'] = NotifyBark.unquote(
results['qsd']['category'].strip())
# Group
if 'group' in results['qsd'] and results['qsd']['group']:
results['group'] = NotifyBark.unquote(
results['qsd']['group'].strip())
# Badge
if 'badge' in results['qsd'] and results['qsd']['badge']:
results['badge'] = NotifyBark.unquote(
results['qsd']['badge'].strip())
# Level
if 'level' in results['qsd'] and results['qsd']['level']:
results['level'] = NotifyBark.unquote(
results['qsd']['level'].strip())
# Click (URL)
if 'click' in results['qsd'] and results['qsd']['click']:
results['click'] = NotifyBark.unquote(
results['qsd']['click'].strip())
# Sound
if 'sound' in results['qsd'] and results['qsd']['sound']:
results['sound'] = NotifyBark.unquote(
results['qsd']['sound'].strip())
# The 'to' makes it easier to use yaml configuration
if 'to' in results['qsd'] and len(results['qsd']['to']):
results['targets'] += \
NotifyBark.parse_list(results['qsd']['to'])
# use image= for consistency with the other plugins
results['include_image'] = \
parse_bool(results['qsd'].get('image', True))
return results
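# Example usage (a sketch; the host and device key are placeholders — real
# values come from your own Bark server and app):
#
#   import apprise
#   apobj = apprise.Apprise()
#   apobj.add('barks://bark.example.com/devicekey123?sound=minuet&level=active')
#   apobj.notify(title='Hello', body='Test Bark Server')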
|
35aeda4fd2b93247d3ddefe06f95a5bcca6b9417
|
224a034669068398e59962d6470fb72dbe20e8c9
|
/src/lightkurve/io/everest.py
|
d7be1efc593e929216ccb12f25869e0d118bb0e3
|
[
"MIT"
] |
permissive
|
lightkurve/lightkurve
|
b892b54ffbf3cb956f88300cb7d72b7e99fefdbf
|
7d485b69e9bbe58a1e7ba8d988387dc5d469ab36
|
refs/heads/main
| 2023-08-28T05:20:55.072927
| 2023-08-22T20:42:53
| 2023-08-22T20:42:53
| 118,387,904
| 148
| 66
|
MIT
| 2023-09-14T02:24:36
| 2018-01-22T00:49:59
|
Python
|
UTF-8
|
Python
| false
| false
| 2,316
|
py
|
everest.py
|
"""Reader for K2 EVEREST light curves."""
from ..lightcurve import KeplerLightCurve
from ..utils import KeplerQualityFlags
from .generic import read_generic_lightcurve
def read_everest_lightcurve(
filename, flux_column="flux", quality_bitmask="default", **kwargs
):
"""Read an EVEREST light curve file.
More information: https://archive.stsci.edu/hlsp/everest
Parameters
----------
filename : str
Local path or remote url of a Kepler light curve FITS file.
    flux_column : str
        Which column in the FITS file contains the preferred flux data?
        Defaults to ``"flux"``.
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored (`quality_bitmask=0`).
* "default": cadences with severe quality issues will be ignored
(`quality_bitmask=1130799`).
* "hard": more conservative choice of flags to ignore
(`quality_bitmask=1664431`). This is known to remove good data.
* "hardest": removes all data that has been flagged
(`quality_bitmask=2096639`). This mask is not recommended.
See the :class:`KeplerQualityFlags` class for details on the bitmasks.
Returns
-------
lc : `KeplerLightCurve`
A populated light curve object.
"""
lc = read_generic_lightcurve(
filename,
flux_column=flux_column,
quality_column="quality",
cadenceno_column="cadn",
time_format="bkjd",
)
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
quality_mask = KeplerQualityFlags.create_quality_mask(
quality_array=lc["quality"], bitmask=quality_bitmask
)
lc = lc[quality_mask]
lc.meta["AUTHOR"] = "EVEREST"
lc.meta["TARGETID"] = lc.meta.get("KEPLERID")
lc.meta["QUALITY_BITMASK"] = quality_bitmask
lc.meta["QUALITY_MASK"] = quality_mask
return KeplerLightCurve(data=lc, **kwargs)
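# --- Hedged usage sketch (not part of the original module) ---
# Typical use of this reader; the FITS filename below is a hypothetical
# local path following the EVEREST HLSP naming scheme, used purely for
# illustration.
if __name__ == "__main__":
    lc = read_everest_lightcurve(
        "hlsp_everest_k2_llc_201367065-c01_kepler_v2.0_lc.fits",
        quality_bitmask="hard",
    )
    print(lc.meta["AUTHOR"], len(lc))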
|
f9bc662e89582eaa370f468f62be5601f886fc57
|
6e0da41ccb571287b67637a7693b48e0d2323676
|
/google_assistant/rootfs/usr/bin/hassio_oauth.py
|
e2c3730d1c0fb5b0137c0e8d8a7780df831a2126
|
[
"Apache-2.0"
] |
permissive
|
home-assistant/addons
|
0152a7cea8b59c5ef02973a555110cd62bec9460
|
bb8e979ad12759ce02adf7a830fe09aa1d3455df
|
refs/heads/master
| 2023-09-04T10:13:58.489171
| 2023-09-01T08:21:14
| 2023-09-01T08:21:14
| 85,435,841
| 910
| 1,048
|
Apache-2.0
| 2023-09-14T15:05:35
| 2017-03-18T22:23:14
|
Shell
|
UTF-8
|
Python
| false
| false
| 4,546
|
py
|
hassio_oauth.py
|
"""Run small webservice for oath."""
import json
import sys
from pathlib import Path
import threading
import time
import cherrypy
from requests_oauthlib import OAuth2Session
from google.oauth2.credentials import Credentials
HEADERS = str("""
<link rel="icon" href="/static/favicon.ico?v=1">
<link href="/static/css/style.css" rel="stylesheet">
<link href="https://fonts.googleapis.com/css2?family=Roboto&display=swap" rel="stylesheet">
""")
class oauth2Site(object):
"""Website for handling oauth2."""
def __init__(self, user_data, cred_file):
"""Init webpage."""
self.cred_file = cred_file
self.user_data = user_data
self.oauth2 = OAuth2Session(
self.user_data['client_id'],
redirect_uri='urn:ietf:wg:oauth:2.0:oob',
scope="https://www.googleapis.com/auth/assistant-sdk-prototype"
)
self.auth_url, _ = self.oauth2.authorization_url(self.user_data['auth_uri'], access_type='offline', prompt='consent')
@cherrypy.expose
def index(self):
"""Landing page."""
return str("""<html>
<head>{headers}</head>
<body>
<form method="get" action="token">
<div class="card">
<div class="card-content">
<img src="/static/logo.png" alt="Google Assistant Logo" />
<h1>Google Assistant SDK</h1>
<p>Initial setup</p>
<ol>
<li><a href="{url}" target="_blank">Get a code from Google here</a></li>
<li><input type="text" value="" name="token" placeholder="Paste the code here" /></li>
</ol>
</div>
<div class="card-actions">
<button type="submit">CONNECT</button>
</div>
</div>
</form>
</body>
</html>""").format(url=self.auth_url, headers=HEADERS)
@cherrypy.expose
def token(self, token):
"""Read access token and process it."""
try:
self.oauth2.fetch_token(self.user_data['token_uri'], client_secret=self.user_data['client_secret'], code=token)
except Exception as e:
cherrypy.log("Error with the given token: {error}".format(error=str(e)))
cherrypy.log("Restarting authentication process.")
raise cherrypy.HTTPRedirect('/')
# create credentials
credentials = Credentials(
self.oauth2.token['access_token'],
refresh_token=self.oauth2.token.get('refresh_token'),
token_uri=self.user_data['token_uri'],
client_id=self.user_data['client_id'],
client_secret=self.user_data['client_secret'],
scopes=self.oauth2.scope
)
# write credentials json file
with self.cred_file.open('w') as json_file:
json_file.write(json.dumps({
'refresh_token': credentials.refresh_token,
'token_uri': credentials.token_uri,
'client_id': credentials.client_id,
'client_secret': credentials.client_secret,
'scopes': credentials.scopes,
}))
threading.Thread(target=self.exit_app).start()
return str("""<html>
<head>{headers}</head>
<body>
<div class="card">
<div class="card-content">
<img src="/static/logo.png" alt="Google Assistant Logo" />
<h1>Google Assistant SDK</h1>
<p>Setup completed.</p>
<p>You can now close this window.</p>
</div>
</div>
</body>
</html>""").format(url=self.auth_url, headers=HEADERS)
def exit_app(self):
time.sleep(2)
cherrypy.engine.exit()
def hide_access_logs():
"""Hide file access logging for cleaner logs"""
access_log = cherrypy.log.access_log
for handler in tuple(access_log.handlers):
access_log.removeHandler(handler)
if __name__ == '__main__':
oauth_json = Path(sys.argv[1])
cred_json = Path(sys.argv[2])
with oauth_json.open('r') as data:
user_data = json.load(data)['installed']
hide_access_logs()
cherrypy.config.update({'server.socket_port': 9324, 'server.socket_host': '0.0.0.0'})
cherrypy.quickstart(oauth2Site(user_data, cred_json), config={
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': '/usr/share/public'
}
})
|
e9ce57fec92fb50a215f208ffebed6a9edcba7e7
|
8165a3055f53d754955c7201e32c8fcdb124c1c1
|
/celerite/terms.py
|
e2fe3246dd73d13ae5846f03253089e30fc45464
|
[
"MIT"
] |
permissive
|
dfm/celerite
|
103bc71892d1718df76b66bcf75946e6f7630664
|
9c85cb2ce87969e1e4ea5dfc4afd2e04fb3f70bd
|
refs/heads/main
| 2023-09-01T18:05:11.870876
| 2023-08-14T12:36:49
| 2023-08-14T12:36:49
| 63,601,480
| 181
| 40
|
MIT
| 2023-09-11T01:52:44
| 2016-07-18T12:33:37
|
C++
|
UTF-8
|
Python
| false
| false
| 17,837
|
py
|
terms.py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
try:
import autograd # NOQA
except ImportError:
import numpy as np
HAS_AUTOGRAD = False
else:
import autograd.numpy as np
from autograd import jacobian, elementwise_grad
HAS_AUTOGRAD = True
from itertools import chain, product
from .modeling import Model, ModelSet
from .solver import get_kernel_value, get_psd_value, check_coefficients
__all__ = [
"Term", "TermProduct", "TermSum",
"JitterTerm", "RealTerm", "ComplexTerm", "SHOTerm", "Matern32Term",
]
class Term(Model):
"""
The abstract base "term" that is the superclass of all other terms
Subclasses should overload the :func:`terms.Term.get_real_coefficients`
and :func:`terms.Term.get_complex_coefficients` methods.
"""
_has_jitter = False
_has_coeffs = True
@property
def terms(self):
"""A list of all the terms included in a sum of terms"""
return [self]
def get_value(self, tau):
"""
Compute the value of the term for an array of lags
Args:
tau (array[...]): An array of lags where the term should be
evaluated.
Returns:
The value of the term for each ``tau``. This will have the same
shape as ``tau``.
"""
tau = np.asarray(tau)
(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag) = self.coefficients
k = get_kernel_value(
alpha_real, beta_real,
alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag,
tau.flatten(),
)
return np.asarray(k).reshape(tau.shape)
def get_psd(self, omega):
"""
Compute the PSD of the term for an array of angular frequencies
Args:
omega (array[...]): An array of frequencies where the PSD should
be evaluated.
Returns:
The value of the PSD for each ``omega``. This will have the same
shape as ``omega``.
"""
w = np.asarray(omega)
(alpha_real, beta_real, alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag) = self.coefficients
p = get_psd_value(
alpha_real, beta_real,
alpha_complex_real, alpha_complex_imag,
beta_complex_real, beta_complex_imag,
w.flatten(),
)
return p.reshape(w.shape)
def check_parameters(self):
"""
Check for negative power in the PSD using Sturm's theorem
Returns:
``True`` for valid parameters.
"""
return check_coefficients(*(self.coefficients))
def __add__(self, b):
return TermSum(self, b)
def __radd__(self, b):
return TermSum(b, self)
def __mul__(self, b):
return TermProduct(self, b)
def __rmul__(self, b):
return TermProduct(b, self)
def get_real_coefficients(self, params):
"""
Get the arrays ``alpha_real`` and ``beta_real``
This method should be overloaded by subclasses to return the arrays
``alpha_real`` and ``beta_real`` given the current parameter settings.
By default, this term is empty.
Returns:
(array[j_real], array[j_real]): ``alpha_real`` and ``beta_real``
as described above.
"""
return np.empty(0), np.empty(0)
def get_complex_coefficients(self, params):
"""
Get the arrays ``alpha_complex_*`` and ``beta_complex_*``
This method should be overloaded by subclasses to return the arrays
``alpha_complex_real``, ``alpha_complex_imag``, ``beta_complex_real``,
and ``beta_complex_imag`` given the current parameter settings. By
default, this term is empty.
Returns:
(array[j_complex], array[j_complex], array[j_complex],
array[j_complex]): ``alpha_complex_real``, ``alpha_complex_imag``,
``beta_complex_real``, and ``beta_complex_imag`` as described
above. ``alpha_complex_imag`` can be omitted and it will be
assumed to be zero.
"""
return np.empty(0), np.empty(0), np.empty(0), np.empty(0)
def get_all_coefficients(self, params=None):
if params is None:
params = self.get_parameter_vector(include_frozen=True)
r = self.get_real_coefficients(params)
c = self.get_complex_coefficients(params)
if len(c) == 3:
c = (c[0], np.zeros_like(c[0]), c[1], c[2])
return list(map(np.atleast_1d, chain(r, c)))
@property
def coefficients(self):
"""
All of the coefficient arrays
This property is the concatenation of the results from
:func:`terms.Term.get_real_coefficients` and
:func:`terms.Term.get_complex_coefficients` but it will always return
a tuple of length 6, even if ``alpha_complex_imag`` was omitted from
``get_complex_coefficients``.
Returns:
(array[j_real], array[j_real], array[j_complex], array[j_complex],
array[j_complex], array[j_complex]): ``alpha_real``, ``beta_real``,
``alpha_complex_real``, ``alpha_complex_imag``,
``beta_complex_real``, and ``beta_complex_imag`` as described
above.
Raises:
ValueError: For invalid dimensions for the coefficients.
"""
vector = self.get_parameter_vector(include_frozen=True)
pars = self.get_all_coefficients(vector)
if len(pars) != 6:
raise ValueError("there must be 6 coefficient blocks")
if any(len(p.shape) != 1 for p in pars):
raise ValueError("coefficient blocks must be 1D")
if len(pars[0]) != len(pars[1]):
raise ValueError("coefficient blocks must have the same shape")
if any(len(pars[2]) != len(p) for p in pars[3:]):
raise ValueError("coefficient blocks must have the same shape")
return pars
def get_jitter(self, params):
return 0.0
@property
def jitter(self):
return self.get_jitter(self.get_parameter_vector(include_frozen=True))
def get_jitter_jacobian(self, include_frozen=False):
if not HAS_AUTOGRAD:
raise ImportError("'autograd' must be installed to compute "
"gradients")
jac = elementwise_grad(self.get_jitter)
jac = jac(self.get_parameter_vector(include_frozen=True))
if include_frozen:
return jac
return jac[self.unfrozen_mask]
def get_coeffs_jacobian(self, include_frozen=False):
if not HAS_AUTOGRAD:
raise ImportError("'autograd' must be installed to compute "
"gradients")
jac = jacobian(lambda p: np.concatenate(self.get_all_coefficients(p)))
jac = jac(self.get_parameter_vector(include_frozen=True)).T
if include_frozen:
return jac
return jac[self.unfrozen_mask]
class TermProduct(Term, ModelSet):
def __init__(self, k1, k2):
if k1._has_jitter or k2._has_jitter:
raise ValueError("Products are not implemented for terms with "
"jitter")
super(TermProduct, self).__init__([("k1", k1), ("k2", k2)])
def __repr__(self):
return " * ".join(map("{0}".format, (self.models["k1"],
self.models["k2"])))
@property
def terms(self):
return [self]
def get_all_coefficients(self, params=None):
if params is None:
params = self.get_parameter_vector(include_frozen=True)
n = self.models["k1"].full_size
c1 = self.models["k1"].get_all_coefficients(params[:n])
c2 = self.models["k2"].get_all_coefficients(params[n:])
# First compute real terms
ar = []
cr = []
gen = product(zip(c1[0], c1[1]), zip(c2[0], c2[1]))
for i, ((aj, cj), (ak, ck)) in enumerate(gen):
ar.append(aj * ak)
cr.append(cj + ck)
# Then the complex terms
ac = []
bc = []
cc = []
dc = []
# real * complex
gen = product(zip(c1[0], c1[1]), zip(*(c2[2:])))
gen = chain(gen, product(zip(c2[0], c2[1]), zip(*(c1[2:]))))
for i, ((aj, cj), (ak, bk, ck, dk)) in enumerate(gen):
ac.append(aj * ak)
bc.append(aj * bk)
cc.append(cj + ck)
dc.append(dk)
# complex * complex
gen = product(zip(*(c1[2:])), zip(*(c2[2:])))
for i, ((aj, bj, cj, dj), (ak, bk, ck, dk)) in enumerate(gen):
ac.append(0.5 * (aj * ak + bj * bk))
bc.append(0.5 * (bj * ak - aj * bk))
cc.append(cj + ck)
dc.append(dj - dk)
ac.append(0.5 * (aj * ak - bj * bk))
bc.append(0.5 * (bj * ak + aj * bk))
cc.append(cj + ck)
dc.append(dj + dk)
return list(map(np.array, (ar, cr, ac, bc, cc, dc)))
class TermSum(Term, ModelSet):
def __init__(self, *terms):
models = []
for term in terms:
models += term.terms
super(TermSum, self).__init__([("terms[{0}]".format(i), t)
for i, t in enumerate(models)])
def __repr__(self):
return "(" + " + ".join(map("{0}".format, self.terms)) + ")"
@property
def terms(self):
return list(self.models.values())
@property
def _has_jitter(self):
return any(t._has_jitter for t in self.models.values())
@property
def _has_coeffs(self):
return any(t._has_coeffs for t in self.models.values())
def get_all_coefficients(self, params=None):
if params is None:
params = self.get_parameter_vector(include_frozen=True)
coeffs = []
n = 0
for t in self.models.values():
d = t.full_size
coeffs.append(t.get_all_coefficients(params[n:n+d]))
n += d
return [np.concatenate(a) for a in zip(*coeffs)]
def get_jitter(self, params=None):
if params is None:
params = self.get_parameter_vector(include_frozen=True)
jitter = 0.0
n = 0
for t in self.models.values():
d = t.full_size
jitter += t.get_jitter(params[n:n+d])
n += d
return jitter
class JitterTerm(Term):
r"""
A diagonal jitter or "white noise" term
This term has the form
.. math::
k(\tau_{n,m}) = \sigma^2\,\delta_{n,m}
with the parameter ``log_sigma``.
    .. note:: Jitter is never used in :func:`celerite.GP.predict` or
        :func:`celerite.terms.Term.get_psd`. If you want to compute the jitter
power, it is ``term.jitter * N``, where ``N`` is the number of data
points.
Args:
log_sigma (float): The log of the amplitude of the white noise.
"""
_has_jitter = True
_has_coeffs = False
parameter_names = ("log_sigma", )
def __repr__(self):
return "JitterTerm({0.log_sigma})".format(self)
def get_jitter(self, params):
return np.exp(2.0 * params[0])
class RealTerm(Term):
r"""
The simplest celerite term
This term has the form
.. math::
k(\tau) = a_j\,e^{-c_j\,\tau}
with the parameters ``log_a`` and ``log_c``.
Strictly speaking, for a sum of terms, the parameter ``a`` could be
allowed to go negative but since it is somewhat subtle to ensure positive
definiteness, we require that the amplitude be positive through this
interface. Advanced users can build a custom term that has negative
coefficients but care should be taken to ensure positivity.
Args:
log_a (float): The log of the amplitude of the term.
log_c (float): The log of the exponent of the term.
"""
parameter_names = ("log_a", "log_c")
def __repr__(self):
return "RealTerm({0.log_a}, {0.log_c})".format(self)
def get_real_coefficients(self, params):
log_a, log_c = params
return np.exp(log_a), np.exp(log_c)
class ComplexTerm(Term):
r"""
A general celerite term
This term has the form
.. math::
k(\tau) = \frac{1}{2}\,\left[(a_j + b_j)\,e^{-(c_j+d_j)\,\tau}
+ (a_j - b_j)\,e^{-(c_j-d_j)\,\tau}\right]
with the parameters ``log_a``, ``log_b``, ``log_c``, and ``log_d``.
The parameter ``log_b`` can be omitted and it will be assumed to be zero.
This term will only correspond to a positive definite kernel (on its own)
if :math:`a_j\,c_j \ge b_j\,d_j` and the ``log_prior`` method checks for
this constraint.
Args:
        log_a (float): The log of the real part of the amplitude.
        log_b (float): The log of the imaginary part of the amplitude.
        log_c (float): The log of the real part of the exponent.
        log_d (float): The log of the imaginary part of the exponent.
"""
def __init__(self, *args, **kwargs):
if len(args) == 4 or "log_b" in kwargs:
self.fit_b = True
self.parameter_names = ("log_a", "log_b", "log_c", "log_d")
else:
self.fit_b = False
self.parameter_names = ("log_a", "log_c", "log_d")
super(ComplexTerm, self).__init__(*args, **kwargs)
def __repr__(self):
if not self.fit_b:
return "ComplexTerm({0.log_a}, {0.log_c}, {0.log_d})".format(self)
return ("ComplexTerm({0.log_a}, {0.log_b}, {0.log_c}, {0.log_d})"
.format(self))
def get_complex_coefficients(self, params):
if not self.fit_b:
log_a, log_c, log_d = params
return (
np.exp(log_a), 0.0, np.exp(log_c), np.exp(log_d)
)
log_a, log_b, log_c, log_d = params
return (
np.exp(log_a), np.exp(log_b), np.exp(log_c), np.exp(log_d)
)
def log_prior(self):
# Constraint required for term to be positive definite. Can be relaxed
# with multiple terms but must be treated carefully.
if self.fit_b and self.log_a + self.log_c < self.log_b + self.log_d:
return -np.inf
return super(ComplexTerm, self).log_prior()
# def get_dcoeffs_dparams(self):
# if self.fit_b:
# return np.diag(self.get_complex_coefficients())
# c = self.get_complex_coefficients()
# result = np.zeros((3, 4))
# result[0, 0] = c[0]
# result[1, 2] = c[2]
# result[2, 3] = c[3]
# return result
class SHOTerm(Term):
r"""
A term representing a stochastically-driven, damped harmonic oscillator
The PSD of this term is
.. math::
S(\omega) = \sqrt{\frac{2}{\pi}} \frac{S_0\,\omega_0^4}
{(\omega^2-{\omega_0}^2)^2 + {\omega_0}^2\,\omega^2/Q^2}
with the parameters ``log_S0``, ``log_Q``, and ``log_omega0``.
Args:
log_S0 (float): The log of the parameter :math:`S_0`.
log_Q (float): The log of the parameter :math:`Q`.
log_omega0 (float): The log of the parameter :math:`\omega_0`.
"""
parameter_names = ("log_S0", "log_Q", "log_omega0")
def __repr__(self):
return "SHOTerm({0.log_S0}, {0.log_Q}, {0.log_omega0})".format(self)
def get_real_coefficients(self, params):
log_S0, log_Q, log_omega0 = params
Q = np.exp(log_Q)
if Q >= 0.5:
return np.empty(0), np.empty(0)
S0 = np.exp(log_S0)
w0 = np.exp(log_omega0)
f = np.sqrt(1.0 - 4.0 * Q**2)
return (
0.5*S0*w0*Q*np.array([1.0+1.0/f, 1.0-1.0/f]),
0.5*w0/Q*np.array([1.0-f, 1.0+f])
)
def get_complex_coefficients(self, params):
log_S0, log_Q, log_omega0 = params
Q = np.exp(log_Q)
if Q < 0.5:
return np.empty(0), np.empty(0), np.empty(0), np.empty(0)
S0 = np.exp(log_S0)
w0 = np.exp(log_omega0)
        f = np.sqrt(4.0 * Q**2 - 1.0)
return (
S0 * w0 * Q,
S0 * w0 * Q / f,
0.5 * w0 / Q,
0.5 * w0 / Q * f,
)
class Matern32Term(Term):
r"""
A term that approximates a Matern-3/2 function
This term is defined as
.. math::
        k(\tau) = \frac{\sigma^2}{2}\,\left[
            \left(1+1/\epsilon\right)\,e^{-(1-\epsilon)\sqrt{3}\,\tau/\rho}
            + \left(1-1/\epsilon\right)\,e^{-(1+\epsilon)\sqrt{3}\,\tau/\rho}
        \right]
with the parameters ``log_sigma`` and ``log_rho``. The parameter ``eps``
controls the quality of the approximation since, in the limit
:math:`\epsilon \to 0` this becomes the Matern-3/2 function
.. math::
\lim_{\epsilon \to 0} k(\tau) = \sigma^2\,\left(1+
\frac{\sqrt{3}\,\tau}{\rho}\right)\,
\exp\left(-\frac{\sqrt{3}\,\tau}{\rho}\right)
Args:
log_sigma (float): The log of the parameter :math:`\sigma`.
log_rho (float): The log of the parameter :math:`\rho`.
eps (Optional[float]): The value of the parameter :math:`\epsilon`.
(default: `0.01`)
"""
parameter_names = ("log_sigma", "log_rho")
def __init__(self, *args, **kwargs):
eps = kwargs.pop("eps", 0.01)
super(Matern32Term, self).__init__(*args, **kwargs)
self.eps = eps
def __repr__(self):
return "Matern32Term({0.log_sigma}, {0.log_rho}, eps={0.eps})" \
.format(self)
def get_complex_coefficients(self, params):
log_sigma, log_rho = params
w0 = np.sqrt(3.0) * np.exp(-log_rho)
S0 = np.exp(2.0 * log_sigma) / w0
return (w0*S0, w0*w0*S0/self.eps, w0, self.eps)
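# --- Hedged usage sketch (not part of the original module) ---
# Terms compose through ``+`` and ``*`` (TermSum / TermProduct above).
# This assumes celerite's compiled solver extension is available, since
# get_value()/get_psd() call into it; the parameter values are arbitrary.
if __name__ == "__main__":
    k1 = RealTerm(log_a=0.1, log_c=0.5)
    k2 = SHOTerm(log_S0=0.0, log_Q=1.0, log_omega0=0.0)
    tau = np.linspace(0.0, 5.0, 11)
    # TermProduct's coefficient expansion must match the pointwise product.
    assert np.allclose((k1 * k2).get_value(tau),
                       k1.get_value(tau) * k2.get_value(tau))
    # Sums of terms evaluate additively, for the kernel and the PSD alike.
    total = k1 + k2 + JitterTerm(log_sigma=-1.0)
    print(total.get_psd(np.array([0.1, 1.0, 10.0])))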
|
f36e862b90cb29933fc100ee82749b20e8af0609
|
833ef1cc5cbd5cf76da144d10d393e30976d9185
|
/froide/foisite/migrations/0001_initial.py
|
502ae21a9d6ee0a389c6412ed06a0ee1806b78fa
|
[
"MIT"
] |
permissive
|
okfde/froide
|
d022407ec30bf018e6ca587ae9df0b73a8625edf
|
16e3c69b333fc82cb1e52378fc003ddf071152a7
|
refs/heads/main
| 2023-08-31T08:02:23.343743
| 2023-08-29T07:01:03
| 2023-08-29T07:01:03
| 1,700,944
| 230
| 48
|
MIT
| 2023-09-13T09:10:40
| 2011-05-04T12:20:51
|
Python
|
UTF-8
|
Python
| false
| false
| 1,363
|
py
|
0001_initial.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.CreateModel(
name="FoiSite",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"country_code",
models.CharField(max_length=5, verbose_name="Country Code"),
),
(
"country_name",
models.CharField(max_length=255, verbose_name="Country Name"),
),
("name", models.CharField(max_length=255, verbose_name="Name")),
("url", models.CharField(max_length=255, verbose_name="URL")),
("text", models.TextField(verbose_name="Text", blank=True)),
("enabled", models.BooleanField(default=True, verbose_name="Enabled")),
],
options={
"verbose_name": "FOI Site",
"verbose_name_plural": "FOI Sites",
},
),
]
|
895e9c5413de99b9d8d5f980b316b3926c52a665
|
7378aaee27ef676db95dce7702c48f8643c63313
|
/grow/commands/subcommands/build.py
|
6496ccfe3ee744906e2e0df3a8333c6f8b0d17d1
|
[
"MIT"
] |
permissive
|
grow/grow
|
323fa25c7690643bf170cc4558fffdfbd406ac76
|
17471c436621ebfd978b51225fa4de05367a53e1
|
refs/heads/main
| 2023-06-15T09:51:08.288251
| 2022-07-21T16:19:33
| 2022-07-21T16:19:33
| 12,899,663
| 352
| 56
|
MIT
| 2023-02-08T02:35:36
| 2013-09-17T15:51:40
|
Python
|
UTF-8
|
Python
| false
| false
| 4,553
|
py
|
build.py
|
"""Command for building pods into static deployments."""
import os
import click
from grow.commands import shared
from grow.common import bulk_errors
from grow.common import rc_config
from grow.common import utils
from grow.deployments import stats
from grow.deployments.destinations import local as local_destination
from grow.extensions import hooks
from grow.performance import docs_loader
from grow.pods import pods
from grow.rendering import renderer
from grow import storage
CFG = rc_config.RC_CONFIG.prefixed('grow.build')
# pylint: disable=too-many-locals
@click.command()
@shared.pod_path_argument
@click.option('--clear-cache',
default=CFG.get('clear-cache', False), is_flag=True,
help='Clear the pod cache before building.')
@click.option('--file', '--pod-path', 'pod_paths',
help='Build only pages affected by content files.', multiple=True)
@click.option('--locate-untranslated',
default=CFG.get('locate-untranslated', False), is_flag=True,
help='Shows untranslated message information.')
@shared.locale_option(help_text='Filter build routes to specific locale.')
@shared.deployment_option(CFG)
@shared.out_dir_option(CFG)
@shared.preprocess_option(CFG)
@shared.threaded_option(CFG)
@shared.shards_option
@shared.shard_option
@shared.work_dir_option
@shared.routes_file_option()
def build(pod_path, out_dir, preprocess, clear_cache, pod_paths,
locate_untranslated, deployment, threaded, locale, shards, shard,
work_dir, routes_file):
"""Generates static files and dumps them to a local destination."""
root = os.path.abspath(os.path.join(os.getcwd(), pod_path))
out_dir = out_dir or os.path.join(root, 'build')
pod = pods.Pod(root, storage=storage.FileStorage)
if not pod_paths or clear_cache:
# Clear the cache when building all, only force if the flag is used.
pod.podcache.reset(force=clear_cache)
deployment_obj = None
if deployment:
deployment_obj = pod.get_deployment(deployment)
pod.set_env(deployment_obj.config.env)
if preprocess:
with pod.profile.timer('grow_preprocess'):
pod.preprocess()
if locate_untranslated:
pod.enable(pod.FEATURE_TRANSLATION_STATS)
try:
with pod.profile.timer('grow_build'):
config = local_destination.Config(out_dir=out_dir)
            # When using a specific deployment, its env needs to be copied over too.
if deployment_obj:
config.env = deployment_obj.config.env
destination = local_destination.LocalDestination(config)
destination.pod = pod
repo = utils.get_git_repo(pod.root)
pod.router.use_simple()
is_partial = bool(pod_paths) or bool(locale)
if pod_paths:
pod_paths = [pod.clean_pod_path(path) for path in pod_paths]
pod.router.add_pod_paths(pod_paths)
elif routes_file:
pod.router.from_data(pod.read_json(routes_file))
else:
pod.router.add_all()
if locale:
pod.router.filter('whitelist', locales=list(locale))
# Shard the routes when using sharding.
if shards and shard:
is_partial = True
pod.router.shard(shards, shard)
if not work_dir:
# Preload the documents used by the paths after filtering.
docs_loader.DocsLoader.load_from_routes(pod, pod.router.routes)
paths = pod.router.routes.paths
content_generator = renderer.Renderer.rendered_docs(
pod, pod.router.routes, source_dir=work_dir,
use_threading=threaded)
content_generator = hooks.generator_wrapper(
pod, 'pre_deploy', content_generator, 'build')
stats_obj = stats.Stats(pod, paths=paths)
destination.deploy(
content_generator, stats=stats_obj, repo=repo, confirm=False,
test=False, is_partial=is_partial)
pod.podcache.write()
except bulk_errors.BulkErrors as err:
# Write the podcache files even when there are rendering errors.
pod.podcache.write()
bulk_errors.display_bulk_errors(err)
raise click.Abort()
except pods.Error as err:
raise click.ClickException(str(err))
if locate_untranslated:
pod.translation_stats.pretty_print()
destination.export_untranslated_catalogs()
return pod
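# --- Hedged usage sketch (not part of the original module) ---
# The command is normally run from the CLI, e.g. ``grow build .``.
# A programmatic invocation via click's test runner is sketched below;
# the pod path is hypothetical and the exact option spellings (defined
# in grow.commands.shared) are assumptions.
if __name__ == '__main__':
    from click.testing import CliRunner
    runner = CliRunner()
    result = runner.invoke(build, ['my-pod', '--out-dir', 'my-pod/build'])
    print(result.exit_code, result.output)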
|
22fb02adff7b5987369380da9ec6a64ad7a7c9bf
|
45e376ae66b78b17788b1d3575b334b2cb1d0b1c
|
/checkov/terraform/checks/resource/kubernetes/Secrets.py
|
28b0ac5bec8093c00bf312c342b47fa334641f1e
|
[
"Apache-2.0"
] |
permissive
|
bridgecrewio/checkov
|
aeb8febed2ed90e61d5755f8f9d80b125362644d
|
e64cbd27ffb6f09c2c9f081b45b7a821a3aa1a4d
|
refs/heads/main
| 2023-08-31T06:57:21.990147
| 2023-08-30T23:01:47
| 2023-08-30T23:01:47
| 224,386,599
| 5,929
| 1,056
|
Apache-2.0
| 2023-09-14T20:10:23
| 2019-11-27T08:55:14
|
Python
|
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
Secrets.py
|
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
class Secrets(BaseResourceCheck):
def __init__(self):
# CIS-1.5 5.4.1
name = "Prefer using secrets as files over secrets as environment variables"
id = "CKV_K8S_35"
supported_resources = ['kubernetes_pod', "kubernetes_pod_v1",
'kubernetes_deployment', 'kubernetes_deployment_v1']
categories = [CheckCategories.GENERAL_SECURITY]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf) -> CheckResult:
if "spec" not in conf:
self.evaluated_keys = [""]
return CheckResult.FAILED
spec = conf['spec'][0]
evaluated_keys_path = "spec"
if not spec:
return CheckResult.UNKNOWN
template = spec.get("template")
if template and isinstance(template, list):
template = template[0]
template_spec = template.get("spec")
if template_spec and isinstance(template_spec, list):
spec = template_spec[0]
evaluated_keys_path = f'{evaluated_keys_path}/[0]/template/[0]/spec'
containers = spec.get("container")
if containers:
for idx, container in enumerate(containers):
if not isinstance(container, dict):
return CheckResult.UNKNOWN
if container.get("env") and isinstance(container.get("env"), list):
env = container.get("env")[0]
for idy, e in enumerate(env):
if "value_from" in e:
if isinstance(env.get("value_from"), list):
value_from = env.get("value_from")[0]
if value_from.get("secret_key_ref"):
self.evaluated_keys = \
[f"{evaluated_keys_path}/[0]/container/[{idx}]/env/[{idy}]/value_from/secret_key_ref"]
return CheckResult.FAILED
if container.get("env_from") and isinstance(container.get("env_from"), list):
env_from = container.get("env_from")[0]
for idy, ef in enumerate(env_from):
if "secret_ref" in ef:
self.evaluated_keys = \
[f"{evaluated_keys_path}/[0]/container/[{idx}]/env_from/[{idy}]/secret_ref"]
return CheckResult.FAILED
return CheckResult.PASSED
check = Secrets()
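# --- Hedged usage sketch (not part of the original module) ---
# checkov passes scan_resource_conf() the parsed HCL body of a single
# resource, with every attribute wrapped in a single-element list (hence
# the pervasive [0] indexing above). The hand-built conf below mimics
# that shape for a minimal failing case: a pod container that pulls a
# secret into an environment variable.
if __name__ == "__main__":
    conf = {
        "spec": [{
            "container": [{
                "env": [{
                    "name": ["DB_PASSWORD"],
                    "value_from": [{"secret_key_ref": [{"name": ["db"], "key": ["pw"]}]}],
                }],
            }],
        }],
    }
    print(check.scan_resource_conf(conf))  # expected: CheckResult.FAILED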
|
edbc37a3295947fa42cc1a61406f921278d172ba
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/messenger/gui/scaleform/meta/channelcomponentmeta.py
|
3db1c398fd53e222f76fee00de5efb165caee350
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 1,344
|
py
|
channelcomponentmeta.py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/messenger/gui/Scaleform/meta/ChannelComponentMeta.py
from gui.Scaleform.framework.entities.BaseDAAPIComponent import BaseDAAPIComponent
class ChannelComponentMeta(BaseDAAPIComponent):
def isJoined(self):
self._printOverrideError('isJoined')
def sendMessage(self, message):
self._printOverrideError('sendMessage')
def getHistory(self):
self._printOverrideError('getHistory')
def getMessageMaxLength(self):
self._printOverrideError('getMessageMaxLength')
def onLinkClick(self, linkCode):
self._printOverrideError('onLinkClick')
def as_notifyInfoChangedS(self):
return self.flashObject.as_notifyInfoChanged() if self._isDAAPIInited() else None
def as_setJoinedS(self, flag):
return self.flashObject.as_setJoined(flag) if self._isDAAPIInited() else None
def as_addMessageS(self, message):
return self.flashObject.as_addMessage(message) if self._isDAAPIInited() else None
def as_getLastUnsentMessageS(self):
return self.flashObject.as_getLastUnsentMessage() if self._isDAAPIInited() else None
def as_setLastUnsentMessageS(self, message):
return self.flashObject.as_setLastUnsentMessage(message) if self._isDAAPIInited() else None
|
83bbfad9d994ccd65d25177a609e132df2ee29a0
|
1742b6719b988e5519373002305e31d28b8bd691
|
/sdk/python/pulumi_aws/ec2/get_vpc_endpoint.py
|
78effb4d887cc5507f6a7c126aaf956b12ad9956
|
[
"BSD-3-Clause",
"Apache-2.0",
"MPL-2.0"
] |
permissive
|
pulumi/pulumi-aws
|
4f7fdb4a816c5ea357cff2c2e3b613c006e49f1a
|
42b0a0abdf6c14da248da22f8c4530af06e67b98
|
refs/heads/master
| 2023-08-03T23:08:34.520280
| 2023-08-01T18:09:58
| 2023-08-01T18:09:58
| 97,484,940
| 384
| 171
|
Apache-2.0
| 2023-09-14T14:48:40
| 2017-07-17T14:20:33
|
Java
|
UTF-8
|
Python
| false
| false
| 16,367
|
py
|
get_vpc_endpoint.py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = [
'GetVpcEndpointResult',
'AwaitableGetVpcEndpointResult',
'get_vpc_endpoint',
'get_vpc_endpoint_output',
]
@pulumi.output_type
class GetVpcEndpointResult:
"""
A collection of values returned by getVpcEndpoint.
"""
def __init__(__self__, arn=None, cidr_blocks=None, dns_entries=None, dns_options=None, filters=None, id=None, ip_address_type=None, network_interface_ids=None, owner_id=None, policy=None, prefix_list_id=None, private_dns_enabled=None, requester_managed=None, route_table_ids=None, security_group_ids=None, service_name=None, state=None, subnet_ids=None, tags=None, vpc_endpoint_type=None, vpc_id=None):
if arn and not isinstance(arn, str):
raise TypeError("Expected argument 'arn' to be a str")
pulumi.set(__self__, "arn", arn)
if cidr_blocks and not isinstance(cidr_blocks, list):
raise TypeError("Expected argument 'cidr_blocks' to be a list")
pulumi.set(__self__, "cidr_blocks", cidr_blocks)
if dns_entries and not isinstance(dns_entries, list):
raise TypeError("Expected argument 'dns_entries' to be a list")
pulumi.set(__self__, "dns_entries", dns_entries)
if dns_options and not isinstance(dns_options, list):
raise TypeError("Expected argument 'dns_options' to be a list")
pulumi.set(__self__, "dns_options", dns_options)
if filters and not isinstance(filters, list):
raise TypeError("Expected argument 'filters' to be a list")
pulumi.set(__self__, "filters", filters)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if ip_address_type and not isinstance(ip_address_type, str):
raise TypeError("Expected argument 'ip_address_type' to be a str")
pulumi.set(__self__, "ip_address_type", ip_address_type)
if network_interface_ids and not isinstance(network_interface_ids, list):
raise TypeError("Expected argument 'network_interface_ids' to be a list")
pulumi.set(__self__, "network_interface_ids", network_interface_ids)
if owner_id and not isinstance(owner_id, str):
raise TypeError("Expected argument 'owner_id' to be a str")
pulumi.set(__self__, "owner_id", owner_id)
if policy and not isinstance(policy, str):
raise TypeError("Expected argument 'policy' to be a str")
pulumi.set(__self__, "policy", policy)
if prefix_list_id and not isinstance(prefix_list_id, str):
raise TypeError("Expected argument 'prefix_list_id' to be a str")
pulumi.set(__self__, "prefix_list_id", prefix_list_id)
if private_dns_enabled and not isinstance(private_dns_enabled, bool):
raise TypeError("Expected argument 'private_dns_enabled' to be a bool")
pulumi.set(__self__, "private_dns_enabled", private_dns_enabled)
if requester_managed and not isinstance(requester_managed, bool):
raise TypeError("Expected argument 'requester_managed' to be a bool")
pulumi.set(__self__, "requester_managed", requester_managed)
if route_table_ids and not isinstance(route_table_ids, list):
raise TypeError("Expected argument 'route_table_ids' to be a list")
pulumi.set(__self__, "route_table_ids", route_table_ids)
if security_group_ids and not isinstance(security_group_ids, list):
raise TypeError("Expected argument 'security_group_ids' to be a list")
pulumi.set(__self__, "security_group_ids", security_group_ids)
if service_name and not isinstance(service_name, str):
raise TypeError("Expected argument 'service_name' to be a str")
pulumi.set(__self__, "service_name", service_name)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if subnet_ids and not isinstance(subnet_ids, list):
raise TypeError("Expected argument 'subnet_ids' to be a list")
pulumi.set(__self__, "subnet_ids", subnet_ids)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if vpc_endpoint_type and not isinstance(vpc_endpoint_type, str):
raise TypeError("Expected argument 'vpc_endpoint_type' to be a str")
pulumi.set(__self__, "vpc_endpoint_type", vpc_endpoint_type)
if vpc_id and not isinstance(vpc_id, str):
raise TypeError("Expected argument 'vpc_id' to be a str")
pulumi.set(__self__, "vpc_id", vpc_id)
@property
@pulumi.getter
def arn(self) -> str:
"""
ARN of the VPC endpoint.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="cidrBlocks")
def cidr_blocks(self) -> Sequence[str]:
"""
List of CIDR blocks for the exposed AWS service. Applicable for endpoints of type `Gateway`.
"""
return pulumi.get(self, "cidr_blocks")
@property
@pulumi.getter(name="dnsEntries")
def dns_entries(self) -> Sequence['outputs.GetVpcEndpointDnsEntryResult']:
"""
DNS entries for the VPC Endpoint. Applicable for endpoints of type `Interface`. DNS blocks are documented below.
"""
return pulumi.get(self, "dns_entries")
@property
@pulumi.getter(name="dnsOptions")
def dns_options(self) -> Sequence['outputs.GetVpcEndpointDnsOptionResult']:
return pulumi.get(self, "dns_options")
@property
@pulumi.getter
def filters(self) -> Optional[Sequence['outputs.GetVpcEndpointFilterResult']]:
return pulumi.get(self, "filters")
@property
@pulumi.getter
def id(self) -> str:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipAddressType")
def ip_address_type(self) -> str:
return pulumi.get(self, "ip_address_type")
@property
@pulumi.getter(name="networkInterfaceIds")
def network_interface_ids(self) -> Sequence[str]:
"""
One or more network interfaces for the VPC Endpoint. Applicable for endpoints of type `Interface`.
"""
return pulumi.get(self, "network_interface_ids")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> str:
"""
ID of the AWS account that owns the VPC endpoint.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter
def policy(self) -> str:
"""
Policy document associated with the VPC Endpoint. Applicable for endpoints of type `Gateway`.
"""
return pulumi.get(self, "policy")
@property
@pulumi.getter(name="prefixListId")
def prefix_list_id(self) -> str:
"""
Prefix list ID of the exposed AWS service. Applicable for endpoints of type `Gateway`.
"""
return pulumi.get(self, "prefix_list_id")
@property
@pulumi.getter(name="privateDnsEnabled")
def private_dns_enabled(self) -> bool:
"""
Whether or not the VPC is associated with a private hosted zone - `true` or `false`. Applicable for endpoints of type `Interface`.
"""
return pulumi.get(self, "private_dns_enabled")
@property
@pulumi.getter(name="requesterManaged")
def requester_managed(self) -> bool:
"""
Whether or not the VPC Endpoint is being managed by its service - `true` or `false`.
"""
return pulumi.get(self, "requester_managed")
@property
@pulumi.getter(name="routeTableIds")
def route_table_ids(self) -> Sequence[str]:
"""
One or more route tables associated with the VPC Endpoint. Applicable for endpoints of type `Gateway`.
"""
return pulumi.get(self, "route_table_ids")
@property
@pulumi.getter(name="securityGroupIds")
def security_group_ids(self) -> Sequence[str]:
"""
One or more security groups associated with the network interfaces. Applicable for endpoints of type `Interface`.
"""
return pulumi.get(self, "security_group_ids")
@property
@pulumi.getter(name="serviceName")
def service_name(self) -> str:
return pulumi.get(self, "service_name")
@property
@pulumi.getter
def state(self) -> str:
return pulumi.get(self, "state")
@property
@pulumi.getter(name="subnetIds")
def subnet_ids(self) -> Sequence[str]:
"""
One or more subnets in which the VPC Endpoint is located. Applicable for endpoints of type `Interface`.
"""
return pulumi.get(self, "subnet_ids")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="vpcEndpointType")
def vpc_endpoint_type(self) -> str:
"""
VPC Endpoint type, `Gateway` or `Interface`.
"""
return pulumi.get(self, "vpc_endpoint_type")
@property
@pulumi.getter(name="vpcId")
def vpc_id(self) -> str:
return pulumi.get(self, "vpc_id")
class AwaitableGetVpcEndpointResult(GetVpcEndpointResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetVpcEndpointResult(
arn=self.arn,
cidr_blocks=self.cidr_blocks,
dns_entries=self.dns_entries,
dns_options=self.dns_options,
filters=self.filters,
id=self.id,
ip_address_type=self.ip_address_type,
network_interface_ids=self.network_interface_ids,
owner_id=self.owner_id,
policy=self.policy,
prefix_list_id=self.prefix_list_id,
private_dns_enabled=self.private_dns_enabled,
requester_managed=self.requester_managed,
route_table_ids=self.route_table_ids,
security_group_ids=self.security_group_ids,
service_name=self.service_name,
state=self.state,
subnet_ids=self.subnet_ids,
tags=self.tags,
vpc_endpoint_type=self.vpc_endpoint_type,
vpc_id=self.vpc_id)
def get_vpc_endpoint(filters: Optional[Sequence[pulumi.InputType['GetVpcEndpointFilterArgs']]] = None,
id: Optional[str] = None,
service_name: Optional[str] = None,
state: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
vpc_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVpcEndpointResult:
"""
The VPC Endpoint data source provides details about
a specific VPC endpoint.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
s3 = aws.ec2.get_vpc_endpoint(vpc_id=aws_vpc["foo"]["id"],
service_name="com.amazonaws.us-west-2.s3")
private_s3 = aws.ec2.VpcEndpointRouteTableAssociation("privateS3",
vpc_endpoint_id=s3.id,
route_table_id=aws_route_table["private"]["id"])
```
:param Sequence[pulumi.InputType['GetVpcEndpointFilterArgs']] filters: Custom filter block as described below.
:param str id: ID of the specific VPC Endpoint to retrieve.
:param str service_name: Service name of the specific VPC Endpoint to retrieve. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`).
:param str state: State of the specific VPC Endpoint to retrieve.
:param Mapping[str, str] tags: Map of tags, each pair of which must exactly match
a pair on the specific VPC Endpoint to retrieve.
:param str vpc_id: ID of the VPC in which the specific VPC Endpoint is used.
More complex filters can be expressed using one or more `filter` sub-blocks,
which take the following arguments:
"""
__args__ = dict()
__args__['filters'] = filters
__args__['id'] = id
__args__['serviceName'] = service_name
__args__['state'] = state
__args__['tags'] = tags
__args__['vpcId'] = vpc_id
opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)
__ret__ = pulumi.runtime.invoke('aws:ec2/getVpcEndpoint:getVpcEndpoint', __args__, opts=opts, typ=GetVpcEndpointResult).value
return AwaitableGetVpcEndpointResult(
arn=pulumi.get(__ret__, 'arn'),
cidr_blocks=pulumi.get(__ret__, 'cidr_blocks'),
dns_entries=pulumi.get(__ret__, 'dns_entries'),
dns_options=pulumi.get(__ret__, 'dns_options'),
filters=pulumi.get(__ret__, 'filters'),
id=pulumi.get(__ret__, 'id'),
ip_address_type=pulumi.get(__ret__, 'ip_address_type'),
network_interface_ids=pulumi.get(__ret__, 'network_interface_ids'),
owner_id=pulumi.get(__ret__, 'owner_id'),
policy=pulumi.get(__ret__, 'policy'),
prefix_list_id=pulumi.get(__ret__, 'prefix_list_id'),
private_dns_enabled=pulumi.get(__ret__, 'private_dns_enabled'),
requester_managed=pulumi.get(__ret__, 'requester_managed'),
route_table_ids=pulumi.get(__ret__, 'route_table_ids'),
security_group_ids=pulumi.get(__ret__, 'security_group_ids'),
service_name=pulumi.get(__ret__, 'service_name'),
state=pulumi.get(__ret__, 'state'),
subnet_ids=pulumi.get(__ret__, 'subnet_ids'),
tags=pulumi.get(__ret__, 'tags'),
vpc_endpoint_type=pulumi.get(__ret__, 'vpc_endpoint_type'),
vpc_id=pulumi.get(__ret__, 'vpc_id'))
@_utilities.lift_output_func(get_vpc_endpoint)
def get_vpc_endpoint_output(filters: Optional[pulumi.Input[Optional[Sequence[pulumi.InputType['GetVpcEndpointFilterArgs']]]]] = None,
id: Optional[pulumi.Input[Optional[str]]] = None,
service_name: Optional[pulumi.Input[Optional[str]]] = None,
state: Optional[pulumi.Input[Optional[str]]] = None,
tags: Optional[pulumi.Input[Optional[Mapping[str, str]]]] = None,
vpc_id: Optional[pulumi.Input[Optional[str]]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetVpcEndpointResult]:
"""
The VPC Endpoint data source provides details about
a specific VPC endpoint.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
s3 = aws.ec2.get_vpc_endpoint(vpc_id=aws_vpc["foo"]["id"],
service_name="com.amazonaws.us-west-2.s3")
private_s3 = aws.ec2.VpcEndpointRouteTableAssociation("privateS3",
vpc_endpoint_id=s3.id,
route_table_id=aws_route_table["private"]["id"])
```
:param Sequence[pulumi.InputType['GetVpcEndpointFilterArgs']] filters: Custom filter block as described below.
:param str id: ID of the specific VPC Endpoint to retrieve.
:param str service_name: Service name of the specific VPC Endpoint to retrieve. For AWS services the service name is usually in the form `com.amazonaws.<region>.<service>` (the SageMaker Notebook service is an exception to this rule, the service name is in the form `aws.sagemaker.<region>.notebook`).
:param str state: State of the specific VPC Endpoint to retrieve.
:param Mapping[str, str] tags: Map of tags, each pair of which must exactly match
a pair on the specific VPC Endpoint to retrieve.
:param str vpc_id: ID of the VPC in which the specific VPC Endpoint is used.
More complex filters can be expressed using one or more `filter` sub-blocks,
which take the following arguments:
"""
...
|