| column | dtype | range / classes |
|---|---|---|
| hexsha | string | lengths 40 to 40 |
| size | int64 | 2 to 1.02M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | lengths 4 to 245 |
| max_stars_repo_name | string | lengths 6 to 130 |
| max_stars_repo_head_hexsha | string | lengths 40 to 40 |
| max_stars_repo_licenses | list | lengths 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_path | string | lengths 4 to 245 |
| max_issues_repo_name | string | lengths 6 to 130 |
| max_issues_repo_head_hexsha | string | lengths 40 to 40 |
| max_issues_repo_licenses | list | lengths 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_path | string | lengths 4 to 245 |
| max_forks_repo_name | string | lengths 6 to 130 |
| max_forks_repo_head_hexsha | string | lengths 40 to 40 |
| max_forks_repo_licenses | list | lengths 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | lengths 24 to 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | lengths 24 to 24, nullable (⌀) |
| content | string | lengths 2 to 1.02M |
| avg_line_length | float64 | 1 to 417k |
| max_line_length | int64 | 1 to 987k |
| alphanum_fraction | float64 | 0 to 1 |
| content_no_comment | string | lengths 0 to 1.01M |
| is_comment_constant_removed | bool | 1 class |
| is_sharp_comment_removed | bool | 1 class |
hexsha: 1c3e28b9b4e22d45e6bc49dc9b089760647d975c | size: 969 | ext: py | lang: Python
max_stars: exercises/en/exc_03_07.py | Jette16/spacy-course @ 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | count: 2,085 | 2019-04-17T13:10:40.000Z to 2022-03-30T21:51:46.000Z
max_issues: exercises/en/exc_03_07.py | Jette16/spacy-course @ 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | count: 79 | 2019-04-18T14:42:55.000Z to 2022-03-07T08:15:43.000Z
max_forks: exercises/en/exc_03_07.py | Jette16/spacy-course @ 32df0c8f6192de6c9daba89740a28c0537e4d6a0 | ["MIT"] | count: 361 | 2019-04-17T13:34:32.000Z to 2022-03-28T04:42:45.000Z
content:
import spacy
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span
nlp = spacy.load("en_core_web_sm")
animals = ["Golden Retriever", "cat", "turtle", "Rattus norvegicus"]
animal_patterns = list(nlp.pipe(animals))
print("animal_patterns:", animal_patterns)
matcher = PhraseMatcher(nlp.vocab)
matcher.add("ANIMAL", None, *animal_patterns)
# Define the custom component
def animal_component(doc):
# Apply the matcher to the doc
matches = ____
# Create a Span for each match and assign the label "ANIMAL"
    spans = [Span(____, ____, ____, label=____) for match_id, start, end in matches]
# Overwrite the doc.ents with the matched spans
doc.ents = spans
return doc
# Add the component to the pipeline after the "ner" component
____.____(____, ____=____)
print(nlp.pipe_names)
# Process the text and print the text and label for the doc.ents
doc = nlp("I have a cat and a Golden Retriever")
print([(____, ____) for ent in ____])
avg_line_length: 32.3 | max_line_length: 83 | alphanum_fraction: 0.734778
content_no_comment:
import spacy
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span
nlp = spacy.load("en_core_web_sm")
animals = ["Golden Retriever", "cat", "turtle", "Rattus norvegicus"]
animal_patterns = list(nlp.pipe(animals))
print("animal_patterns:", animal_patterns)
matcher = PhraseMatcher(nlp.vocab)
matcher.add("ANIMAL", None, *animal_patterns)
def animal_component(doc):
matches = ____
    spans = [Span(____, ____, ____, label=____) for match_id, start, end in matches]
doc.ents = spans
return doc
____.____(____, ____=____)
print(nlp.pipe_names)
doc = nlp("I have a cat and a Golden Retriever")
print([(____, ____) for ent in ____])
is_comment_constant_removed: true | is_sharp_comment_removed: true
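For reference, the blanks in the exc_03_07.py exercise above are typically filled in as follows; this is a sketch assuming the spaCy v2 API that the `matcher.add("ANIMAL", None, ...)` call implies (spaCy v3 registers components with the `@Language.component` decorator and adds them by name instead):

```python
import spacy
from spacy.matcher import PhraseMatcher
from spacy.tokens import Span

nlp = spacy.load("en_core_web_sm")
animals = ["Golden Retriever", "cat", "turtle", "Rattus norvegicus"]
matcher = PhraseMatcher(nlp.vocab)
matcher.add("ANIMAL", None, *list(nlp.pipe(animals)))

def animal_component(doc):
    # Apply the matcher to the doc, then wrap each match in a labeled Span
    matches = matcher(doc)
    spans = [Span(doc, start, end, label="ANIMAL") for match_id, start, end in matches]
    doc.ents = spans
    return doc

# Add the component to the pipeline after the built-in "ner" component
nlp.add_pipe(animal_component, after="ner")
print(nlp.pipe_names)

doc = nlp("I have a cat and a Golden Retriever")
print([(ent.text, ent.label_) for ent in doc.ents])
```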
hexsha: 1c3e28f5a566ed14d744c671218a676d561e1fb3 | size: 2,754 | ext: py | lang: Python
max_stars: tests/tag/test_tag_sticker.py | annihilatorrrr/sticker-finder @ 873468f8de26cc32d1de9b688140569b8086ab5b | ["MIT"] | count: 82 | 2018-11-13T05:39:44.000Z to 2022-01-18T17:08:44.000Z
max_issues: tests/tag/test_tag_sticker.py | annihilatorrrr/sticker-finder @ 873468f8de26cc32d1de9b688140569b8086ab5b | ["MIT"] | count: 25 | 2018-12-02T18:45:52.000Z to 2022-03-21T22:54:19.000Z
max_forks: tests/tag/test_tag_sticker.py | annihilatorrrr/sticker-finder @ 873468f8de26cc32d1de9b688140569b8086ab5b | ["MIT"] | count: 23 | 2019-01-22T20:04:50.000Z to 2022-02-01T14:57:28.000Z
content:
"""Test the normal tagging process."""
from tests.helper import assert_sticker_contains_tags
from stickerfinder.models import Tag
from stickerfinder.logic.tag import tag_sticker
def test_add_tags(session, user, sticker_set):
"""Add new tags to a sticker."""
for sticker in sticker_set.stickers:
# Create a new tag for each sticker
tag_sticker(session, f"tag-{sticker.file_id}", sticker, user)
session.commit()
    # Ensure the new tag was actually created for each sticker
for sticker in sticker_set.stickers:
assert sticker.tags[0].name == f"tag-{sticker.file_id}"
# User got a new change
assert len(user.changes) == len(sticker_set.stickers)
for sticker in sticker_set.stickers:
# Create a new tag for each sticker
tag_sticker(session, f"tag-2-{sticker.file_id}", sticker, user)
session.commit()
    # Ensure the second tag was added alongside the first
for sticker in sticker_set.stickers:
assert_sticker_contains_tags(
sticker, [f"tag-{sticker.file_id}", f"tag-2-{sticker.file_id}"]
)
assert len(user.changes) == len(sticker_set.stickers) * 2
def test_replace_sticker_tags(session, user, sticker_set, tags):
"""Replace tags of a sticker."""
for sticker in sticker_set.stickers:
# Replace the existing tag
tag_sticker(session, f"new-tag-{sticker.file_id}", sticker, user, replace=True)
session.commit()
# Ensure the tag has been replaced
for sticker in sticker_set.stickers:
assert len(sticker.tags) == 1
assert sticker.tags[0].name == f"new-tag-{sticker.file_id}"
assert len(user.changes) == len(sticker_set.stickers) * 2
def test_add_duplicate_sticker_tags_in_other_language(session, user, sticker_set):
"""Add the same tag to a sticker, but in different languages.
    The tag should be converted from international to default
    if somebody tags in default, but not the other way around.
"""
    # The user should tag in the non-default language first
user.international = True
sticker = sticker_set.stickers[0]
tag_sticker(session, "language-test-tag", sticker, user)
session.commit()
tag = session.query(Tag).get("language-test-tag")
assert tag.international
# Add same tag to sticker, but this time in default language
user.international = False
tag_sticker(session, "language-test-tag", sticker, user)
assert not tag.international
assert len(user.changes) == 1
    # Now tag in the non-default language again. This shouldn't change anything
user.international = True
tag_sticker(session, "language-test-tag", sticker, user)
assert not tag.international
assert len(user.changes) == 1
avg_line_length: 33.585366 | max_line_length: 87 | alphanum_fraction: 0.702251
content_no_comment:
from tests.helper import assert_sticker_contains_tags
from stickerfinder.models import Tag
from stickerfinder.logic.tag import tag_sticker
def test_add_tags(session, user, sticker_set):
for sticker in sticker_set.stickers:
tag_sticker(session, f"tag-{sticker.file_id}", sticker, user)
session.commit()
for sticker in sticker_set.stickers:
assert sticker.tags[0].name == f"tag-{sticker.file_id}"
assert len(user.changes) == len(sticker_set.stickers)
for sticker in sticker_set.stickers:
tag_sticker(session, f"tag-2-{sticker.file_id}", sticker, user)
session.commit()
for sticker in sticker_set.stickers:
assert_sticker_contains_tags(
sticker, [f"tag-{sticker.file_id}", f"tag-2-{sticker.file_id}"]
)
assert len(user.changes) == len(sticker_set.stickers) * 2
def test_replace_sticker_tags(session, user, sticker_set, tags):
for sticker in sticker_set.stickers:
tag_sticker(session, f"new-tag-{sticker.file_id}", sticker, user, replace=True)
session.commit()
for sticker in sticker_set.stickers:
assert len(sticker.tags) == 1
assert sticker.tags[0].name == f"new-tag-{sticker.file_id}"
assert len(user.changes) == len(sticker_set.stickers) * 2
def test_add_duplicate_sticker_tags_in_other_language(session, user, sticker_set):
user.international = True
sticker = sticker_set.stickers[0]
tag_sticker(session, "language-test-tag", sticker, user)
session.commit()
tag = session.query(Tag).get("language-test-tag")
assert tag.international
user.international = False
tag_sticker(session, "language-test-tag", sticker, user)
assert not tag.international
assert len(user.changes) == 1
user.international = True
tag_sticker(session, "language-test-tag", sticker, user)
assert not tag.international
assert len(user.changes) == 1
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3e2932ba0ff0ebb282143338f70a8062bf3935 | size: 914 | ext: py | lang: Python
max_stars: build/navigation/costmap_2d/cmake/costmap_2d-genmsg-context.py | lty1994/ros_project @ d55ce07c592d545f9a43330fa6bf96af6651575f | ["BSD-2-Clause"] | count: null | dates: null
max_issues: build/navigation/costmap_2d/cmake/costmap_2d-genmsg-context.py | lty1994/ros_project @ d55ce07c592d545f9a43330fa6bf96af6651575f | ["BSD-2-Clause"] | count: null | dates: null
max_forks: build/navigation/costmap_2d/cmake/costmap_2d-genmsg-context.py | lty1994/ros_project @ d55ce07c592d545f9a43330fa6bf96af6651575f | ["BSD-2-Clause"] | count: null | dates: null
content:
# generated from genmsg/cmake/pkg-genmsg.context.in
messages_str = "/home/autolabor/catkin_ws/src/navigation/costmap_2d/msg/VoxelGrid.msg"
services_str = ""
pkg_name = "costmap_2d"
dependencies_str = "std_msgs;geometry_msgs;map_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "costmap_2d;/home/autolabor/catkin_ws/src/navigation/costmap_2d/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;map_msgs;/opt/ros/kinetic/share/map_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;nav_msgs;/opt/ros/kinetic/share/nav_msgs/cmake/../msg;actionlib_msgs;/opt/ros/kinetic/share/actionlib_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
avg_line_length: 76.166667 | max_line_length: 444 | alphanum_fraction: 0.794311
content_no_comment:
messages_str = "/home/autolabor/catkin_ws/src/navigation/costmap_2d/msg/VoxelGrid.msg"
services_str = ""
pkg_name = "costmap_2d"
dependencies_str = "std_msgs;geometry_msgs;map_msgs"
langs = "gencpp;geneus;genlisp;gennodejs;genpy"
dep_include_paths_str = "costmap_2d;/home/autolabor/catkin_ws/src/navigation/costmap_2d/msg;std_msgs;/opt/ros/kinetic/share/std_msgs/cmake/../msg;geometry_msgs;/opt/ros/kinetic/share/geometry_msgs/cmake/../msg;map_msgs;/opt/ros/kinetic/share/map_msgs/cmake/../msg;sensor_msgs;/opt/ros/kinetic/share/sensor_msgs/cmake/../msg;nav_msgs;/opt/ros/kinetic/share/nav_msgs/cmake/../msg;actionlib_msgs;/opt/ros/kinetic/share/actionlib_msgs/cmake/../msg"
PYTHON_EXECUTABLE = "/usr/bin/python"
package_has_static_sources = '' == 'TRUE'
genmsg_check_deps_script = "/opt/ros/kinetic/share/genmsg/cmake/../../../lib/genmsg/genmsg_check_deps.py"
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3e29688e081410e4d2cebe46cf7c935368e8e9 | size: 4,017 | ext: py | lang: Python
max_stars: websites_metrics_collector/communication/webpages_fetcher.py | antoniodimariano/websites_metrics_collector @ 5113a680612b126005ac7f9f52ed35d26b806ea0 | ["Apache-2.0"] | count: null | dates: null
max_issues: websites_metrics_collector/communication/webpages_fetcher.py | antoniodimariano/websites_metrics_collector @ 5113a680612b126005ac7f9f52ed35d26b806ea0 | ["Apache-2.0"] | count: null | dates: null
max_forks: websites_metrics_collector/communication/webpages_fetcher.py | antoniodimariano/websites_metrics_collector @ 5113a680612b126005ac7f9f52ed35d26b806ea0 | ["Apache-2.0"] | count: null | dates: null
content:
import aiohttp
import asyncio
import time
from websites_metrics_collector.helpers.regex_functions import check_patterns_in_webpage
from collections import namedtuple
from typing import Tuple, NamedTuple
WebCheck = namedtuple('WebCheck', ['url', 'http_status', 'elapsed_time', 'pattern_verified'])
async def fetch_url_and_check_pattern(session: aiohttp.client.ClientSession, url: str,
                                      pattern_to_verify: list) -> NamedTuple:
"""
This function fetches the given url and stores the HTML content as text, the HTTP status and
checks if the given pattern_to_verify exists in the HTML content fetched.
    To track the elapsed time for each request, time.monotonic() is used (https://www.python.org/dev/peps/pep-0418/).
    The time.monotonic() method of the time module returns the value of a monotonic clock,
    i.e. a clock that cannot go backwards. Using time.monotonic() avoids issues that can arise
    with time.time(): time.time() reads the system clock, which can be changed by the user and can
    produce values that go forwards and backwards, resulting in unexpected behaviour.
:param session: an already instantiated aiohttp.client.ClientSession
:param url: http://cloudbased.me
    :param pattern_to_verify: ['Antonio Di Mariano', 'Cloud']
:return: a NamedTuple like WebCheck(url='http://cloudbased.me', http_status=200, elapsed_time=0.5274228749999998, pattern_verified=True)
"""
try:
start = time.monotonic()
async with session.get(url) as response:
elapsed_time = time.monotonic() - start
html_content = await response.text()
result = WebCheck(url=url, http_status=response.status, elapsed_time=elapsed_time,
                              pattern_verified=check_patterns_in_webpage(html_content, patterns=pattern_to_verify))
return result
    except Exception as error:  # pragma: no cover
print(f"HTTP error occurred: {error}")
async def fetch_all_urls(session: aiohttp.client.ClientSession, urls: list) -> Tuple:
"""
    This function processes the given list of URLs; for each entry
    an asyncio Task is created to schedule the coroutines concurrently.
    Two values are passed per entry: url[0] is the URL, and url[1] is a list of patterns to verify against the fetched HTML content.
:param session: an already instantiated aiohttp.client.ClientSession
    :param urls: a list of tuples, e.g. [('http://motoguzzi.com', ['twitter', 'Antonio']), ('http://ferrari.com', ['ferrari', 'url'])]
    :return: a list of WebCheck named tuples, e.g. [WebCheck(url='http://motoguzzi.com', http_status=200, elapsed_time=2.43176225, pattern_verified=False), WebCheck(url='http://ferrari.com', http_status=200, elapsed_time=1.416772042, pattern_verified=False)]
"""
tasks = []
for url in urls:
# The asyncio.create_task() function to run coroutines concurrently as asyncio Tasks.
# Tasks are used to schedule coroutines concurrently.
# When a coroutine is wrapped into a Task with functions like asyncio.create_task() the coroutine
# is automatically scheduled to run soon
#
# https://docs.python.org/3/library/asyncio-task.html#id4
task = asyncio.create_task(fetch_url_and_check_pattern(session, url[0], url[1]))
tasks.append(task)
results = await asyncio.gather(*tasks)
return results
async def fetch_list_of_urls(list_of_urls: list) -> tuple:
"""
    This function uses a context manager to create/destroy a ClientSession.
    `async with aiohttp.ClientSession()` does not perform I/O when entering the block,
    but at the end of it, it ensures all remaining resources are closed correctly.
https://docs.aiohttp.org/en/latest/http_request_lifecycle.html
:param list_of_urls:
:return:
"""
async with aiohttp.ClientSession() as session:
results = await fetch_all_urls(session, list_of_urls)
return results
avg_line_length: 51.5 | max_line_length: 238 | alphanum_fraction: 0.717202
content_no_comment:
import aiohttp
import asyncio
import time
from websites_metrics_collector.helpers.regex_functions import check_patterns_in_webpage
from collections import namedtuple
from typing import Tuple, NamedTuple
WebCheck = namedtuple('WebCheck', ['url', 'http_status', 'elapsed_time', 'pattern_verified'])
async def fetch_url_and_check_pattern(session: aiohttp.client.ClientSession, url: str,
                                      pattern_to_verify: list) -> NamedTuple:
try:
start = time.monotonic()
async with session.get(url) as response:
elapsed_time = time.monotonic() - start
html_content = await response.text()
result = WebCheck(url=url, http_status=response.status, elapsed_time=elapsed_time,
                              pattern_verified=check_patterns_in_webpage(html_content, patterns=pattern_to_verify))
return result
except Exception as error:
print(f"HTTP error occurred: {error}")
async def fetch_all_urls(session: aiohttp.client.ClientSession, urls: list) -> Tuple:
tasks = []
for url in urls:
task = asyncio.create_task(fetch_url_and_check_pattern(session, url[0], url[1]))
tasks.append(task)
results = await asyncio.gather(*tasks)
return results
async def fetch_list_of_urls(list_of_urls: list) -> tuple:
async with aiohttp.ClientSession() as session:
results = await fetch_all_urls(session, list_of_urls)
return results
is_comment_constant_removed: true | is_sharp_comment_removed: true
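A minimal driver for the fetcher above might look like this; a sketch with hypothetical URLs and patterns, assuming Python 3.7+ for `asyncio.run`:

```python
import asyncio

# Each entry pairs a URL with the patterns to check in its HTML (hypothetical values).
urls = [
    ("http://cloudbased.me", ["Antonio Di Mariano", "Cloud"]),
    ("http://ferrari.com", ["ferrari", "url"]),
]

# fetch_list_of_urls opens one ClientSession and fetches all URLs concurrently.
results = asyncio.run(fetch_list_of_urls(urls))
for check in results:
    print(check.url, check.http_status, check.elapsed_time, check.pattern_verified)
```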
hexsha: 1c3e29d0b0480d986c373efdc3b0b54efb1318d0 | size: 495 | ext: py | lang: Python
max_stars: sky.py | Evolution0/voxelcraft @ 06251870ea668cc54520947003f07e62ec736237 | ["MIT"] | count: 3 | 2021-04-10T21:10:56.000Z to 2021-04-18T12:08:45.000Z
max_issues: sky.py | Evolution0/voxelcraft @ 06251870ea668cc54520947003f07e62ec736237 | ["MIT"] | count: null | dates: null
max_forks: sky.py | Evolution0/voxelcraft @ 06251870ea668cc54520947003f07e62ec736237 | ["MIT"] | count: null | dates: null
content:
from ursina import *
# 9. Create sky
class Sky(Entity):
def __init__(self):
super().__init__(
parent = scene, # Specifies parent of sky so it scales properly
model = 'sphere', # Specifies sky model
texture = 'assets/sky.jpg', # Sky texture
scale = 1000, # Increases size drastically
double_sided = True # See the sphere when you are in it
)
avg_line_length: 38.076923 | max_line_length: 92 | alphanum_fraction: 0.50303
content_no_comment:
from ursina import *
class Sky(Entity):
def __init__(self):
super().__init__(
parent = scene,
model = 'sphere',
texture = 'assets/sky.jpg',
scale = 1000,
double_sided = True
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
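A sketch of how the `Sky` entity above would typically be used; it assumes the `assets/sky.jpg` texture exists and the usual Ursina app loop:

```python
from ursina import Ursina

app = Ursina()
sky = Sky()  # parented to the scene, so the 1000-unit sphere surrounds the world
app.run()
```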
hexsha: 1c3e29e29ac16f488b5df61e155bff9bc5c1340c | size: 877 | ext: py | lang: Python
max_stars: galileo/framework/pytorch/python/dataset/__init__.py | YaoPu2021/galileo @ 0ebee2052bf78205f93f8cbbe0e2884095dd7af7 | ["Apache-2.0"] | count: 115 | 2021-09-09T03:01:58.000Z to 2022-03-30T10:46:26.000Z
max_issues: galileo/framework/pytorch/python/dataset/__init__.py | Hacky-DH/galileo @ e4d5021f0287dc879730dfa287b9a056f152f712 | ["Apache-2.0"] | count: 1 | 2021-12-09T07:34:41.000Z to 2021-12-20T06:24:27.000Z
max_forks: galileo/framework/pytorch/python/dataset/__init__.py | Hacky-DH/galileo @ e4d5021f0287dc879730dfa287b9a056f152f712 | ["Apache-2.0"] | count: 28 | 2021-09-10T08:47:20.000Z to 2022-03-17T07:29:26.000Z
content:
# Copyright 2020 JD.com, Inc. Galileo Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from . import (
base_dataset,
batched_dataloader,
vertex_dataset,
edge_dataset,
dataset_pipeline,
textline_dataset,
range_dataset,
tensor_dataset,
)
avg_line_length: 33.730769 | max_line_length: 80 | alphanum_fraction: 0.676169
content_no_comment:
from . import (
base_dataset,
batched_dataloader,
vertex_dataset,
edge_dataset,
dataset_pipeline,
textline_dataset,
range_dataset,
tensor_dataset,
)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3e2a7a0cc37de06ca731cbcf5536d7446fb1d5 | size: 2,120 | ext: py | lang: Python
max_stars: examples/rigidbody/plot_rigidbody.py | certik/pydy @ d201b75d3e8fd8295b375e52eb4ce4c1f35adfb4 | ["BSD-3-Clause"] | count: 1 | 2016-05-09T06:57:10.000Z to 2016-05-09T06:57:10.000Z
max_issues: examples/rigidbody/plot_rigidbody.py | certik/pydy @ d201b75d3e8fd8295b375e52eb4ce4c1f35adfb4 | ["BSD-3-Clause"] | count: null | dates: null
max_forks: examples/rigidbody/plot_rigidbody.py | certik/pydy @ d201b75d3e8fd8295b375e52eb4ce4c1f35adfb4 | ["BSD-3-Clause"] | count: null | dates: null
content:
#!/usr/bin/env python
import rigidbody_lib as rb
from scipy.integrate import odeint
from numpy import array, arange, zeros
# Dimensions of rigid body in the three body fixed directions
# Following are the dimensions of an iPhone 3G taken from apple.com
h = 0.1155 # meters in the 1 direction
w = 0.0621 # meters in the 2 direction
d = 0.0123 # meters in the 3 direction
m = 0.135 # kilograms
g = 9.81 # meters / sec**2
I11 = m*(w**2 + d**2)/12.
I22 = m*(h**2 + d**2)/12.
I33 = m*(h**2 + w**2)/12.
params = [m, 0, I11, I22, I33]
# states = [q1, q2, q3, q4, q5, q6, u1, u2, u3, u4, u5, u6]
# q1, q2, q3 are Body Fixed (Euler) 3-1-2 angles
# q4, q5, q6 are x, y, z Inertial positions
# u1, ..., u6 are the generalized speeds.
# Gravity is in the positive z direction, defined to be downwards
# Specify the initial conditions of the coordinates and the generalized speeds
q0 = [0.0, 0.0, 0.0, .05, 0., 0.]
# Intermediate inertia axis is the body-2 axis, exhibits instability
u0 = [0.0, 2.0, 0.15, 0., 0., 0.0]
x0 = q0 + u0
# Integration time
ti = 0.0
ts = 0.01
tf = 40.0
t = arange(ti, tf+ts, ts)
n = len(t)
# Integrate the differential equations
x = odeint(rb.eoms, x0, t, args = (params,))
# Animate using Visual-Python
AO = zeros((n,3))
A1 = zeros((n,3))
A3 = zeros((n,3))
# Animation playback speed multiplier (1 == realtime)
k = 1.0
for i, state in enumerate(x[:,:6]):
AO[i], A1[i], A3[i] = rb.anim(state, params)
A1[i] *= h
from visual import box, display, rate, arrow
black = (0,0,0)
red = (1, 0, 0)
green = (0, 1, 0)
blue = (0, 0, 1)
scene = display(title='Rigid body animation @ %0.2f realtime'%k, width=800, height=800, up=(0,0,-1),\
uniform=1, background=black, forward=(1,0,0))
N = [arrow(pos=(0,0,0),axis=(.1,0,0),length=0.01,color=red),
arrow(pos=(0,0,0),axis=(0,.1,0),length=0.01,color=green),
arrow(pos=(0,0,0),axis=(0,0,.1),length=0.01,color=blue)]
body = box(pos=AO[0], axis=A1[0], up=A3[0],\
height=d, width=w, color=red)
i = 1
while i<n:
body.pos = AO[i]
body.axis = A1[i]
body.up = A3[i]
i += 1
rate(k/ts)
avg_line_length: 29.444444 | max_line_length: 101 | alphanum_fraction: 0.616981
content_no_comment:
import rigidbody_lib as rb
from scipy.integrate import odeint
from numpy import array, arange, zeros
h = 0.1155
w = 0.0621
d = 0.0123
m = 0.135
g = 9.81
I11 = m*(w**2 + d**2)/12.
I22 = m*(h**2 + d**2)/12.
I33 = m*(h**2 + w**2)/12.
params = [m, 0, I11, I22, I33]
q0 = [0.0, 0.0, 0.0, .05, 0., 0.]
u0 = [0.0, 2.0, 0.15, 0., 0., 0.0]
x0 = q0 + u0
ti = 0.0
ts = 0.01
tf = 40.0
t = arange(ti, tf+ts, ts)
n = len(t)
x = odeint(rb.eoms, x0, t, args = (params,))
AO = zeros((n,3))
A1 = zeros((n,3))
A3 = zeros((n,3))
k = 1.0
for i, state in enumerate(x[:,:6]):
AO[i], A1[i], A3[i] = rb.anim(state, params)
A1[i] *= h
from visual import box, display, rate, arrow
black = (0,0,0)
red = (1, 0, 0)
green = (0, 1, 0)
blue = (0, 0, 1)
scene = display(title='Rigid body animation @ %0.2f realtime'%k, width=800, height=800, up=(0,0,-1),\
uniform=1, background=black, forward=(1,0,0))
N = [arrow(pos=(0,0,0),axis=(.1,0,0),length=0.01,color=red),
arrow(pos=(0,0,0),axis=(0,.1,0),length=0.01,color=green),
arrow(pos=(0,0,0),axis=(0,0,.1),length=0.01,color=blue)]
body = box(pos=AO[0], axis=A1[0], up=A3[0],\
height=d, width=w, color=red)
i = 1
while i<n:
body.pos = AO[i]
body.axis = A1[i]
body.up = A3[i]
i += 1
rate(k/ts)
is_comment_constant_removed: true | is_sharp_comment_removed: true
hexsha: 1c3e2b451501a4e34182c8af139665bf7d618113 | size: 497 | ext: py | lang: Python
max_stars: practice69.py | ikramulkayes/Python_season2 @ d057460d07c5d2d218ecd52e08c1d355add44df2 | ["MIT"] | count: null | dates: null
max_issues: practice69.py | ikramulkayes/Python_season2 @ d057460d07c5d2d218ecd52e08c1d355add44df2 | ["MIT"] | count: null | dates: null
max_forks: practice69.py | ikramulkayes/Python_season2 @ d057460d07c5d2d218ecd52e08c1d355add44df2 | ["MIT"] | count: null | dates: null
content:
class Marks:
def __init__(self,num = None):
self.mark = num
def __add__(self,other):
obj = Marks()
obj.mark = self.mark + other.mark
return obj
Q1 = Marks(int(input("Quiz 1 (out of 10): ")))
Q2 = Marks(int(input("Quiz 2 (out of 10): ")))
Lab = Marks(int(input("Lab (out of 30): ")))
Mid = Marks(int(input("Mid (out of 20): ")))
Final = Marks(int(input("Final (out of 30): ")))
total = Q1 + Q2 + Lab + Mid + Final
print("Total marks: {}".format(total.mark))
avg_line_length: 33.133333 | max_line_length: 48 | alphanum_fraction: 0.581489
content_no_comment:
class Marks:
def __init__(self,num = None):
self.mark = num
def __add__(self,other):
obj = Marks()
obj.mark = self.mark + other.mark
return obj
Q1 = Marks(int(input("Quiz 1 (out of 10): ")))
Q2 = Marks(int(input("Quiz 2 (out of 10): ")))
Lab = Marks(int(input("Lab (out of 30): ")))
Mid = Marks(int(input("Mid (out of 20): ")))
Final = Marks(int(input("Final (out of 30): ")))
total = Q1 + Q2 + Lab + Mid + Final
print("Total marks: {}".format(total.mark))
is_comment_constant_removed: true | is_sharp_comment_removed: true
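Because `Marks.__add__` returns a new `Marks`, the chained `Q1 + Q2 + Lab + Mid + Final` in practice69.py works; the same overload also lets Python's built-in `sum()` fold a list, provided a `Marks(0)` start value is supplied. A small illustrative extension, not part of the original file:

```python
scores = [Marks(8), Marks(9), Marks(25), Marks(18), Marks(27)]  # hypothetical marks
total = sum(scores, Marks(0))  # sum() repeatedly invokes Marks.__add__
print("Total marks: {}".format(total.mark))  # Total marks: 87
```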
hexsha: 1c3e2bbadcd5954813727b42a7702e096675d480 | size: 6,464 | ext: py | lang: Python
max_stars: snlds/utils.py | shaun95/google-research @ d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | ["Apache-2.0"] | count: 1 | 2022-03-13T21:48:52.000Z to 2022-03-13T21:48:52.000Z
max_issues: snlds/utils.py | shaun95/google-research @ d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | ["Apache-2.0"] | count: null | dates: null
max_forks: snlds/utils.py | shaun95/google-research @ d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | ["Apache-2.0"] | count: 1 | 2022-03-30T07:20:29.000Z to 2022-03-30T07:20:29.000Z
content:
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to help implement switching non-linear dynamical systems."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
layers = tf.keras.layers
FLOAT_TYPE = tf.float32
def build_birnn(rnn_type, rnn_hidden_dim):
"""Helper function for building bidirectional RNN."""
rnn_type = rnn_type.lower()
if rnn_type == "gru":
rnn_unit = layers.GRU(units=rnn_hidden_dim,
return_sequences=True)
elif rnn_type == "lstm":
rnn_unit = layers.LSTM(units=rnn_hidden_dim,
return_sequences=True)
return layers.Bidirectional(rnn_unit)
def build_dense_network(layer_sizes,
layer_activations,
kernel_initializer="glorot_uniform",
bias_initializer="random_uniform"):
"""Helper function for building a multi-layer network."""
nets = tf.keras.models.Sequential()
for lsize, activation in zip(layer_sizes, layer_activations):
nets.add(layers.Dense(
lsize,
activation=activation,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer))
return nets
def build_rnn_cell(rnn_type, rnn_hidden_dim):
"""Helper function for building RNN cells."""
rnn_type = rnn_type.lower()
if rnn_type == "gru":
rnn_cell = layers.GRUCell(units=rnn_hidden_dim)
elif rnn_type == "lstm":
rnn_cell = layers.LSTMCell(units=rnn_hidden_dim)
elif rnn_type == "simplernn":
rnn_cell = layers.SimpleRNNCell(units=rnn_hidden_dim)
return rnn_cell
def get_posterior_crossentropy(log_posterior, prior_probs):
"""Calculate cross entropy between prior and posterior distributions.
Args:
log_posterior: a `float` `Tensor` of shape [batch_size, num_steps,
num_states].
prior_probs: a `float` `Tensor` of shape [num_states].
Returns:
cross_entropy: a `float` `Tensor` of shape [batch_size].
"""
log_posterior = tf.convert_to_tensor(log_posterior, dtype_hint=FLOAT_TYPE)
prior_probs = tf.convert_to_tensor(prior_probs, dtype_hint=FLOAT_TYPE)
entropy_mat = tf.einsum("ijk, k->ij", log_posterior, prior_probs)
# when it is cross entropy, we want to minimize the cross entropy,
# i.e. we want to maximize the sum(prior_prob * log_posterior)
return tf.reduce_sum(entropy_mat, axis=1)
def normalize_logprob(logmat, axis=-1, temperature=1.):
"""Normalizing log probability with `reduce_logsumexp`."""
logmat = tf.convert_to_tensor(logmat, dtype_hint=FLOAT_TYPE)
logmat = logmat / temperature
normalizer = tf.math.reduce_logsumexp(logmat, axis=axis, keepdims=True)
return logmat - normalizer, normalizer
def tensor_for_ta(input_ta, swap_batch_time=True):
"""Creates a `Tensor` for the input `TensorArray`."""
if swap_batch_time:
res = input_ta.stack()
return tf.transpose(
res,
np.concatenate([[1, 0], np.arange(2, res.shape.ndims)])
)
else:
return input_ta.stack()
def write_updates_to_tas(tensor_arrays, t, tensor_updates):
"""Write updates to corresponding TensorArrays at time step t."""
assert len(tensor_arrays) == len(tensor_updates)
num_updates = len(tensor_updates)
return [tensor_arrays[i].write(t, tensor_updates[i])
for i in range(num_updates)]
def learning_rate_warmup(global_step,
warmup_end_lr,
warmup_start_lr,
warmup_steps):
"""Linear learning rate warm-up."""
p = tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32)
diff = warmup_end_lr - warmup_start_lr
return warmup_start_lr + diff * p
def learning_rate_schedule(global_step,
config):
"""Learning rate schedule with linear warm-up and cosine decay."""
warmup_schedule = learning_rate_warmup(
global_step=global_step,
warmup_end_lr=config.learning_rate,
warmup_start_lr=config.warmup_start_lr,
warmup_steps=config.warmup_steps)
decay_schedule = tf.keras.experimental.CosineDecay(
initial_learning_rate=config.learning_rate,
decay_steps=config.decay_steps - config.warmup_steps,
alpha=config.decay_alpha,
name=None)(tf.math.maximum(global_step - config.warmup_steps, 0))
return tf.cond(global_step < config.warmup_steps,
lambda: warmup_schedule,
lambda: decay_schedule)
def inverse_annealing_learning_rate(global_step,
target_lr,
learning_rate_ramp=1e3,
learning_rate_min=1e-10,
decreasing_learning_rate_ramp=1e4):
"""Inverse annealing learning rate."""
decreasing_gate = 1.0 * tf.pow(
tf.constant(0.66, dtype=tf.float32),
tf.to_float(global_step) / decreasing_learning_rate_ramp)
increasing_gate = (1 - (1 - learning_rate_min) * tf.pow(
tf.constant(0.66, dtype=tf.float32),
tf.to_float(global_step) / learning_rate_ramp))
lr = target_lr * increasing_gate * decreasing_gate + learning_rate_min
return lr
def schedule_exponential_decay(global_step, config, min_val=1e-10,
dtype=tf.float32):
"""Flat and exponential decay schedule."""
global_step = tf.cast(global_step, dtype)
decay_steps = tf.cast(config.decay_steps, dtype)
kickin_steps = tf.cast(config.kickin_steps, dtype)
decay_schedule = (
config.initial_temperature *
config.decay_rate ** (
tf.math.maximum(global_step - kickin_steps, 0.)
/ decay_steps))
temp_schedule = tf.cond(global_step < config.kickin_steps,
lambda: config.initial_temperature,
lambda: tf.maximum(decay_schedule, min_val))
return temp_schedule
avg_line_length: 36.727273 | max_line_length: 76 | alphanum_fraction: 0.691368
content_no_comment:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
layers = tf.keras.layers
FLOAT_TYPE = tf.float32
def build_birnn(rnn_type, rnn_hidden_dim):
rnn_type = rnn_type.lower()
if rnn_type == "gru":
rnn_unit = layers.GRU(units=rnn_hidden_dim,
return_sequences=True)
elif rnn_type == "lstm":
rnn_unit = layers.LSTM(units=rnn_hidden_dim,
return_sequences=True)
return layers.Bidirectional(rnn_unit)
def build_dense_network(layer_sizes,
layer_activations,
kernel_initializer="glorot_uniform",
bias_initializer="random_uniform"):
nets = tf.keras.models.Sequential()
for lsize, activation in zip(layer_sizes, layer_activations):
nets.add(layers.Dense(
lsize,
activation=activation,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer))
return nets
def build_rnn_cell(rnn_type, rnn_hidden_dim):
rnn_type = rnn_type.lower()
if rnn_type == "gru":
rnn_cell = layers.GRUCell(units=rnn_hidden_dim)
elif rnn_type == "lstm":
rnn_cell = layers.LSTMCell(units=rnn_hidden_dim)
elif rnn_type == "simplernn":
rnn_cell = layers.SimpleRNNCell(units=rnn_hidden_dim)
return rnn_cell
def get_posterior_crossentropy(log_posterior, prior_probs):
log_posterior = tf.convert_to_tensor(log_posterior, dtype_hint=FLOAT_TYPE)
prior_probs = tf.convert_to_tensor(prior_probs, dtype_hint=FLOAT_TYPE)
entropy_mat = tf.einsum("ijk, k->ij", log_posterior, prior_probs)
return tf.reduce_sum(entropy_mat, axis=1)
def normalize_logprob(logmat, axis=-1, temperature=1.):
logmat = tf.convert_to_tensor(logmat, dtype_hint=FLOAT_TYPE)
logmat = logmat / temperature
normalizer = tf.math.reduce_logsumexp(logmat, axis=axis, keepdims=True)
return logmat - normalizer, normalizer
def tensor_for_ta(input_ta, swap_batch_time=True):
if swap_batch_time:
res = input_ta.stack()
return tf.transpose(
res,
np.concatenate([[1, 0], np.arange(2, res.shape.ndims)])
)
else:
return input_ta.stack()
def write_updates_to_tas(tensor_arrays, t, tensor_updates):
assert len(tensor_arrays) == len(tensor_updates)
num_updates = len(tensor_updates)
return [tensor_arrays[i].write(t, tensor_updates[i])
for i in range(num_updates)]
def learning_rate_warmup(global_step,
warmup_end_lr,
warmup_start_lr,
warmup_steps):
p = tf.cast(global_step, tf.float32) / tf.cast(warmup_steps, tf.float32)
diff = warmup_end_lr - warmup_start_lr
return warmup_start_lr + diff * p
def learning_rate_schedule(global_step,
config):
warmup_schedule = learning_rate_warmup(
global_step=global_step,
warmup_end_lr=config.learning_rate,
warmup_start_lr=config.warmup_start_lr,
warmup_steps=config.warmup_steps)
decay_schedule = tf.keras.experimental.CosineDecay(
initial_learning_rate=config.learning_rate,
decay_steps=config.decay_steps - config.warmup_steps,
alpha=config.decay_alpha,
name=None)(tf.math.maximum(global_step - config.warmup_steps, 0))
return tf.cond(global_step < config.warmup_steps,
lambda: warmup_schedule,
lambda: decay_schedule)
def inverse_annealing_learning_rate(global_step,
target_lr,
learning_rate_ramp=1e3,
learning_rate_min=1e-10,
decreasing_learning_rate_ramp=1e4):
decreasing_gate = 1.0 * tf.pow(
tf.constant(0.66, dtype=tf.float32),
tf.to_float(global_step) / decreasing_learning_rate_ramp)
increasing_gate = (1 - (1 - learning_rate_min) * tf.pow(
tf.constant(0.66, dtype=tf.float32),
tf.to_float(global_step) / learning_rate_ramp))
lr = target_lr * increasing_gate * decreasing_gate + learning_rate_min
return lr
def schedule_exponential_decay(global_step, config, min_val=1e-10,
dtype=tf.float32):
global_step = tf.cast(global_step, dtype)
decay_steps = tf.cast(config.decay_steps, dtype)
kickin_steps = tf.cast(config.kickin_steps, dtype)
decay_schedule = (
config.initial_temperature *
config.decay_rate ** (
tf.math.maximum(global_step - kickin_steps, 0.)
/ decay_steps))
temp_schedule = tf.cond(global_step < config.kickin_steps,
lambda: config.initial_temperature,
lambda: tf.maximum(decay_schedule, min_val))
return temp_schedule
is_comment_constant_removed: true | is_sharp_comment_removed: true
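As a quick sanity check of `normalize_logprob` from snlds/utils.py above; a sketch with made-up logits, assuming TensorFlow 2.x eager execution (note that some other functions in the file, e.g. those using `tf.to_float`, are TF1-style):

```python
import numpy as np
import tensorflow as tf

logits = tf.constant([[1.0, 2.0, 3.0]])
normalized, normalizer = normalize_logprob(logits)
# Exponentiating the normalized log-probabilities yields a distribution summing to 1.
print(np.exp(normalized.numpy()).sum(axis=-1))  # ~[1.]
```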
hexsha: 1c3e2bdebb7ce8eab502e84ca3413255a0d6fe7a | size: 8,786 | ext: py | lang: Python
max_stars: sdk/python/pulumi_azure_native/avs/v20210601/hcx_enterprise_site.py | polivbr/pulumi-azure-native @ 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | count: null | dates: null
max_issues: sdk/python/pulumi_azure_native/avs/v20210601/hcx_enterprise_site.py | polivbr/pulumi-azure-native @ 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | count: null | dates: null
max_forks: sdk/python/pulumi_azure_native/avs/v20210601/hcx_enterprise_site.py | polivbr/pulumi-azure-native @ 09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7 | ["Apache-2.0"] | count: null | dates: null
content:
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['HcxEnterpriseSiteArgs', 'HcxEnterpriseSite']
@pulumi.input_type
class HcxEnterpriseSiteArgs:
def __init__(__self__, *,
private_cloud_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
hcx_enterprise_site_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a HcxEnterpriseSite resource.
:param pulumi.Input[str] private_cloud_name: The name of the private cloud.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] hcx_enterprise_site_name: Name of the HCX Enterprise Site in the private cloud
"""
pulumi.set(__self__, "private_cloud_name", private_cloud_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if hcx_enterprise_site_name is not None:
pulumi.set(__self__, "hcx_enterprise_site_name", hcx_enterprise_site_name)
@property
@pulumi.getter(name="privateCloudName")
def private_cloud_name(self) -> pulumi.Input[str]:
"""
The name of the private cloud.
"""
return pulumi.get(self, "private_cloud_name")
@private_cloud_name.setter
def private_cloud_name(self, value: pulumi.Input[str]):
pulumi.set(self, "private_cloud_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="hcxEnterpriseSiteName")
def hcx_enterprise_site_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the HCX Enterprise Site in the private cloud
"""
return pulumi.get(self, "hcx_enterprise_site_name")
@hcx_enterprise_site_name.setter
def hcx_enterprise_site_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hcx_enterprise_site_name", value)
class HcxEnterpriseSite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
hcx_enterprise_site_name: Optional[pulumi.Input[str]] = None,
private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
An HCX Enterprise Site resource
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] hcx_enterprise_site_name: Name of the HCX Enterprise Site in the private cloud
:param pulumi.Input[str] private_cloud_name: The name of the private cloud.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: HcxEnterpriseSiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
An HCX Enterprise Site resource
:param str resource_name: The name of the resource.
:param HcxEnterpriseSiteArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HcxEnterpriseSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
hcx_enterprise_site_name: Optional[pulumi.Input[str]] = None,
private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HcxEnterpriseSiteArgs.__new__(HcxEnterpriseSiteArgs)
__props__.__dict__["hcx_enterprise_site_name"] = hcx_enterprise_site_name
if private_cloud_name is None and not opts.urn:
raise TypeError("Missing required property 'private_cloud_name'")
__props__.__dict__["private_cloud_name"] = private_cloud_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["activation_key"] = None
__props__.__dict__["name"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:avs/v20210601:HcxEnterpriseSite"), pulumi.Alias(type_="azure-native:avs:HcxEnterpriseSite"), pulumi.Alias(type_="azure-nextgen:avs:HcxEnterpriseSite"), pulumi.Alias(type_="azure-native:avs/v20200320:HcxEnterpriseSite"), pulumi.Alias(type_="azure-nextgen:avs/v20200320:HcxEnterpriseSite"), pulumi.Alias(type_="azure-native:avs/v20200717preview:HcxEnterpriseSite"), pulumi.Alias(type_="azure-nextgen:avs/v20200717preview:HcxEnterpriseSite"), pulumi.Alias(type_="azure-native:avs/v20210101preview:HcxEnterpriseSite"), pulumi.Alias(type_="azure-nextgen:avs/v20210101preview:HcxEnterpriseSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(HcxEnterpriseSite, __self__).__init__(
'azure-native:avs/v20210601:HcxEnterpriseSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'HcxEnterpriseSite':
"""
Get an existing HcxEnterpriseSite resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = HcxEnterpriseSiteArgs.__new__(HcxEnterpriseSiteArgs)
__props__.__dict__["activation_key"] = None
__props__.__dict__["name"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
return HcxEnterpriseSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activationKey")
def activation_key(self) -> pulumi.Output[str]:
"""
The activation key
"""
return pulumi.get(self, "activation_key")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
"""
The status of the HCX Enterprise Site
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
avg_line_length: 44.598985 | max_line_length: 678 | alphanum_fraction: 0.665946
content_no_comment:
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = ['HcxEnterpriseSiteArgs', 'HcxEnterpriseSite']
@pulumi.input_type
class HcxEnterpriseSiteArgs:
def __init__(__self__, *,
private_cloud_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
hcx_enterprise_site_name: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "private_cloud_name", private_cloud_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if hcx_enterprise_site_name is not None:
pulumi.set(__self__, "hcx_enterprise_site_name", hcx_enterprise_site_name)
@property
@pulumi.getter(name="privateCloudName")
def private_cloud_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "private_cloud_name")
@private_cloud_name.setter
def private_cloud_name(self, value: pulumi.Input[str]):
pulumi.set(self, "private_cloud_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="hcxEnterpriseSiteName")
def hcx_enterprise_site_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "hcx_enterprise_site_name")
@hcx_enterprise_site_name.setter
def hcx_enterprise_site_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hcx_enterprise_site_name", value)
class HcxEnterpriseSite(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
hcx_enterprise_site_name: Optional[pulumi.Input[str]] = None,
private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: HcxEnterpriseSiteArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(HcxEnterpriseSiteArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
hcx_enterprise_site_name: Optional[pulumi.Input[str]] = None,
private_cloud_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = HcxEnterpriseSiteArgs.__new__(HcxEnterpriseSiteArgs)
__props__.__dict__["hcx_enterprise_site_name"] = hcx_enterprise_site_name
if private_cloud_name is None and not opts.urn:
raise TypeError("Missing required property 'private_cloud_name'")
__props__.__dict__["private_cloud_name"] = private_cloud_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["activation_key"] = None
__props__.__dict__["name"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:avs/v20210601:HcxEnterpriseSite"), pulumi.Alias(type_="azure-native:avs:HcxEnterpriseSite"), pulumi.Alias(type_="azure-nextgen:avs:HcxEnterpriseSite"), pulumi.Alias(type_="azure-native:avs/v20200320:HcxEnterpriseSite"), pulumi.Alias(type_="azure-nextgen:avs/v20200320:HcxEnterpriseSite"), pulumi.Alias(type_="azure-native:avs/v20200717preview:HcxEnterpriseSite"), pulumi.Alias(type_="azure-nextgen:avs/v20200717preview:HcxEnterpriseSite"), pulumi.Alias(type_="azure-native:avs/v20210101preview:HcxEnterpriseSite"), pulumi.Alias(type_="azure-nextgen:avs/v20210101preview:HcxEnterpriseSite")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(HcxEnterpriseSite, __self__).__init__(
'azure-native:avs/v20210601:HcxEnterpriseSite',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'HcxEnterpriseSite':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = HcxEnterpriseSiteArgs.__new__(HcxEnterpriseSiteArgs)
__props__.__dict__["activation_key"] = None
__props__.__dict__["name"] = None
__props__.__dict__["status"] = None
__props__.__dict__["type"] = None
return HcxEnterpriseSite(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="activationKey")
def activation_key(self) -> pulumi.Output[str]:
return pulumi.get(self, "activation_key")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def status(self) -> pulumi.Output[str]:
return pulumi.get(self, "status")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
return pulumi.get(self, "type")
is_comment_constant_removed: true | is_sharp_comment_removed: true
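A minimal Pulumi program using the generated resource above might look as follows; a sketch with hypothetical resource-group and private-cloud names, run inside a configured Pulumi stack:

```python
import pulumi
from pulumi_azure_native.avs.v20210601 import HcxEnterpriseSite

site = HcxEnterpriseSite(
    "example-site",
    resource_group_name="my-rg",              # hypothetical resource group
    private_cloud_name="my-private-cloud",    # hypothetical private cloud
    hcx_enterprise_site_name="site1",
)

# The service-generated activation key becomes a stack output.
pulumi.export("activation_key", site.activation_key)
```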
hexsha: 1c3e2ca8021695e3f7b0ca2fa0cef92808120aef | size: 10,751 | ext: py | lang: Python
max_stars: sdk/redhatopenshift/azure-mgmt-redhatopenshift/tests/test_cli_mgmt_redhatopenshift.py | rsdoherty/azure-sdk-for-python @ 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | count: 2,728 | 2015-01-09T10:19:32.000Z to 2022-03-31T14:50:33.000Z
max_issues: sdk/redhatopenshift/azure-mgmt-redhatopenshift/tests/test_cli_mgmt_redhatopenshift.py | rsdoherty/azure-sdk-for-python @ 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | count: 17,773 | 2015-01-05T15:57:17.000Z to 2022-03-31T23:50:25.000Z
max_forks: sdk/redhatopenshift/azure-mgmt-redhatopenshift/tests/test_cli_mgmt_redhatopenshift.py | rsdoherty/azure-sdk-for-python @ 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | count: 1,916 | 2015-01-19T05:05:41.000Z to 2022-03-31T19:36:44.000Z
content:
# coding: utf-8
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# TEST SCENARIO COVERAGE
# ----------------------
# Methods Total : 8
# Methods Covered : 8
# Examples Total : 8
# Examples Tested : 8
# Coverage % : 100
# ----------------------
import unittest
import azure.mgmt.redhatopenshift
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
AZURE_LOCATION = 'australiaeast'
@unittest.skip("skip test")
class MgmtAzureRedHatOpenShiftClientTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtAzureRedHatOpenShiftClientTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.redhatopenshift.AzureRedHatOpenShiftClient
)
if self.is_live:
from azure.mgmt.network import NetworkManagementClient
self.network_client = self.create_mgmt_client(
NetworkManagementClient
)
from azure.mgmt.authorization import AuthorizationManagementClient
self.authorization_client = self.create_mgmt_client(
AuthorizationManagementClient
)
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
self.acr_client = self.create_mgmt_client(
ContainerRegistryManagementClient
)
def create_virtual_network(self, group_name, location, network_name, subnet_name):
azure_operation_poller = self.network_client.virtual_networks.create_or_update(
group_name,
network_name,
{
'location': location,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
},
)
result_create = azure_operation_poller.result()
async_subnet_creation = self.network_client.subnets.create_or_update(
group_name,
network_name,
subnet_name,
subnet_parameters={'address_prefix': '10.0.0.0/24', "private_link_service_network_policies": "Disabled", "private_endpoint_network_policies": "Disabled", "service_endpoints": [ { "service": "Microsoft.ContainerRegistry" } ] }
)
subnet_info = async_subnet_creation.result()
return subnet_info
def create_subnet(self, group_name, location, network_name, subnet_name):
async_subnet_creation = self.network_client.subnets.create_or_update(
group_name,
network_name,
subnet_name,
subnet_parameters={'address_prefix': '10.0.1.0/24', "private_link_service_network_policies": "Disabled", "private_endpoint_network_policies": "Disabled", "service_endpoints": [ { "service": "Microsoft.ContainerRegistry" } ] }
)
subnet_info = async_subnet_creation.result()
subnet_info = self.network_client.subnets.get(group_name,
network_name,
subnet_name)
print(str(subnet_info))
return subnet_info
def assign_role(self,
service_principal_id,
scope,
name,
full_id):
BODY = {
"role_definition_id": full_id,
"principal_id": service_principal_id,
"principal_type": "ServicePrincipal"
}
result = self.authorization_client.role_assignments.create(scope, role_assignment_name=name, parameters=BODY)
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_redhatopenshift(self, resource_group):
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
TENANT_ID = self.settings.TENANT_ID
RESOURCE_GROUP = resource_group.name
RESOURCE_NAME = "zimsclusterxx"
VIRTUAL_NETWORK_NAME = "myvirtualnetwork"
SUBNET_NAME = "mysubnet"
SUBNET_NAME_2 = "mysubnet2"
if self.is_live:
SUBNET = self.create_virtual_network(RESOURCE_GROUP, AZURE_LOCATION, VIRTUAL_NETWORK_NAME, SUBNET_NAME)
SUBNET_2 = self.create_subnet(RESOURCE_GROUP, AZURE_LOCATION, VIRTUAL_NETWORK_NAME, SUBNET_NAME_2)
self.assign_role(self.settings.SERVICE_PRINCIPAL_ID, # SP Object ID
"/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME,
"1fa638dc-b769-420d-b822-340abb216e78",
"/subscriptions/" + SUBSCRIPTION_ID + "/providers/Microsoft.Authorization/roleDefinitions/" + "b24988ac-6180-42a0-ab88-20f7382dd24c")
self.assign_role(self.settings.ARO_SERVICE_PRINCIPAL_ID,
"/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME,
"1fa638dc-b769-420d-b822-340abb216e77",
"/subscriptions/" + SUBSCRIPTION_ID + "/providers/Microsoft.Authorization/roleDefinitions/" + "b24988ac-6180-42a0-ab88-20f7382dd24c")
CLIENT_ID = self.settings.CLIENT_ID
CLIENT_SECRET = self.settings.CLIENT_SECRET
self.be_careful_with_service_principal
else:
CLIENT_ID = "00000000-0000-0000-0000-000000000000"
CLIENT_SECRET = "xxxxxxxx"
        # /OpenShiftClusters/put/Creates or updates an OpenShift cluster with the specified subscription, resource group and resource name.[put]
BODY = {
"location": "australiaeast",
"tags": {
"key": "value"
},
"cluster_profile": {
"pull_secret": "",
"domain": "ab0176mx",
"resource_group_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + "aro-ab0176mx"
},
"service_principal_profile": {
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET
},
"network_profile": {
"pod_cidr": "10.128.0.0/14",
"service_cidr": "172.30.0.0/16"
},
"master_profile": {
"vm_size": "Standard_D8s_v3",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME + ""
},
"worker_profiles": [
{
"name": "worker",
"vm_size": "Standard_D4s_v3",
"disk_size_gb": "128",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME_2 + "",
"count": "3"
}
],
"apiserver_profile": {
"visibility": "Public"
},
"ingress_profiles": [
{
"name": "default",
"visibility": "Public"
}
]
}
result = self.mgmt_client.open_shift_clusters.create_or_update(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME, parameters=BODY)
result = result.result()
        # /OpenShiftClusters/get/Gets an OpenShift cluster with the specified subscription, resource group and resource name.[get]
result = self.mgmt_client.open_shift_clusters.get(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
# /OpenShiftClusters/get/Lists OpenShift clusters in the specified subscription and resource group.[get]
result = self.mgmt_client.open_shift_clusters.list_by_resource_group(resource_group_name=RESOURCE_GROUP)
# /OpenShiftClusters/get/Lists OpenShift clusters in the specified subscription.[get]
result = self.mgmt_client.open_shift_clusters.list()
# /Operations/get/Lists all of the available RP operations.[get]
result = self.mgmt_client.operations.list()
# /OpenShiftClusters/post/Lists credentials of an OpenShift cluster with the specified subscription, resource group and resource name.[post]
result = self.mgmt_client.open_shift_clusters.list_credentials(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
        # /OpenShiftClusters/patch/Creates or updates an OpenShift cluster with the specified subscription, resource group and resource name.[patch]
BODY = {
"tags": {
"key": "value"
},
"cluster_profile": {
"pull_secret": "",
"domain": "ab0176mx",
"resource_group_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + "aro-ab0176mx"
},
"service_principal_profile": {
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET
},
"network_profile": {
"pod_cidr": "10.128.0.0/14",
"service_cidr": "172.30.0.0/16"
},
"master_profile": {
"vm_size": "Standard_D8s_v3",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME + ""
},
"worker_profiles": [
{
# "name": "worker",
"vm_size": "Standard_D4s_v3",
"disk_size_gb": "128",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME_2 + "",
"count": "3"
}
],
"apiserver_profile": {
"visibility": "Public"
},
"ingress_profiles": [
{
"name": "default",
"visibility": "Public"
}
]
}
result = self.mgmt_client.open_shift_clusters.update(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME, parameters=BODY)
result = result.result()
# /OpenShiftClusters/delete/Deletes an OpenShift cluster with the specified subscription, resource group and resource name.[delete]
result = self.mgmt_client.open_shift_clusters.delete(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
result = result.result()
#------------------------------------------------------------------------------
if __name__ == '__main__':
unittest.main()
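# --- Hedged sketch (not part of the recorded test): the create-and-poll
# flow above, factored into a standalone helper. mgmt_client is assumed to
# be the AzureRedHatOpenShiftClient built in setUp(); in this SDK,
# create_or_update returns a long-running-operation poller, and .result()
# blocks until the cluster reaches a terminal provisioning state.
def create_aro_cluster(mgmt_client, resource_group, name, body):
    poller = mgmt_client.open_shift_clusters.create_or_update(
        resource_group_name=resource_group,
        resource_name=name,
        parameters=body,
    )
    return poller.result()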
| 44.061475
| 237
| 0.61278
|
import unittest
import azure.mgmt.redhatopenshift
from devtools_testutils import AzureMgmtTestCase, ResourceGroupPreparer
AZURE_LOCATION = 'australiaeast'
@unittest.skip("skip test")
class MgmtAzureRedHatOpenShiftClientTest(AzureMgmtTestCase):
def setUp(self):
super(MgmtAzureRedHatOpenShiftClientTest, self).setUp()
self.mgmt_client = self.create_mgmt_client(
azure.mgmt.redhatopenshift.AzureRedHatOpenShiftClient
)
if self.is_live:
from azure.mgmt.network import NetworkManagementClient
self.network_client = self.create_mgmt_client(
NetworkManagementClient
)
from azure.mgmt.authorization import AuthorizationManagementClient
self.authorization_client = self.create_mgmt_client(
AuthorizationManagementClient
)
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
self.acr_client = self.create_mgmt_client(
ContainerRegistryManagementClient
)
def create_virtual_network(self, group_name, location, network_name, subnet_name):
azure_operation_poller = self.network_client.virtual_networks.create_or_update(
group_name,
network_name,
{
'location': location,
'address_space': {
'address_prefixes': ['10.0.0.0/16']
}
},
)
result_create = azure_operation_poller.result()
async_subnet_creation = self.network_client.subnets.create_or_update(
group_name,
network_name,
subnet_name,
subnet_parameters={'address_prefix': '10.0.0.0/24', "private_link_service_network_policies": "Disabled", "private_endpoint_network_policies": "Disabled", "service_endpoints": [ { "service": "Microsoft.ContainerRegistry" } ] }
)
subnet_info = async_subnet_creation.result()
return subnet_info
def create_subnet(self, group_name, location, network_name, subnet_name):
async_subnet_creation = self.network_client.subnets.create_or_update(
group_name,
network_name,
subnet_name,
subnet_parameters={'address_prefix': '10.0.1.0/24', "private_link_service_network_policies": "Disabled", "private_endpoint_network_policies": "Disabled", "service_endpoints": [ { "service": "Microsoft.ContainerRegistry" } ] }
)
subnet_info = async_subnet_creation.result()
subnet_info = self.network_client.subnets.get(group_name,
network_name,
subnet_name)
print(str(subnet_info))
return subnet_info
def assign_role(self,
service_principal_id,
scope,
name,
full_id):
BODY = {
"role_definition_id": full_id,
"principal_id": service_principal_id,
"principal_type": "ServicePrincipal"
}
result = self.authorization_client.role_assignments.create(scope, role_assignment_name=name, parameters=BODY)
@ResourceGroupPreparer(location=AZURE_LOCATION)
def test_redhatopenshift(self, resource_group):
SUBSCRIPTION_ID = self.settings.SUBSCRIPTION_ID
TENANT_ID = self.settings.TENANT_ID
RESOURCE_GROUP = resource_group.name
RESOURCE_NAME = "zimsclusterxx"
VIRTUAL_NETWORK_NAME = "myvirtualnetwork"
SUBNET_NAME = "mysubnet"
SUBNET_NAME_2 = "mysubnet2"
if self.is_live:
SUBNET = self.create_virtual_network(RESOURCE_GROUP, AZURE_LOCATION, VIRTUAL_NETWORK_NAME, SUBNET_NAME)
SUBNET_2 = self.create_subnet(RESOURCE_GROUP, AZURE_LOCATION, VIRTUAL_NETWORK_NAME, SUBNET_NAME_2)
self.assign_role(self.settings.SERVICE_PRINCIPAL_ID,
"/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME,
"1fa638dc-b769-420d-b822-340abb216e78",
"/subscriptions/" + SUBSCRIPTION_ID + "/providers/Microsoft.Authorization/roleDefinitions/" + "b24988ac-6180-42a0-ab88-20f7382dd24c")
self.assign_role(self.settings.ARO_SERVICE_PRINCIPAL_ID,
"/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME,
"1fa638dc-b769-420d-b822-340abb216e77",
"/subscriptions/" + SUBSCRIPTION_ID + "/providers/Microsoft.Authorization/roleDefinitions/" + "b24988ac-6180-42a0-ab88-20f7382dd24c")
CLIENT_ID = self.settings.CLIENT_ID
CLIENT_SECRET = self.settings.CLIENT_SECRET
            # be careful with the service principal (bare attribute access
            # removed: it was a no-op that would raise AttributeError)
else:
CLIENT_ID = "00000000-0000-0000-0000-000000000000"
CLIENT_SECRET = "xxxxxxxx"
BODY = {
"location": "australiaeast",
"tags": {
"key": "value"
},
"cluster_profile": {
"pull_secret": "",
"domain": "ab0176mx",
"resource_group_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + "aro-ab0176mx"
},
"service_principal_profile": {
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET
},
"network_profile": {
"pod_cidr": "10.128.0.0/14",
"service_cidr": "172.30.0.0/16"
},
"master_profile": {
"vm_size": "Standard_D8s_v3",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME + ""
},
"worker_profiles": [
{
"name": "worker",
"vm_size": "Standard_D4s_v3",
"disk_size_gb": "128",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME_2 + "",
"count": "3"
}
],
"apiserver_profile": {
"visibility": "Public"
},
"ingress_profiles": [
{
"name": "default",
"visibility": "Public"
}
]
}
result = self.mgmt_client.open_shift_clusters.create_or_update(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME, parameters=BODY)
result = result.result()
result = self.mgmt_client.open_shift_clusters.get(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
result = self.mgmt_client.open_shift_clusters.list_by_resource_group(resource_group_name=RESOURCE_GROUP)
result = self.mgmt_client.open_shift_clusters.list()
result = self.mgmt_client.operations.list()
result = self.mgmt_client.open_shift_clusters.list_credentials(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
BODY = {
"tags": {
"key": "value"
},
"cluster_profile": {
"pull_secret": "",
"domain": "ab0176mx",
"resource_group_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + "aro-ab0176mx"
},
"service_principal_profile": {
"client_id": CLIENT_ID,
"client_secret": CLIENT_SECRET
},
"network_profile": {
"pod_cidr": "10.128.0.0/14",
"service_cidr": "172.30.0.0/16"
},
"master_profile": {
"vm_size": "Standard_D8s_v3",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME + ""
},
"worker_profiles": [
{
"vm_size": "Standard_D4s_v3",
"disk_size_gb": "128",
"subnet_id": "/subscriptions/" + SUBSCRIPTION_ID + "/resourceGroups/" + RESOURCE_GROUP + "/providers/Microsoft.Network/virtualNetworks/" + VIRTUAL_NETWORK_NAME + "/subnets/" + SUBNET_NAME_2 + "",
"count": "3"
}
],
"apiserver_profile": {
"visibility": "Public"
},
"ingress_profiles": [
{
"name": "default",
"visibility": "Public"
}
]
}
result = self.mgmt_client.open_shift_clusters.update(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME, parameters=BODY)
result = result.result()
result = self.mgmt_client.open_shift_clusters.delete(resource_group_name=RESOURCE_GROUP, resource_name=RESOURCE_NAME)
result = result.result()
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c3e2d7a94c129453bc740cb391bfd2454d467f1
| 14,082
|
py
|
Python
|
source/tests/py_tests/names_in_error_messages_test.py
|
Panzerschrek/U-00DC-Sprache
|
eb677a66d178985433a62eb6b8a50ce2cdb14b1a
|
[
"BSD-3-Clause"
] | 45
|
2016-06-21T22:28:43.000Z
|
2022-03-26T12:21:46.000Z
|
source/tests/py_tests/names_in_error_messages_test.py
|
Panzerschrek/U-00DC-Sprache
|
eb677a66d178985433a62eb6b8a50ce2cdb14b1a
|
[
"BSD-3-Clause"
] | 6
|
2020-07-12T18:00:10.000Z
|
2021-11-30T11:20:14.000Z
|
source/tests/py_tests/names_in_error_messages_test.py
|
Panzerschrek/U-00DC-Sprache
|
eb677a66d178985433a62eb6b8a50ce2cdb14b1a
|
[
"BSD-3-Clause"
] | 5
|
2019-09-03T17:20:34.000Z
|
2022-01-30T15:10:21.000Z
|
from py_tests_common import *
def TypeNameInErrorMessage_FundamentalTypes():
c_program_text= """
fn Foo()
{
var i32 x= 0.0f;
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
# must print something, like "conversion from f32 to i32"
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 4 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "f32" ) != -1 )
def TypeNameInErrorMessage_ClassTypeInGlobalNamespace():
c_program_text= """
struct SomeType{}
fn Foo()
{
var i32 x= SomeType();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
# must print something, like "conversion from SomeType to i32"
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "SomeType" ) != -1 )
def TypeNameInErrorMessage_ClassTypeInNamespace_Test0():
c_program_text= """
namespace NNN{ struct SomeType{} }
fn Foo()
{
var i32 x= NNN::SomeType();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
# must print something, like "conversion from SomeType to i32"
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "NNN::SomeType" ) != -1 )
def TypeNameInErrorMessage_ClassTypeInNamespace_Test1():
c_program_text= """
namespace NNN{ namespace Bar{ struct SomeType{} } }
fn Foo()
{
var i32 x= NNN::Bar::SomeType();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
# must print something, like "conversion from NNN::SomeType to i32"
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "NNN::Bar::SomeType" ) != -1 )
def TypeNameInErrorMessage_ClassTypeInNamespace_Test2():
c_program_text= """
namespace NNN
{
namespace Bar
{
struct SomeType{}
fn Foo()
{
var i32 x= SomeType();
}
}
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
# must print full type name
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 9 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "NNN::Bar::SomeType" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test0():
c_program_text= """
template</ type T /> struct Box {}
fn Foo()
{
var i32 x= Box</f64/>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</f64/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test1():
c_program_text= """
namespace Bar{ template</ type T /> struct Box {} }
fn Foo()
{
var i32 x= Bar::Box</bool/>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Bar::Box</bool/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test2():
c_program_text= """
struct S{}
template</ type T /> struct Box {}
fn Foo()
{
var i32 x= Box</S/>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 6 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</S/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test3():
c_program_text= """
struct S{}
template</ type T, size_type X /> struct Box {}
fn Foo()
{
var i32 x= Box</S, size_type(66) />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 6 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</S, 66" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test4():
c_program_text= """
enum E { A, B, C, D, E, F, G, H, I, }
template</ E a, E b, E c /> struct Box{}
fn Foo()
{
var i32 x= Box</ E::B, E::G, E::A />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 6 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</E::B, E::G, E::A/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test5():
c_program_text= """
template<//> struct Box{}
fn Foo()
{
var i32 x= Box<//>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box<//>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test6():
c_program_text= """
template</type T/> struct F{}
template</type T/> struct Box</ F</T/> />{}
fn Foo()
{
var i32 x= Box</ F</u16/> />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 6 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</F</u16/>/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test7():
c_program_text= """
template<//> struct Box</ i32 />{}
fn Foo()
{
var i32 x= Box</i32/>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</i32/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test8():
c_program_text= """
template</ type T /> struct Box{}
fn Foo()
{
var i32 x= Box</ typeof(typeinfo</f64/>) />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</typeof(typeinfo</f64/>)/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test9():
c_program_text= """
template</ i32 s /> struct Box{}
fn Foo()
{
var i32 x= Box</ -365 />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</-365/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test10():
c_program_text= """
template</ char16 C /> struct Box{}
fn Foo()
{
var i32 x= Box</ 45c16 />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</45c16/>" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test0():
c_program_text= """
template</ type T />
struct Box
{
T t;
UnknownName x;
}
type B= Box</ i32 />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 9 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 6 )
assert( errors_list[0].template_errors.parameters_description.find( "T = i32" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Box" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test1():
c_program_text= """
template</ type A, type B />
fn Add( A a, B b )
{
a + b;
}
fn Foo()
{
Add( -5, 0.25 );
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 9 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NoMatchBinaryOperatorForGivenTypes" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "A = i32" ) != -1 )
assert( errors_list[0].template_errors.parameters_description.find( "B = f64" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Add" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test2():
c_program_text= """
template</ type A, type B />
fn Add( A a, B b )
{
a + b;
}
fn Foo()
{
Add</bool, f32/>( false, 6.66f );
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 9 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NoMatchBinaryOperatorForGivenTypes" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "A = bool" ) != -1 )
assert( errors_list[0].template_errors.parameters_description.find( "B = f32" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Add" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test3():
c_program_text= """
template</ size_type s />
struct IVec
{
[ UnknownName, s ] x;
}
type B= IVec</ 4s />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 8 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "s = 4" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "IVec" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test4():
c_program_text= """
template</ i32 s />
struct Box
{
UnknownName x;
}
type B= Box</ -365 />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 8 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "s = -365" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Box" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test5():
c_program_text= """
template</ bool b />
struct Box
{
UnknownName x;
}
type B= Box</ false />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 8 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "b = false" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Box" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test6():
c_program_text= """
enum ErT{ One, Two2, Blue }
template</ ErT e />
struct Box
{
UnknownName x;
}
type B= Box</ ErT::Two2 />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 9 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 6 )
assert( errors_list[0].template_errors.parameters_description.find( "e = ErT::Two2" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Box" ) != -1 )
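# --- Hedged helper sketch (not part of the original test file) ---
# Each test above repeats the same build/assert pattern; assuming the same
# tests_lib/ConvertErrors API, it could be factored out like this:
def ExpectFirstError( c_program_text, error_code, line, *name_fragments ):
	errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
	assert( len(errors_list) > 0 )
	assert( errors_list[0].error_code == error_code )
	assert( errors_list[0].src_loc.line == line )
	for fragment in name_fragments:
		# every expected type name must appear in the error message text
		assert( errors_list[0].text.find( fragment ) != -1 )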
| 32.978923
| 102
| 0.687757
|
from py_tests_common import *
def TypeNameInErrorMessage_FundamentalTypes():
c_program_text= """
fn Foo()
{
var i32 x= 0.0f;
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 4 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "f32" ) != -1 )
def TypeNameInErrorMessage_ClassTypeInGlobalNamespace():
c_program_text= """
struct SomeType{}
fn Foo()
{
var i32 x= SomeType();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "SomeType" ) != -1 )
def TypeNameInErrorMessage_ClassTypeInNamespace_Test0():
c_program_text= """
namespace NNN{ struct SomeType{} }
fn Foo()
{
var i32 x= NNN::SomeType();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "NNN::SomeType" ) != -1 )
def TypeNameInErrorMessage_ClassTypeInNamespace_Test1():
c_program_text= """
namespace NNN{ namespace Bar{ struct SomeType{} } }
fn Foo()
{
var i32 x= NNN::Bar::SomeType();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "NNN::Bar::SomeType" ) != -1 )
def TypeNameInErrorMessage_ClassTypeInNamespace_Test2():
c_program_text= """
namespace NNN
{
namespace Bar
{
struct SomeType{}
fn Foo()
{
var i32 x= SomeType();
}
}
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 9 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "NNN::Bar::SomeType" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test0():
c_program_text= """
template</ type T /> struct Box {}
fn Foo()
{
var i32 x= Box</f64/>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</f64/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test1():
c_program_text= """
namespace Bar{ template</ type T /> struct Box {} }
fn Foo()
{
var i32 x= Bar::Box</bool/>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Bar::Box</bool/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test2():
c_program_text= """
struct S{}
template</ type T /> struct Box {}
fn Foo()
{
var i32 x= Box</S/>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 6 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</S/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test3():
c_program_text= """
struct S{}
template</ type T, size_type X /> struct Box {}
fn Foo()
{
var i32 x= Box</S, size_type(66) />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 6 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</S, 66" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test4():
c_program_text= """
enum E { A, B, C, D, E, F, G, H, I, }
template</ E a, E b, E c /> struct Box{}
fn Foo()
{
var i32 x= Box</ E::B, E::G, E::A />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 6 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</E::B, E::G, E::A/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test5():
c_program_text= """
template<//> struct Box{}
fn Foo()
{
var i32 x= Box<//>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box<//>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test6():
c_program_text= """
template</type T/> struct F{}
template</type T/> struct Box</ F</T/> />{}
fn Foo()
{
var i32 x= Box</ F</u16/> />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 6 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</F</u16/>/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test7():
c_program_text= """
template<//> struct Box</ i32 />{}
fn Foo()
{
var i32 x= Box</i32/>();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</i32/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test8():
c_program_text= """
template</ type T /> struct Box{}
fn Foo()
{
var i32 x= Box</ typeof(typeinfo</f64/>) />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</typeof(typeinfo</f64/>)/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test9():
c_program_text= """
template</ i32 s /> struct Box{}
fn Foo()
{
var i32 x= Box</ -365 />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</-365/>" ) != -1 )
def TypeNameInErrorMessage_ClassTemplate_Test10():
c_program_text= """
template</ char16 C /> struct Box{}
fn Foo()
{
var i32 x= Box</ 45c16 />();
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TypesMismatch" )
assert( errors_list[0].src_loc.line == 5 )
assert( errors_list[0].text.find( "i32" ) != -1 )
assert( errors_list[0].text.find( "Box</45c16/>" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test0():
c_program_text= """
template</ type T />
struct Box
{
T t;
UnknownName x;
}
type B= Box</ i32 />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 9 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 6 )
assert( errors_list[0].template_errors.parameters_description.find( "T = i32" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Box" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test1():
c_program_text= """
template</ type A, type B />
fn Add( A a, B b )
{
a + b;
}
fn Foo()
{
Add( -5, 0.25 );
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 9 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NoMatchBinaryOperatorForGivenTypes" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "A = i32" ) != -1 )
assert( errors_list[0].template_errors.parameters_description.find( "B = f64" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Add" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test2():
c_program_text= """
template</ type A, type B />
fn Add( A a, B b )
{
a + b;
}
fn Foo()
{
Add</bool, f32/>( false, 6.66f );
}
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 9 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NoMatchBinaryOperatorForGivenTypes" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "A = bool" ) != -1 )
assert( errors_list[0].template_errors.parameters_description.find( "B = f32" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Add" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test3():
c_program_text= """
template</ size_type s />
struct IVec
{
[ UnknownName, s ] x;
}
type B= IVec</ 4s />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 8 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "s = 4" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "IVec" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test4():
c_program_text= """
template</ i32 s />
struct Box
{
UnknownName x;
}
type B= Box</ -365 />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 8 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "s = -365" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Box" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test5():
c_program_text= """
template</ bool b />
struct Box
{
UnknownName x;
}
type B= Box</ false />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 8 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 5 )
assert( errors_list[0].template_errors.parameters_description.find( "b = false" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Box" ) != -1 )
def TemplateParametersInErrorInsideTemplate_Test6():
c_program_text= """
enum ErT{ One, Two2, Blue }
template</ ErT e />
struct Box
{
UnknownName x;
}
type B= Box</ ErT::Two2 />;
"""
errors_list= ConvertErrors( tests_lib.build_program_with_errors( c_program_text ) )
assert( len(errors_list) > 0 )
assert( errors_list[0].error_code == "TemplateContext" )
assert( errors_list[0].src_loc.line == 9 )
assert( len(errors_list[0].template_errors.errors) > 0 )
assert( errors_list[0].template_errors.errors[0].error_code == "NameNotFound" )
assert( errors_list[0].template_errors.errors[0].src_loc.line == 6 )
assert( errors_list[0].template_errors.parameters_description.find( "e = ErT::Two2" ) != -1 )
assert( errors_list[0].template_errors.template_name.find( "Box" ) != -1 )
| true
| true
|
1c3e2e4500ec56f373ffa34b204ef913befead83
| 892
|
py
|
Python
|
rankAndTier.py
|
NullP0interExcepti0n/TierbyPlaytime
|
ebdfa404aa9e0e85942b6e50c10243606948832a
|
[
"MIT"
] | 1
|
2018-04-03T15:37:34.000Z
|
2018-04-03T15:37:34.000Z
|
rankAndTier.py
|
NullP0interExcepti0n/TierbyPlaytime
|
ebdfa404aa9e0e85942b6e50c10243606948832a
|
[
"MIT"
] | null | null | null |
rankAndTier.py
|
NullP0interExcepti0n/TierbyPlaytime
|
ebdfa404aa9e0e85942b6e50c10243606948832a
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import numpy as np
# data.csv: column 0 = play time (hours), column 1 = rank; unpack=True
# transposes the file so each variable arrives as one row
data = np.loadtxt('./data.csv', delimiter=',', unpack=True, dtype='float32')
playTime = np.transpose(data[0])
rank = np.transpose(data[1])
W = tf.Variable(tf.random_uniform([1], 0, 20000))
b = tf.Variable(tf.random_uniform([1], 1, 2000000))
X = tf.placeholder(tf.float32, name = "X")
Y = tf.placeholder(tf.float32, name = "Y")
hypothesis = W * X + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.00000001)
train_op = optimizer.minimize(cost)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(500):
_, cost_val = sess.run([train_op, cost], feed_dict = {X: playTime, Y: rank})
print(step, cost_val, sess.run(W), sess.run(b))
print("\n=== Test ===")
print("Play Time : 2100hrs, Rank :", sess.run(hypothesis, feed_dict={X: 2100}))
| 30.758621
| 80
| 0.692825
|
import tensorflow as tf
import numpy as np
data = np.loadtxt('./data.csv', delimiter=',', unpack=True, dtype='float32')
playTime = np.transpose(data[0])
rank = np.transpose(data[1])
W = tf.Variable(tf.random_uniform([1], 0, 20000))
b = tf.Variable(tf.random_uniform([1], 1, 2000000))
X = tf.placeholder(tf.float32, name = "X")
Y = tf.placeholder(tf.float32, name = "Y")
hypothesis = W * X + b
cost = tf.reduce_mean(tf.square(hypothesis - Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate = 0.00000001)
train_op = optimizer.minimize(cost)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for step in range(500):
_, cost_val = sess.run([train_op, cost], feed_dict = {X: playTime, Y: rank})
print(step, cost_val, sess.run(W), sess.run(b))
print("\n=== Test ===")
print("Play Time : 2100hrs, Rank :", sess.run(hypothesis, feed_dict={X: 2100}))
| true
| true
|
1c3e2e5dede47d12cc5e4c184ba07d5166260b1c
| 1,259
|
py
|
Python
|
examples/study.cases/CUP2D/optimal-transport/findSample.py
|
JonathanLehner/korali
|
90f97d8e2fed2311f988f39cfe014f23ba7dd6cf
|
[
"MIT"
] | 43
|
2018-07-26T07:20:42.000Z
|
2022-03-02T10:23:12.000Z
|
examples/study.cases/CUP2D/optimal-transport/findSample.py
|
JonathanLehner/korali
|
90f97d8e2fed2311f988f39cfe014f23ba7dd6cf
|
[
"MIT"
] | 212
|
2018-09-21T10:44:07.000Z
|
2022-03-22T14:33:05.000Z
|
examples/study.cases/CUP2D/optimal-transport/findSample.py
|
JonathanLehner/korali
|
90f97d8e2fed2311f988f39cfe014f23ba7dd6cf
|
[
"MIT"
] | 16
|
2018-07-25T15:00:36.000Z
|
2022-03-22T14:19:46.000Z
|
import argparse
import json
import math
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--directory', type=str, help='Directory to check for latest file.', required=True)
parser.add_argument('--objective', type=int, help='Objective function.', required=True)
parser.add_argument('--value', type=float, help='Value to look for.', required=True)
args = parser.parse_args()
filename = args.directory + '/latest'
mindist = math.inf
sample = []
fsample = []
with open(filename) as json_file:
data = json.load(json_file)
samplevalues = data["Solver"]["Sample Value Collection"]
samples = data["Solver"]["Sample Collection"]
sampleidx = -1
objidx = args.objective
target = args.value
for idx, values in enumerate(samplevalues):
            dist = abs(values[objidx] - target)
if dist < mindist:
mindist = dist
sampleidx = idx
sample = samples[sampleidx]
fsample = samplevalues[sampleidx]
print("Sample Found: {}".format(sampleidx))
print("Params: {}".format(sample))
print("Objectives: {}".format(fsample))
print("Dist: {}".format(mindist))
| 27.369565
| 107
| 0.621922
|
import argparse
import json
import math
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--directory', type=str, help='Directory to check for latest file.', required=True)
parser.add_argument('--objective', type=int, help='Objective function.', required=True)
parser.add_argument('--value', type=float, help='Value to look for.', required=True)
args = parser.parse_args()
filename = args.directory + '/latest'
mindist = math.inf
sample = []
fsample = []
with open(filename) as json_file:
data = json.load(json_file)
samplevalues = data["Solver"]["Sample Value Collection"]
samples = data["Solver"]["Sample Collection"]
sampleidx = -1
objidx = args.objective
target = args.value
for idx, values in enumerate(samplevalues):
            dist = abs(values[objidx] - target)
if dist < mindist:
mindist = dist
sampleidx = idx
sample = samples[sampleidx]
fsample = samplevalues[sampleidx]
print("Sample Found: {}".format(sampleidx))
print("Params: {}".format(sample))
print("Objectives: {}".format(fsample))
print("Dist: {}".format(mindist))
| true
| true
|
1c3e30d6b87fe2900c07eb89a0d860f637827108
| 3,954
|
py
|
Python
|
salt/utils/psutil_compat.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2020-03-31T22:51:16.000Z
|
2020-03-31T22:51:16.000Z
|
salt/utils/psutil_compat.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
salt/utils/psutil_compat.py
|
yuriks/salt
|
d2a5bd8adddb98ec1718d79384aa13b4f37e8028
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-30T07:00:01.000Z
|
2021-09-30T07:00:01.000Z
|
# -*- coding: utf-8 -*-
'''
Version agnostic psutil hack to fully support both old (<2.0) and new (>=2.0)
psutil versions.
The old <1.0 psutil API is dropped in psutil 3.0
Should be removed once support for psutil <2.0 is dropped. (eg RHEL 6)
Built off of http://grodola.blogspot.com/2014/01/psutil-20-porting.html
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
# Import Salt libs
from salt.ext import six
# No exception handling, as we want ImportError if psutil doesn't exist
import psutil # pylint: disable=3rd-party-module-not-gated
if psutil.version_info >= (2, 0):
from psutil import * # pylint: disable=wildcard-import,unused-wildcard-import,3rd-party-module-not-gated
else:
# Import hack to work around bugs in old psutil's
# Psuedo "from psutil import *"
_globals = globals()
for attr in psutil.__all__:
_temp = __import__('psutil', globals(), locals(), [attr], -1 if six.PY2 else 0)
try:
_globals[attr] = getattr(_temp, attr)
except AttributeError:
pass
# Import functions not in __all__
# pylint: disable=unused-import,3rd-party-module-not-gated
from psutil import disk_partitions
from psutil import disk_usage
# pylint: enable=unused-import,3rd-party-module-not-gated
# Alias new module functions
def boot_time():
return psutil.BOOT_TIME
def cpu_count():
return psutil.NUM_CPUS
# Alias renamed module functions
pids = psutil.get_pid_list
try:
users = psutil.get_users
except AttributeError:
users = lambda: (_ for _ in ()).throw(NotImplementedError('Your '
'psutil version is too old'))
# Deprecated in 1.0.1, but not mentioned in blog post
if psutil.version_info < (1, 0, 1):
        net_io_counters = psutil.network_io_counters  # alias the function itself, not a one-off snapshot
class Process(psutil.Process): # pylint: disable=no-init
# Reimplement overloaded getters/setters
# pylint: disable=arguments-differ
def cpu_affinity(self, *args, **kwargs):
if args or kwargs:
return self.set_cpu_affinity(*args, **kwargs)
else:
return self.get_cpu_affinity()
def ionice(self, *args, **kwargs):
if args or kwargs:
return self.set_ionice(*args, **kwargs)
else:
return self.get_ionice()
def nice(self, *args, **kwargs):
if args or kwargs:
return self.set_nice(*args, **kwargs)
else:
return self.get_nice()
def rlimit(self, *args, **kwargs):
'''
            set_rlimit and get_rlimit were not introduced until psutil v1.1.0
'''
if psutil.version_info >= (1, 1, 0):
if args or kwargs:
return self.set_rlimit(*args, **kwargs)
else:
return self.get_rlimit()
else:
pass
# pylint: enable=arguments-differ
# Alias renamed Process functions
_PROCESS_FUNCTION_MAP = {
"children": "get_children",
"connections": "get_connections",
"cpu_percent": "get_cpu_percent",
"cpu_times": "get_cpu_times",
"io_counters": "get_io_counters",
"memory_info": "get_memory_info",
"memory_info_ex": "get_ext_memory_info",
"memory_maps": "get_memory_maps",
"memory_percent": "get_memory_percent",
"num_ctx_switches": "get_num_ctx_switches",
"num_fds": "get_num_fds",
"num_threads": "get_num_threads",
"open_files": "get_open_files",
"threads": "get_threads",
"cwd": "getcwd",
}
for new, old in six.iteritems(_PROCESS_FUNCTION_MAP):
try:
setattr(Process, new, psutil.Process.__dict__[old])
except KeyError:
pass
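# --- Hedged usage sketch (not part of the original module) ---
# Callers import this shim instead of psutil directly, so new-style names
# resolve on both old (<2.0) and new (>=2.0) psutil, e.g.:
#
#     import salt.utils.psutil_compat as psutil_compat
#     print(psutil_compat.cpu_count(), psutil_compat.boot_time())
#     print(len(psutil_compat.pids()), "processes running")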
| 33.226891
| 109
| 0.608245
|
from __future__ import absolute_import, print_function, unicode_literals
from salt.ext import six
import psutil # pylint: disable=3rd-party-module-not-gated
if psutil.version_info >= (2, 0):
from psutil import * # pylint: disable=wildcard-import,unused-wildcard-import,3rd-party-module-not-gated
else:
# Import hack to work around bugs in old psutil's
_globals = globals()
for attr in psutil.__all__:
_temp = __import__('psutil', globals(), locals(), [attr], -1 if six.PY2 else 0)
try:
_globals[attr] = getattr(_temp, attr)
except AttributeError:
pass
from psutil import disk_partitions
from psutil import disk_usage
def boot_time():
return psutil.BOOT_TIME
def cpu_count():
return psutil.NUM_CPUS
pids = psutil.get_pid_list
try:
users = psutil.get_users
except AttributeError:
users = lambda: (_ for _ in ()).throw(NotImplementedError('Your '
'psutil version is too old'))
if psutil.version_info < (1, 0, 1):
        net_io_counters = psutil.network_io_counters
class Process(psutil.Process):
def cpu_affinity(self, *args, **kwargs):
if args or kwargs:
return self.set_cpu_affinity(*args, **kwargs)
else:
return self.get_cpu_affinity()
def ionice(self, *args, **kwargs):
if args or kwargs:
return self.set_ionice(*args, **kwargs)
else:
return self.get_ionice()
def nice(self, *args, **kwargs):
if args or kwargs:
return self.set_nice(*args, **kwargs)
else:
return self.get_nice()
def rlimit(self, *args, **kwargs):
'''
            set_rlimit and get_rlimit were not introduced until psutil v1.1.0
'''
if psutil.version_info >= (1, 1, 0):
if args or kwargs:
return self.set_rlimit(*args, **kwargs)
else:
return self.get_rlimit()
else:
pass
_PROCESS_FUNCTION_MAP = {
"children": "get_children",
"connections": "get_connections",
"cpu_percent": "get_cpu_percent",
"cpu_times": "get_cpu_times",
"io_counters": "get_io_counters",
"memory_info": "get_memory_info",
"memory_info_ex": "get_ext_memory_info",
"memory_maps": "get_memory_maps",
"memory_percent": "get_memory_percent",
"num_ctx_switches": "get_num_ctx_switches",
"num_fds": "get_num_fds",
"num_threads": "get_num_threads",
"open_files": "get_open_files",
"threads": "get_threads",
"cwd": "getcwd",
}
for new, old in six.iteritems(_PROCESS_FUNCTION_MAP):
try:
setattr(Process, new, psutil.Process.__dict__[old])
except KeyError:
pass
| true
| true
|
1c3e321b75875673f93646662427b6867f9f25aa
| 2,514
|
py
|
Python
|
experiments/murtaza/multiworld/reset_free/pointmass/pointmass_her_td3_count_based.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/murtaza/multiworld/reset_free/pointmass/pointmass_her_td3_count_based.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
experiments/murtaza/multiworld/reset_free/pointmass/pointmass_her_td3_count_based.py
|
Asap7772/rail-rl-franka-eval
|
4bf99072376828193d05b53cf83c7e8f4efbd3ba
|
[
"MIT"
] | null | null | null |
import railrl.misc.hyperparameter as hyp
from multiworld.envs.pygame.point2d import Point2DWallEnv
from railrl.data_management.obs_dict_count_based_replay_buffer import ObsDictCountBasedRelabelingBuffer
from railrl.launchers.experiments.murtaza.multiworld_her import her_td3_experiment
from railrl.launchers.launcher_util import run_experiment
if __name__ == "__main__":
# noinspection PyTypeChecker
variant = dict(
algo_kwargs=dict(
num_epochs=100,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
max_path_length=50,
discount=0.99,
batch_size=128,
num_updates_per_env_step=1,
reward_scale=1,
),
env_class=Point2DWallEnv,
env_kwargs=dict(
ball_radius=0.5,
render_onscreen=False,
inner_wall_max_dist=2,
wall_shape="u",
),
replay_buffer_class=ObsDictCountBasedRelabelingBuffer,
replay_buffer_kwargs=dict(
max_size=int(1E6),
fraction_goals_are_rollout_goals=0.5,
fraction_resampled_goals_are_env_goals=0.5,
count_based_reward_scale=0,
hash_dim=10,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
normalize=False,
algorithm='HER-TD3',
version='normal',
es_kwargs=dict(
),
observation_key='observation',
desired_goal_key='desired_goal',
exploration_type='ou'
)
search_space = {
'env_kwargs.randomize_position_on_reset':[True, False],
'replay_buffer_kwargs.fraction_resampled_goals_are_env_goals': [0, .5, 1],
'replay_buffer_kwargs.fraction_goals_are_rollout_goals': [0, .5, 1],
'replay_buffer_kwargs.count_based_reward_scale': [0, 1],
'es_kwargs.max_sigma':[.3, .4, .5]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
# n_seeds= 1
# mode='local'
# exp_prefix= 'test'
n_seeds=1
mode = 'ec2'
exp_prefix = 'pointmass_wall_u_count_based_exploration'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for i in range(n_seeds):
run_experiment(
her_td3_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
)
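    # --- Hedged note (not part of the experiment script) ---
    # The sweep is the Cartesian product of the search_space value lists:
    # 2 * 3 * 3 * 2 * 3 = 108 variants, each launched n_seeds times.
    from functools import reduce
    n_variants = reduce(lambda acc, vals: acc * len(vals), search_space.values(), 1)
    print("total variants swept:", n_variants)  # 108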
| 32.649351
| 103
| 0.627287
|
import railrl.misc.hyperparameter as hyp
from multiworld.envs.pygame.point2d import Point2DWallEnv
from railrl.data_management.obs_dict_count_based_replay_buffer import ObsDictCountBasedRelabelingBuffer
from railrl.launchers.experiments.murtaza.multiworld_her import her_td3_experiment
from railrl.launchers.launcher_util import run_experiment
if __name__ == "__main__":
variant = dict(
algo_kwargs=dict(
num_epochs=100,
num_steps_per_epoch=1000,
num_steps_per_eval=1000,
max_path_length=50,
discount=0.99,
batch_size=128,
num_updates_per_env_step=1,
reward_scale=1,
),
env_class=Point2DWallEnv,
env_kwargs=dict(
ball_radius=0.5,
render_onscreen=False,
inner_wall_max_dist=2,
wall_shape="u",
),
replay_buffer_class=ObsDictCountBasedRelabelingBuffer,
replay_buffer_kwargs=dict(
max_size=int(1E6),
fraction_goals_are_rollout_goals=0.5,
fraction_resampled_goals_are_env_goals=0.5,
count_based_reward_scale=0,
hash_dim=10,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
normalize=False,
algorithm='HER-TD3',
version='normal',
es_kwargs=dict(
),
observation_key='observation',
desired_goal_key='desired_goal',
exploration_type='ou'
)
search_space = {
'env_kwargs.randomize_position_on_reset':[True, False],
'replay_buffer_kwargs.fraction_resampled_goals_are_env_goals': [0, .5, 1],
'replay_buffer_kwargs.fraction_goals_are_rollout_goals': [0, .5, 1],
'replay_buffer_kwargs.count_based_reward_scale': [0, 1],
'es_kwargs.max_sigma':[.3, .4, .5]
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
n_seeds=1
mode = 'ec2'
exp_prefix = 'pointmass_wall_u_count_based_exploration'
for exp_id, variant in enumerate(sweeper.iterate_hyperparameters()):
for i in range(n_seeds):
run_experiment(
her_td3_experiment,
exp_prefix=exp_prefix,
mode=mode,
variant=variant,
)
| true
| true
|
1c3e331971bd647943a1017508dcc29d184c53b3
| 6,660
|
py
|
Python
|
rasa/core/policies/embedding_policy.py
|
pablhoney/RasaTest
|
acba90ccae0cf69efc70a7656f85c8d2266b4926
|
[
"Apache-2.0"
] | null | null | null |
rasa/core/policies/embedding_policy.py
|
pablhoney/RasaTest
|
acba90ccae0cf69efc70a7656f85c8d2266b4926
|
[
"Apache-2.0"
] | null | null | null |
rasa/core/policies/embedding_policy.py
|
pablhoney/RasaTest
|
acba90ccae0cf69efc70a7656f85c8d2266b4926
|
[
"Apache-2.0"
] | null | null | null |
import logging
from typing import Any, Dict, Optional, Text
from rasa.constants import DOCS_URL_MIGRATION_GUIDE
from rasa.core.constants import DEFAULT_POLICY_PRIORITY, DIALOGUE
from rasa.core.featurizers import TrackerFeaturizer
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.utils.tensorflow.constants import (
LABEL,
HIDDEN_LAYERS_SIZES,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
DROP_RATE_DIALOGUE,
DROP_RATE_LABEL,
DROP_RATE_ATTENTION,
WEIGHT_SPARSITY,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_DIR,
TENSORBOARD_LOG_LEVEL,
)
from rasa.utils.tensorflow.models import RasaModel
import rasa.utils.common as common_utils
logger = logging.getLogger(__name__)
class EmbeddingPolicy(TEDPolicy):
"""Transformer Embedding Dialogue (TED) Policy is described in
https://arxiv.org/abs/1910.00486.
This policy has a pre-defined architecture, which comprises the
following steps:
- concatenate user input (user intent and entities), previous system actions,
slots and active forms for each time step into an input vector to
pre-transformer embedding layer;
- feed it to transformer;
- apply a dense layer to the output of the transformer to get embeddings of a
dialogue for each time step;
- apply a dense layer to create embeddings for system actions for each time
step;
- calculate the similarity between the dialogue embedding and embedded system
actions. This step is based on the StarSpace
(https://arxiv.org/abs/1709.03856) idea.
"""
# please make sure to update the docs when changing a default parameter
defaults = {
# ## Architecture of the used neural network
# Hidden layer sizes for layers before the dialogue and label embedding layers.
# The number of hidden layers is equal to the length of the corresponding
# list.
HIDDEN_LAYERS_SIZES: {DIALOGUE: [], LABEL: []},
# Number of units in transformer
TRANSFORMER_SIZE: 128,
# Number of transformer layers
NUM_TRANSFORMER_LAYERS: 1,
# If 'True' use key relative embeddings in attention
KEY_RELATIVE_ATTENTION: False,
        # If 'True' use value relative embeddings in attention
VALUE_RELATIVE_ATTENTION: False,
# Max position for relative embeddings
MAX_RELATIVE_POSITION: None,
# Number of attention heads in transformer
NUM_HEADS: 4,
# ## Training parameters
# Initial and final batch sizes:
# Batch size will be linearly increased for each epoch.
BATCH_SIZES: [8, 32],
# Strategy used when creating batches.
# Can be either 'sequence' or 'balanced'.
BATCH_STRATEGY: BALANCED,
# Number of epochs to train
EPOCHS: 1,
# Set random seed to any 'int' to get reproducible results
RANDOM_SEED: None,
# ## Parameters for embeddings
# Dimension size of embedding vectors
EMBEDDING_DIMENSION: 20,
# The number of incorrect labels. The algorithm will minimize
# their similarity to the user input during training.
NUM_NEG: 20,
# Type of similarity measure to use, either 'auto' or 'cosine' or 'inner'.
SIMILARITY_TYPE: AUTO,
# The type of the loss function, either 'softmax' or 'margin'.
LOSS_TYPE: SOFTMAX,
# Number of top actions to normalize scores for loss type 'softmax'.
# Set to 0 to turn off normalization.
RANKING_LENGTH: 10,
# Indicates how similar the algorithm should try to make embedding vectors
# for correct labels.
# Should be 0.0 < ... < 1.0 for 'cosine' similarity type.
MAX_POS_SIM: 0.8,
# Maximum negative similarity for incorrect labels.
# Should be -1.0 < ... < 1.0 for 'cosine' similarity type.
MAX_NEG_SIM: -0.2,
# If 'True' the algorithm only minimizes maximum similarity over
# incorrect intent labels, used only if 'loss_type' is set to 'margin'.
USE_MAX_NEG_SIM: True,
# Scale loss inverse proportionally to confidence of correct prediction
SCALE_LOSS: True,
# ## Regularization parameters
# The scale of regularization
REGULARIZATION_CONSTANT: 0.001,
# The scale of how important is to minimize the maximum similarity
# between embeddings of different labels.
NEGATIVE_MARGIN_SCALE: 0.8,
# Dropout rate for embedding layers of dialogue features.
DROP_RATE_DIALOGUE: 0.1,
# Dropout rate for embedding layers of label, e.g. action, features.
DROP_RATE_LABEL: 0.0,
# Dropout rate for attention.
DROP_RATE_ATTENTION: 0,
# Sparsity of the weights in dense layers
WEIGHT_SPARSITY: 0.8,
# ## Evaluation parameters
# How often calculate validation accuracy.
# Small values may hurt performance, e.g. model accuracy.
EVAL_NUM_EPOCHS: 20,
# How many examples to use for hold out validation set
# Large values may hurt performance, e.g. model accuracy.
EVAL_NUM_EXAMPLES: 0,
# If you want to use tensorboard to visualize training and validation metrics,
# set this option to a valid output directory.
TENSORBOARD_LOG_DIR: None,
# Define when training metrics for tensorboard should be logged.
# Either after every epoch or for every training step.
# Valid values: 'epoch' and 'minibatch'
TENSORBOARD_LOG_LEVEL: "epoch",
}
def __init__(
self,
featurizer: Optional[TrackerFeaturizer] = None,
priority: int = DEFAULT_POLICY_PRIORITY,
max_history: Optional[int] = None,
model: Optional[RasaModel] = None,
**kwargs: Dict[Text, Any],
) -> None:
super().__init__(featurizer, priority, max_history, model, **kwargs)
common_utils.raise_warning(
"'EmbeddingPolicy' is deprecated and will be removed in version 2.0. "
"Use 'TEDPolicy' instead.",
category=FutureWarning,
docs=DOCS_URL_MIGRATION_GUIDE,
)
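# --- Hedged migration note (based on the deprecation warning above) ---
# In a Rasa config.yml the drop-in replacement is TEDPolicy; assuming the
# constants above map to the usual lowercase config keys, a minimal entry is:
#
#   policies:
#     - name: TEDPolicy
#       epochs: 1
#       transformer_size: 128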
| 39.176471
| 87
| 0.668769
|
import logging
from typing import Any, Dict, Optional, Text
from rasa.constants import DOCS_URL_MIGRATION_GUIDE
from rasa.core.constants import DEFAULT_POLICY_PRIORITY, DIALOGUE
from rasa.core.featurizers import TrackerFeaturizer
from rasa.core.policies.ted_policy import TEDPolicy
from rasa.utils.tensorflow.constants import (
LABEL,
HIDDEN_LAYERS_SIZES,
TRANSFORMER_SIZE,
NUM_TRANSFORMER_LAYERS,
NUM_HEADS,
BATCH_SIZES,
BATCH_STRATEGY,
EPOCHS,
RANDOM_SEED,
RANKING_LENGTH,
LOSS_TYPE,
SIMILARITY_TYPE,
NUM_NEG,
EVAL_NUM_EXAMPLES,
EVAL_NUM_EPOCHS,
NEGATIVE_MARGIN_SCALE,
REGULARIZATION_CONSTANT,
SCALE_LOSS,
USE_MAX_NEG_SIM,
MAX_NEG_SIM,
MAX_POS_SIM,
EMBEDDING_DIMENSION,
DROP_RATE_DIALOGUE,
DROP_RATE_LABEL,
DROP_RATE_ATTENTION,
WEIGHT_SPARSITY,
KEY_RELATIVE_ATTENTION,
VALUE_RELATIVE_ATTENTION,
MAX_RELATIVE_POSITION,
SOFTMAX,
AUTO,
BALANCED,
TENSORBOARD_LOG_DIR,
TENSORBOARD_LOG_LEVEL,
)
from rasa.utils.tensorflow.models import RasaModel
import rasa.utils.common as common_utils
logger = logging.getLogger(__name__)
class EmbeddingPolicy(TEDPolicy):
defaults = {
TRANSFORMER_SIZE: 128,
NUM_TRANSFORMER_LAYERS: 1,
KEY_RELATIVE_ATTENTION: False,
VALUE_RELATIVE_ATTENTION: False,
MAX_RELATIVE_POSITION: None,
NUM_HEADS: 4,
        BATCH_SIZES: [8, 32],
BATCH_STRATEGY: BALANCED,
EPOCHS: 1,
RANDOM_SEED: None,
NUM_NEG: 20,
SIMILARITY_TYPE: AUTO,
LOSS_TYPE: SOFTMAX,
RANKING_LENGTH: 10,
MAX_POS_SIM: 0.8,
MAX_NEG_SIM: -0.2,
USE_MAX_NEG_SIM: True,
SCALE_LOSS: True,
NEGATIVE_MARGIN_SCALE: 0.8,
DROP_RATE_DIALOGUE: 0.1,
DROP_RATE_LABEL: 0.0,
DROP_RATE_ATTENTION: 0,
WEIGHT_SPARSITY: 0.8,
EVAL_NUM_EXAMPLES: 0,
TENSORBOARD_LOG_DIR: None,
TENSORBOARD_LOG_LEVEL: "epoch",
}
def __init__(
self,
featurizer: Optional[TrackerFeaturizer] = None,
priority: int = DEFAULT_POLICY_PRIORITY,
max_history: Optional[int] = None,
model: Optional[RasaModel] = None,
**kwargs: Dict[Text, Any],
) -> None:
super().__init__(featurizer, priority, max_history, model, **kwargs)
common_utils.raise_warning(
"'EmbeddingPolicy' is deprecated and will be removed in version 2.0. "
"Use 'TEDPolicy' instead.",
category=FutureWarning,
docs=DOCS_URL_MIGRATION_GUIDE,
)
| true
| true
|
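The EmbeddingPolicy record above is a textbook deprecation alias: the old class subclasses its replacement and adds nothing but a FutureWarning. A minimal standalone sketch of that pattern, with toy class names rather than the Rasa API:

import warnings

class NewPolicy:
    """The maintained implementation."""
    def __init__(self, priority: int = 1) -> None:
        self.priority = priority

class OldPolicy(NewPolicy):
    """Deprecated alias: identical behaviour, plus a warning on construction."""
    def __init__(self, priority: int = 1) -> None:
        super().__init__(priority)
        warnings.warn(
            "'OldPolicy' is deprecated; use 'NewPolicy' instead.",
            FutureWarning,
        )

policy = OldPolicy()  # emits a FutureWarning, then behaves exactly like NewPolicy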
1c3e33c7f0102c2010cb88e2f21ad95e7f86bb16
| 8,725
|
py
|
Python
|
core/test.py
|
xrcui/Pix2Vox
|
30ba9518dcfc06add38bf5e8491a6a05fc08eaee
|
[
"MIT"
] | null | null | null |
core/test.py
|
xrcui/Pix2Vox
|
30ba9518dcfc06add38bf5e8491a6a05fc08eaee
|
[
"MIT"
] | null | null | null |
core/test.py
|
xrcui/Pix2Vox
|
30ba9518dcfc06add38bf5e8491a6a05fc08eaee
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Developed by Haozhe Xie <cshzxie@gmail.com>
import json
import numpy as np
import os
import torch
import torch.backends.cudnn
import torch.utils.data
import utils.binvox_visualization
import utils.data_loaders
import utils.data_transforms
import utils.network_utils
from datetime import datetime as dt
from models.encoder import Encoder
from models.decoder import Decoder
from models.refiner import Refiner
from models.merger import Merger
def test_net(cfg,
epoch_idx=-1,
output_dir=None,
test_data_loader=None,
test_writer=None,
encoder=None,
decoder=None,
refiner=None,
merger=None):
# Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
torch.backends.cudnn.benchmark = True
# Load taxonomies of dataset
taxonomies = []
with open(cfg.DATASETS[cfg.DATASET.TEST_DATASET.upper()].TAXONOMY_FILE_PATH, encoding='utf-8') as file:
taxonomies = json.loads(file.read())
taxonomies = {t['taxonomy_id']: t for t in taxonomies}
# Set up data loader
if test_data_loader is None:
# Set up data augmentation
IMG_SIZE = cfg.CONST.IMG_H, cfg.CONST.IMG_W
CROP_SIZE = cfg.CONST.CROP_IMG_H, cfg.CONST.CROP_IMG_W
test_transforms = utils.data_transforms.Compose([
utils.data_transforms.CenterCrop(IMG_SIZE, CROP_SIZE),
utils.data_transforms.RandomBackground(cfg.TEST.RANDOM_BG_COLOR_RANGE),
utils.data_transforms.Normalize(mean=cfg.DATASET.MEAN, std=cfg.DATASET.STD),
utils.data_transforms.ToTensor(),
])
dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)
# dataset_loader.dataset_taxonomy = dataset_loader.dataset_taxonomy[5:7]
test_data_loader = torch.utils.data.DataLoader(dataset=dataset_loader.get_dataset(
utils.data_loaders.DatasetType.TEST, cfg.CONST.N_VIEWS_RENDERING, test_transforms),
batch_size=1,
num_workers=1,
pin_memory=True,
shuffle=False)
# Set up networks
if decoder is None or encoder is None:
encoder = Encoder(cfg)
decoder = Decoder(cfg)
refiner = Refiner(cfg)
merger = Merger(cfg)
if torch.cuda.is_available():
encoder = torch.nn.DataParallel(encoder).cuda()
decoder = torch.nn.DataParallel(decoder).cuda()
refiner = torch.nn.DataParallel(refiner).cuda()
merger = torch.nn.DataParallel(merger).cuda()
print('[INFO] %s Loading weights from %s ...' % (dt.now(), cfg.CONST.WEIGHTS))
checkpoint = torch.load(cfg.CONST.WEIGHTS)
epoch_idx = checkpoint['epoch_idx']
encoder.load_state_dict(checkpoint['encoder_state_dict'])
decoder.load_state_dict(checkpoint['decoder_state_dict'])
if cfg.NETWORK.USE_REFINER:
refiner.load_state_dict(checkpoint['refiner_state_dict'])
if cfg.NETWORK.USE_MERGER:
merger.load_state_dict(checkpoint['merger_state_dict'])
# Set up loss functions
bce_loss = torch.nn.BCELoss()
# Testing loop
n_samples = len(test_data_loader)
test_iou = dict()
encoder_losses = utils.network_utils.AverageMeter()
refiner_losses = utils.network_utils.AverageMeter()
# Switch models to evaluation mode
encoder.eval()
decoder.eval()
refiner.eval()
merger.eval()
for sample_idx, (taxonomy_id, sample_name, rendering_images, ground_truth_volume) in enumerate(test_data_loader):
taxonomy_id = taxonomy_id[0] if isinstance(taxonomy_id[0], str) else taxonomy_id[0].item()
sample_name = sample_name[0]
with torch.no_grad():
# Get data from data loader
rendering_images = utils.network_utils.var_or_cuda(rendering_images)
ground_truth_volume = utils.network_utils.var_or_cuda(ground_truth_volume)
# Test the encoder, decoder, refiner and merger
image_features = encoder(rendering_images)
raw_features, generated_volume = decoder(image_features)
if cfg.NETWORK.USE_MERGER and epoch_idx >= cfg.TRAIN.EPOCH_START_USE_MERGER:
generated_volume = merger(raw_features, generated_volume)
else:
generated_volume = torch.mean(generated_volume, dim=1)
encoder_loss = bce_loss(generated_volume, ground_truth_volume) * 10
if cfg.NETWORK.USE_REFINER and epoch_idx >= cfg.TRAIN.EPOCH_START_USE_REFINER:
generated_volume = refiner(generated_volume)
refiner_loss = bce_loss(generated_volume, ground_truth_volume) * 10
else:
refiner_loss = encoder_loss
# Append loss and accuracy to average metrics
encoder_losses.update(encoder_loss.item())
refiner_losses.update(refiner_loss.item())
# IoU per sample
sample_iou = []
for th in cfg.TEST.VOXEL_THRESH:
_volume = torch.ge(generated_volume, th).float()
intersection = torch.sum(_volume.mul(ground_truth_volume)).float()
union = torch.sum(torch.ge(_volume.add(ground_truth_volume), 1)).float()
sample_iou.append((intersection / union).item())
# IoU per taxonomy
if taxonomy_id not in test_iou:
test_iou[taxonomy_id] = {'n_samples': 0, 'iou': []}
test_iou[taxonomy_id]['n_samples'] += 1
test_iou[taxonomy_id]['iou'].append(sample_iou)
# Append generated volumes to TensorBoard
if output_dir and sample_idx < 3:
img_dir = output_dir % 'images'
# Volume Visualization
gv = generated_volume.cpu().numpy()
rendering_views = utils.binvox_visualization.get_volume_views(gv, os.path.join(img_dir, 'test'),
epoch_idx)
test_writer.add_image('Test Sample#%02d/Volume Reconstructed' % sample_idx, rendering_views, epoch_idx)
gtv = ground_truth_volume.cpu().numpy()
rendering_views = utils.binvox_visualization.get_volume_views(gtv, os.path.join(img_dir, 'test'),
epoch_idx)
test_writer.add_image('Test Sample#%02d/Volume GroundTruth' % sample_idx, rendering_views, epoch_idx)
# Print sample loss and IoU
print('[INFO] %s Test[%d/%d] Taxonomy = %s Sample = %s EDLoss = %.4f RLoss = %.4f IoU = %s' %
(dt.now(), sample_idx + 1, n_samples, taxonomy_id, sample_name, encoder_loss.item(),
refiner_loss.item(), ['%.4f' % si for si in sample_iou]))
# Output testing results
mean_iou = []
for taxonomy_id in test_iou:
test_iou[taxonomy_id]['iou'] = np.mean(test_iou[taxonomy_id]['iou'], axis=0)
mean_iou.append(test_iou[taxonomy_id]['iou'] * test_iou[taxonomy_id]['n_samples'])
mean_iou = np.sum(mean_iou, axis=0) / n_samples
# Print header
print('============================ TEST RESULTS ============================')
print('Taxonomy', end='\t')
print('#Sample', end='\t')
print('Baseline', end='\t')
for th in cfg.TEST.VOXEL_THRESH:
print('t=%.2f' % th, end='\t')
print()
# Print body
for taxonomy_id in test_iou:
print('%s' % taxonomies[taxonomy_id]['taxonomy_name'].ljust(8), end='\t')
print('%d' % test_iou[taxonomy_id]['n_samples'], end='\t')
if 'baseline' in taxonomies[taxonomy_id]:
print('%.4f' % taxonomies[taxonomy_id]['baseline']['%d-view' % cfg.CONST.N_VIEWS_RENDERING], end='\t\t')
else:
            print('N/A', end='\t\t')
for ti in test_iou[taxonomy_id]['iou']:
print('%.4f' % ti, end='\t')
print()
# Print mean IoU for each threshold
print('Overall ', end='\t\t\t\t')
for mi in mean_iou:
print('%.4f' % mi, end='\t')
print('\n')
# Add testing results to TensorBoard
max_iou = np.max(mean_iou)
if test_writer is not None:
test_writer.add_scalar('EncoderDecoder/EpochLoss', encoder_losses.avg, epoch_idx)
test_writer.add_scalar('Refiner/EpochLoss', refiner_losses.avg, epoch_idx)
test_writer.add_scalar('Refiner/IoU', max_iou, epoch_idx)
return max_iou
| 42.560976
| 119
| 0.615931
|
import json
import numpy as np
import os
import torch
import torch.backends.cudnn
import torch.utils.data
import utils.binvox_visualization
import utils.data_loaders
import utils.data_transforms
import utils.network_utils
from datetime import datetime as dt
from models.encoder import Encoder
from models.decoder import Decoder
from models.refiner import Refiner
from models.merger import Merger
def test_net(cfg,
epoch_idx=-1,
output_dir=None,
test_data_loader=None,
test_writer=None,
encoder=None,
decoder=None,
refiner=None,
merger=None):
torch.backends.cudnn.benchmark = True
taxonomies = []
with open(cfg.DATASETS[cfg.DATASET.TEST_DATASET.upper()].TAXONOMY_FILE_PATH, encoding='utf-8') as file:
taxonomies = json.loads(file.read())
taxonomies = {t['taxonomy_id']: t for t in taxonomies}
if test_data_loader is None:
IMG_SIZE = cfg.CONST.IMG_H, cfg.CONST.IMG_W
CROP_SIZE = cfg.CONST.CROP_IMG_H, cfg.CONST.CROP_IMG_W
test_transforms = utils.data_transforms.Compose([
utils.data_transforms.CenterCrop(IMG_SIZE, CROP_SIZE),
utils.data_transforms.RandomBackground(cfg.TEST.RANDOM_BG_COLOR_RANGE),
utils.data_transforms.Normalize(mean=cfg.DATASET.MEAN, std=cfg.DATASET.STD),
utils.data_transforms.ToTensor(),
])
dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)
test_data_loader = torch.utils.data.DataLoader(dataset=dataset_loader.get_dataset(
utils.data_loaders.DatasetType.TEST, cfg.CONST.N_VIEWS_RENDERING, test_transforms),
batch_size=1,
num_workers=1,
pin_memory=True,
shuffle=False)
if decoder is None or encoder is None:
encoder = Encoder(cfg)
decoder = Decoder(cfg)
refiner = Refiner(cfg)
merger = Merger(cfg)
if torch.cuda.is_available():
encoder = torch.nn.DataParallel(encoder).cuda()
decoder = torch.nn.DataParallel(decoder).cuda()
refiner = torch.nn.DataParallel(refiner).cuda()
merger = torch.nn.DataParallel(merger).cuda()
print('[INFO] %s Loading weights from %s ...' % (dt.now(), cfg.CONST.WEIGHTS))
checkpoint = torch.load(cfg.CONST.WEIGHTS)
epoch_idx = checkpoint['epoch_idx']
encoder.load_state_dict(checkpoint['encoder_state_dict'])
decoder.load_state_dict(checkpoint['decoder_state_dict'])
if cfg.NETWORK.USE_REFINER:
refiner.load_state_dict(checkpoint['refiner_state_dict'])
if cfg.NETWORK.USE_MERGER:
merger.load_state_dict(checkpoint['merger_state_dict'])
bce_loss = torch.nn.BCELoss()
n_samples = len(test_data_loader)
test_iou = dict()
encoder_losses = utils.network_utils.AverageMeter()
refiner_losses = utils.network_utils.AverageMeter()
encoder.eval()
decoder.eval()
refiner.eval()
merger.eval()
for sample_idx, (taxonomy_id, sample_name, rendering_images, ground_truth_volume) in enumerate(test_data_loader):
taxonomy_id = taxonomy_id[0] if isinstance(taxonomy_id[0], str) else taxonomy_id[0].item()
sample_name = sample_name[0]
with torch.no_grad():
rendering_images = utils.network_utils.var_or_cuda(rendering_images)
ground_truth_volume = utils.network_utils.var_or_cuda(ground_truth_volume)
image_features = encoder(rendering_images)
raw_features, generated_volume = decoder(image_features)
if cfg.NETWORK.USE_MERGER and epoch_idx >= cfg.TRAIN.EPOCH_START_USE_MERGER:
generated_volume = merger(raw_features, generated_volume)
else:
generated_volume = torch.mean(generated_volume, dim=1)
encoder_loss = bce_loss(generated_volume, ground_truth_volume) * 10
if cfg.NETWORK.USE_REFINER and epoch_idx >= cfg.TRAIN.EPOCH_START_USE_REFINER:
generated_volume = refiner(generated_volume)
refiner_loss = bce_loss(generated_volume, ground_truth_volume) * 10
else:
refiner_loss = encoder_loss
encoder_losses.update(encoder_loss.item())
refiner_losses.update(refiner_loss.item())
sample_iou = []
for th in cfg.TEST.VOXEL_THRESH:
_volume = torch.ge(generated_volume, th).float()
intersection = torch.sum(_volume.mul(ground_truth_volume)).float()
union = torch.sum(torch.ge(_volume.add(ground_truth_volume), 1)).float()
sample_iou.append((intersection / union).item())
if taxonomy_id not in test_iou:
test_iou[taxonomy_id] = {'n_samples': 0, 'iou': []}
test_iou[taxonomy_id]['n_samples'] += 1
test_iou[taxonomy_id]['iou'].append(sample_iou)
if output_dir and sample_idx < 3:
img_dir = output_dir % 'images'
gv = generated_volume.cpu().numpy()
rendering_views = utils.binvox_visualization.get_volume_views(gv, os.path.join(img_dir, 'test'),
epoch_idx)
test_writer.add_image('Test Sample#%02d/Volume Reconstructed' % sample_idx, rendering_views, epoch_idx)
gtv = ground_truth_volume.cpu().numpy()
rendering_views = utils.binvox_visualization.get_volume_views(gtv, os.path.join(img_dir, 'test'),
epoch_idx)
test_writer.add_image('Test Sample#%02d/Volume GroundTruth' % sample_idx, rendering_views, epoch_idx)
print('[INFO] %s Test[%d/%d] Taxonomy = %s Sample = %s EDLoss = %.4f RLoss = %.4f IoU = %s' %
(dt.now(), sample_idx + 1, n_samples, taxonomy_id, sample_name, encoder_loss.item(),
refiner_loss.item(), ['%.4f' % si for si in sample_iou]))
mean_iou = []
for taxonomy_id in test_iou:
test_iou[taxonomy_id]['iou'] = np.mean(test_iou[taxonomy_id]['iou'], axis=0)
mean_iou.append(test_iou[taxonomy_id]['iou'] * test_iou[taxonomy_id]['n_samples'])
mean_iou = np.sum(mean_iou, axis=0) / n_samples
print('============================ TEST RESULTS ============================')
print('Taxonomy', end='\t')
print('#Sample', end='\t')
print('Baseline', end='\t')
for th in cfg.TEST.VOXEL_THRESH:
print('t=%.2f' % th, end='\t')
print()
for taxonomy_id in test_iou:
print('%s' % taxonomies[taxonomy_id]['taxonomy_name'].ljust(8), end='\t')
print('%d' % test_iou[taxonomy_id]['n_samples'], end='\t')
if 'baseline' in taxonomies[taxonomy_id]:
print('%.4f' % taxonomies[taxonomy_id]['baseline']['%d-view' % cfg.CONST.N_VIEWS_RENDERING], end='\t\t')
else:
            print('N/A', end='\t\t')
for ti in test_iou[taxonomy_id]['iou']:
print('%.4f' % ti, end='\t')
print()
print('Overall ', end='\t\t\t\t')
for mi in mean_iou:
print('%.4f' % mi, end='\t')
print('\n')
max_iou = np.max(mean_iou)
if test_writer is not None:
test_writer.add_scalar('EncoderDecoder/EpochLoss', encoder_losses.avg, epoch_idx)
test_writer.add_scalar('Refiner/EpochLoss', refiner_losses.avg, epoch_idx)
test_writer.add_scalar('Refiner/IoU', max_iou, epoch_idx)
return max_iou
| true
| true
|
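The per-sample IoU loop inside test_net factors cleanly into a standalone helper; a sketch under the same conventions (PyTorch tensors, binary ground-truth occupancy grid):

import torch

def voxel_iou(pred: torch.Tensor, gt: torch.Tensor, threshold: float) -> float:
    # Binarize the predicted occupancy grid at the given threshold
    vol = torch.ge(pred, threshold).float()
    intersection = torch.sum(vol * gt).float()
    union = torch.sum(torch.ge(vol + gt, 1)).float()
    return (intersection / union).item()

# e.g. [voxel_iou(generated_volume, ground_truth_volume, th) for th in cfg.TEST.VOXEL_THRESH]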
1c3e3463540fc028f1d93f2a5ec8e2f0d1614f17
| 4,399
|
py
|
Python
|
runners/episode_runner.py
|
gingkg/pymarl
|
b5a72b3ab6c89b4a492f5853c02c1ce3f9189ea4
|
[
"MIT"
] | 3
|
2021-04-11T07:34:11.000Z
|
2022-03-23T08:43:37.000Z
|
runners/episode_runner.py
|
gingkg/pymarl
|
b5a72b3ab6c89b4a492f5853c02c1ce3f9189ea4
|
[
"MIT"
] | null | null | null |
runners/episode_runner.py
|
gingkg/pymarl
|
b5a72b3ab6c89b4a492f5853c02c1ce3f9189ea4
|
[
"MIT"
] | 1
|
2021-05-28T11:26:20.000Z
|
2021-05-28T11:26:20.000Z
|
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
import numpy as np
class EpisodeRunner:
def __init__(self, args, logger):
self.args = args
self.logger = logger
self.batch_size = self.args.batch_size_run
assert self.batch_size == 1
self.env = env_REGISTRY[self.args.env](**self.args.env_args)
self.episode_limit = self.env.episode_limit
self.t = 0
self.t_env = 0
self.train_returns = []
self.test_returns = []
self.train_stats = {}
self.test_stats = {}
# Log the first run
self.log_train_stats_t = -1000000
#
self.new_batch = None
self.mac = None
def setup(self, scheme, groups, preprocess, mac):
self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
preprocess=preprocess, device=self.args.device)
self.mac = mac
def get_env_info(self):
return self.env.get_env_info()
def save_replay(self):
self.env.save_replay()
def close_env(self):
self.env.close()
def reset(self):
self.batch = self.new_batch()
self.env.reset()
self.t = 0
def run(self, test_mode=False):
self.reset()
terminated = False
episode_return = 0
self.mac.init_hidden(batch_size=self.batch_size)
while not terminated:
pre_transition_data = {
"state": [self.env.get_state()],
"avail_actions": [self.env.get_avail_actions()],
"obs": [self.env.get_obs()]
}
self.batch.update(pre_transition_data, ts=self.t)
# Pass the entire batch of experiences up till now to the agents
# Receive the actions for each agent at this timestep in a batch of size 1
actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
reward, terminated, env_info = self.env.step(actions[0])
episode_return += reward
post_transition_data = {
"actions": actions,
"reward": [(reward,)],
"terminated": [(terminated != env_info.get("episode_limit", False),)],
}
self.batch.update(post_transition_data, ts=self.t)
self.t += 1
last_data = {
"state": [self.env.get_state()],
"avail_actions": [self.env.get_avail_actions()],
"obs": [self.env.get_obs()]
}
self.batch.update(last_data, ts=self.t)
# Select actions in the last stored state
actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
self.batch.update({"actions": actions}, ts=self.t)
cur_stats = self.test_stats if test_mode else self.train_stats
cur_returns = self.test_returns if test_mode else self.train_returns
log_prefix = "test_" if test_mode else ""
cur_stats.update({k: cur_stats.get(k, 0) + env_info.get(k, 0) for k in set(cur_stats) | set(env_info)})
cur_stats["n_episodes"] = 1 + cur_stats.get("n_episodes", 0)
cur_stats["ep_length"] = self.t + cur_stats.get("ep_length", 0)
if not test_mode:
self.t_env += self.t
cur_returns.append(episode_return)
if test_mode and (len(self.test_returns) == self.args.test_nepisode):
self._log(cur_returns, cur_stats, log_prefix)
elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
self._log(cur_returns, cur_stats, log_prefix)
if hasattr(self.mac.action_selector, "epsilon"):
self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
self.log_train_stats_t = self.t_env
return self.batch
def _log(self, returns, stats, prefix):
self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
returns.clear()
for k, v in stats.items():
if k != "n_episodes":
self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
stats.clear()
| 34.367188
| 111
| 0.605592
|
from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
import numpy as np
class EpisodeRunner:
def __init__(self, args, logger):
self.args = args
self.logger = logger
self.batch_size = self.args.batch_size_run
assert self.batch_size == 1
self.env = env_REGISTRY[self.args.env](**self.args.env_args)
self.episode_limit = self.env.episode_limit
self.t = 0
self.t_env = 0
self.train_returns = []
self.test_returns = []
self.train_stats = {}
self.test_stats = {}
self.log_train_stats_t = -1000000
self.new_batch = None
self.mac = None
def setup(self, scheme, groups, preprocess, mac):
self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
preprocess=preprocess, device=self.args.device)
self.mac = mac
def get_env_info(self):
return self.env.get_env_info()
def save_replay(self):
self.env.save_replay()
def close_env(self):
self.env.close()
def reset(self):
self.batch = self.new_batch()
self.env.reset()
self.t = 0
def run(self, test_mode=False):
self.reset()
terminated = False
episode_return = 0
self.mac.init_hidden(batch_size=self.batch_size)
while not terminated:
pre_transition_data = {
"state": [self.env.get_state()],
"avail_actions": [self.env.get_avail_actions()],
"obs": [self.env.get_obs()]
}
self.batch.update(pre_transition_data, ts=self.t)
actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
reward, terminated, env_info = self.env.step(actions[0])
episode_return += reward
post_transition_data = {
"actions": actions,
"reward": [(reward,)],
"terminated": [(terminated != env_info.get("episode_limit", False),)],
}
self.batch.update(post_transition_data, ts=self.t)
self.t += 1
last_data = {
"state": [self.env.get_state()],
"avail_actions": [self.env.get_avail_actions()],
"obs": [self.env.get_obs()]
}
self.batch.update(last_data, ts=self.t)
actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, test_mode=test_mode)
self.batch.update({"actions": actions}, ts=self.t)
cur_stats = self.test_stats if test_mode else self.train_stats
cur_returns = self.test_returns if test_mode else self.train_returns
log_prefix = "test_" if test_mode else ""
cur_stats.update({k: cur_stats.get(k, 0) + env_info.get(k, 0) for k in set(cur_stats) | set(env_info)})
cur_stats["n_episodes"] = 1 + cur_stats.get("n_episodes", 0)
cur_stats["ep_length"] = self.t + cur_stats.get("ep_length", 0)
if not test_mode:
self.t_env += self.t
cur_returns.append(episode_return)
if test_mode and (len(self.test_returns) == self.args.test_nepisode):
self._log(cur_returns, cur_stats, log_prefix)
elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
self._log(cur_returns, cur_stats, log_prefix)
if hasattr(self.mac.action_selector, "epsilon"):
self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
self.log_train_stats_t = self.t_env
return self.batch
def _log(self, returns, stats, prefix):
self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
returns.clear()
for k, v in stats.items():
if k != "n_episodes":
self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
stats.clear()
| true
| true
|
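Stripped of the EpisodeBatch bookkeeping, the control flow of run() is the standard single-environment collect loop. A minimal sketch assuming a hypothetical Gym-style env (reset()/step()), not the pymarl multi-agent API:

def collect_episode(env, select_action):
    # env.reset() -> obs; env.step(a) -> (obs, reward, done, info)
    obs = env.reset()
    done, episode_return, transitions = False, 0.0, []
    while not done:
        action = select_action(obs)
        obs, reward, done, info = env.step(action)
        transitions.append((action, reward, done))
        episode_return += reward
    return transitions, episode_return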
1c3e34bfe2cd4e5b3c1b755fca75ab3620ff4d3c
| 1,422
|
py
|
Python
|
xlsxwriter/test/comparison/test_chart_gradient10.py
|
Rippling/XlsxWriter-1
|
be8d1cb8f8b156cf87bbe5d591f1f5475804be44
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_gradient10.py
|
Rippling/XlsxWriter-1
|
be8d1cb8f8b156cf87bbe5d591f1f5475804be44
|
[
"BSD-2-Clause"
] | null | null | null |
xlsxwriter/test/comparison/test_chart_gradient10.py
|
Rippling/XlsxWriter-1
|
be8d1cb8f8b156cf87bbe5d591f1f5475804be44
|
[
"BSD-2-Clause"
] | null | null | null |
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('chart_gradient10.xlsx')
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({'type': 'column'})
chart.axis_ids = [56159232, 61364096]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column('A1', data[0])
worksheet.write_column('B1', data[1])
worksheet.write_column('C1', data[2])
chart.add_series({
'values': '=Sheet1!$A$1:$A$5',
'gradient': {'colors': ['#DDEBCF', '#156B13']}
})
chart.add_series({'values': '=Sheet1!$B$1:$B$5'})
chart.add_series({'values': '=Sheet1!$C$1:$C$5'})
worksheet.insert_chart('E9', chart)
workbook.close()
self.assertExcelEqual()
| 25.392857
| 79
| 0.552743
| true
| true
|
|
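XlsxWriter's documented gradient options also accept explicit stop positions alongside the colour list; a hedged variant of the test's gradient series (not covered by this comparison file):

import xlsxwriter

workbook = xlsxwriter.Workbook('gradient_positions.xlsx')
worksheet = workbook.add_worksheet()
worksheet.write_column('A1', [1, 2, 3, 4, 5])
chart = workbook.add_chart({'type': 'column'})
chart.add_series({
    'values': '=Sheet1!$A$1:$A$5',
    'gradient': {
        'colors': ['#DDEBCF', '#9CB86E', '#156B13'],
        'positions': [0, 50, 100],  # percentage stops, one per colour
    },
})
worksheet.insert_chart('C1', chart)
workbook.close()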
1c3e3559ddaf9744115065e974ddb78f69bb6858
| 912
|
py
|
Python
|
example/webservice/module/module.py
|
errord/sputnik
|
b83c635a9a160dcd5809265c0d9d231ade33e5ea
|
[
"BSD-3-Clause"
] | null | null | null |
example/webservice/module/module.py
|
errord/sputnik
|
b83c635a9a160dcd5809265c0d9d231ade33e5ea
|
[
"BSD-3-Clause"
] | null | null | null |
example/webservice/module/module.py
|
errord/sputnik
|
b83c635a9a160dcd5809265c0d9d231ade33e5ea
|
[
"BSD-3-Clause"
] | 1
|
2018-03-04T04:48:44.000Z
|
2018-03-04T04:48:44.000Z
|
#-*- coding: utf-8 -*
#
# Copyright 2011 shuotao.me
# Copyright 2012 2013 2014 msx.com
# by error.d@gmail.com
# 2014-08-26
#
from datetime import datetime
from sputnik.SpuDBObject import SpuDBObject, Field
from sputnik.SpuDateTime import SpuDateTime
class FoodAndPlace(SpuDBObject):
_table_ = 'sputnik.food_and_place'
def __init__(self, spudb, spucache, debug):
SpuDBObject.__init__(self, FoodAndPlace._table_, spudb, spucache, debug = debug)
self.id = Field(int, 0, 8, auto_inc = True)
self.place_id = Field(int, 0, 8)
self.food_id = Field(int, 0, 8)
self.picture_count = Field(int, 0, 4) # 1000
self.comment_total = Field(int, 0, 5) # 10000
self.publish_time = Field(datetime, SpuDateTime.current_time())
self.best_picture_id = Field(int, 0, 8)
self.want_it_total = Field(int, 0, 6)
self.nom_it_total = Field(int, 0, 6)
| 35.076923
| 88
| 0.673246
|
from datetime import datetime
from sputnik.SpuDBObject import SpuDBObject, Field
from sputnik.SpuDateTime import SpuDateTime
class FoodAndPlace(SpuDBObject):
_table_ = 'sputnik.food_and_place'
def __init__(self, spudb, spucache, debug):
SpuDBObject.__init__(self, FoodAndPlace._table_, spudb, spucache, debug = debug)
self.id = Field(int, 0, 8, auto_inc = True)
self.place_id = Field(int, 0, 8)
self.food_id = Field(int, 0, 8)
self.picture_count = Field(int, 0, 4)
self.comment_total = Field(int, 0, 5)
self.publish_time = Field(datetime, SpuDateTime.current_time())
self.best_picture_id = Field(int, 0, 8)
self.want_it_total = Field(int, 0, 6)
self.nom_it_total = Field(int, 0, 6)
| true
| true
|
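The FoodAndPlace model is declarative: each attribute is a Field(type, default, width, ...) value that the SpuDBObject base presumably maps to a table column. A toy sketch of that descriptor pattern (not the sputnik implementation):

class Field:
    # Records a Python type, a default value, a display width and an auto-increment flag
    def __init__(self, ftype, default, width=0, auto_inc=False):
        self.ftype = ftype
        self.default = default
        self.width = width
        self.auto_inc = auto_inc

    def __repr__(self):
        return "Field(%s, default=%r, width=%d)" % (
            self.ftype.__name__, self.default, self.width)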
1c3e35c660ab6db7e356e609d03b3debbcd82e20
| 253
|
py
|
Python
|
randomness/__init__.py
|
jpmolinamatute/randomness
|
a9b24098b912637548ba8e89d1260a082c1da734
|
[
"Apache-2.0"
] | null | null | null |
randomness/__init__.py
|
jpmolinamatute/randomness
|
a9b24098b912637548ba8e89d1260a082c1da734
|
[
"Apache-2.0"
] | null | null | null |
randomness/__init__.py
|
jpmolinamatute/randomness
|
a9b24098b912637548ba8e89d1260a082c1da734
|
[
"Apache-2.0"
] | null | null | null |
# pylint: disable=unused-import
from .db_oauth import OAuth
from .db_library import Library
from .common import TOKEN_URL, str_to_base64, Mark
from .client_aouth import get_access_token, save_access_token
from .client_requests import generate_playlist
| 31.625
| 61
| 0.84585
|
from .db_oauth import OAuth
from .db_library import Library
from .common import TOKEN_URL, str_to_base64, Mark
from .client_aouth import get_access_token, save_access_token
from .client_requests import generate_playlist
| true
| true
|
1c3e365b10d8d6c328efd3f1e795a8fe15bbcc68
| 1,245
|
py
|
Python
|
src/consensus/consensus_message_pb2.py
|
SINTEF-Infosec/sawtooth-consensus-engine-template
|
f5b895f13bcfa94216a5148104b3b1419df643c1
|
[
"MIT"
] | null | null | null |
src/consensus/consensus_message_pb2.py
|
SINTEF-Infosec/sawtooth-consensus-engine-template
|
f5b895f13bcfa94216a5148104b3b1419df643c1
|
[
"MIT"
] | null | null | null |
src/consensus/consensus_message_pb2.py
|
SINTEF-Infosec/sawtooth-consensus-engine-template
|
f5b895f13bcfa94216a5148104b3b1419df643c1
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: consensus_message.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x63onsensus_message.proto\x12\rsawtooth_dpos\"\x12\n\x10\x43onsensusMessageb\x06proto3')
_CONSENSUSMESSAGE = DESCRIPTOR.message_types_by_name['ConsensusMessage']
ConsensusMessage = _reflection.GeneratedProtocolMessageType('ConsensusMessage', (_message.Message,), {
'DESCRIPTOR' : _CONSENSUSMESSAGE,
'__module__' : 'consensus_message_pb2'
# @@protoc_insertion_point(class_scope:sawtooth_dpos.ConsensusMessage)
})
_sym_db.RegisterMessage(ConsensusMessage)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_CONSENSUSMESSAGE._serialized_start=42
_CONSENSUSMESSAGE._serialized_end=60
# @@protoc_insertion_point(module_scope)
| 35.571429
| 155
| 0.818474
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x17\x63onsensus_message.proto\x12\rsawtooth_dpos\"\x12\n\x10\x43onsensusMessageb\x06proto3')
_CONSENSUSMESSAGE = DESCRIPTOR.message_types_by_name['ConsensusMessage']
ConsensusMessage = _reflection.GeneratedProtocolMessageType('ConsensusMessage', (_message.Message,), {
'DESCRIPTOR' : _CONSENSUSMESSAGE,
'__module__' : 'consensus_message_pb2'
# @@protoc_insertion_point(class_scope:sawtooth_dpos.ConsensusMessage)
})
_sym_db.RegisterMessage(ConsensusMessage)
if _descriptor._USE_C_DESCRIPTORS == False:
DESCRIPTOR._options = None
_CONSENSUSMESSAGE._serialized_start=42
_CONSENSUSMESSAGE._serialized_end=60
# @@protoc_insertion_point(module_scope)
| true
| true
|
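Generated protobuf classes all share the standard message API, so even this field-less ConsensusMessage supports a serialization round trip:

msg = ConsensusMessage()
wire = msg.SerializeToString()   # b'' for an empty message
clone = ConsensusMessage()
clone.ParseFromString(wire)
assert msg == clone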
1c3e393d8d84c64a7dbeda497a5fda44ee5664af
| 3,201
|
py
|
Python
|
plot/dataio.py
|
psFournier/rltf
|
aae5451415dc18deda3c0c84580df42a12dc3843
|
[
"MIT"
] | null | null | null |
plot/dataio.py
|
psFournier/rltf
|
aae5451415dc18deda3c0c84580df42a12dc3843
|
[
"MIT"
] | null | null | null |
plot/dataio.py
|
psFournier/rltf
|
aae5451415dc18deda3c0c84580df42a12dc3843
|
[
"MIT"
] | null | null | null |
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import tabulate
CODE_DIR = os.path.abspath(os.path.dirname(__file__))
CONF_DIR = os.path.join(CODE_DIR, "conf")
def save_scores(scores, file, args):
"""Write scores in table format to a .txt file and to a .tex file (in latex format)
Args:
scores: dict
file: str. Does not contain the extension
args: ArgumentParser. The command-line arguments
"""
envs = sorted(scores.keys())
labels = [label for label in args.conf["legend"]]
csvdata = []
texdata = []
for env in envs:
data = [scores[env].get(label, -float("inf")) for label in labels]
csvdata.append([env] + data)
if args.boldmax:
best = max(data)
data = ["{:,.1f}".format(score) if score != best else "\\textbf{{{:,.1f}}}".format(score) for score in data]
texdata.append([env] + data)
csvtable = tabulate.tabulate(csvdata, headers=labels, floatfmt=".1f", tablefmt="presto")
textable = tabulate.tabulate(texdata, headers=labels, floatfmt=".1f", tablefmt="latex_raw")
with open(file + ".txt", 'w') as f:
f.write(csvtable)
with open(file + ".tex", 'w') as f:
f.write(textable)
def get_model_props(conf, model):
props = conf["legend"][model]
return props["label"], props["color"]
def get_model_name(model_dir):
s = model_dir.find("/")
name = model_dir[:s]
return name
def get_env_name(model_dir):
"""
Args:
model_dir: str. Will be in the format model-name/env-name_run-date and might end in "/"
Return:
str with the env name as it appears in gym
"""
len_date = 20
if model_dir[-1] == "/":
len_date += 1
    env = model_dir[:-len_date]
s = env.find("/")
env = env[s+1:]
s = env.find("NoFrameskip")
if s > 0:
env = env[:s]
else:
s = env.find("-v")
env = env[:s]
return env
def get_model_dir(model, args):
return os.path.join(args.conf["root_dir"], model)
def read_conf(file):
file = os.path.join(CONF_DIR, file)
if not os.path.exists(file):
raise ValueError("Configuration file does not exist")
with open(file, 'r') as f:
# conf = json.load(f)
conf = json.load(f, object_pairs_hook=OrderedDict)
assert "legend" in conf
assert "root_dir" in conf
assert os.path.exists(conf["root_dir"])
for label, props in conf["legend"].items():
assert "models" in props
assert "color" in props
return conf
def write_tb_file(tb_dir, steps, data):
"""
Args:
tb_dir: str. Directory where the file should be opened
steps: list. List of the event time steps
data: dict. Every key is a tag and every value is a list of the data for the tag. The length of
the list must equal the length of steps
"""
# Check for correctness
for tag, vals in data.items():
assert tag.startswith("train/") or tag.startswith("eval/")
assert len(steps) == len(vals)
# import tensorflow as tf
writer = tf.summary.FileWriter(tb_dir)
for i, s in enumerate(steps):
summary = tf.Summary()
for tag, vals in data.items():
summary.value.add(tag=tag, simple_value=vals[i])
writer.add_summary(summary, global_step=s)
writer.flush()
writer.close()
| 26.454545
| 114
| 0.657295
|
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import tabulate
CODE_DIR = os.path.abspath(os.path.dirname(__file__))
CONF_DIR = os.path.join(CODE_DIR, "conf")
def save_scores(scores, file, args):
envs = sorted(scores.keys())
labels = [label for label in args.conf["legend"]]
csvdata = []
texdata = []
for env in envs:
data = [scores[env].get(label, -float("inf")) for label in labels]
csvdata.append([env] + data)
if args.boldmax:
best = max(data)
data = ["{:,.1f}".format(score) if score != best else "\\textbf{{{:,.1f}}}".format(score) for score in data]
texdata.append([env] + data)
csvtable = tabulate.tabulate(csvdata, headers=labels, floatfmt=".1f", tablefmt="presto")
textable = tabulate.tabulate(texdata, headers=labels, floatfmt=".1f", tablefmt="latex_raw")
with open(file + ".txt", 'w') as f:
f.write(csvtable)
with open(file + ".tex", 'w') as f:
f.write(textable)
def get_model_props(conf, model):
props = conf["legend"][model]
return props["label"], props["color"]
def get_model_name(model_dir):
s = model_dir.find("/")
name = model_dir[:s]
return name
def get_env_name(model_dir):
len_date = 20
if model_dir[-1] == "/":
len_date += 1
    env = model_dir[:-len_date]
s = env.find("/")
env = env[s+1:]
s = env.find("NoFrameskip")
if s > 0:
env = env[:s]
else:
s = env.find("-v")
env = env[:s]
return env
def get_model_dir(model, args):
return os.path.join(args.conf["root_dir"], model)
def read_conf(file):
file = os.path.join(CONF_DIR, file)
if not os.path.exists(file):
raise ValueError("Configuration file does not exist")
with open(file, 'r') as f:
conf = json.load(f, object_pairs_hook=OrderedDict)
assert "legend" in conf
assert "root_dir" in conf
assert os.path.exists(conf["root_dir"])
for label, props in conf["legend"].items():
assert "models" in props
assert "color" in props
return conf
def write_tb_file(tb_dir, steps, data):
for tag, vals in data.items():
assert tag.startswith("train/") or tag.startswith("eval/")
assert len(steps) == len(vals)
writer = tf.summary.FileWriter(tb_dir)
for i, s in enumerate(steps):
summary = tf.Summary()
for tag, vals in data.items():
summary.value.add(tag=tag, simple_value=vals[i])
writer.add_summary(summary, global_step=s)
writer.flush()
writer.close()
| true
| true
|
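The bold-max LaTeX formatting in save_scores works independently of the rest of the module; a self-contained sketch with invented scores:

import tabulate

labels = ["dqn", "c51"]
rows = []
for env, data in {"Pong": [18.2, 20.1]}.items():
    best = max(data)
    cells = ["\\textbf{{{:,.1f}}}".format(s) if s == best
             else "{:,.1f}".format(s) for s in data]
    rows.append([env] + cells)
print(tabulate.tabulate(rows, headers=labels, tablefmt="latex_raw"))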
1c3e39601e53da411311267ee7a86cb6a1474cd3
| 891
|
py
|
Python
|
vocalkiev/urls.py
|
CATALINA-DJAGER/vocalkiev-crm-django
|
69d1491a7f94dd9943c9204ac15e8a6ca2a1a3b0
|
[
"MIT"
] | null | null | null |
vocalkiev/urls.py
|
CATALINA-DJAGER/vocalkiev-crm-django
|
69d1491a7f94dd9943c9204ac15e8a6ca2a1a3b0
|
[
"MIT"
] | 1
|
2021-12-02T06:13:15.000Z
|
2021-12-02T06:13:15.000Z
|
vocalkiev/urls.py
|
CATALINA-DJAGER/vocalkiev-crm-django
|
69d1491a7f94dd9943c9204ac15e8a6ca2a1a3b0
|
[
"MIT"
] | 1
|
2021-12-02T16:08:44.000Z
|
2021-12-02T16:08:44.000Z
|
"""vocalkiev URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.i18n import i18n_patterns
urlpatterns = [
]
urlpatterns += i18n_patterns(
path('', include('crm.urls')),
    path('admin/', admin.site.urls), # admin panel
)
| 31.821429
| 77
| 0.710438
|
from django.contrib import admin
from django.urls import include, path
from django.conf.urls.i18n import i18n_patterns
urlpatterns = [
]
urlpatterns += i18n_patterns(
path('', include('crm.urls')),
    path('admin/', admin.site.urls),
)
| true
| true
|
1c3e39edd4fba2c79db95c44ce2cbc3db03c56cf
| 122
|
py
|
Python
|
credentials.py
|
Suraj1127/facebook-crawler
|
5f61a30127c3583d19c2f63dc871ae95705a36f7
|
[
"MIT"
] | null | null | null |
credentials.py
|
Suraj1127/facebook-crawler
|
5f61a30127c3583d19c2f63dc871ae95705a36f7
|
[
"MIT"
] | null | null | null |
credentials.py
|
Suraj1127/facebook-crawler
|
5f61a30127c3583d19c2f63dc871ae95705a36f7
|
[
"MIT"
] | null | null | null |
"""
Contains credentials, Email or Phone and Password
"""
# enter your credentials here
EMAIL_OR_PHONE = ''
PASSWORD = ''
| 17.428571
| 49
| 0.721311
|
EMAIL_OR_PHONE = ''
PASSWORD = ''
| true
| true
|
1c3e3b78b0a80f991205c4899e210a194da71819
| 589
|
py
|
Python
|
testerlib/models/suite_code.py
|
mnaumanali94/PYTHON-SDK
|
97eceab462d86b8666ff1f74830d30cae5202a35
|
[
"MIT"
] | null | null | null |
testerlib/models/suite_code.py
|
mnaumanali94/PYTHON-SDK
|
97eceab462d86b8666ff1f74830d30cae5202a35
|
[
"MIT"
] | null | null | null |
testerlib/models/suite_code.py
|
mnaumanali94/PYTHON-SDK
|
97eceab462d86b8666ff1f74830d30cae5202a35
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
testerlib.models.suite_code
This file was automatically generated for Stamplay by APIMATIC v2.0 ( https://apimatic.io ) on 08/03/2016
"""
class SuiteCode(object):
"""Implementation of the 'SuiteCode' enum.
    An integer-based enum representing a suite in a game of cards
Attributes:
HEARTS: TODO: type description here.
SPADES: TODO: type description here.
CLUBS: TODO: type description here.
DIAMONDS: TODO: type description here.
"""
HEARTS = 1
SPADES = 2
CLUBS = 3
DIAMONDS = 4
| 19
| 109
| 0.634975
|
class SuiteCode(object):
HEARTS = 1
SPADES = 2
CLUBS = 3
DIAMONDS = 4
| true
| true
|
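On Python 3.4+ the same values can be expressed with the stdlib enum module, which adds name lookup and iteration for free; a hedged modern equivalent, not part of the generated SDK:

from enum import IntEnum

class SuiteCode(IntEnum):
    HEARTS = 1
    SPADES = 2
    CLUBS = 3
    DIAMONDS = 4

assert SuiteCode(2) is SuiteCode.SPADES
assert SuiteCode.CLUBS.name == "CLUBS"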
1c3e3c2b893f760a44c50c9da620ef79ee4dd129
| 6,894
|
py
|
Python
|
src/ctf_gameserver/checker/metrics.py
|
flagbot/ctf-gameserver
|
cb59363ce93e8cb80bac03da4f150db6f12051aa
|
[
"ISC"
] | 30
|
2016-11-14T23:26:52.000Z
|
2022-02-23T02:06:40.000Z
|
src/ctf_gameserver/checker/metrics.py
|
flagbot/ctf-gameserver
|
cb59363ce93e8cb80bac03da4f150db6f12051aa
|
[
"ISC"
] | 64
|
2017-04-28T21:19:01.000Z
|
2021-06-12T16:40:29.000Z
|
src/ctf_gameserver/checker/metrics.py
|
flagbot/ctf-gameserver
|
cb59363ce93e8cb80bac03da4f150db6f12051aa
|
[
"ISC"
] | 25
|
2016-11-16T19:37:31.000Z
|
2022-02-23T02:06:22.000Z
|
import logging
import queue
from wsgiref import simple_server
import prometheus_client
from ctf_gameserver.lib.metrics import SilentHandler
def inc(metrics_queue, name, value=1, labels=None):
metrics_queue.put(MetricsMessage(name, 'inc', value, labels))
def dec(metrics_queue, name, value=1, labels=None):
metrics_queue.put(MetricsMessage(name, 'dec', value, labels))
def set(metrics_queue, name, value, labels=None): # pylint: disable=redefined-builtin
metrics_queue.put(MetricsMessage(name, 'set', value, labels))
def observe(metrics_queue, name, value, labels=None):
metrics_queue.put(MetricsMessage(name, 'observe', value, labels))
class MetricsMessage:
"""
Message to put into run_collector()'s queue for recording metric changes.
"""
def __init__(self, name, instruction, value, labels=None):
self.name = name
self.instruction = instruction
self.value = value
if labels is None:
self.labels = {}
else:
self.labels = labels
class HTTPGenMessage:
"""
Message to put into run_collector()'s queue for receiving a text representation of its metrics (for HTTP
export) through its pipe.
"""
def checker_metrics_factory(registry):
metrics = {}
metric_prefix = 'ctf_checkermaster_'
counters = [
('started_tasks', 'Number of started Checker Script instances', []),
('completed_tasks', 'Number of successfully completed checks', ['result']),
('terminated_tasks', 'Number of Checker Script instances forcibly terminated', [])
]
for name, doc, labels in counters:
metrics[name] = prometheus_client.Counter(metric_prefix+name, doc, labels+['service'],
registry=registry)
gauges = [
        ('start_timestamp', '(Unix) timestamp when the process was started', []),
('interval_length_seconds', 'Configured launch interval length', []),
('last_launch_timestamp', '(Unix) timestamp when tasks were launched the last time', []),
('tasks_per_launch_count', 'Number of checks to start in one launch interval', []),
('max_task_duration_seconds', 'Currently estimated maximum runtime of one check', [])
]
for name, doc, labels in gauges:
metrics[name] = prometheus_client.Gauge(metric_prefix+name, doc, labels+['service'],
registry=registry)
histograms = [
('task_launch_delay_seconds', 'Differences between supposed and actual task launch times', [],
(0.01, 0.03, 0.05, 0.1, 0.3, 0.5, 1, 3, 5, 10, 30, 60, float('inf'))),
('script_duration_seconds', 'Observed runtimes of Checker Scripts', [],
(1, 3, 5, 8, 10, 20, 30, 45, 60, 90, 120, 150, 180, 240, 300, float('inf')))
]
for name, doc, labels, buckets in histograms:
metrics[name] = prometheus_client.Histogram(metric_prefix+name, doc, labels+['service'],
buckets=buckets, registry=registry)
return metrics
def run_collector(service, metrics_factory, in_queue, pipe_to_server):
"""
Manages Prometheus metrics. Receives changes to the metrics through a queue and emits their text
representation (for HTTP export) over a pipe. Designed to be run as "target" in a multiprocessing.Process
in conjunction with run_http_server().
Args:
service: Slug of this checker instance's service.
        metrics_factory: Callable returning a dict of the metrics to use, mapping from name to Metric object.
in_queue: Queue over which MetricsMessages and HTTPGenMessages are received.
pipe_to_server: Pipe to which text representations of the metrics are sent in response to
HTTPGenMessages.
"""
registry = prometheus_client.CollectorRegistry()
metrics = metrics_factory(registry)
def handle_metrics_message(msg):
try:
metric = metrics[msg.name]
except KeyError:
            logging.error('Received message for unknown metric "%s", ignoring', msg.name)
return
# Apparently, there is no nicer way to access the label names
if 'service' in metric._labelnames: # pylint: disable=protected-access
msg.labels['service'] = service
if len(msg.labels) > 0:
try:
metric = metric.labels(**(msg.labels))
except ValueError:
logging.error('Invalid labels specified for metric "%s", ignoring', msg.name)
return
try:
bound_method = getattr(metric, msg.instruction)
except AttributeError:
logging.error('Cannot use instruction "%s" on metric "%s", ignoring', msg.instruction, msg.name)
return
try:
bound_method(msg.value)
except: # noqa, pylint: disable=bare-except
logging.exception('Could not update metric "%s":', msg.name)
def send_metrics_text():
metrics_text = prometheus_client.generate_latest(registry)
pipe_to_server.send(metrics_text)
while True:
message = in_queue.get(True)
if isinstance(message, MetricsMessage):
handle_metrics_message(message)
elif isinstance(message, HTTPGenMessage):
send_metrics_text()
else:
logging.error('Received unknown message on collector queue')
def run_http_server(host, port, family, queue_to_collector, pipe_from_collector):
"""
Runs a server exposing Prometheus metrics via HTTP. The metrics are requested through a HTTPGenMessage
and received over the pipe. Designed to be run as "target" in a multiprocessing.Process in conjunction
with run_collector().
Args:
host: Host to run the HTTP server on.
port: Port to run the HTTP server on.
family: Address family to run the HTTP server with.
queue_to_collector: Queue to which HTTPGenMessages are sent.
pipe_from_collector: Pipe from which text representations of the metrics are received.
"""
def app(_, start_response):
queue_to_collector.put(HTTPGenMessage())
output = pipe_from_collector.recv()
status = '200 OK'
headers = [
('Content-Type', prometheus_client.CONTENT_TYPE_LATEST)
]
start_response(status, headers)
return [output]
class FamilyServer(simple_server.WSGIServer):
address_family = family
http_server = simple_server.make_server(host, port, app, server_class=FamilyServer,
handler_class=SilentHandler)
http_server.serve_forever()
class DummyQueue(queue.Queue):
"""
Queue that discards all elements put into it.
"""
def put(self, item, block=True, timeout=None):
pass
| 36.47619
| 109
| 0.6481
|
import logging
import queue
from wsgiref import simple_server
import prometheus_client
from ctf_gameserver.lib.metrics import SilentHandler
def inc(metrics_queue, name, value=1, labels=None):
metrics_queue.put(MetricsMessage(name, 'inc', value, labels))
def dec(metrics_queue, name, value=1, labels=None):
metrics_queue.put(MetricsMessage(name, 'dec', value, labels))
def set(metrics_queue, name, value, labels=None):
metrics_queue.put(MetricsMessage(name, 'set', value, labels))
def observe(metrics_queue, name, value, labels=None):
metrics_queue.put(MetricsMessage(name, 'observe', value, labels))
class MetricsMessage:
def __init__(self, name, instruction, value, labels=None):
self.name = name
self.instruction = instruction
self.value = value
if labels is None:
self.labels = {}
else:
self.labels = labels
class HTTPGenMessage:
    pass
def checker_metrics_factory(registry):
metrics = {}
metric_prefix = 'ctf_checkermaster_'
counters = [
('started_tasks', 'Number of started Checker Script instances', []),
('completed_tasks', 'Number of successfully completed checks', ['result']),
('terminated_tasks', 'Number of Checker Script instances forcibly terminated', [])
]
for name, doc, labels in counters:
metrics[name] = prometheus_client.Counter(metric_prefix+name, doc, labels+['service'],
registry=registry)
gauges = [
        ('start_timestamp', '(Unix) timestamp when the process was started', []),
('interval_length_seconds', 'Configured launch interval length', []),
('last_launch_timestamp', '(Unix) timestamp when tasks were launched the last time', []),
('tasks_per_launch_count', 'Number of checks to start in one launch interval', []),
('max_task_duration_seconds', 'Currently estimated maximum runtime of one check', [])
]
for name, doc, labels in gauges:
metrics[name] = prometheus_client.Gauge(metric_prefix+name, doc, labels+['service'],
registry=registry)
histograms = [
('task_launch_delay_seconds', 'Differences between supposed and actual task launch times', [],
(0.01, 0.03, 0.05, 0.1, 0.3, 0.5, 1, 3, 5, 10, 30, 60, float('inf'))),
('script_duration_seconds', 'Observed runtimes of Checker Scripts', [],
(1, 3, 5, 8, 10, 20, 30, 45, 60, 90, 120, 150, 180, 240, 300, float('inf')))
]
for name, doc, labels, buckets in histograms:
metrics[name] = prometheus_client.Histogram(metric_prefix+name, doc, labels+['service'],
buckets=buckets, registry=registry)
return metrics
def run_collector(service, metrics_factory, in_queue, pipe_to_server):
registry = prometheus_client.CollectorRegistry()
metrics = metrics_factory(registry)
def handle_metrics_message(msg):
try:
metric = metrics[msg.name]
except KeyError:
            logging.error('Received message for unknown metric "%s", ignoring', msg.name)
return
if 'service' in metric._labelnames:
msg.labels['service'] = service
if len(msg.labels) > 0:
try:
metric = metric.labels(**(msg.labels))
except ValueError:
logging.error('Invalid labels specified for metric "%s", ignoring', msg.name)
return
try:
bound_method = getattr(metric, msg.instruction)
except AttributeError:
logging.error('Cannot use instruction "%s" on metric "%s", ignoring', msg.instruction, msg.name)
return
try:
bound_method(msg.value)
except:
logging.exception('Could not update metric "%s":', msg.name)
def send_metrics_text():
metrics_text = prometheus_client.generate_latest(registry)
pipe_to_server.send(metrics_text)
while True:
message = in_queue.get(True)
if isinstance(message, MetricsMessage):
handle_metrics_message(message)
elif isinstance(message, HTTPGenMessage):
send_metrics_text()
else:
logging.error('Received unknown message on collector queue')
def run_http_server(host, port, family, queue_to_collector, pipe_from_collector):
def app(_, start_response):
queue_to_collector.put(HTTPGenMessage())
output = pipe_from_collector.recv()
status = '200 OK'
headers = [
('Content-Type', prometheus_client.CONTENT_TYPE_LATEST)
]
start_response(status, headers)
return [output]
class FamilyServer(simple_server.WSGIServer):
address_family = family
http_server = simple_server.make_server(host, port, app, server_class=FamilyServer,
handler_class=SilentHandler)
http_server.serve_forever()
class DummyQueue(queue.Queue):
def put(self, item, block=True, timeout=None):
pass
| true
| true
|
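run_collector() and run_http_server() are designed as multiprocessing targets joined by a queue and a one-way pipe; a hypothetical wiring sketch (the process setup, port and service name are assumptions, not part of the module):

import multiprocessing as mp
import socket

if __name__ == "__main__":
    metrics_queue = mp.Queue()
    recv_end, send_end = mp.Pipe(duplex=False)  # collector writes, HTTP server reads
    mp.Process(target=run_collector,
               args=("exampleservice", checker_metrics_factory,
                     metrics_queue, send_end)).start()
    mp.Process(target=run_http_server,
               args=("127.0.0.1", 9100, socket.AF_INET,
                     metrics_queue, recv_end)).start()
    inc(metrics_queue, "completed_tasks", labels={"result": "ok"})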
1c3e3c5371505395b1b5ede79b55396e902e1f0b
| 348
|
py
|
Python
|
cctbx/sgtbx/direct_space_asu/proto/__init__.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 155
|
2016-11-23T12:52:16.000Z
|
2022-03-31T15:35:44.000Z
|
cctbx/sgtbx/direct_space_asu/proto/__init__.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 590
|
2016-12-10T11:31:18.000Z
|
2022-03-30T23:10:09.000Z
|
cctbx/sgtbx/direct_space_asu/proto/__init__.py
|
dperl-sol/cctbx_project
|
b9e390221a2bc4fd00b9122e97c3b79c632c6664
|
[
"BSD-3-Clause-LBNL"
] | 115
|
2016-11-15T08:17:28.000Z
|
2022-02-09T15:30:14.000Z
|
from __future__ import absolute_import, division, print_function
import sys
import boost_adaptbx.boost.python as bp
ext = bp.import_ext("cctbx_sgtbx_asu_ext")
from cctbx_sgtbx_asu_ext import *
def asu_show_(asu, f=None):
if f is None:
f = sys.stdout
print(asu.as_string(), file=f)
direct_space_asu.show_comprehensive_summary = asu_show_
| 24.857143
| 64
| 0.79023
|
from __future__ import absolute_import, division, print_function
import sys
import boost_adaptbx.boost.python as bp
ext = bp.import_ext("cctbx_sgtbx_asu_ext")
from cctbx_sgtbx_asu_ext import *
def asu_show_(asu, f=None):
if f is None:
f = sys.stdout
print(asu.as_string(), file=f)
direct_space_asu.show_comprehensive_summary = asu_show_
| true
| true
|
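The last line of that module attaches a free function as a method after class creation; the pattern works on any Python class, as this toy sketch shows (plain classes, not cctbx types):

import sys

class Asu:
    def as_string(self):
        return "asymmetric unit"

def asu_show_(asu, f=None):
    if f is None:
        f = sys.stdout
    print(asu.as_string(), file=f)

Asu.show_comprehensive_summary = asu_show_  # monkey-patch the method on
Asu().show_comprehensive_summary()          # prints "asymmetric unit"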
1c3e3c5ad328772370c7da8c0fc0264690bcf649
| 8,357
|
py
|
Python
|
tests/test_keycache.py
|
deesto/scitokens
|
2eaa31c052093389fc090a89de32afc131c486ee
|
[
"Apache-2.0"
] | null | null | null |
tests/test_keycache.py
|
deesto/scitokens
|
2eaa31c052093389fc090a89de32afc131c486ee
|
[
"Apache-2.0"
] | null | null | null |
tests/test_keycache.py
|
deesto/scitokens
|
2eaa31c052093389fc090a89de32afc131c486ee
|
[
"Apache-2.0"
] | null | null | null |
"""
Test the keycache
"""
import os
import tempfile
import shutil
import unittest
from unittest import mock
from scitokens.utils.keycache import KeyCache
from scitokens.utils.errors import UnableToCreateCache
from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
# Python 3 vs. Python 2
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
import create_webserver
class TestKeyCache(unittest.TestCase):
"""
Test the creation of a simple SciToken
"""
def setUp(self):
# Force the keycache to create a cache in a new directory
self.tmp_dir = tempfile.mkdtemp()
self.old_xdg = os.environ.get('XDG_CACHE_HOME', None)
os.environ['XDG_CACHE_HOME'] = self.tmp_dir
        # Create a fresh keycache backed by the new (empty) cache directory
self.keycache = KeyCache()
# make sure it made the directory where I wanted it
self.assertTrue(self.keycache.cache_location.startswith(self.tmp_dir))
self.assertTrue(os.path.exists(self.keycache.cache_location))
def tearDown(self):
shutil.rmtree(self.tmp_dir)
if self.old_xdg:
os.environ['XDG_CACHE_HOME'] = self.old_xdg
@mock.patch("os.makedirs", side_effect=OSError)
@mock.patch.dict("os.environ")
def test_cannot_make_cache(self, _):
"""
Test when the keycache shouldn't be able to make the cache
"""
os.environ['XDG_CACHE_HOME'] = "/does/not/exists"
# Make sure it raises an unable to create cache exception
with self.assertRaises(UnableToCreateCache):
keycache = KeyCache()
del keycache
def test_empty(self):
"""
Test when the keycache should be empty
"""
# Stand up an HTTP server
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_numbers = private_key.public_key().public_numbers()
test_id = "thisisatestid"
server_address = create_webserver.start_server(public_numbers.n, public_numbers.e, test_id)
print(server_address)
# Now try to get the public key from the server
pubkey_from_keycache = self.keycache.getkeyinfo("http://localhost:{}/".format(server_address[1]),
test_id,
insecure=True)
# Now compare the 2 public keys
public_pem = private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
pubkey_pem_from_keycache = pubkey_from_keycache.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.assertEqual(public_pem, pubkey_pem_from_keycache)
create_webserver.shutdown_server()
def test_populated(self):
"""
Test when there should be some entries populated in the sqllite DB
"""
# Create a pem encoded public key
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = private_key.public_key()
public_pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.keycache.addkeyinfo("https://doesnotexists.edu/", "blahstuff", public_key, cache_timer=60)
# Now extract the just inserted key
pubkey = self.keycache.getkeyinfo("https://doesnotexists.edu/", "blahstuff")
public_pem2 = pubkey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.assertEqual(public_pem, public_pem2)
        # Make sure it raises URLError when the key does not exist
with self.assertRaises(URLError):
self.keycache.getkeyinfo("https://doesnotexists.edu/", "asdf")
def test_cache_timer(self):
"""
Test if the cache max-age is retrieved from the HTTPS resource
"""
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_numbers = private_key.public_key().public_numbers()
test_id = "thisisatestid"
server_address = create_webserver.start_server(public_numbers.n, public_numbers.e, test_id)
print(server_address)
_, cache_timer = self.keycache._get_issuer_publickey("http://localhost:{}/".format(server_address[1]),
key_id=test_id,
insecure=True)
self.assertEqual(cache_timer, 3600)
create_webserver.shutdown_server()
def test_cache_update_time(self):
"""
Test if the cache next_update works
"""
# Create a pem encoded public key
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = private_key.public_key()
public_pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.keycache.addkeyinfo("https://doesnotexists.edu/", "blahstuff", public_key, cache_timer=60, next_update=-1)
# Even though the cache is still valid, the next update is triggered
# We should still get the key, even though the next update fails
# (invalid url)
pubkey = self.keycache.getkeyinfo("https://doesnotexists.edu/", "blahstuff")
public_pem2 = pubkey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.assertEqual(public_pem, public_pem2)
def test_cache_update_trigger(self):
"""
Test when the next_update triggers and goes to the webserver
"""
# Stand up an HTTP server
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_numbers = private_key.public_key().public_numbers()
test_id = "thisisatestid"
server_address = create_webserver.start_server(public_numbers.n, public_numbers.e, test_id)
print(server_address)
# Create a pem encoded public key, just to insert, want to make sure
# it downloads from the server
tmp_private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = tmp_private_key.public_key()
public_pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
# Now try to get the public key from the server
self.keycache.addkeyinfo("http://localhost:{}/".format(server_address[1]),
test_id,
public_key,
cache_timer=60,
next_update=-1)
# Next update should trigger now
pubkey_from_keycache = self.keycache.getkeyinfo("http://localhost:{}/".format(server_address[1]),
test_id,
insecure=True)
# Now compare the 2 public keys
public_pem = private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
pubkey_pem_from_keycache = pubkey_from_keycache.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.assertEqual(public_pem, pubkey_pem_from_keycache)
create_webserver.shutdown_server()
| 35.411017
| 119
| 0.637669
|
import os
import tempfile
import shutil
import unittest
from unittest import mock
from scitokens.utils.keycache import KeyCache
from scitokens.utils.errors import UnableToCreateCache
from cryptography.hazmat.primitives.asymmetric.rsa import generate_private_key
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
try:
from urllib.error import URLError
except ImportError:
from urllib2 import URLError
import create_webserver
class TestKeyCache(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
self.old_xdg = os.environ.get('XDG_CACHE_HOME', None)
os.environ['XDG_CACHE_HOME'] = self.tmp_dir
self.keycache = KeyCache()
self.assertTrue(self.keycache.cache_location.startswith(self.tmp_dir))
self.assertTrue(os.path.exists(self.keycache.cache_location))
def tearDown(self):
shutil.rmtree(self.tmp_dir)
if self.old_xdg:
os.environ['XDG_CACHE_HOME'] = self.old_xdg
@mock.patch("os.makedirs", side_effect=OSError)
@mock.patch.dict("os.environ")
def test_cannot_make_cache(self, _):
os.environ['XDG_CACHE_HOME'] = "/does/not/exists"
with self.assertRaises(UnableToCreateCache):
keycache = KeyCache()
del keycache
def test_empty(self):
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_numbers = private_key.public_key().public_numbers()
test_id = "thisisatestid"
server_address = create_webserver.start_server(public_numbers.n, public_numbers.e, test_id)
print(server_address)
pubkey_from_keycache = self.keycache.getkeyinfo("http://localhost:{}/".format(server_address[1]),
test_id,
insecure=True)
public_pem = private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
pubkey_pem_from_keycache = pubkey_from_keycache.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.assertEqual(public_pem, pubkey_pem_from_keycache)
create_webserver.shutdown_server()
def test_populated(self):
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = private_key.public_key()
public_pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.keycache.addkeyinfo("https://doesnotexists.edu/", "blahstuff", public_key, cache_timer=60)
pubkey = self.keycache.getkeyinfo("https://doesnotexists.edu/", "blahstuff")
public_pem2 = pubkey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.assertEqual(public_pem, public_pem2)
with self.assertRaises(URLError):
self.keycache.getkeyinfo("https://doesnotexists.edu/", "asdf")
def test_cache_timer(self):
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_numbers = private_key.public_key().public_numbers()
test_id = "thisisatestid"
server_address = create_webserver.start_server(public_numbers.n, public_numbers.e, test_id)
print(server_address)
_, cache_timer = self.keycache._get_issuer_publickey("http://localhost:{}/".format(server_address[1]),
key_id=test_id,
insecure=True)
self.assertEqual(cache_timer, 3600)
create_webserver.shutdown_server()
def test_cache_update_time(self):
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = private_key.public_key()
public_pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.keycache.addkeyinfo("https://doesnotexists.edu/", "blahstuff", public_key, cache_timer=60, next_update=-1)
pubkey = self.keycache.getkeyinfo("https://doesnotexists.edu/", "blahstuff")
public_pem2 = pubkey.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.assertEqual(public_pem, public_pem2)
def test_cache_update_trigger(self):
private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_numbers = private_key.public_key().public_numbers()
test_id = "thisisatestid"
server_address = create_webserver.start_server(public_numbers.n, public_numbers.e, test_id)
print(server_address)
tmp_private_key = generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()
)
public_key = tmp_private_key.public_key()
public_pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.keycache.addkeyinfo("http://localhost:{}/".format(server_address[1]),
test_id,
public_key,
cache_timer=60,
next_update=-1)
pubkey_from_keycache = self.keycache.getkeyinfo("http://localhost:{}/".format(server_address[1]),
test_id,
insecure=True)
public_pem = private_key.public_key().public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
pubkey_pem_from_keycache = pubkey_from_keycache.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
self.assertEqual(public_pem, pubkey_pem_from_keycache)
create_webserver.shutdown_server()
| true
| true
|
1c3e3dc3cd371984b5da1866b6293f75fd8c2b20
| 351
|
py
|
Python
|
dashboard/urls.py
|
JohnRoach/beat-desk
|
743e00bed954dbaada3c6e664386c23bc3c35393
|
[
"MIT"
] | 1
|
2015-12-30T22:03:42.000Z
|
2015-12-30T22:03:42.000Z
|
dashboard/urls.py
|
JohnRoach/beat-desk
|
743e00bed954dbaada3c6e664386c23bc3c35393
|
[
"MIT"
] | null | null | null |
dashboard/urls.py
|
JohnRoach/beat-desk
|
743e00bed954dbaada3c6e664386c23bc3c35393
|
[
"MIT"
] | null | null | null |
from . import views
from django.conf.urls import url
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'posts$', views.posts, name='posts'),
url(r'posts/post/(?P<post_id>[0-9]+)/$', views.post, name='post'),
url(r'logout$', views.logout_user, name='logout_user'),
url(r'login$', views.login_user, name="login_user"),
]
| 29.25
| 70
| 0.635328
|
from . import views
from django.conf.urls import url
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'posts$', views.posts, name='posts'),
url(r'posts/post/(?P<post_id>[0-9]+)/$', views.post, name='post'),
url(r'logout$', views.logout_user, name='logout_user'),
url(r'login$', views.login_user, name="login_user"),
]
| true
| true
|
1c3e3e2f5dec30762f5afd5a04fa89772914f997
| 1,128
|
py
|
Python
|
hpc-historias-clinicas/medicos/views.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/medicos/views.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
hpc-historias-clinicas/medicos/views.py
|
btenaglia/hpc-historias-clinicas
|
649d8660381381b1c591667760c122d73071d5ec
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.contrib import messages
from braces.views import LoginRequiredMixin
from django.views.generic import (
ListView,
CreateView,
UpdateView,
DeleteView
)
from .models import Medicos
class MedicosMixin(object):
@property
def success_msg(self):
return NotImplemented
def get_success_url(self):
messages.success(self.request, self.success_msg)
return super(MedicosMixin, self).get_success_url()
class MedicosListView(LoginRequiredMixin, ListView):
"""
Lista todos los medicos
"""
model = Medicos
class MedicosCreateView(LoginRequiredMixin, MedicosMixin, CreateView):
"""
Creacion de medico
"""
model = Medicos
success_msg = 'El médico se agregó correctamente.'
class MedicosUpdateView(LoginRequiredMixin, MedicosMixin, UpdateView):
"""
Modificacion de un medico
"""
model = Medicos
success_msg = 'El médico se editó correctamente.'
class MedicosDeleteView(LoginRequiredMixin, DeleteView):
"""
Eliminar un medico
"""
model = Medicos
success_url = '/medicos/'
| 20.888889
| 70
| 0.693262
|
from django.contrib import messages
from braces.views import LoginRequiredMixin
from django.views.generic import (
ListView,
CreateView,
UpdateView,
DeleteView
)
from .models import Medicos
class MedicosMixin(object):
@property
def success_msg(self):
return NotImplemented
def get_success_url(self):
messages.success(self.request, self.success_msg)
return super(MedicosMixin, self).get_success_url()
class MedicosListView(LoginRequiredMixin, ListView):
model = Medicos
class MedicosCreateView(LoginRequiredMixin, MedicosMixin, CreateView):
model = Medicos
success_msg = 'El médico se agregó correctamente.'
class MedicosUpdateView(LoginRequiredMixin, MedicosMixin, UpdateView):
model = Medicos
success_msg = 'El médico se editó correctamente.'
class MedicosDeleteView(LoginRequiredMixin, DeleteView):
model = Medicos
success_url = '/medicos/'
| true
| true
|
1c3e3e3be4e2e71a4c6cf9a26979d0ca814dbfcb
| 69
|
py
|
Python
|
fluent_python/variable/__init__.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 1
|
2018-12-19T22:07:56.000Z
|
2018-12-19T22:07:56.000Z
|
fluent_python/variable/__init__.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 12
|
2020-03-14T05:32:26.000Z
|
2022-03-12T00:08:49.000Z
|
fluent_python/variable/__init__.py
|
ftconan/python3
|
eb63ba33960072f792ecce6db809866b38c402f8
|
[
"MIT"
] | 1
|
2018-12-19T22:08:00.000Z
|
2018-12-19T22:08:00.000Z
|
"""
@author: magician
@file: __init__.py.py
@date: 2020/10/22
"""
| 13.8
| 23
| 0.608696
| true
| true
|
|
1c3e3e76cf5680110bc941958c2cb7a3e671d5f4
| 539
|
py
|
Python
|
tests/test.py
|
idmillington/layout
|
c452d1d7a74c9a74f7639c1b49e2a41c4e354bb5
|
[
"MIT"
] | 6
|
2015-08-10T01:43:54.000Z
|
2020-10-06T19:09:10.000Z
|
tests/test.py
|
idmillington/layout
|
c452d1d7a74c9a74f7639c1b49e2a41c4e354bb5
|
[
"MIT"
] | null | null | null |
tests/test.py
|
idmillington/layout
|
c452d1d7a74c9a74f7639c1b49e2a41c4e354bb5
|
[
"MIT"
] | null | null | null |
import os.path
import unittest
import layout
class TestVersion(unittest.TestCase):
    def test_version_exists(self):
assert layout.__version__
def test_version_tuple(self):
assert layout.__version_info__
assert len(layout.__version_info__) == 3
for value in layout.__version_info__:
assert type(value) == int
def test_versions_match(self):
string = '.'.join([str(value) for value in layout.__version_info__])
assert string == layout.__version__
| 26.95
| 76
| 0.666048
|
import os.path
import unittest
import layout
class TestVersion(unittest.TestCase):
    def test_version_exists(self):
assert layout.__version__
def test_version_tuple(self):
assert layout.__version_info__
assert len(layout.__version_info__) == 3
for value in layout.__version_info__:
assert type(value) == int
def test_versions_match(self):
string = '.'.join([str(value) for value in layout.__version_info__])
assert string == layout.__version__
| true
| true
|
1c3e3fa2eeb30250a2c5eee6f0177b7298022c3b
| 3,320
|
py
|
Python
|
03 - Pandas/b_series.py
|
2020-A-Python-GR1/py-sanango-simbana-edison-ubaldo
|
5ca5a6a8c8596cc76b0d09f3bb700f0c6c1780e8
|
[
"MIT"
] | null | null | null |
03 - Pandas/b_series.py
|
2020-A-Python-GR1/py-sanango-simbana-edison-ubaldo
|
5ca5a6a8c8596cc76b0d09f3bb700f0c6c1780e8
|
[
"MIT"
] | null | null | null |
03 - Pandas/b_series.py
|
2020-A-Python-GR1/py-sanango-simbana-edison-ubaldo
|
5ca5a6a8c8596cc76b0d09f3bb700f0c6c1780e8
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 07:57:41 2020
@author: edison
"""
import numpy as np
import pandas as pd
lista_numeros = [1,2,3]
tupla_numeros = (1,2,3)
np_numeros = np.array([1,2,3])
series_a = pd.Series(lista_numeros)
series_b = pd.Series(tupla_numeros)
series_c = pd.Series(np_numeros)
series_d = pd.Series(
[True,
False,
12,
12.12,
"EDISON",
None,
(1),
[2],
{"nom":"Edison"}])
print(series_d[len(series_d)-1]) # Access the last element
ciudades = ['Quito', 'Cuenca', 'Ambato', 'Baños']
series_ciudad = pd.Series(ciudades, index=["Q", "C", "A", "B"])
print(series_ciudad[3])
valores_ciudad = {
"Ibarra": 100,
"Guayaquil": 200,
"Cuenca": 300,
"Quito": 400,
"Loja": 500
}
# index = ["Ibarra", "Guayaquil", "CUenca", "Quito", "Loja", "A"]
series_Valor_ciudad = pd.Series(valores_ciudad)
ciudades_menor_a_300 = series_Valor_ciudad < 300 # This returns a Series of True/False values
ciudades_menor_a_300 = series_Valor_ciudad[series_Valor_ciudad < 300] # Keep only the entries that satisfy the condition
print(type(series_Valor_ciudad))
print(type(ciudades_menor_a_300))
print(ciudades_menor_a_300)
mas_10_porciento = series_Valor_ciudad * 1.1
series_Valor_ciudad["Quito"] = series_Valor_ciudad["Quito"] - 20
#print(series_Valor_ciudad)
for i in series_Valor_ciudad:
    print(i) # prints only the value
svc_cuadrado = np.square(series_Valor_ciudad)
ciudades_uno = pd.Series({
"Cuenca": 300,
"Zamora": 500,
"Quito": 100})
ciudades_dos = pd.Series({
"Guayaquil": 700,
"Loja": 1000,
"Baños": 100})
ciudades_uno["Loja"] = 0
print(ciudades_uno + ciudades_dos)
# or alternatively
ciudades_add = ciudades_uno.add(ciudades_dos)
# verify that no element is repeated between the two series.
ciud_concat = pd.concat([ciudades_uno, ciudades_dos],verify_integrity= False)
# append
ciud_append = ciudades_uno.append(ciudades_dos,verify_integrity= False)
print(ciudades_uno.max())
print(pd.Series.max(ciudades_uno))
print(np.max(ciudades_uno))
print(ciudades_uno.min())
print(pd.Series.min(ciudades_uno))
print(np.min(ciudades_uno))
# Statistics functions
print(ciudades_uno.mean())
print(ciudades_uno.median())
print(np.average(ciudades_uno))
# sort
ciudades_uno.sort_values(ascending = False)
ciudades_uno.sort_values(ascending = True)
# Surcharge brackets: 0-100 -> 5%, 101-300 -> 10%, 301-500 -> 15%
print(ciudades_uno)
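# calcular applies the surcharge brackets above; Series.map evaluates it element-wise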
def calcular(valor_serie):
if(valor_serie <= 100):
return valor_serie * 1.05
if(valor_serie > 100 and valor_serie <= 300):
return valor_serie * 1.10
if(valor_serie > 300 and valor_serie <= 500):
return valor_serie * 1.15
resultado = ciudades_uno.map(calcular)
print(resultado)
# WHERE
# where() applies the replacement when the condition is NOT met
print(ciudades_uno)
print(ciudades_uno.where(ciudades_uno < 300, ciudades_uno * 1.15))
# DATA TYPE ISSUES
series_numeros = pd.Series(['1.0', '2', -3])
print(pd.to_numeric(series_numeros))
print(pd.to_numeric(series_numeros, downcast= 'integer'))
series_numeros_err = pd.Series(['1.0', '2', -3, 'a'])
# errors = ignore, coerce, raise (default)
print(pd.to_numeric(series_numeros_err, errors='ignore'))
print(pd.to_numeric(series_numeros_err, errors='coerce'))
| 19.761905
| 119
| 0.702108
|
import numpy as np
import pandas as pd
lista_numeros = [1,2,3]
tupla_numeros = (1,2,3)
np_numeros = np.array([1,2,3])
series_a = pd.Series(lista_numeros)
series_b = pd.Series(tupla_numeros)
series_c = pd.Series(np_numeros)
series_d = pd.Series(
[True,
False,
12,
12.12,
"EDISON",
None,
(1),
[2],
{"nom":"Edison"}])
print(series_d[len(series_d)-1])
ciudades = ['Quito', 'Cuenca', 'Ambato', 'Baños']
series_ciudad = pd.Series(ciudades, index=["Q", "C", "A", "B"])
print(series_ciudad[3])
valores_ciudad = {
"Ibarra": 100,
"Guayaquil": 200,
"Cuenca": 300,
"Quito": 400,
"Loja": 500
}
series_Valor_ciudad = pd.Series(valores_ciudad)
ciudades_menor_a_300 = series_Valor_ciudad < 300
ciudades_menor_a_300 = series_Valor_ciudad[series_Valor_ciudad < 300]
print(type(series_Valor_ciudad))
print(type(ciudades_menor_a_300))
print(ciudades_menor_a_300)
mas_10_porciento = series_Valor_ciudad * 1.1
series_Valor_ciudad["Quito"] = series_Valor_ciudad["Quito"] - 20
for i in series_Valor_ciudad:
print(i)
svc_cuadrado = np.square(series_Valor_ciudad)
ciudades_uno = pd.Series({
"Cuenca": 300,
"Zamora": 500,
"Quito": 100})
ciudades_dos = pd.Series({
"Guayaquil": 700,
"Loja": 1000,
"Baños": 100})
ciudades_uno["Loja"] = 0
print(ciudades_uno + ciudades_dos)
ciudades_add = ciudades_uno.add(ciudades_dos)
ciud_concat = pd.concat([ciudades_uno, ciudades_dos],verify_integrity= False)
ciud_append = ciudades_uno.append(ciudades_dos,verify_integrity= False)
print(ciudades_uno.max())
print(pd.Series.max(ciudades_uno))
print(np.max(ciudades_uno))
print(ciudades_uno.min())
print(pd.Series.min(ciudades_uno))
print(np.min(ciudades_uno))
print(ciudades_uno.mean())
print(ciudades_uno.median())
print(np.average(ciudades_uno))
ciudades_uno.sort_values(ascending = False)
ciudades_uno.sort_values(ascending = True)
print(ciudades_uno)
def calcular(valor_serie):
if(valor_serie <= 100):
return valor_serie * 1.05
if(valor_serie > 100 and valor_serie <= 300):
return valor_serie * 1.10
if(valor_serie > 300 and valor_serie <= 500):
return valor_serie * 1.15
resultado = ciudades_uno.map(calcular)
print(resultado)
print(ciudades_uno)
print(ciudades_uno.where(ciudades_uno < 300, ciudades_uno * 1.15))
series_numeros = pd.Series(['1.0', '2', -3])
print(pd.to_numeric(series_numeros))
print(pd.to_numeric(series_numeros, downcast= 'integer'))
series_numeros_err = pd.Series(['1.0', '2', -3, 'a'])
print(pd.to_numeric(series_numeros_err, errors='ignore'))
print(pd.to_numeric(series_numeros_err, errors='coerce'))
| true
| true
|
1c3e417eccb4ae602abfb27f20df371f2ec0b0da
| 5,520
|
py
|
Python
|
contrib/seeds/makeseeds.py
|
ScaMar/ICHIBA
|
7524763de06cecedbc8d6c355a429c664bdf1008
|
[
"MIT"
] | 2
|
2019-03-09T10:03:47.000Z
|
2019-03-23T19:59:08.000Z
|
contrib/seeds/makeseeds.py
|
ScaMar/ICHIBA
|
7524763de06cecedbc8d6c355a429c664bdf1008
|
[
"MIT"
] | null | null | null |
contrib/seeds/makeseeds.py
|
ScaMar/ICHIBA
|
7524763de06cecedbc8d6c355a429c664bdf1008
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/IchibaCoinCore:2.2.(0|1|99)/)$")
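# Accept only user agents from IchibaCoinCore versions 2.2.0, 2.2.1, and 2.2.99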
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
    '''Filter out hosts with more than one node per IP address'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
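            # Reverse the IPv4 octets and query Team Cymru's origin ASN zone;
            # the first field of the TXT answer is the AS number.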
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| 32.093023
| 186
| 0.567391
|
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/IchibaCoinCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
    # Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| true
| true
|
1c3e427700e8283980f5d1e25076bee67001188e
| 3,242
|
py
|
Python
|
project/settings.py
|
martinfaucheux/django-archving
|
9b1cc056c2f6e92fa42e31079a5f87037deef4e0
|
[
"MIT"
] | 1
|
2022-01-19T19:03:53.000Z
|
2022-01-19T19:03:53.000Z
|
project/settings.py
|
martinfaucheux/django-archiving
|
9b1cc056c2f6e92fa42e31079a5f87037deef4e0
|
[
"MIT"
] | null | null | null |
project/settings.py
|
martinfaucheux/django-archiving
|
9b1cc056c2f6e92fa42e31079a5f87037deef4e0
|
[
"MIT"
] | null | null | null |
"""
Django settings for project project.
Generated by 'django-admin startproject' using Django 3.2.9.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure--*6_ke@q3!$(w!u1g!3fmh&&7iqm=(5p8j!w7rsp#%yb7olt$6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| 25.730159
| 91
| 0.700494
|
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure--*6_ke@q3!$(w!u1g!3fmh&&7iqm=(5p8j!w7rsp#%yb7olt$6'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| true
| true
|
1c3e4427f0acdcd97d1d8b28cfb60e335832903b
| 11,958
|
py
|
Python
|
MultiQubit_PulseGenerator/gates.py
|
philip-krantz/Drivers
|
31d05e852f4e30d40d41949f3f76e9322f0be9e8
|
[
"MIT"
] | 48
|
2015-11-16T13:35:11.000Z
|
2022-02-24T11:02:14.000Z
|
MultiQubit_PulseGenerator/gates.py
|
philip-krantz/Drivers
|
31d05e852f4e30d40d41949f3f76e9322f0be9e8
|
[
"MIT"
] | 30
|
2015-11-16T14:37:46.000Z
|
2021-02-22T19:39:34.000Z
|
MultiQubit_PulseGenerator/gates.py
|
philip-krantz/Drivers
|
31d05e852f4e30d40d41949f3f76e9322f0be9e8
|
[
"MIT"
] | 61
|
2015-11-12T18:31:58.000Z
|
2022-03-04T12:59:35.000Z
|
#!/usr/bin/env python3
from copy import copy
import numpy as np
import logging
from sequence import Step
log = logging.getLogger('LabberDriver')
# TODO remove Step dep from CompositeGate
class BaseGate:
"""Base class for a qubit gate.
"""
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
return pulse
def __repr__(self):
return self.__str__()
class OneQubitGate(BaseGate):
def number_of_qubits(self):
return 1
class TwoQubitGate(BaseGate):
def number_of_qubits(self):
return 2
class SingleQubitXYRotation(OneQubitGate):
"""Single qubit rotations around the XY axes.
Angles defined as in https://en.wikipedia.org/wiki/Bloch_sphere.
Parameters
----------
phi : float
Rotation axis.
theta : float
        Rotation angle.
"""
def __init__(self, phi, theta, name=None):
self.phi = phi
self.theta = theta
self.name = name
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
pulse.phase = self.phi
# pi pulse correspond to the full amplitude
pulse.amplitude *= self.theta / np.pi
return pulse
def __str__(self):
if self.name is None:
return "XYPhi={:+.6f}theta={:+.6f}".format(self.phi, self.theta)
else:
return self.name
def __eq__(self, other):
threshold = 1e-10
if not isinstance(other, SingleQubitXYRotation):
return False
if np.abs(self.phi - other.phi) > threshold:
return False
if np.abs(self.theta - other.theta) > threshold:
return False
return True
class SingleQubitZRotation(OneQubitGate):
"""Single qubit rotation around the Z axis.
Parameters
----------
theta : float
        Rotation angle.
"""
def __init__(self, theta, name=None):
self.theta = theta
self.name = name
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
# pi pulse correspond to the full amplitude
pulse.amplitude *= self.theta / np.pi
return pulse
def __str__(self):
if self.name is None:
return "Ztheta={:+.2f}".format(self.theta)
else:
return self.name
def __eq__(self, other):
threshold = 1e-10
if not isinstance(other, SingleQubitZRotation):
return False
if np.abs(self.theta - other.theta) > threshold:
return False
return True
class IdentityGate(OneQubitGate):
"""Identity gate.
    Does nothing to the qubit. The width can be specified to
implement a delay in the sequence. If no width is given, the identity gate
inherits the width of the given pulse.
Parameters
----------
width : float
Width of the I gate in seconds,
None uses the XY width (the default is None).
"""
def __init__(self, width=None):
self.width = width
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
pulse.amplitude = 0
pulse.use_drag = False # Avoids bug
if self.width is not None:
pulse.width = 0
pulse.plateau = self.width
return pulse
def __str__(self):
return "I"
class VirtualZGate(OneQubitGate):
"""Virtual Z Gate."""
def __init__(self, theta, name=None):
self.theta = theta
self.name = name
def __eq__(self, other):
threshold = 1e-10
if not isinstance(other, VirtualZGate):
return False
if np.abs(self.theta - other.theta) > threshold:
return False
return True
def __str__(self):
if self.name is None:
return "VZtheta={:+.2f}".format(self.theta)
else:
return self.name
class CPHASE(TwoQubitGate):
""" CPHASE gate. """
class iSWAP_no_1qb_phases(TwoQubitGate):
""" ISWAP gate. """
class ReadoutGate(OneQubitGate):
"""Readouts the qubit state."""
class CustomGate(BaseGate):
"""A gate using a given :obj:`Pulse`.
Parameters
----------
pulse : :obj:`Pulse`
The corresponding pulse.
"""
def __init__(self, pulse):
self.pulse = pulse
class RabiGate(SingleQubitXYRotation):
"""Creates the Rabi gate used in the spin-locking sequence.
Parameters
----------
amplitude : Amplitude of the pulse
plateau : The duration of the pulse.
phase : Phase of the Rabi gate. 0 corresponds to rotation around X axis.
frequency: Drive frequency
width: Pulse rise/fall time
use_drag: Turn on/off drag
drag_coefficient: DRAG scaling
drag_detuning: DRAG detuning
iq_skew: Phase delay between I/Q arms
iq_ratio: Imbalance between I/Q amplitudes
"""
def __init__(self, amplitude=None, plateau=None, phase=None,
frequency=None, width=None,
use_drag=None, drag_coefficient=None,
drag_detuning=None, iq_skew=None, iq_ratio=None):
self.amplitude = amplitude
self.plateau = plateau
self.phase = phase
self.frequency = frequency
self.width = width
self.use_drag = use_drag
self.drag_coefficient = drag_coefficient
self.drag_detuning = drag_detuning
self.iq_skew = iq_skew
self.iq_ratio = iq_ratio
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
if self.amplitude is not None:
pulse.amplitude = self.amplitude
if self.plateau is not None:
pulse.plateau = self.plateau
if self.phase is not None:
pulse.phase = self.phase
if self.frequency is not None:
pulse.frequency = self.frequency
if self.width is not None:
pulse.width = self.width
if self.use_drag is not None:
pulse.use_drag = self.use_drag
if self.drag_coefficient is not None:
pulse.drag_coefficient = self.drag_coefficient
if self.drag_detuning is not None:
pulse.drag_detuning = self.drag_detuning
if self.iq_skew is not None:
pulse.iq_skew = self.iq_skew
if self.iq_ratio is not None:
pulse.iq_ratio = self.iq_ratio
return pulse
class CompositeGate:
"""Multiple gates in one object.
Parameters
----------
n_qubit : int
Number of qubits involved in the composite gate.
Attributes
----------
sequence : list of :Step:
Holds the gates involved.
"""
def __init__(self, n_qubit, name=None):
self.n_qubit = n_qubit
self.sequence = []
self.name = name
def add_gate(self, gate, qubit=None):
"""Add a set of gates to the given qubit.
        For the qubits with no specified gate, an IdentityGate will be given.
The length of the step is given by the longest pulse.
Parameters
----------
qubit : int or list of int
The qubit(s) to add the gate(s) to.
gate : :obj:`BaseGate` or list of :obj:`BaseGate`
The gate(s) to add.
"""
if qubit is None:
if self.n_qubit == 1:
qubit = 0
else:
qubit = [n for n in range(self.n_qubit)]
step = Step()
if isinstance(gate, list):
if len(gate) == 1:
raise ValueError(
"For single gates, don't provide gate as a list.")
if not isinstance(qubit, list):
raise ValueError(
"""Please provide qubit indices as a list when adding more
than one gate.""")
if len(gate) != len(qubit):
raise ValueError(
"Length of gate list must equal length of qubit list.")
for q, g in zip(qubit, gate):
step.add_gate(q, g)
else:
if gate.number_of_qubits() > 1:
if not isinstance(qubit, list):
raise ValueError(
"""Please provide qubit list for gates with more than
one qubit.""")
else:
if not isinstance(qubit, int):
raise ValueError(
"For single gates, give qubit as int (not list).")
step.add_gate(qubit, gate)
self.sequence.append(step)
def number_of_qubits(self):
return self.n_qubit
def __len__(self):
return len(self.sequence)
def __str__(self):
if self.name is not None:
return self.name
        else:
            return super().__str__()
def __repr__(self):
return self.__str__()
class iSWAP_with_1qb_phases(CompositeGate):
"""iSWAP gate followed by single qubit Z rotations.
Parameters
----------
phi1 : float
Z rotation angle for qubit 1.
phi2 : float
Z rotation angle for qubit 2.
"""
def __init__(self, phi1, phi2):
super().__init__(n_qubit=2)
self.add_gate(iSWAP_no_1qb_phases())
self.add_gate([VirtualZGate(phi1), VirtualZGate(phi2)])
def new_angles(self, phi1, phi2):
"""Update the angles of the single qubit rotations.
Parameters
----------
phi1 : float
Z rotation angle for qubit 1.
phi2 : float
Z rotation angle for qubit 2.
"""
self.__init__(phi1, phi2)
def __str__(self):
return "iSWAP"
class CPHASE_with_1qb_phases(CompositeGate):
"""CPHASE gate followed by single qubit Z rotations.
Parameters
----------
phi1 : float
Z rotation angle for qubit 1.
phi2 : float
Z rotation angle for qubit 2.
"""
def __init__(self, phi1, phi2):
super().__init__(n_qubit=2)
self.add_gate(CPHASE())
self.add_gate([VirtualZGate(phi1), VirtualZGate(phi2)])
def new_angles(self, phi1, phi2):
"""Update the angles of the single qubit rotations.
Parameters
----------
phi1 : float
Z rotation angle for qubit 1.
phi2 : float
Z rotation angle for qubit 2.
"""
self.__init__(phi1, phi2)
def __str__(self):
return "CZ"
I = IdentityGate(width=None)
I0 = IdentityGate(width=0)
Ilong = IdentityGate(width=75e-9)
# X gates
Xp = SingleQubitXYRotation(phi=0, theta=np.pi, name='Xp')
Xm = SingleQubitXYRotation(phi=0, theta=-np.pi, name='Xm')
X2p = SingleQubitXYRotation(phi=0, theta=np.pi / 2, name='X2p')
X2m = SingleQubitXYRotation(phi=0, theta=-np.pi / 2, name='X2m')
# Y gates
Yp = SingleQubitXYRotation(phi=np.pi / 2, theta=np.pi, name='Yp')
Ym = SingleQubitXYRotation(phi=np.pi / 2, theta=-np.pi, name='Ym')
Y2m = SingleQubitXYRotation(phi=np.pi / 2, theta=-np.pi / 2, name='Y2m')
Y2p = SingleQubitXYRotation(phi=np.pi / 2, theta=np.pi / 2, name='Y2p')
# Z gates
Zp = SingleQubitZRotation(np.pi, name='Zp')
Z2p = SingleQubitZRotation(np.pi / 2, name='Z2p')
Zm = SingleQubitZRotation(-np.pi, name='Zm')
Z2m = SingleQubitZRotation(-np.pi / 2, name='Z2m')
# Virtual Z gates
VZp = VirtualZGate(np.pi, name='VZp')
VZ2p = VirtualZGate(np.pi / 2, name='VZ2p')
VZm = VirtualZGate(-np.pi, name='VZm')
VZ2m = VirtualZGate(-np.pi / 2, name='VZ2m')
# two-qubit gates
CPh = CPHASE()
iSWAP_without_Z = iSWAP_no_1qb_phases()
# Composite gates
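# CZEcho splits the CPHASE with X pi pulses to echo out slow phase drifts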
CZEcho = CompositeGate(n_qubit=2)
CZEcho.add_gate([X2p, I])
CZEcho.add_gate(CPh)
CZEcho.add_gate([Xp, Xp])
CZEcho.add_gate(CPh)
CZEcho.add_gate([X2p, Xp])
H = CompositeGate(n_qubit=1, name='H')
H.add_gate(VZp)
H.add_gate(Y2p)
CZ = CPHASE_with_1qb_phases(
0, 0) # Start with 0, 0 as the single qubit phase shifts.
iSWAP = iSWAP_with_1qb_phases(0,0)
CNOT = CompositeGate(n_qubit=2, name='CNOT')
CNOT.add_gate(H, 1)
CNOT.add_gate(CZ, [0, 1])
CNOT.add_gate(H, 1)
if __name__ == '__main__':
pass
| 26.281319
| 79
| 0.594581
|
from copy import copy
import numpy as np
import logging
from sequence import Step
log = logging.getLogger('LabberDriver')
class BaseGate:
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
return pulse
def __repr__(self):
return self.__str__()
class OneQubitGate(BaseGate):
def number_of_qubits(self):
return 1
class TwoQubitGate(BaseGate):
def number_of_qubits(self):
return 2
class SingleQubitXYRotation(OneQubitGate):
def __init__(self, phi, theta, name=None):
self.phi = phi
self.theta = theta
self.name = name
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
pulse.phase = self.phi
pulse.amplitude *= self.theta / np.pi
return pulse
def __str__(self):
if self.name is None:
return "XYPhi={:+.6f}theta={:+.6f}".format(self.phi, self.theta)
else:
return self.name
def __eq__(self, other):
threshold = 1e-10
if not isinstance(other, SingleQubitXYRotation):
return False
if np.abs(self.phi - other.phi) > threshold:
return False
if np.abs(self.theta - other.theta) > threshold:
return False
return True
class SingleQubitZRotation(OneQubitGate):
def __init__(self, theta, name=None):
self.theta = theta
self.name = name
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
pulse.amplitude *= self.theta / np.pi
return pulse
def __str__(self):
if self.name is None:
return "Ztheta={:+.2f}".format(self.theta)
else:
return self.name
def __eq__(self, other):
threshold = 1e-10
if not isinstance(other, SingleQubitZRotation):
return False
if np.abs(self.theta - other.theta) > threshold:
return False
return True
class IdentityGate(OneQubitGate):
def __init__(self, width=None):
self.width = width
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
pulse.amplitude = 0
pulse.use_drag = False
if self.width is not None:
pulse.width = 0
pulse.plateau = self.width
return pulse
def __str__(self):
return "I"
class VirtualZGate(OneQubitGate):
def __init__(self, theta, name=None):
self.theta = theta
self.name = name
def __eq__(self, other):
threshold = 1e-10
if not isinstance(other, VirtualZGate):
return False
if np.abs(self.theta - other.theta) > threshold:
return False
return True
def __str__(self):
if self.name is None:
return "VZtheta={:+.2f}".format(self.theta)
else:
return self.name
class CPHASE(TwoQubitGate):
    pass
class iSWAP_no_1qb_phases(TwoQubitGate):
    pass
class ReadoutGate(OneQubitGate):
    pass
class CustomGate(BaseGate):
def __init__(self, pulse):
self.pulse = pulse
class RabiGate(SingleQubitXYRotation):
def __init__(self, amplitude=None, plateau=None, phase=None,
frequency=None, width=None,
use_drag=None, drag_coefficient=None,
drag_detuning=None, iq_skew=None, iq_ratio=None):
self.amplitude = amplitude
self.plateau = plateau
self.phase = phase
self.frequency = frequency
self.width = width
self.use_drag = use_drag
self.drag_coefficient = drag_coefficient
self.drag_detuning = drag_detuning
self.iq_skew = iq_skew
self.iq_ratio = iq_ratio
def get_adjusted_pulse(self, pulse):
pulse = copy(pulse)
if self.amplitude is not None:
pulse.amplitude = self.amplitude
if self.plateau is not None:
pulse.plateau = self.plateau
if self.phase is not None:
pulse.phase = self.phase
if self.frequency is not None:
pulse.frequency = self.frequency
if self.width is not None:
pulse.width = self.width
if self.use_drag is not None:
pulse.use_drag = self.use_drag
if self.drag_coefficient is not None:
pulse.drag_coefficient = self.drag_coefficient
if self.drag_detuning is not None:
pulse.drag_detuning = self.drag_detuning
if self.iq_skew is not None:
pulse.iq_skew = self.iq_skew
if self.iq_ratio is not None:
pulse.iq_ratio = self.iq_ratio
return pulse
class CompositeGate:
def __init__(self, n_qubit, name=None):
self.n_qubit = n_qubit
self.sequence = []
self.name = name
def add_gate(self, gate, qubit=None):
if qubit is None:
if self.n_qubit == 1:
qubit = 0
else:
qubit = [n for n in range(self.n_qubit)]
step = Step()
if isinstance(gate, list):
if len(gate) == 1:
raise ValueError(
"For single gates, don't provide gate as a list.")
if not isinstance(qubit, list):
raise ValueError(
"""Please provide qubit indices as a list when adding more
than one gate.""")
if len(gate) != len(qubit):
raise ValueError(
"Length of gate list must equal length of qubit list.")
for q, g in zip(qubit, gate):
step.add_gate(q, g)
else:
if gate.number_of_qubits() > 1:
if not isinstance(qubit, list):
raise ValueError(
"""Please provide qubit list for gates with more than
one qubit.""")
else:
if not isinstance(qubit, int):
raise ValueError(
"For single gates, give qubit as int (not list).")
step.add_gate(qubit, gate)
self.sequence.append(step)
def number_of_qubits(self):
return self.n_qubit
def __len__(self):
return len(self.sequence)
def __str__(self):
if self.name is not None:
return self.name
        else:
            return super().__str__()
def __repr__(self):
return self.__str__()
class iSWAP_with_1qb_phases(CompositeGate):
def __init__(self, phi1, phi2):
super().__init__(n_qubit=2)
self.add_gate(iSWAP_no_1qb_phases())
self.add_gate([VirtualZGate(phi1), VirtualZGate(phi2)])
def new_angles(self, phi1, phi2):
self.__init__(phi1, phi2)
def __str__(self):
return "iSWAP"
class CPHASE_with_1qb_phases(CompositeGate):
def __init__(self, phi1, phi2):
super().__init__(n_qubit=2)
self.add_gate(CPHASE())
self.add_gate([VirtualZGate(phi1), VirtualZGate(phi2)])
def new_angles(self, phi1, phi2):
self.__init__(phi1, phi2)
def __str__(self):
return "CZ"
I = IdentityGate(width=None)
I0 = IdentityGate(width=0)
Ilong = IdentityGate(width=75e-9)
# X gates
Xp = SingleQubitXYRotation(phi=0, theta=np.pi, name='Xp')
Xm = SingleQubitXYRotation(phi=0, theta=-np.pi, name='Xm')
X2p = SingleQubitXYRotation(phi=0, theta=np.pi / 2, name='X2p')
X2m = SingleQubitXYRotation(phi=0, theta=-np.pi / 2, name='X2m')
# Y gates
Yp = SingleQubitXYRotation(phi=np.pi / 2, theta=np.pi, name='Yp')
Ym = SingleQubitXYRotation(phi=np.pi / 2, theta=-np.pi, name='Ym')
Y2m = SingleQubitXYRotation(phi=np.pi / 2, theta=-np.pi / 2, name='Y2m')
Y2p = SingleQubitXYRotation(phi=np.pi / 2, theta=np.pi / 2, name='Y2p')
# Z gates
Zp = SingleQubitZRotation(np.pi, name='Zp')
Z2p = SingleQubitZRotation(np.pi / 2, name='Z2p')
Zm = SingleQubitZRotation(-np.pi, name='Zm')
Z2m = SingleQubitZRotation(-np.pi / 2, name='Z2m')
# Virtual Z gates
VZp = VirtualZGate(np.pi, name='VZp')
VZ2p = VirtualZGate(np.pi / 2, name='VZ2p')
VZm = VirtualZGate(-np.pi, name='VZm')
VZ2m = VirtualZGate(-np.pi / 2, name='VZ2m')
# two-qubit gates
CPh = CPHASE()
iSWAP_without_Z = iSWAP_no_1qb_phases()
# Composite gates
CZEcho = CompositeGate(n_qubit=2)
CZEcho.add_gate([X2p, I])
CZEcho.add_gate(CPh)
CZEcho.add_gate([Xp, Xp])
CZEcho.add_gate(CPh)
CZEcho.add_gate([X2p, Xp])
H = CompositeGate(n_qubit=1, name='H')
H.add_gate(VZp)
H.add_gate(Y2p)
CZ = CPHASE_with_1qb_phases(
0, 0) # Start with 0, 0 as the single qubit phase shifts.
iSWAP = iSWAP_with_1qb_phases(0,0)
CNOT = CompositeGate(n_qubit=2, name='CNOT')
CNOT.add_gate(H, 1)
CNOT.add_gate(CZ, [0, 1])
CNOT.add_gate(H, 1)
if __name__ == '__main__':
pass
| true
| true
|
1c3e4597b373a87c78c63dc3fc76ffc8ff061d3a
| 349
|
py
|
Python
|
projgrad/tests/basic.py
|
andim/projgrad
|
3854c704b6c413f8d79aa324ef4758676cdb8c68
|
[
"MIT"
] | 10
|
2019-01-05T13:51:01.000Z
|
2022-03-18T01:32:14.000Z
|
projgrad/tests/basic.py
|
andim/projgrad
|
3854c704b6c413f8d79aa324ef4758676cdb8c68
|
[
"MIT"
] | null | null | null |
projgrad/tests/basic.py
|
andim/projgrad
|
3854c704b6c413f8d79aa324ef4758676cdb8c68
|
[
"MIT"
] | 6
|
2017-11-16T01:00:09.000Z
|
2022-01-17T14:08:26.000Z
|
import numpy as np
import numpy.testing as npt
import projgrad
def test_basic():
def objective(x):
f = np.sum(x**2)
grad = 2 * x
return f, grad
res = projgrad.minimize(objective, [0.1, 0.7, 0.2], reltol=1e-8)
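    # Minimizing sum(x**2) over the probability simplex yields the uniform point (1/3, 1/3, 1/3)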
npt.assert_allclose(res.x, np.ones(3)/3.0)
if __name__ == '__main__':
npt.run_module_suite()
| 21.8125
| 68
| 0.613181
|
import numpy as np
import numpy.testing as npt
import projgrad
def test_basic():
def objective(x):
f = np.sum(x**2)
grad = 2 * x
return f, grad
res = projgrad.minimize(objective, [0.1, 0.7, 0.2], reltol=1e-8)
npt.assert_allclose(res.x, np.ones(3)/3.0)
if __name__ == '__main__':
npt.run_module_suite()
| true
| true
|
1c3e4881dd472c3c3641cb8a815e4baf723a9eb9
| 4,758
|
py
|
Python
|
unionability_search/calculate_unionability.py
|
guenthermi/table-embeddings
|
3ce094483fc5057b18f898d450a7c376d49818fa
|
[
"MIT"
] | 6
|
2021-03-17T09:53:10.000Z
|
2022-03-28T18:26:22.000Z
|
unionability_search/calculate_unionability.py
|
guenthermi/table-embeddings
|
3ce094483fc5057b18f898d450a7c376d49818fa
|
[
"MIT"
] | null | null | null |
unionability_search/calculate_unionability.py
|
guenthermi/table-embeddings
|
3ce094483fc5057b18f898d450a7c376d49818fa
|
[
"MIT"
] | null | null | null |
import json
import random
from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter
from web_table_embedding_model import WebTableEmbeddingModel
from fasttext_embedding_model import FasttextEmbeddingModel
from dataset_loader import DatasetLoader
def create_arg_parser():
    parser = ArgumentParser("calculate_unionability",
                            formatter_class=ArgumentDefaultsHelpFormatter,
                            conflict_handler='resolve',
                            description='''Evaluates an embedding model on the unionability task.''')
parser.add_argument('-e', '--embedding-model',
help="path to embedding model", required=True, nargs=1)
parser.add_argument('-et', '--embedding-type',
help="embedding type: 'web-table', 'fasttext', or 'word2vec'", required=True, nargs=1)
parser.add_argument('-o', '--output',
help="path for output txt file", required=True, nargs=1)
parser.add_argument('-b', '--benchmark',
help="path to unionablity benchmark folder", required=True, nargs=1)
parser.add_argument('-s', '--sample-size',
help="number of evaluation samples", required=True, nargs=1)
parser.add_argument('-h', '--model-headers',
help="calculate vectors for header terms", nargs='?', const=True, default=False)
parser.add_argument('-n', '--negative-sample-factor',
help="factor that determine number of negative samples in comparison to positive samples", nargs=1, default=[2])
return parser
def load_embedding_model(model_type, model_path):
model = None
if model_type == 'web-table':
model = WebTableEmbeddingModel(model_path)
elif model_type == 'fasttext':
model = FasttextEmbeddingModel(model_path)
return model
def create_samples(dataset, sample_size=100, n_sample_rate=2):
alignments, alignments_reverse = dataset.get_alignments()
query_columns = list(alignments.keys())
all_columns = list(alignments_reverse.keys())
p_samples = list()
n_samples = list()
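    # Each positive sample pairs a random query column with an aligned column
    # from a different table; each negative pairs it with a random unaligned column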
while len(p_samples) < sample_size:
query_table_name, query_col_name = random.choice(query_columns)
text_values_q = dataset.get_column(query_table_name, query_col_name)
text_values_p = None
text_values_n = None
candidate_list = list([x for x in alignments[(
query_table_name, query_col_name)] if x[0] != query_table_name])
if len(candidate_list) == 0:
continue
pos_candidate = random.choice(candidate_list)
try:
text_values_p = dataset.get_column(
pos_candidate[0], pos_candidate[1])
except:
continue
p_samples.append(
(query_col_name, pos_candidate[1], text_values_q, text_values_p))
for i in range(n_sample_rate):
text_values_n = None
while text_values_n is None:
neg_candidate = random.choice(all_columns)
if neg_candidate in alignments[(query_table_name, query_col_name)]:
continue
try:
text_values_n = dataset.get_column(
neg_candidate[0], neg_candidate[1])
except:
continue
n_samples.append(
(query_col_name, neg_candidate[1], text_values_q, text_values_n))
return p_samples, n_samples
def evaluate(model, p_samples, n_samples, model_headers=False):
results = dict()
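    # Score every query/candidate pair with the model's approximate unionability measure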
for sample_set, label in [(p_samples, 'p_samples'), (n_samples, 'n_samples')]:
results[label] = list()
for (col_name_q, col_name_c, text_values_q, text_values_c) in sample_set:
score = model.get_approximated_unionability_score(
text_values_q, text_values_c, col_name_q, col_name_c, model_headers=model_headers)
results[label].append((col_name_q, col_name_c, score))
return results
def output_results(results, output_path):
output_file = open(output_path, 'w')
json.dump(results, output_file)
return
def main():
arg_parser = create_arg_parser()
args = arg_parser.parse_args()
dataset = DatasetLoader(args.benchmark[0])
model = load_embedding_model(
args.embedding_type[0], args.embedding_model[0])
p_samples, n_samples = create_samples(
dataset, sample_size=int(args.sample_size[0]), n_sample_rate=int(args.negative_sample_factor[0]))
results = evaluate(model, p_samples, n_samples,
model_headers=args.model_headers)
output_results(results, args.output[0])
return
if __name__ == "__main__":
main()
| 40.322034
| 136
| 0.648382
|
import json
import random
from argparse import ArgumentParser, FileType, ArgumentDefaultsHelpFormatter
from web_table_embedding_model import WebTableEmbeddingModel
from fasttext_embedding_model import FasttextEmbeddingModel
from dataset_loader import DatasetLoader
def create_arg_parser():
parser = ArgumentParser("calculate_unionablity",
formatter_class=ArgumentDefaultsHelpFormatter,
conflict_handler='resolve',
                            description='''Evaluates embedding model on unionability task.''')
parser.add_argument('-e', '--embedding-model',
help="path to embedding model", required=True, nargs=1)
parser.add_argument('-et', '--embedding-type',
help="embedding type: 'web-table', 'fasttext', or 'word2vec'", required=True, nargs=1)
    parser.add_argument('-o', '--output',
                        help="path for output JSON file", required=True, nargs=1)
    parser.add_argument('-b', '--benchmark',
                        help="path to unionability benchmark folder", required=True, nargs=1)
    parser.add_argument('-s', '--sample-size',
                        help="number of evaluation samples", required=True, nargs=1)
    parser.add_argument('-h', '--model-headers',
                        help="calculate vectors for header terms", nargs='?', const=True, default=False)
    parser.add_argument('-n', '--negative-sample-factor',
                        help="factor that determines the number of negative samples relative to positive samples", nargs=1, default=[2])
return parser
def load_embedding_model(model_type, model_path):
    if model_type == 'web-table':
        return WebTableEmbeddingModel(model_path)
    elif model_type == 'fasttext':
        return FasttextEmbeddingModel(model_path)
    raise ValueError("unsupported embedding type: %s" % model_type)
def create_samples(dataset, sample_size=100, n_sample_rate=2):
alignments, alignments_reverse = dataset.get_alignments()
query_columns = list(alignments.keys())
all_columns = list(alignments_reverse.keys())
p_samples = list()
n_samples = list()
while len(p_samples) < sample_size:
query_table_name, query_col_name = random.choice(query_columns)
text_values_q = dataset.get_column(query_table_name, query_col_name)
text_values_p = None
text_values_n = None
candidate_list = list([x for x in alignments[(
query_table_name, query_col_name)] if x[0] != query_table_name])
if len(candidate_list) == 0:
continue
pos_candidate = random.choice(candidate_list)
try:
text_values_p = dataset.get_column(
pos_candidate[0], pos_candidate[1])
        except Exception:
            continue
p_samples.append(
(query_col_name, pos_candidate[1], text_values_q, text_values_p))
for i in range(n_sample_rate):
text_values_n = None
while text_values_n is None:
neg_candidate = random.choice(all_columns)
if neg_candidate in alignments[(query_table_name, query_col_name)]:
continue
try:
text_values_n = dataset.get_column(
neg_candidate[0], neg_candidate[1])
                except Exception:
                    continue
n_samples.append(
(query_col_name, neg_candidate[1], text_values_q, text_values_n))
return p_samples, n_samples
def evaluate(model, p_samples, n_samples, model_headers=False):
results = dict()
for sample_set, label in [(p_samples, 'p_samples'), (n_samples, 'n_samples')]:
results[label] = list()
for (col_name_q, col_name_c, text_values_q, text_values_c) in sample_set:
score = model.get_approximated_unionability_score(
text_values_q, text_values_c, col_name_q, col_name_c, model_headers=model_headers)
results[label].append((col_name_q, col_name_c, score))
return results
def output_results(results, output_path):
    with open(output_path, 'w') as output_file:
        json.dump(results, output_file)
    return
def main():
arg_parser = create_arg_parser()
args = arg_parser.parse_args()
dataset = DatasetLoader(args.benchmark[0])
model = load_embedding_model(
args.embedding_type[0], args.embedding_model[0])
p_samples, n_samples = create_samples(
dataset, sample_size=int(args.sample_size[0]), n_sample_rate=int(args.negative_sample_factor[0]))
results = evaluate(model, p_samples, n_samples,
model_headers=args.model_headers)
output_results(results, args.output[0])
return
if __name__ == "__main__":
main()
| true
| true
|
1c3e49531beeb3eeeae61b576d87ce5954cb8183
| 13,826
|
py
|
Python
|
pysnmp/CISCO-PORT-STORM-CONTROL-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 11
|
2021-02-02T16:27:16.000Z
|
2021-08-31T06:22:49.000Z
|
pysnmp/CISCO-PORT-STORM-CONTROL-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 75
|
2021-02-24T17:30:31.000Z
|
2021-12-08T00:01:18.000Z
|
pysnmp/CISCO-PORT-STORM-CONTROL-MIB.py
|
agustinhenze/mibs.snmplabs.com
|
1fc5c07860542b89212f4c8ab807057d9a9206c7
|
[
"Apache-2.0"
] | 10
|
2019-04-30T05:51:36.000Z
|
2022-02-16T03:33:41.000Z
|
#
# PySNMP MIB module CISCO-PORT-STORM-CONTROL-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CISCO-PORT-STORM-CONTROL-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:53:01 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
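# NOTE: modules like this are executed by pysnmp's MibBuilder, which supplies
# the `mibBuilder` symbol referenced below in the execution namespace.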
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ModuleIdentity, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Integer32, Bits, TimeTicks, IpAddress, NotificationType, Unsigned32, iso, Gauge32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Integer32", "Bits", "TimeTicks", "IpAddress", "NotificationType", "Unsigned32", "iso", "Gauge32", "Counter32")
TruthValue, DisplayString, TimeStamp, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TimeStamp", "TextualConvention")
ciscoPortStormControlMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 362))
ciscoPortStormControlMIB.setRevisions(('2007-10-19 00:00', '2003-07-03 00:00',))
if mibBuilder.loadTexts: ciscoPortStormControlMIB.setLastUpdated('200710190000Z')
if mibBuilder.loadTexts: ciscoPortStormControlMIB.setOrganization('Cisco Systems, Inc.')
ciscoPortStormControlMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 0))
ciscoPortStormControlMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 1))
ciscoPortStormControlMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 2))
cpscConfigObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1))
cpscStatusObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2))
class CPortStormControlTrafficType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("broadcast", 1), ("multicast", 2), ("unicast", 3), ("all", 4))
class CPortStormControlActionType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("filter", 1), ("shutdown", 2))
class CPortStormControlStatusType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("inactive", 1), ("forwarding", 2), ("trafficTypeFiltered", 3), ("allTrafficFiltered", 4), ("shutdown", 5))
cpscThresholdTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1), )
if mibBuilder.loadTexts: cpscThresholdTable.setStatus('current')
cpscThresholdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscTrafficType"))
if mibBuilder.loadTexts: cpscThresholdEntry.setStatus('current')
cpscTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1, 1), CPortStormControlTrafficType())
if mibBuilder.loadTexts: cpscTrafficType.setStatus('current')
cpscUpperThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setUnits('0.01 Percentage').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscUpperThreshold.setStatus('current')
cpscLowerThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setUnits('0.01 Percentage').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscLowerThreshold.setStatus('current')
cpscActionTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2), )
if mibBuilder.loadTexts: cpscActionTable.setStatus('current')
cpscActionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cpscActionEntry.setStatus('current')
cpscAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2, 1, 1), CPortStormControlActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscAction.setStatus('current')
cpscNotificationControl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("stormOccurred", 2), ("stormCleared", 3), ("both", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscNotificationControl.setStatus('current')
cpscNotificationThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000))).setUnits('Notifications per Minute').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscNotificationThreshold.setStatus('current')
cpscStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1), )
if mibBuilder.loadTexts: cpscStatusTable.setStatus('current')
cpscStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscTrafficType"))
if mibBuilder.loadTexts: cpscStatusEntry.setStatus('current')
cpscStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1, 1), CPortStormControlStatusType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscStatus.setStatus('current')
cpscCurrentLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setUnits('0.01 Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscCurrentLevel.setStatus('current')
cpscSuppressedPacket = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscSuppressedPacket.setStatus('current')
cpscHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2), )
if mibBuilder.loadTexts: cpscHistoryTable.setStatus('current')
cpscHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryTrafficType"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryIndex"))
if mibBuilder.loadTexts: cpscHistoryEntry.setStatus('current')
cpscHistoryTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 1), CPortStormControlTrafficType())
if mibBuilder.loadTexts: cpscHistoryTrafficType.setStatus('current')
cpscHistoryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024)))
if mibBuilder.loadTexts: cpscHistoryIndex.setStatus('current')
cpscHistoryStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscHistoryStartTime.setStatus('current')
cpscHistoryEndTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 4), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscHistoryEndTime.setStatus('current')
cpscNotificationsPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 0, 1))
cpscEventRev1 = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 362, 0, 2)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatus"))
if mibBuilder.loadTexts: cpscEventRev1.setStatus('current')
cpscEvent = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 362, 0, 1, 1)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatus"))
if mibBuilder.loadTexts: cpscEvent.setStatus('deprecated')
ciscoPortStormControlMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 1))
ciscoPortStormControlMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2))
ciscoPortStormControlMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 1, 1)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotifConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatusGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatisticsGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoPortStormControlMIBCompliance = ciscoPortStormControlMIBCompliance.setStatus('deprecated')
ciscoPortStormControlMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 1, 2)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotifConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationGroupRev1"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatusGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatisticsGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoPortStormControlMIBComplianceRev1 = ciscoPortStormControlMIBComplianceRev1.setStatus('current')
cpscConfigurationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 1)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscUpperThreshold"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscLowerThreshold"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscAction"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscConfigurationGroup = cpscConfigurationGroup.setStatus('current')
cpscStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 2)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatus"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscCurrentLevel"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscStatusGroup = cpscStatusGroup.setStatus('current')
cpscNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 3)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscEvent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscNotificationGroup = cpscNotificationGroup.setStatus('deprecated')
cpscNotifConfigurationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 4)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationControl"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationThreshold"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscNotifConfigurationGroup = cpscNotifConfigurationGroup.setStatus('current')
cpscStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 5)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscSuppressedPacket"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscStatisticsGroup = cpscStatisticsGroup.setStatus('current')
cpscHistoryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 6)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryStartTime"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryEndTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscHistoryGroup = cpscHistoryGroup.setStatus('current')
cpscNotificationGroupRev1 = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 7)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscEventRev1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscNotificationGroupRev1 = cpscNotificationGroupRev1.setStatus('current')
mibBuilder.exportSymbols("CISCO-PORT-STORM-CONTROL-MIB", CPortStormControlActionType=CPortStormControlActionType, cpscHistoryEntry=cpscHistoryEntry, cpscHistoryStartTime=cpscHistoryStartTime, PYSNMP_MODULE_ID=ciscoPortStormControlMIB, cpscEventRev1=cpscEventRev1, ciscoPortStormControlMIBConform=ciscoPortStormControlMIBConform, cpscLowerThreshold=cpscLowerThreshold, CPortStormControlTrafficType=CPortStormControlTrafficType, cpscAction=cpscAction, cpscHistoryTrafficType=cpscHistoryTrafficType, ciscoPortStormControlMIBObjects=ciscoPortStormControlMIBObjects, cpscStatusEntry=cpscStatusEntry, cpscStatusGroup=cpscStatusGroup, cpscStatusTable=cpscStatusTable, cpscActionEntry=cpscActionEntry, cpscSuppressedPacket=cpscSuppressedPacket, ciscoPortStormControlMIBCompliances=ciscoPortStormControlMIBCompliances, ciscoPortStormControlMIBComplianceRev1=ciscoPortStormControlMIBComplianceRev1, cpscThresholdTable=cpscThresholdTable, cpscNotificationControl=cpscNotificationControl, cpscNotificationThreshold=cpscNotificationThreshold, ciscoPortStormControlMIBGroups=ciscoPortStormControlMIBGroups, cpscConfigurationGroup=cpscConfigurationGroup, cpscHistoryEndTime=cpscHistoryEndTime, cpscTrafficType=cpscTrafficType, cpscHistoryIndex=cpscHistoryIndex, CPortStormControlStatusType=CPortStormControlStatusType, cpscCurrentLevel=cpscCurrentLevel, cpscEvent=cpscEvent, cpscThresholdEntry=cpscThresholdEntry, cpscHistoryTable=cpscHistoryTable, ciscoPortStormControlMIBCompliance=ciscoPortStormControlMIBCompliance, ciscoPortStormControlMIB=ciscoPortStormControlMIB, cpscUpperThreshold=cpscUpperThreshold, cpscNotificationGroup=cpscNotificationGroup, cpscHistoryGroup=cpscHistoryGroup, cpscStatusObjects=cpscStatusObjects, cpscStatisticsGroup=cpscStatisticsGroup, cpscActionTable=cpscActionTable, cpscStatus=cpscStatus, cpscConfigObjects=cpscConfigObjects, cpscNotificationsPrefix=cpscNotificationsPrefix, cpscNotificationGroupRev1=cpscNotificationGroupRev1, ciscoPortStormControlMIBNotifs=ciscoPortStormControlMIBNotifs, cpscNotifConfigurationGroup=cpscNotifConfigurationGroup)
| 116.184874
| 2,067
| 0.756907
|
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ValueRangeConstraint, ValueSizeConstraint, ConstraintsIntersection, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsIntersection", "SingleValueConstraint")
ciscoMgmt, = mibBuilder.importSymbols("CISCO-SMI", "ciscoMgmt")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ObjectGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ObjectGroup", "ModuleCompliance")
ModuleIdentity, ObjectIdentity, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, Integer32, Bits, TimeTicks, IpAddress, NotificationType, Unsigned32, iso, Gauge32, Counter32 = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "ObjectIdentity", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "Integer32", "Bits", "TimeTicks", "IpAddress", "NotificationType", "Unsigned32", "iso", "Gauge32", "Counter32")
TruthValue, DisplayString, TimeStamp, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "DisplayString", "TimeStamp", "TextualConvention")
ciscoPortStormControlMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 9, 9, 362))
ciscoPortStormControlMIB.setRevisions(('2007-10-19 00:00', '2003-07-03 00:00',))
if mibBuilder.loadTexts: ciscoPortStormControlMIB.setLastUpdated('200710190000Z')
if mibBuilder.loadTexts: ciscoPortStormControlMIB.setOrganization('Cisco Systems, Inc.')
ciscoPortStormControlMIBNotifs = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 0))
ciscoPortStormControlMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 1))
ciscoPortStormControlMIBConform = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 2))
cpscConfigObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1))
cpscStatusObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2))
class CPortStormControlTrafficType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("broadcast", 1), ("multicast", 2), ("unicast", 3), ("all", 4))
class CPortStormControlActionType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("filter", 1), ("shutdown", 2))
class CPortStormControlStatusType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5))
namedValues = NamedValues(("inactive", 1), ("forwarding", 2), ("trafficTypeFiltered", 3), ("allTrafficFiltered", 4), ("shutdown", 5))
cpscThresholdTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1), )
if mibBuilder.loadTexts: cpscThresholdTable.setStatus('current')
cpscThresholdEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscTrafficType"))
if mibBuilder.loadTexts: cpscThresholdEntry.setStatus('current')
cpscTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1, 1), CPortStormControlTrafficType())
if mibBuilder.loadTexts: cpscTrafficType.setStatus('current')
cpscUpperThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setUnits('0.01 Percentage').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscUpperThreshold.setStatus('current')
cpscLowerThreshold = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setUnits('0.01 Percentage').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscLowerThreshold.setStatus('current')
cpscActionTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2), )
if mibBuilder.loadTexts: cpscActionTable.setStatus('current')
cpscActionEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: cpscActionEntry.setStatus('current')
cpscAction = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2, 1, 1), CPortStormControlActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscAction.setStatus('current')
cpscNotificationControl = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 2, 1, 2), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("none", 1), ("stormOccurred", 2), ("stormCleared", 3), ("both", 4)))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscNotificationControl.setStatus('current')
cpscNotificationThreshold = MibScalar((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 1000))).setUnits('Notifications per Minute').setMaxAccess("readwrite")
if mibBuilder.loadTexts: cpscNotificationThreshold.setStatus('current')
cpscStatusTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1), )
if mibBuilder.loadTexts: cpscStatusTable.setStatus('current')
cpscStatusEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscTrafficType"))
if mibBuilder.loadTexts: cpscStatusEntry.setStatus('current')
cpscStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1, 1), CPortStormControlStatusType()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscStatus.setStatus('current')
cpscCurrentLevel = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setUnits('0.01 Percentage').setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscCurrentLevel.setStatus('current')
cpscSuppressedPacket = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 1, 1, 3), Counter64()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscSuppressedPacket.setStatus('current')
cpscHistoryTable = MibTable((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2), )
if mibBuilder.loadTexts: cpscHistoryTable.setStatus('current')
cpscHistoryEntry = MibTableRow((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryTrafficType"), (0, "CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryIndex"))
if mibBuilder.loadTexts: cpscHistoryEntry.setStatus('current')
cpscHistoryTrafficType = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 1), CPortStormControlTrafficType())
if mibBuilder.loadTexts: cpscHistoryTrafficType.setStatus('current')
cpscHistoryIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 1024)))
if mibBuilder.loadTexts: cpscHistoryIndex.setStatus('current')
cpscHistoryStartTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 3), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscHistoryStartTime.setStatus('current')
cpscHistoryEndTime = MibTableColumn((1, 3, 6, 1, 4, 1, 9, 9, 362, 1, 2, 2, 1, 4), TimeStamp()).setMaxAccess("readonly")
if mibBuilder.loadTexts: cpscHistoryEndTime.setStatus('current')
cpscNotificationsPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 0, 1))
cpscEventRev1 = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 362, 0, 2)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatus"))
if mibBuilder.loadTexts: cpscEventRev1.setStatus('current')
cpscEvent = NotificationType((1, 3, 6, 1, 4, 1, 9, 9, 362, 0, 1, 1)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatus"))
if mibBuilder.loadTexts: cpscEvent.setStatus('deprecated')
ciscoPortStormControlMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 1))
ciscoPortStormControlMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2))
ciscoPortStormControlMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 1, 1)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotifConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatusGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatisticsGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoPortStormControlMIBCompliance = ciscoPortStormControlMIBCompliance.setStatus('deprecated')
ciscoPortStormControlMIBComplianceRev1 = ModuleCompliance((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 1, 2)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotifConfigurationGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationGroupRev1"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatusGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatisticsGroup"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
ciscoPortStormControlMIBComplianceRev1 = ciscoPortStormControlMIBComplianceRev1.setStatus('current')
cpscConfigurationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 1)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscUpperThreshold"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscLowerThreshold"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscAction"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscConfigurationGroup = cpscConfigurationGroup.setStatus('current')
cpscStatusGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 2)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscStatus"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscCurrentLevel"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscStatusGroup = cpscStatusGroup.setStatus('current')
cpscNotificationGroup = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 3)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscEvent"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscNotificationGroup = cpscNotificationGroup.setStatus('deprecated')
cpscNotifConfigurationGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 4)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationControl"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscNotificationThreshold"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscNotifConfigurationGroup = cpscNotifConfigurationGroup.setStatus('current')
cpscStatisticsGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 5)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscSuppressedPacket"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscStatisticsGroup = cpscStatisticsGroup.setStatus('current')
cpscHistoryGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 6)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryStartTime"), ("CISCO-PORT-STORM-CONTROL-MIB", "cpscHistoryEndTime"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscHistoryGroup = cpscHistoryGroup.setStatus('current')
cpscNotificationGroupRev1 = NotificationGroup((1, 3, 6, 1, 4, 1, 9, 9, 362, 2, 2, 7)).setObjects(("CISCO-PORT-STORM-CONTROL-MIB", "cpscEventRev1"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
cpscNotificationGroupRev1 = cpscNotificationGroupRev1.setStatus('current')
mibBuilder.exportSymbols("CISCO-PORT-STORM-CONTROL-MIB", CPortStormControlActionType=CPortStormControlActionType, cpscHistoryEntry=cpscHistoryEntry, cpscHistoryStartTime=cpscHistoryStartTime, PYSNMP_MODULE_ID=ciscoPortStormControlMIB, cpscEventRev1=cpscEventRev1, ciscoPortStormControlMIBConform=ciscoPortStormControlMIBConform, cpscLowerThreshold=cpscLowerThreshold, CPortStormControlTrafficType=CPortStormControlTrafficType, cpscAction=cpscAction, cpscHistoryTrafficType=cpscHistoryTrafficType, ciscoPortStormControlMIBObjects=ciscoPortStormControlMIBObjects, cpscStatusEntry=cpscStatusEntry, cpscStatusGroup=cpscStatusGroup, cpscStatusTable=cpscStatusTable, cpscActionEntry=cpscActionEntry, cpscSuppressedPacket=cpscSuppressedPacket, ciscoPortStormControlMIBCompliances=ciscoPortStormControlMIBCompliances, ciscoPortStormControlMIBComplianceRev1=ciscoPortStormControlMIBComplianceRev1, cpscThresholdTable=cpscThresholdTable, cpscNotificationControl=cpscNotificationControl, cpscNotificationThreshold=cpscNotificationThreshold, ciscoPortStormControlMIBGroups=ciscoPortStormControlMIBGroups, cpscConfigurationGroup=cpscConfigurationGroup, cpscHistoryEndTime=cpscHistoryEndTime, cpscTrafficType=cpscTrafficType, cpscHistoryIndex=cpscHistoryIndex, CPortStormControlStatusType=CPortStormControlStatusType, cpscCurrentLevel=cpscCurrentLevel, cpscEvent=cpscEvent, cpscThresholdEntry=cpscThresholdEntry, cpscHistoryTable=cpscHistoryTable, ciscoPortStormControlMIBCompliance=ciscoPortStormControlMIBCompliance, ciscoPortStormControlMIB=ciscoPortStormControlMIB, cpscUpperThreshold=cpscUpperThreshold, cpscNotificationGroup=cpscNotificationGroup, cpscHistoryGroup=cpscHistoryGroup, cpscStatusObjects=cpscStatusObjects, cpscStatisticsGroup=cpscStatisticsGroup, cpscActionTable=cpscActionTable, cpscStatus=cpscStatus, cpscConfigObjects=cpscConfigObjects, cpscNotificationsPrefix=cpscNotificationsPrefix, cpscNotificationGroupRev1=cpscNotificationGroupRev1, ciscoPortStormControlMIBNotifs=ciscoPortStormControlMIBNotifs, cpscNotifConfigurationGroup=cpscNotifConfigurationGroup)
| true
| true
|
1c3e4a2aa5c64da844f61435080a6ca743e744d5
| 5,823
|
py
|
Python
|
project/user/views.py
|
ownpush/otp_demo_server
|
a3ec5515cf17c2c7a9411fc05f77de2a46ba7d99
|
[
"MIT"
] | 5
|
2016-03-01T02:04:47.000Z
|
2017-12-28T22:28:53.000Z
|
project/user/views.py
|
ownpush/otp_demo_server
|
a3ec5515cf17c2c7a9411fc05f77de2a46ba7d99
|
[
"MIT"
] | null | null | null |
project/user/views.py
|
ownpush/otp_demo_server
|
a3ec5515cf17c2c7a9411fc05f77de2a46ba7d99
|
[
"MIT"
] | 3
|
2016-03-01T02:04:49.000Z
|
2019-02-08T09:55:18.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2016 Fastboot Mobile LLC.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# project/user/views.py
#################
#### imports ####
#################
from flask import render_template, Blueprint, url_for, \
redirect, flash, request
from flask_login import login_user, logout_user, login_required, current_user  # flask.ext.* was removed in Flask 1.0
from project import bcrypt, db
from project.models import User, PushDevice
from project.user.forms import LoginForm, TwoFactorLoginForm, AddDeviceForm
from project.push.tasks import sendpush
import binascii
import os
import json
################
#### config ####
################
user_blueprint = Blueprint('user', __name__,)
################
#### routes ####
################
'''
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if form.validate_on_submit():
user = User(
email=form.email.data,
password=form.password.data
)
db.session.add(user)
db.session.commit()
login_user(user)
flash('Thank you for registering.', 'success')
return redirect(url_for("user.members"))
return render_template('user/register.html', form=form)
'''
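# Login flow: if the account has a registered push device, generate an OTP,
# store only its bcrypt hash on the user row, push the plaintext OTP to the
# device, and redirect to /2FA; otherwise fall back to plain password login.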
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm(request.form)
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
flash("User not found", "danger")
return render_template('user/login.html', form=form)
devices = PushDevice.query.filter_by(user_id=user.id).all()
if len(devices) > 0:
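            # 4 random bytes -> 8 hex chars; hashed like a password so the
            # plaintext OTP is never stored in the database.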
otp = binascii.b2a_hex(os.urandom(4)).decode()
user.otp = bcrypt.generate_password_hash(otp)
print(otp)
device = devices[0]
push_status_txt = sendpush(device.push_id, otp)
push_json = json.loads(push_status_txt)
if "status" in push_json:
if push_json['status'] == "OK":
flash("One Time Password Sent To Device", "success")
                else:
flash("Could Not Communicate With Device", "danger")
db.session.commit()
return redirect(url_for('user.two_factor_login'))
if user and bcrypt.check_password_hash(
user.password, request.form['password']):
login_user(user)
flash('You are logged in. Welcome!', 'success')
return redirect(url_for('user.members'))
else:
flash('Invalid email and/or password.', 'danger')
return render_template('user/login.html', form=form)
return render_template('user/login.html', title='Please Login', form=form)
@user_blueprint.route('/2FA', methods=['GET', 'POST'])
def two_factor_login():
form = TwoFactorLoginForm(request.form)
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
            if user.otp and bcrypt.check_password_hash(user.otp, form.otp.data):  # otp is None unless the login step issued one
login_user(user)
flash('You are logged in. Welcome!', 'success')
user.otp = None
db.session.commit()
return redirect(url_for('user.members'))
else:
flash('Invalid one time password.', 'danger')
else:
flash('Invalid email and/or password.', 'danger')
return render_template('user/two_factor_login.html', form=form)
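# Devices are pre-provisioned PushDevice rows; this view merely claims one for
# the current user by its device_uid.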
@user_blueprint.route('/add_device', methods=['GET', 'POST'])
@login_required
def add_device():
form = AddDeviceForm(request.form)
if form.validate_on_submit():
device = PushDevice.query.filter_by(device_uid=form.device_uid.data).first()
if device is None:
flash('Device not found (please check id)', "danger")
else:
device.user = current_user
db.session.commit()
flash('Device registered to your account', "success")
return redirect(url_for('user.members'))
return render_template('user/add_device.html', form=form)
@user_blueprint.route('/logout')
@login_required
def logout():
logout_user()
flash('You were logged out. Bye!', 'success')
return redirect(url_for('main.home'))
@user_blueprint.route('/members')
@login_required
def members():
user = current_user
devices = PushDevice.query.filter_by(user_id=user.id).all()
if len(devices) < 1:
flash('Please <a href="/add_device" class="alert-link">add</a> a two factor auth device', 'info')
return render_template('user/members.html')
| 32.898305
| 105
| 0.641765
|
import binascii
import os
import json
            user.otp = bcrypt.generate_password_hash(otp)
print(otp)
device = devices[0]
push_status_txt = sendpush(device.push_id, otp)
push_json = json.loads(push_status_txt)
if "status" in push_json:
if push_json['status'] == "OK":
flash("One Time Password Sent To Device", "success")
                else:
flash("Could Not Communicate With Device", "danger")
db.session.commit()
return redirect(url_for('user.two_factor_login'))
if user and bcrypt.check_password_hash(
user.password, request.form['password']):
login_user(user)
flash('You are logged in. Welcome!', 'success')
return redirect(url_for('user.members'))
else:
flash('Invalid email and/or password.', 'danger')
return render_template('user/login.html', form=form)
return render_template('user/login.html', title='Please Login', form=form)
@user_blueprint.route('/2FA', methods=['GET', 'POST'])
def two_factor_login():
form = TwoFactorLoginForm(request.form)
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user and bcrypt.check_password_hash(user.password, form.password.data):
            if user.otp and bcrypt.check_password_hash(user.otp, form.otp.data):
login_user(user)
flash('You are logged in. Welcome!', 'success')
user.otp = None
db.session.commit()
return redirect(url_for('user.members'))
else:
flash('Invalid one time password.', 'danger')
else:
flash('Invalid email and/or password.', 'danger')
return render_template('user/two_factor_login.html', form=form)
@user_blueprint.route('/add_device', methods=['GET', 'POST'])
@login_required
def add_device():
form = AddDeviceForm(request.form)
if form.validate_on_submit():
device = PushDevice.query.filter_by(device_uid=form.device_uid.data).first()
if device is None:
flash('Device not found (please check id)', "danger")
else:
device.user = current_user
db.session.commit()
flash('Device registered to your account', "success")
return redirect(url_for('user.members'))
return render_template('user/add_device.html', form=form)
@user_blueprint.route('/logout')
@login_required
def logout():
logout_user()
flash('You were logged out. Bye!', 'success')
return redirect(url_for('main.home'))
@user_blueprint.route('/members')
@login_required
def members():
user = current_user
devices = PushDevice.query.filter_by(user_id=user.id).all()
if len(devices) < 1:
flash('Please <a href="/add_device" class="alert-link">add</a> a two factor auth device', 'info')
return render_template('user/members.html')
| true
| true
|
1c3e4a7f8481de0720dcd6f1810d7ec36b019d1f
| 5,716
|
py
|
Python
|
2020/bilibili-spider/zone/test.py
|
lyh543/Some-Codes
|
2b295338f802e71c6b613350f1b6e8299856780f
|
[
"MIT"
] | 3
|
2020-06-05T08:29:16.000Z
|
2021-12-09T05:44:54.000Z
|
2020/bilibili-spider/zone/test.py
|
lyh543/Some-Codes
|
2b295338f802e71c6b613350f1b6e8299856780f
|
[
"MIT"
] | null | null | null |
2020/bilibili-spider/zone/test.py
|
lyh543/Some-Codes
|
2b295338f802e71c6b613350f1b6e8299856780f
|
[
"MIT"
] | 1
|
2020-09-15T14:50:31.000Z
|
2020-09-15T14:50:31.000Z
|
#!/usr/bin/env python3
'''
Bilibili per-zone video count query script
Author: WuSiYu(wu.siyu@hotmail.com)
Date: 2018-07-26 00:54
This script builds on research by the uupers team: https://github.com/uupers/BiliSpider/wiki
'''
from urllib import request
import json
ALL_RID = (12, 15, 16, 17, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 37, 39, 41, 46, 47, 50, 51, 53, 54, 56, 59, 60, 65, 67, 71, 74, 75, 76, 77, 79, 80, 82, 83, 85, 86, 94, 95, 96, 98, 114, 116, 118, 120, 121, 122, 124, 125, 126, 127, 128, 130, 131, 134, 135, 136, 137, 138, 139, 141, 145, 146, 147, 152, 153, 154, 156, 157, 158, 159, 161, 162, 163, 164, 166, 168, 169, 170, 171, 172, 173, 174, 175, 176, 178, 179, 180, 182, 183, 184, 185, 187)
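# Zone (分区) ids probed through the public newlist API; ps=1&pn=1 keeps each
# response minimal since only data.page.count is used.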
videoCounts = {}
regionCount = len(ALL_RID)
i = 1
for rid in ALL_RID:
    print('Getting data from bilibili... (' + str(i) + '/' + str(regionCount) + ')', end="\r")
    apiURL = 'http://api.bilibili.com/x/web-interface/newlist?ps=1&pn=1&rid=' + str(rid)
    req = request.Request(apiURL)
    req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0')
    f = request.urlopen(req)
    data = json.loads(f.read())
    videoCounts[rid] = data['data']['page']['count']
    i += 1
print('Getting data from bilibili... done              ')
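# The report below is a hand-maintained zone tree; the names are Bilibili's
# official Chinese category labels and are kept verbatim.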
print('''
1: 动画
24:MAD·AMV \t\t视频数 = ''' + str( videoCounts[24] ) + '''
25: MMD·3D \t\t视频数 = ''' + str( videoCounts[25] ) + '''
47: 短片·手书·配音 \t视频数 = ''' + str( videoCounts[47] ) + '''
27: 综合 \t\t视频数 = ''' + str( videoCounts[27] ) + '''
13: 番剧
33: 连载动画 \t\t视频数 = ''' + str( videoCounts[33] ) + '''
32: 完结动画 \t\t视频数 = ''' + str( videoCounts[32] ) + '''
51: 资讯 \t\t视频数 = ''' + str( videoCounts[51] ) + '''
152: 官方延伸 \t\t视频数 = ''' + str( videoCounts[152] ) + '''
167:国创
153: 国产动画 \t\t视频数 = ''' + str( videoCounts[153] ) + '''
168: 国产原创相关 \t视频数 = ''' + str( videoCounts[168] ) + '''
169: 布袋戏 \t\t视频数 = ''' + str( videoCounts[169] ) + '''
170: 资讯 \t\t视频数 = ''' + str( videoCounts[170] ) + '''
3: 音乐
28: 原创音乐 \t\t视频数 = ''' + str( videoCounts[28] ) + '''
31: 翻唱 \t\t视频数 = ''' + str( videoCounts[31] ) + '''
30: VOCALOID·UTAU \t视频数 = ''' + str( videoCounts[30] ) + '''
59: 演奏 \t\t视频数 = ''' + str( videoCounts[59] ) + '''
29: 三次元音乐 \t视频数 = ''' + str( videoCounts[29] ) + '''
54: OP/ED/OST \t\t视频数 = ''' + str( videoCounts[54] ) + '''
130: 音乐选集 \t\t视频数 = ''' + str( videoCounts[130] ) + '''
129:舞蹈
20: 宅舞 \t\t视频数 = ''' + str( videoCounts[20] ) + '''
154: 三次元舞蹈 \t视频数 = ''' + str( videoCounts[154] ) + '''
156: 舞蹈教程 \t\t视频数 = ''' + str( videoCounts[156] ) + '''
4: 游戏
17: 单机联机 \t\t视频数 = ''' + str( videoCounts[17] ) + '''
171: 电子竞技 \t\t视频数 = ''' + str( videoCounts[171] ) + '''
172: 手机游戏 \t\t视频数 = ''' + str( videoCounts[172] ) + '''
65: 网络游戏 \t\t视频数 = ''' + str( videoCounts[65] ) + '''
173: 桌游棋牌 \t\t视频数 = ''' + str( videoCounts[173] ) + '''
121: GMV \t\t视频数 = ''' + str( videoCounts[121] ) + '''
136: 音游 \t\t视频数 = ''' + str( videoCounts[136] ) + '''
19: Mugen \t\t视频数 = ''' + str( videoCounts[19] ) + '''
36:科技
124: 趣味科普人文 \t视频数 = ''' + str( videoCounts[124] ) + '''
122: 野生技术协会 \t视频数 = ''' + str( videoCounts[122] ) + '''
39: 演讲· 公开课 \t视频数 = ''' + str( videoCounts[39] ) + '''
96: 星海 \t\t视频数 = ''' + str( videoCounts[96] ) + '''
95: 数码 \t\t视频数 = ''' + str( videoCounts[95] ) + '''
98: 机械 \t\t视频数 = ''' + str( videoCounts[98] ) + '''
176: 汽车 \t\t视频数 = ''' + str( videoCounts[176] ) + '''
160:生活
138: 搞笑 \t\t视频数 = ''' + str( videoCounts[138] ) + '''
21: 日常 \t\t视频数 = ''' + str( videoCounts[21] ) + '''
76: 美食圈 \t\t视频数 = ''' + str( videoCounts[76] ) + '''
75: 动物圈 \t\t视频数 = ''' + str( videoCounts[75] ) + '''
161: 手工 \t\t视频数 = ''' + str( videoCounts[161] ) + '''
162: 绘画 \t\t视频数 = ''' + str( videoCounts[162] ) + '''
175: ASMR \t\t视频数 = ''' + str( videoCounts[175] ) + '''
163: 运动 \t\t视频数 = ''' + str( videoCounts[163] ) + '''
174: 其他 \t\t视频数 = ''' + str( videoCounts[174] ) + '''
119:鬼畜
22: 鬼畜调教 \t\t视频数 = ''' + str( videoCounts[22] ) + '''
26: 音MAD \t\t视频数 = ''' + str( videoCounts[26] ) + '''
126: 人力VOCALOID \t视频数 = ''' + str( videoCounts[126] ) + '''
127: 教程演示 \t\t视频数 = ''' + str( videoCounts[127] ) + '''
155:时尚
157: 美妆 \t\t视频数 = ''' + str( videoCounts[157] ) + '''
158: 服饰 \t\t视频数 = ''' + str( videoCounts[158] ) + '''
164: 健身 \t\t视频数 = ''' + str( videoCounts[164] ) + '''
159: 资讯 \t\t视频数 = ''' + str( videoCounts[159] ) + '''
165:广告
166: 广告 \t\t视频数 = ''' + str( videoCounts[166] ) + '''
5: 娱乐
71: 综艺 \t\t视频数 = ''' + str( videoCounts[71] ) + '''
137: 明星 \t\t视频数 = ''' + str( videoCounts[137] ) + '''
131: Korea相关 \t视频数 = ''' + str( videoCounts[131] ) + '''
181:影视
182: 影视杂谈 \t\t视频数 = ''' + str( videoCounts[182] ) + '''
183: 影视剪辑 \t\t视频数 = ''' + str( videoCounts[183] ) + '''
85: 短片 \t\t视频数 = ''' + str( videoCounts[85] ) + '''
184: 预告 资讯 \t视频数 = ''' + str( videoCounts[184] ) + '''
86: 特摄 \t\t视频数 = ''' + str( videoCounts[86] ) + '''
放映厅:
177:纪录片
37: 人文历史 \t\t视频数 = ''' + str( videoCounts[37] ) + '''
178: 科学探索 \t\t视频数 = ''' + str( videoCounts[178] ) + '''
179: 热血军事 \t\t视频数 = ''' + str( videoCounts[179] ) + '''
180: 舌尖上的旅行 \t视频数 = ''' + str( videoCounts[180] ) + '''
23:电影
147: 华语电影 \t\t视频数 = ''' + str( videoCounts[147] ) + '''
145: 欧美电影 \t\t视频数 = ''' + str( videoCounts[145] ) + '''
146: 日本电影 \t\t视频数 = ''' + str( videoCounts[146] ) + '''
83: 其他国家 \t\t视频数 = ''' + str( videoCounts[83] ) + '''
11: 电视剧
185: 国产剧 \t\t视频数 = ''' + str( videoCounts[185] ) + '''
187: 海外剧 \t\t视频数 = ''' + str( videoCounts[187] ) + '''
''')
| 42.340741
| 459
| 0.501924
|
from urllib import request
import json
ALL_RID = (12, 15, 16, 17, 19, 20, 21, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 37, 39, 41, 46, 47, 50, 51, 53, 54, 56, 59, 60, 65, 67, 71, 74, 75, 76, 77, 79, 80, 82, 83, 85, 86, 94, 95, 96, 98, 114, 116, 118, 120, 121, 122, 124, 125, 126, 127, 128, 130, 131, 134, 135, 136, 137, 138, 139, 141, 145, 146, 147, 152, 153, 154, 156, 157, 158, 159, 161, 162, 163, 164, 166, 168, 169, 170, 171, 172, 173, 174, 175, 176, 178, 179, 180, 182, 183, 184, 185, 187)
videoCounts = {}
regionCount = len(ALL_RID)
i = 1
for rid in ALL_RID:
    print('Getting data from bilibili... (' + str(i) + '/' + str(regionCount) + ')', end="\r")
    apiURL = 'http://api.bilibili.com/x/web-interface/newlist?ps=1&pn=1&rid=' + str(rid)
    req = request.Request(apiURL)
    req.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux x86_64; rv:62.0) Gecko/20100101 Firefox/62.0')
    f = request.urlopen(req)
    data = json.loads(f.read())
    videoCounts[rid] = data['data']['page']['count']
    i += 1
print('Getting data from bilibili... done              ')
print('''
1: 动画
24:MAD·AMV \t\t视频数 = ''' + str( videoCounts[24] ) + '''
25: MMD·3D \t\t视频数 = ''' + str( videoCounts[25] ) + '''
47: 短片·手书·配音 \t视频数 = ''' + str( videoCounts[47] ) + '''
27: 综合 \t\t视频数 = ''' + str( videoCounts[27] ) + '''
13: 番剧
33: 连载动画 \t\t视频数 = ''' + str( videoCounts[33] ) + '''
32: 完结动画 \t\t视频数 = ''' + str( videoCounts[32] ) + '''
51: 资讯 \t\t视频数 = ''' + str( videoCounts[51] ) + '''
152: 官方延伸 \t\t视频数 = ''' + str( videoCounts[152] ) + '''
167:国创
153: 国产动画 \t\t视频数 = ''' + str( videoCounts[153] ) + '''
168: 国产原创相关 \t视频数 = ''' + str( videoCounts[168] ) + '''
169: 布袋戏 \t\t视频数 = ''' + str( videoCounts[169] ) + '''
170: 资讯 \t\t视频数 = ''' + str( videoCounts[170] ) + '''
3: 音乐
28: 原创音乐 \t\t视频数 = ''' + str( videoCounts[28] ) + '''
31: 翻唱 \t\t视频数 = ''' + str( videoCounts[31] ) + '''
30: VOCALOID·UTAU \t视频数 = ''' + str( videoCounts[30] ) + '''
59: 演奏 \t\t视频数 = ''' + str( videoCounts[59] ) + '''
29: 三次元音乐 \t视频数 = ''' + str( videoCounts[29] ) + '''
54: OP/ED/OST \t\t视频数 = ''' + str( videoCounts[54] ) + '''
130: 音乐选集 \t\t视频数 = ''' + str( videoCounts[130] ) + '''
129:舞蹈
20: 宅舞 \t\t视频数 = ''' + str( videoCounts[20] ) + '''
154: 三次元舞蹈 \t视频数 = ''' + str( videoCounts[154] ) + '''
156: 舞蹈教程 \t\t视频数 = ''' + str( videoCounts[156] ) + '''
4: 游戏
17: 单机联机 \t\t视频数 = ''' + str( videoCounts[17] ) + '''
171: 电子竞技 \t\t视频数 = ''' + str( videoCounts[171] ) + '''
172: 手机游戏 \t\t视频数 = ''' + str( videoCounts[172] ) + '''
65: 网络游戏 \t\t视频数 = ''' + str( videoCounts[65] ) + '''
173: 桌游棋牌 \t\t视频数 = ''' + str( videoCounts[173] ) + '''
121: GMV \t\t视频数 = ''' + str( videoCounts[121] ) + '''
136: 音游 \t\t视频数 = ''' + str( videoCounts[136] ) + '''
19: Mugen \t\t视频数 = ''' + str( videoCounts[19] ) + '''
36:科技
124: 趣味科普人文 \t视频数 = ''' + str( videoCounts[124] ) + '''
122: 野生技术协会 \t视频数 = ''' + str( videoCounts[122] ) + '''
39: 演讲· 公开课 \t视频数 = ''' + str( videoCounts[39] ) + '''
96: 星海 \t\t视频数 = ''' + str( videoCounts[96] ) + '''
95: 数码 \t\t视频数 = ''' + str( videoCounts[95] ) + '''
98: 机械 \t\t视频数 = ''' + str( videoCounts[98] ) + '''
176: 汽车 \t\t视频数 = ''' + str( videoCounts[176] ) + '''
160:生活
138: 搞笑 \t\t视频数 = ''' + str( videoCounts[138] ) + '''
21: 日常 \t\t视频数 = ''' + str( videoCounts[21] ) + '''
76: 美食圈 \t\t视频数 = ''' + str( videoCounts[76] ) + '''
75: 动物圈 \t\t视频数 = ''' + str( videoCounts[75] ) + '''
161: 手工 \t\t视频数 = ''' + str( videoCounts[161] ) + '''
162: 绘画 \t\t视频数 = ''' + str( videoCounts[162] ) + '''
175: ASMR \t\t视频数 = ''' + str( videoCounts[175] ) + '''
163: 运动 \t\t视频数 = ''' + str( videoCounts[163] ) + '''
174: 其他 \t\t视频数 = ''' + str( videoCounts[174] ) + '''
119:鬼畜
22: 鬼畜调教 \t\t视频数 = ''' + str( videoCounts[22] ) + '''
26: 音MAD \t\t视频数 = ''' + str( videoCounts[26] ) + '''
126: 人力VOCALOID \t视频数 = ''' + str( videoCounts[126] ) + '''
127: 教程演示 \t\t视频数 = ''' + str( videoCounts[127] ) + '''
155:时尚
157: 美妆 \t\t视频数 = ''' + str( videoCounts[157] ) + '''
158: 服饰 \t\t视频数 = ''' + str( videoCounts[158] ) + '''
164: 健身 \t\t视频数 = ''' + str( videoCounts[164] ) + '''
159: 资讯 \t\t视频数 = ''' + str( videoCounts[159] ) + '''
165:广告
166: 广告 \t\t视频数 = ''' + str( videoCounts[166] ) + '''
5: 娱乐
71: 综艺 \t\t视频数 = ''' + str( videoCounts[71] ) + '''
137: 明星 \t\t视频数 = ''' + str( videoCounts[137] ) + '''
131: Korea相关 \t视频数 = ''' + str( videoCounts[131] ) + '''
181:影视
182: 影视杂谈 \t\t视频数 = ''' + str( videoCounts[182] ) + '''
183: 影视剪辑 \t\t视频数 = ''' + str( videoCounts[183] ) + '''
85: 短片 \t\t视频数 = ''' + str( videoCounts[85] ) + '''
184: 预告 资讯 \t视频数 = ''' + str( videoCounts[184] ) + '''
86: 特摄 \t\t视频数 = ''' + str( videoCounts[86] ) + '''
放映厅:
177:纪录片
37: 人文历史 \t\t视频数 = ''' + str( videoCounts[37] ) + '''
178: 科学探索 \t\t视频数 = ''' + str( videoCounts[178] ) + '''
179: 热血军事 \t\t视频数 = ''' + str( videoCounts[179] ) + '''
180: 舌尖上的旅行 \t视频数 = ''' + str( videoCounts[180] ) + '''
23:电影
147: 华语电影 \t\t视频数 = ''' + str( videoCounts[147] ) + '''
145: 欧美电影 \t\t视频数 = ''' + str( videoCounts[145] ) + '''
146: 日本电影 \t\t视频数 = ''' + str( videoCounts[146] ) + '''
83: 其他国家 \t\t视频数 = ''' + str( videoCounts[83] ) + '''
11: 电视剧
185: 国产剧 \t\t视频数 = ''' + str( videoCounts[185] ) + '''
187: 海外剧 \t\t视频数 = ''' + str( videoCounts[187] ) + '''
''')
| true
| true
|
1c3e4b284b2e2a931344b34edf163c476a161ef9
| 12,893
|
py
|
Python
|
tests/models/test_gpu.py
|
javierlorenzod/pytorch-lightning
|
6dba26666aa564db414eb238d99a4213006d8220
|
[
"Apache-2.0"
] | 1
|
2021-08-05T01:45:26.000Z
|
2021-08-05T01:45:26.000Z
|
tests/models/test_gpu.py
|
javierlorenzod/pytorch-lightning
|
6dba26666aa564db414eb238d99a4213006d8220
|
[
"Apache-2.0"
] | null | null | null |
tests/models/test_gpu.py
|
javierlorenzod/pytorch-lightning
|
6dba26666aa564db414eb238d99a4213006d8220
|
[
"Apache-2.0"
] | 1
|
2021-02-16T00:47:46.000Z
|
2021-02-16T00:47:46.000Z
|
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import namedtuple
from unittest.mock import patch
import pytest
import torch
from torchtext.data import Batch, Dataset, Example, Field, LabelField
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import device_parser
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel
PRETEND_N_OF_GPUS = 16
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_none_backend(tmpdir):
"""Make sure when using multiple GPUs the user can't use `distributed_backend = None`."""
tutils.set_random_master_port()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.2,
limit_val_batches=0.2,
gpus=2,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, min_acc=0.20)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.parametrize('gpus', [1, [0], [1]])
def test_single_gpu_model(tmpdir, gpus):
"""Make sure single GPU works (DP mode)."""
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.1,
limit_val_batches=0.1,
gpus=gpus
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model)
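# The fixtures below monkeypatch torch.cuda so the gpu-argument parsing tests
# can run on machines without GPUs.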
@pytest.fixture
def mocked_device_count(monkeypatch):
def device_count():
return PRETEND_N_OF_GPUS
def is_available():
return True
monkeypatch.setattr(torch.cuda, 'is_available', is_available)
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.fixture
def mocked_device_count_0(monkeypatch):
def device_count():
return 0
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(0, 0, None, id="Oth gpu, expect 1 gpu to use."),
pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
pytest.param(-1, PRETEND_N_OF_GPUS, "ddp", id="-1 - use all gpus"),
pytest.param('-1', PRETEND_N_OF_GPUS, "ddp", id="'-1' - use all gpus"),
pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)")
])
def test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, accelerator=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
])
def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, accelerator=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="O gpus, expect gpu root device to be None."),
pytest.param(1, 0, "ddp", id="1 gpu, expect gpu root device to be 0."),
pytest.param(-1, 0, "ddp", id="-1 - use all gpus, expect gpu root device to be 0."),
pytest.param('-1', 0, "ddp", id="'-1' - use all gpus, expect gpu root device to be 0."),
pytest.param(3, 0, "ddp", id="3 gpus, expect gpu root device to be 0.(backend:ddp)")
])
def test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, accelerator=distributed_backend).root_gpu == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, None, id="None is None"),
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="None is None"),
])
def test_root_gpu_property_0_passing(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, accelerator=distributed_backend).root_gpu == expected_root_gpu
# Asking for a gpu when none are available will result in a MisconfigurationException
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(1, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param([1, 2], None, "ddp"),
pytest.param([0, 1], None, "ddp"),
pytest.param(-1, None, "ddp"),
pytest.param('-1', None, "ddp")
])
def test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
with pytest.raises(MisconfigurationException):
Trainer(gpus=gpus, accelerator=distributed_backend)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu'], [
pytest.param(None, None, id="No gpus, expect gpu root device to be None"),
pytest.param([0], 0, id="Oth gpu, expect gpu root device to be 0."),
pytest.param([1], 1, id="1st gpu, expect gpu root device to be 1."),
pytest.param([3], 3, id="3rd gpu, expect gpu root device to be 3."),
pytest.param([1, 2], 1, id="[1, 2] gpus, expect gpu root device to be 1."),
])
def test_determine_root_gpu_device(gpus, expected_root_gpu):
assert device_parser.determine_root_gpu_device(gpus) == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_gpu_ids'], [
pytest.param(None, None),
pytest.param(0, None),
pytest.param(1, [0]),
pytest.param(3, [0, 1, 2]),
pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id="-1 - use all gpus"),
pytest.param([0], [0]),
pytest.param([1, 3], [1, 3]),
pytest.param((1, 3), [1, 3]),
pytest.param('0', [0]),
pytest.param('3', [3]),
pytest.param('1, 3', [1, 3]),
pytest.param('2,', [2]),
pytest.param('-1', list(range(PRETEND_N_OF_GPUS)), id="'-1' - use all gpus"),
])
def test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):
assert device_parser.parse_gpu_ids(gpus) == expected_gpu_ids
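# The cases above exercise every `gpus` format the parser accepts: an int
# count, -1/'-1' for all visible devices, lists/tuples of device ids, and
# comma-separated id strings.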
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus'], [
pytest.param(0.1),
pytest.param(-2),
pytest.param(False),
pytest.param([]),
pytest.param([-1]),
pytest.param([None]),
pytest.param(['0']),
])
def test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [[1, 2, 19], -1, '-1'])
def test_parse_gpu_fail_on_non_existent_id(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids([1, 2, 19])
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [-1, '-1'])
def test_parse_gpu_returns_none_when_no_devices_are_available(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids(gpus)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_single_gpu_batch_parse():
trainer = Trainer(gpus=1)
# non-transferrable types
primitive_objects = [None, {}, [], 1.0, "x", [None, 2], {"x": (1, 2), "y": None}]
for batch in primitive_objects:
data = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert data == batch
# batch is just a tensor
batch = torch.rand(2, 3)
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'
# tensor list
batch = [torch.rand(2, 3), torch.rand(2, 3)]
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'
assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'
# tensor list of lists
batch = [[torch.rand(2, 3), torch.rand(2, 3)]]
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'
# tensor dict
batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'
# tuple of tensor list and list of tensor dict
batch = ([torch.rand(2, 3) for _ in range(2)], [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
assert batch[1][0]['a'].device.index == 0
assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'
assert batch[1][0]['b'].device.index == 0
assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'
# namedtuple of tensor
BatchType = namedtuple('BatchType', ['a', 'b'])
batch = [BatchType(a=torch.rand(2, 3), b=torch.rand(2, 3)) for _ in range(2)]
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0].a.device.index == 0
assert batch[0].a.type() == 'torch.cuda.FloatTensor'
# non-Tensor that has `.to()` defined
class CustomBatchType:
def __init__(self):
self.a = torch.rand(2, 2)
def to(self, *args, **kwargs):
self.a = self.a.to(*args, **kwargs)
return self
batch = trainer.accelerator_backend.batch_to_device(CustomBatchType(), torch.device('cuda:0'))
assert batch.a.type() == 'torch.cuda.FloatTensor'
# torchtext.data.Batch
samples = [{
'text': 'PyTorch Lightning is awesome!',
'label': 0
}, {
'text': 'Please make it work with torchtext',
'label': 1
}]
text_field = Field()
label_field = LabelField()
fields = {'text': ('text', text_field), 'label': ('label', label_field)}
examples = [Example.fromdict(sample, fields) for sample in samples]
dataset = Dataset(examples=examples, fields=fields.values())
    # Batch runs field.process(), which numericalizes tokens, but that requires the vocabulary to be built first
text_field.build_vocab(dataset)
label_field.build_vocab(dataset)
batch = Batch(data=examples, dataset=dataset)
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch.text.type() == 'torch.cuda.LongTensor'
assert batch.label.type() == 'torch.cuda.LongTensor'
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_non_blocking():
""" Tests that non_blocking=True only gets passed on torch.Tensor.to, but not on other objects. """
trainer = Trainer()
batch = torch.zeros(2, 3)
with patch.object(batch, 'to', wraps=batch.to) as mocked:
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
mocked.assert_called_with(torch.device('cuda', 0), non_blocking=True)
class BatchObject(object):
def to(self, *args, **kwargs):
pass
batch = BatchObject()
with patch.object(batch, 'to', wraps=batch.to) as mocked:
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
mocked.assert_called_with(torch.device('cuda', 0))
| 39.670769
| 118
| 0.692701
|
from collections import namedtuple
from unittest.mock import patch
import pytest
import torch
from torchtext.data import Batch, Dataset, Example, Field, LabelField
import tests.helpers.pipelines as tpipes
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.utilities import device_parser
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from tests.helpers import BoringModel
PRETEND_N_OF_GPUS = 16
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
def test_multi_gpu_none_backend(tmpdir):
tutils.set_random_master_port()
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.2,
limit_val_batches=0.2,
gpus=2,
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model, min_acc=0.20)
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.parametrize('gpus', [1, [0], [1]])
def test_single_gpu_model(tmpdir, gpus):
trainer_options = dict(
default_root_dir=tmpdir,
progress_bar_refresh_rate=0,
max_epochs=1,
limit_train_batches=0.1,
limit_val_batches=0.1,
gpus=gpus
)
model = BoringModel()
tpipes.run_model_test(trainer_options, model)
@pytest.fixture
def mocked_device_count(monkeypatch):
def device_count():
return PRETEND_N_OF_GPUS
def is_available():
return True
monkeypatch.setattr(torch.cuda, 'is_available', is_available)
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.fixture
def mocked_device_count_0(monkeypatch):
def device_count():
return 0
monkeypatch.setattr(torch.cuda, 'device_count', device_count)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(0, 0, None, id="Oth gpu, expect 1 gpu to use."),
pytest.param(1, 1, None, id="1st gpu, expect 1 gpu to use."),
pytest.param(-1, PRETEND_N_OF_GPUS, "ddp", id="-1 - use all gpus"),
pytest.param('-1', PRETEND_N_OF_GPUS, "ddp", id="'-1' - use all gpus"),
pytest.param(3, 3, "ddp", id="3rd gpu - 1 gpu to use (backend:ddp)")
])
def test_trainer_gpu_parse(mocked_device_count, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, accelerator=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(["gpus", "expected_num_gpus", "distributed_backend"], [
pytest.param(None, 0, None, id="None - expect 0 gpu to use."),
pytest.param(None, 0, "ddp", id="None - expect 0 gpu to use."),
])
def test_trainer_num_gpu_0(mocked_device_count_0, gpus, expected_num_gpus, distributed_backend):
assert Trainer(gpus=gpus, accelerator=distributed_backend).num_gpus == expected_num_gpus
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="O gpus, expect gpu root device to be None."),
pytest.param(1, 0, "ddp", id="1 gpu, expect gpu root device to be 0."),
pytest.param(-1, 0, "ddp", id="-1 - use all gpus, expect gpu root device to be 0."),
pytest.param('-1', 0, "ddp", id="'-1' - use all gpus, expect gpu root device to be 0."),
pytest.param(3, 0, "ddp", id="3 gpus, expect gpu root device to be 0.(backend:ddp)")
])
def test_root_gpu_property(mocked_device_count, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, accelerator=distributed_backend).root_gpu == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(None, None, None, id="None is None"),
pytest.param(None, None, "ddp", id="None is None"),
pytest.param(0, None, "ddp", id="None is None"),
])
def test_root_gpu_property_0_passing(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
assert Trainer(gpus=gpus, accelerator=distributed_backend).root_gpu == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu', "distributed_backend"], [
pytest.param(1, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param(3, None, "ddp"),
pytest.param([1, 2], None, "ddp"),
pytest.param([0, 1], None, "ddp"),
pytest.param(-1, None, "ddp"),
pytest.param('-1', None, "ddp")
])
def test_root_gpu_property_0_raising(mocked_device_count_0, gpus, expected_root_gpu, distributed_backend):
with pytest.raises(MisconfigurationException):
Trainer(gpus=gpus, accelerator=distributed_backend)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_root_gpu'], [
pytest.param(None, None, id="No gpus, expect gpu root device to be None"),
pytest.param([0], 0, id="Oth gpu, expect gpu root device to be 0."),
pytest.param([1], 1, id="1st gpu, expect gpu root device to be 1."),
pytest.param([3], 3, id="3rd gpu, expect gpu root device to be 3."),
pytest.param([1, 2], 1, id="[1, 2] gpus, expect gpu root device to be 1."),
])
def test_determine_root_gpu_device(gpus, expected_root_gpu):
assert device_parser.determine_root_gpu_device(gpus) == expected_root_gpu
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus', 'expected_gpu_ids'], [
pytest.param(None, None),
pytest.param(0, None),
pytest.param(1, [0]),
pytest.param(3, [0, 1, 2]),
pytest.param(-1, list(range(PRETEND_N_OF_GPUS)), id="-1 - use all gpus"),
pytest.param([0], [0]),
pytest.param([1, 3], [1, 3]),
pytest.param((1, 3), [1, 3]),
pytest.param('0', [0]),
pytest.param('3', [3]),
pytest.param('1, 3', [1, 3]),
pytest.param('2,', [2]),
pytest.param('-1', list(range(PRETEND_N_OF_GPUS)), id="'-1' - use all gpus"),
])
def test_parse_gpu_ids(mocked_device_count, gpus, expected_gpu_ids):
assert device_parser.parse_gpu_ids(gpus) == expected_gpu_ids
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize(['gpus'], [
pytest.param(0.1),
pytest.param(-2),
pytest.param(False),
pytest.param([]),
pytest.param([-1]),
pytest.param([None]),
pytest.param(['0']),
])
def test_parse_gpu_fail_on_unsupported_inputs(mocked_device_count, gpus):
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [[1, 2, 19], -1, '-1'])
def test_parse_gpu_fail_on_non_existent_id(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids(gpus)
@pytest.mark.gpus_param_tests
def test_parse_gpu_fail_on_non_existent_id_2(mocked_device_count):
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids([1, 2, 19])
@pytest.mark.gpus_param_tests
@pytest.mark.parametrize("gpus", [-1, '-1'])
def test_parse_gpu_returns_none_when_no_devices_are_available(mocked_device_count_0, gpus):
with pytest.raises(MisconfigurationException):
device_parser.parse_gpu_ids(gpus)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_single_gpu_batch_parse():
trainer = Trainer(gpus=1)
primitive_objects = [None, {}, [], 1.0, "x", [None, 2], {"x": (1, 2), "y": None}]
for batch in primitive_objects:
data = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert data == batch
batch = torch.rand(2, 3)
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch.device.index == 0 and batch.type() == 'torch.cuda.FloatTensor'
batch = [torch.rand(2, 3), torch.rand(2, 3)]
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0].device.index == 0 and batch[0].type() == 'torch.cuda.FloatTensor'
assert batch[1].device.index == 0 and batch[1].type() == 'torch.cuda.FloatTensor'
batch = [[torch.rand(2, 3), torch.rand(2, 3)]]
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
assert batch[0][1].device.index == 0 and batch[0][1].type() == 'torch.cuda.FloatTensor'
batch = [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)}]
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0]['a'].device.index == 0 and batch[0]['a'].type() == 'torch.cuda.FloatTensor'
assert batch[0]['b'].device.index == 0 and batch[0]['b'].type() == 'torch.cuda.FloatTensor'
batch = ([torch.rand(2, 3) for _ in range(2)], [{'a': torch.rand(2, 3), 'b': torch.rand(2, 3)} for _ in range(2)])
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0][0].device.index == 0 and batch[0][0].type() == 'torch.cuda.FloatTensor'
assert batch[1][0]['a'].device.index == 0
assert batch[1][0]['a'].type() == 'torch.cuda.FloatTensor'
assert batch[1][0]['b'].device.index == 0
assert batch[1][0]['b'].type() == 'torch.cuda.FloatTensor'
BatchType = namedtuple('BatchType', ['a', 'b'])
batch = [BatchType(a=torch.rand(2, 3), b=torch.rand(2, 3)) for _ in range(2)]
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch[0].a.device.index == 0
assert batch[0].a.type() == 'torch.cuda.FloatTensor'
class CustomBatchType:
def __init__(self):
self.a = torch.rand(2, 2)
def to(self, *args, **kwargs):
self.a = self.a.to(*args, **kwargs)
return self
batch = trainer.accelerator_backend.batch_to_device(CustomBatchType(), torch.device('cuda:0'))
assert batch.a.type() == 'torch.cuda.FloatTensor'
samples = [{
'text': 'PyTorch Lightning is awesome!',
'label': 0
}, {
'text': 'Please make it work with torchtext',
'label': 1
}]
text_field = Field()
label_field = LabelField()
fields = {'text': ('text', text_field), 'label': ('label', label_field)}
examples = [Example.fromdict(sample, fields) for sample in samples]
dataset = Dataset(examples=examples, fields=fields.values())
text_field.build_vocab(dataset)
label_field.build_vocab(dataset)
batch = Batch(data=examples, dataset=dataset)
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
assert batch.text.type() == 'torch.cuda.LongTensor'
assert batch.label.type() == 'torch.cuda.LongTensor'
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_non_blocking():
trainer = Trainer()
batch = torch.zeros(2, 3)
with patch.object(batch, 'to', wraps=batch.to) as mocked:
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
mocked.assert_called_with(torch.device('cuda', 0), non_blocking=True)
class BatchObject(object):
def to(self, *args, **kwargs):
pass
batch = BatchObject()
with patch.object(batch, 'to', wraps=batch.to) as mocked:
batch = trainer.accelerator_backend.batch_to_device(batch, torch.device('cuda:0'))
mocked.assert_called_with(torch.device('cuda', 0))
| true
| true
|
1c3e4b60b760a917f64869badc26e0447f7b250e
| 1,864
|
py
|
Python
|
doodle/core/models/keyword.py
|
keakon/Doodle
|
d349a2686902fe6aac7087e32a7de76495890c0a
|
[
"MIT"
] | 38
|
2016-02-22T07:49:40.000Z
|
2021-07-14T09:46:48.000Z
|
doodle/core/models/keyword.py
|
keakon/Doodle
|
d349a2686902fe6aac7087e32a7de76495890c0a
|
[
"MIT"
] | 8
|
2016-02-22T07:51:16.000Z
|
2018-10-05T02:11:51.000Z
|
doodle/core/models/keyword.py
|
keakon/Doodle
|
d349a2686902fe6aac7087e32a7de76495890c0a
|
[
"MIT"
] | 16
|
2016-03-27T03:36:16.000Z
|
2020-09-23T10:04:52.000Z
|
# -*- coding: utf-8 -*-
import logging
from doodle.config import CONFIG
from doodle.core.property import IntegerProperty, StringProperty
from doodle.core.redis_client import redis_cache_client
from .base_model import JSONModel
class KeywordArticle(JSONModel):
keywords = StringProperty()
article_id = IntegerProperty()
def _get_watching_keys(self, inserting=False):
return [self.KEY]
def _save_self(self, redis_client, inserting=False):
member = '%s:%d' % (self.keywords, self.article_id)
redis_client.sadd(self.KEY, member)
def delete(self, redis_client):
member = '%s:%d' % (self.keywords, self.article_id)
redis_client.srem(self.KEY, member)
@classmethod
def query_by_keyword(cls, keyword, result_limit=CONFIG.SEARCH_PAGE_SIZE, search_limit=CONFIG.MAX_SEARCH_COUNT):
cache_key = 'KeywordArticles:' + keyword
cached_result = redis_cache_client.get(cache_key)
if cached_result is not None:
if not cached_result:
return []
try:
article_ids = cached_result.split(',')
return [int(article_id) for article_id in article_ids]
except ValueError:
logging.warning('Key "%s" contains wrong value: %s', cache_key, cached_result)
redis_cache_client.delete(cache_key)
pattern = '*%s*:*' % keyword.lower()
cursor, members = cls.redis_client.sscan(cls.KEY, match=pattern, count=search_limit)
if members:
article_ids = [member.rsplit(':', 1)[-1] for member in members[:result_limit]]
result = [int(article_id) for article_id in article_ids]
else:
article_ids = result = []
redis_cache_client.set(cache_key, ','.join(article_ids), ex=CONFIG.DEFAULT_CACHE_TIME)
return result
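# Illustrative call (hypothetical data): if the set holds a member such as
# 'python redis:42', then KeywordArticle.query_by_keyword('python') returns
# [42] and caches '42' under 'KeywordArticles:python' for DEFAULT_CACHE_TIME.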
| 35.846154
| 115
| 0.656652
|
import logging
from doodle.config import CONFIG
from doodle.core.property import IntegerProperty, StringProperty
from doodle.core.redis_client import redis_cache_client
from .base_model import JSONModel
class KeywordArticle(JSONModel):
keywords = StringProperty()
article_id = IntegerProperty()
def _get_watching_keys(self, inserting=False):
return [self.KEY]
def _save_self(self, redis_client, inserting=False):
member = '%s:%d' % (self.keywords, self.article_id)
redis_client.sadd(self.KEY, member)
def delete(self, redis_client):
member = '%s:%d' % (self.keywords, self.article_id)
redis_client.srem(self.KEY, member)
@classmethod
def query_by_keyword(cls, keyword, result_limit=CONFIG.SEARCH_PAGE_SIZE, search_limit=CONFIG.MAX_SEARCH_COUNT):
cache_key = 'KeywordArticles:' + keyword
cached_result = redis_cache_client.get(cache_key)
if cached_result is not None:
if not cached_result:
return []
try:
article_ids = cached_result.split(',')
return [int(article_id) for article_id in article_ids]
except ValueError:
logging.warning('Key "%s" contains wrong value: %s', cache_key, cached_result)
redis_cache_client.delete(cache_key)
pattern = '*%s*:*' % keyword.lower()
cursor, members = cls.redis_client.sscan(cls.KEY, match=pattern, count=search_limit)
if members:
article_ids = [member.rsplit(':', 1)[-1] for member in members[:result_limit]]
result = [int(article_id) for article_id in article_ids]
else:
article_ids = result = []
redis_cache_client.set(cache_key, ','.join(article_ids), ex=CONFIG.DEFAULT_CACHE_TIME)
return result
| true
| true
|
1c3e4baf278cb9e16666ec1e8178813371e0b652
| 1,569
|
py
|
Python
|
nwb_conversion_tools/utils/metadata.py
|
miketrumpis/nwb-conversion-tools
|
4d5c270b70eb4f1c09f98a6c04b51ccdf20336c1
|
[
"BSD-3-Clause"
] | 19
|
2020-05-04T18:40:36.000Z
|
2022-01-24T08:53:14.000Z
|
nwb_conversion_tools/utils/metadata.py
|
miketrumpis/nwb-conversion-tools
|
4d5c270b70eb4f1c09f98a6c04b51ccdf20336c1
|
[
"BSD-3-Clause"
] | 369
|
2020-04-06T14:20:08.000Z
|
2022-03-31T16:05:48.000Z
|
nwb_conversion_tools/utils/metadata.py
|
miketrumpis/nwb-conversion-tools
|
4d5c270b70eb4f1c09f98a6c04b51ccdf20336c1
|
[
"BSD-3-Clause"
] | 10
|
2020-03-31T20:06:00.000Z
|
2022-03-26T08:25:49.000Z
|
from pathlib import Path
import yaml
import json
class NoDatesSafeLoader(yaml.SafeLoader):
@classmethod
def remove_implicit_resolver(cls, tag_to_remove):
"""
Solution from here: https://stackoverflow.com/a/37958106/11483674
Remove implicit resolvers for a particular tag
Takes care not to modify resolvers in super classes.
We want to load datetimes as strings, not dates, because we
go on to serialise as json which doesn't have the advanced types
of yaml, and leads to incompatibilities down the track.
"""
if not "yaml_implicit_resolvers" in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
for first_letter, mappings in cls.yaml_implicit_resolvers.items():
cls.yaml_implicit_resolvers[first_letter] = [
(tag, regexp) for tag, regexp in mappings if tag != tag_to_remove
]
NoDatesSafeLoader.remove_implicit_resolver("tag:yaml.org,2002:timestamp")
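# Illustrative effect of removing the resolver (standard PyYAML behaviour):
#   yaml.load("date: 2020-01-01", Loader=NoDatesSafeLoader)  # {'date': '2020-01-01'}
#   yaml.load("date: 2020-01-01", Loader=yaml.SafeLoader)    # {'date': datetime.date(2020, 1, 1)}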
def load_metadata_from_file(file) -> dict:
"""
Function to safely load metadata from YAML and JSON files.
"""
assert Path(file).is_file(), f"{file} is not a file."
assert Path(file).suffix in [".yml", ".json"], f"{file} is not a valid .yml or .json file."
if Path(file).suffix == ".yml":
with open(file, "r") as f:
metadata = yaml.load(f, Loader=NoDatesSafeLoader)
elif Path(file).suffix == ".json":
with open(file, "r") as f:
metadata = json.load(f)
return metadata
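# Example usage (hypothetical path):
#   metadata = load_metadata_from_file("metadata.yml")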
| 34.108696
| 95
| 0.660293
|
from pathlib import Path
import yaml
import json
class NoDatesSafeLoader(yaml.SafeLoader):
@classmethod
def remove_implicit_resolver(cls, tag_to_remove):
if not "yaml_implicit_resolvers" in cls.__dict__:
cls.yaml_implicit_resolvers = cls.yaml_implicit_resolvers.copy()
for first_letter, mappings in cls.yaml_implicit_resolvers.items():
cls.yaml_implicit_resolvers[first_letter] = [
(tag, regexp) for tag, regexp in mappings if tag != tag_to_remove
]
NoDatesSafeLoader.remove_implicit_resolver("tag:yaml.org,2002:timestamp")
def load_metadata_from_file(file) -> dict:
assert Path(file).is_file(), f"{file} is not a file."
assert Path(file).suffix in [".yml", ".json"], f"{file} is not a valid .yml or .json file."
if Path(file).suffix == ".yml":
with open(file, "r") as f:
metadata = yaml.load(f, Loader=NoDatesSafeLoader)
elif Path(file).suffix == ".json":
with open(file, "r") as f:
metadata = json.load(f)
return metadata
| true
| true
|
1c3e4c815d7c51fa5a1627fcbb87e19f55a67ceb
| 11,265
|
py
|
Python
|
scitbx/math/curve_fitting.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
scitbx/math/curve_fitting.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
scitbx/math/curve_fitting.py
|
rimmartin/cctbx_project
|
644090f9432d9afc22cfb542fc3ab78ca8e15e5d
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import division
import math
import libtbx
import libtbx.load_env
from libtbx import adopt_init_args
from scitbx.array_family import flex
import scitbx.lbfgs
import scitbx.math
from scitbx import matrix
class function_base(object):
def __call__(self, x_obs):
raise NotImplementedError
def partial_derivatives(self, x_obs):
""" This default implementation returns the finite difference partial
derivatives. Override this function to calculate the derivatives
analytically if required.
"""
return [flex.double(g) for g in self.finite_differences(x_obs)]
def finite_differences(self, x_obs, eps=1e-4):
grads = []
for i in range(len(self.params)):
params = flex.double(self.params)
params[i] += eps
f = self.__class__(*params)
qm = matrix.col(f(x_obs))
params[i] -= 2 * eps
f = self.__class__(*params)
qp = matrix.col(f(x_obs))
dq = (qm-qp)/(2*eps)
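      # dq is the central difference (f(p+eps) - f(p-eps)) / (2*eps) for parameter i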
grads.append(list(dq))
return grads
class univariate_polynomial(function_base):
def __init__(self, *params):
"""A polynomial of degree n:
    f(x) = a[0] + a[1] x**1 + ... + a[n] x**n.
"""
self.params = params
self.n_terms = len(params)
self.degree = self.n_terms - 1
def __call__(self, x_obs):
y_calc = flex.double(x_obs.size())
for n in range(self.n_terms):
y_calc += self.params[n] * flex.pow(x_obs, n)
return y_calc
def partial_derivatives(self, x_obs):
g = []
for n in range(self.n_terms):
g.append(flex.pow(x_obs, n))
return g
class gaussian(function_base):
def __init__(self, a, b, c):
"""Simple wrapper for the parameters associated with a gaussian
f(x) = a * exp(-(x - b)^2 / (2 * c^2))
"""
adopt_init_args(self, locals())
self.params = (a, b, c)
def __call__(self, x_obs):
a, b, c = self.params
y_calc = a * flex.exp(-flex.pow2(x_obs - b) / (2 * c**2))
return y_calc
def partial_derivatives(self, x_obs):
a, b, c = self.params
exponential_part = flex.exp(-flex.pow2(x_obs - b) / (2 * c**2))
return(exponential_part,
a * (x_obs - b) / c**2 * exponential_part,
a * flex.pow2(x_obs - b) / c**3 * exponential_part)
@property
def sigma(self):
return abs(self.params[2])
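# Illustrative use of the gaussian wrapper (values chosen arbitrarily):
#   g = gaussian(2.0, 0.5, 1.5)
#   g(flex.double([0.5]))  # returns [2.0]: the peak value, since x == b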
class skew_normal(function_base):
def __init__(self, shape, location, scale):
adopt_init_args(self, locals())
self.params = (shape, location, scale)
def __call__(self, x_obs):
shape, location, scale = self.params
normal_part = (2 / (scale * math.sqrt(2 * math.pi))
* flex.exp(- flex.pow2(x_obs - location)/(2 * scale**2)))
cdf_part = 0.5 * (
1 + scitbx.math.erf(shape * (x_obs - location)/ (math.sqrt(2) * scale)))
y_calc = normal_part * cdf_part
return y_calc
def partial_derivatives(self, x_obs):
shape, location, scale = self.params
exponential_part = (1/(math.sqrt(2 * math.pi))
* flex.exp(- flex.pow2(x_obs - location)/(2 * scale**2)))
normal_part = 2 / scale * exponential_part
cdf_part = 0.5 * (
1 + scitbx.math.erf(shape * (x_obs - location)/ (math.sqrt(2) * scale)))
d_normal_part_d_location = 2 / scale**3 * (x_obs - location) * exponential_part
d_normal_part_d_scale = \
2 / scale**4 * (flex.pow2(x_obs - location) - scale**2) * exponential_part
exponential_part_with_shape = (
1 / (math.sqrt(math.pi)) *
flex.exp(-shape**2 * flex.pow2(x_obs - location)/(2 * scale**2)))
d_cdf_d_shape = \
(x_obs - location) / (math.sqrt(2) * scale) * exponential_part_with_shape
d_cdf_d_location = \
-shape / (math.sqrt(2) * scale) * exponential_part_with_shape
d_cdf_d_scale = (-shape * (x_obs - location) * exponential_part_with_shape /
(math.sqrt(2) * scale**2))
    # product rule: d(normal_part * cdf_part)/d(param) = d_normal * cdf_part + normal_part * d_cdf
return (d_cdf_d_shape * normal_part,
d_normal_part_d_location * cdf_part + d_cdf_d_location * normal_part,
d_normal_part_d_scale * cdf_part + d_cdf_d_scale * normal_part)
class tanh(function_base):
def __init__(self, *params):
"""
Curve fitting as suggested by Ed Pozharski to a tanh function
of the form (1/2)(1 - tanh(z)) where z = (s - s0)/r,
s0 is the value of s at the half-falloff value, and r controls the
steepness of falloff
"""
self.params = params
def __call__(self, x_obs):
s = x_obs
r, s0 = self.params
z = (s - s0)/r
return 0.5 * (1 - flex.tanh(z))
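# Shape note: the curve above equals 0.5 at s == s0, tends to 1 for s << s0 and
# to 0 for s >> s0, with r setting the width of the falloff.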
class tanh_fit(object):
def __init__(self, x_obs, y_obs, r=1, s0=1,
min_iterations=0,
max_iterations=None):
"""Curve fitting as suggested by Ed Pozharski to a tanh function
of the form (1/2)(1 - tanh(z)) where z = (s - s0)/r,
s0 is the value of s at the half-falloff value, and r controls the
steepness of falloff
:param x_obs: x-coordinates of the data
:type x_obs: flex.double
:param y_obs: y-coordinates of the data
:type y_obs: flex.double
:param s0: s0 is the value of s at the half-falloff value
:type s0: float
:param r: r controls the steepness of falloff
:type r: float
"""
self.x_obs = x_obs
self.y_obs = y_obs
assert r >= 0
f = tanh(r, s0)
fit = lbfgs_minimiser(
functions=[f],
x_obs=x_obs,
y_obs=self.y_obs,
termination_params=scitbx.lbfgs.termination_parameters(
min_iterations=min_iterations,
max_iterations=max_iterations))
self.params = fit.functions[0].params
class univariate_polynomial_fit(object):
def __init__(self, x_obs, y_obs, degree,
min_iterations=0,
max_iterations=None,
number_of_cycles=1):
"""Fit a polynomial of degree n to points (x_obs, y_obs)
    f(x) = a[0] + a[1] x**1 + ... + a[n] x**n.
:param x_obs: x-coordinates of the data
:type x_obs: flex.double
:param y_obs: y-coordinates of the data
:type y_obs: flex.double
:param degree: the degree of the polynomial - the largest power of x
:type degree: int
"""
self.x_obs = x_obs
self.y_obs = y_obs
assert isinstance(degree, int)
assert degree >= 0
self.degree = degree
self.n_terms = degree + 1
params = flex.double([1] * self.n_terms)
for cycle in xrange(number_of_cycles):
polynomial = univariate_polynomial(*params)
fit = lbfgs_minimiser(
functions=[polynomial],
x_obs=x_obs,
y_obs=self.y_obs,
termination_params=scitbx.lbfgs.termination_parameters(
min_iterations=min_iterations,
max_iterations=max_iterations))
self.params = fit.functions[0].params
params = self.params
class single_gaussian_fit(object):
def __init__(self, x_obs, y_obs):
"""Fit a gaussian to points (x_obs, y_obs):
f(x) = A exp(-(x - mu)**2 / (2 * sigma**2))
:param x_obs: x-coordinates of the data
:type x_obs: flex.double
:param y_obs: y-coordinates of the data
:type y_obs: flex.double
"""
self.x_obs = x_obs
self.y_obs = y_obs
max_i = flex.max_index(y_obs)
# quick estimate of scale and mean to give the optimiser a helping hand
scale = y_obs[max_i]
mu = x_obs[max_i]
sigma = 1 # can we make a simple estimate of sigma too?
fit = gaussian_fit(x_obs, y_obs, [gaussian(scale, mu, sigma)])
self.a = fit.gaussians[0].a
self.b = fit.gaussians[0].b
self.c = fit.gaussians[0].c
class gaussian_fit(object):
def __init__(self, x_obs, y_obs, starting_gaussians, termination_params=None):
"""Fit one or more gaussians to points (x_obs, y_obs):
f(x) = sum_i(A_i exp(-(x - mu_i)**2 / (2 * sigma_i**2)))
:param x_obs: x-coordinates of the data
:type x_obs: flex.double
:param y_obs: y-coordinates of the data
:type y_obs: flex.double
    :param starting_gaussians: a list or tuple of gaussian objects
    :type starting_gaussians: list
"""
self.n_cycles = 0
self.x_obs = x_obs
self.y_obs = y_obs
self.n_gaussians = len(starting_gaussians)
assert self.n_gaussians > 0
fit = lbfgs_minimiser(
functions=starting_gaussians, x_obs=x_obs, y_obs=self.y_obs)
self.gaussians = fit.functions
def compute_y_calc(self):
y_calc = flex.double(self.x_obs.size())
for i in range(self.n_gaussians):
y_calc += self.gaussians[i](self.x_obs)
return y_calc
def pyplot(self):
from matplotlib import pyplot
pyplot.plot(self.x_obs, self.y_obs)
pyplot.plot(self.x_obs, self.compute_y_calc())
for i in range(self.n_gaussians):
scale, mu, S = tuple(self.x[i*3:i*3+3])
y_calc = scale * flex.exp(-flex.pow2(self.x_obs-mu) * S**2)
pyplot.plot(self.x_obs, y_calc)
pyplot.show()
class minimiser_base(object):
def __init__(self, functions, x_obs, y_obs):
self.n_cycles = 0
self.x_obs = x_obs
self.y_obs = y_obs
self.n_functions = len(functions)
self.functions = functions
def compute_functional(self, params):
self.x = params
y_calc = self.compute_y_calc()
delta_y = self.y_obs - y_calc
f = flex.sum(flex.pow2(delta_y))
return f
def compute_y_calc(self):
y_calc = flex.double(self.x_obs.size())
for f in self.functions:
y_calc += f(self.x_obs)
return y_calc
def compute_functional_and_gradients(self):
y_calc = self.compute_y_calc()
delta_y = self.y_obs - y_calc
f = flex.sum(flex.pow2(delta_y))
g = flex.double()
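    # For f = sum(delta_y**2), df/dp = -2 * sum(delta_y * d(y_calc)/dp), so one
    # gradient entry is appended per parameter of each function.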
for funct in self.functions:
partial_ders = funct.partial_derivatives(self.x_obs)
for i, partial in enumerate(partial_ders):
g.append(-2 * flex.sum(delta_y * partial))
return f, g
def callback_after_step(self, minimizer):
self.n_cycles += 1
def pyplot(self):
from matplotlib import pyplot
pyplot.plot(self.x_obs, self.y_obs)
pyplot.plot(self.x_obs, self.compute_y_calc())
for f in self.functions:
y_calc = f(self.x_obs)
pyplot.plot(self.x_obs, y_calc)
pyplot.show()
@property
def functions(self):
x = self.x.deep_copy()
for i, f in enumerate(self._functions):
f = self._functions[i]
self._functions[i] = f.__class__(*x[:len(f.params)])
x = x[len(f.params):]
return self._functions
@functions.setter
def functions(self, functions):
self._functions = functions
x = []
for f in self._functions:
x.extend(f.params)
self.x = flex.double(x)
class lbfgs_minimiser(minimiser_base):
def __init__(self, functions, x_obs, y_obs, termination_params=None):
super(lbfgs_minimiser, self).__init__(functions, x_obs, y_obs)
self.minimizer = scitbx.lbfgs.run(
target_evaluator=self, termination_params=termination_params)
have_cma_es = libtbx.env.has_module("cma_es")
if have_cma_es:
from cma_es import cma_es_interface
class cma_es_minimiser(minimiser_base):
def __init__(self, functions, x_obs, y_obs):
super(cma_es_minimiser, self).__init__(functions, x_obs, y_obs)
sigma = flex.double(self.x.size(), 1)
self.minimizer = cma_es_interface.cma_es_driver(len(self.x), self.x.deep_copy(), sigma, self.compute_functional)
generic_minimiser = lbfgs_minimiser # XXX backward compatibility 2012-02-07
| 31.37883
| 118
| 0.647226
|
from __future__ import division
import math
import libtbx
import libtbx.load_env
from libtbx import adopt_init_args
from scitbx.array_family import flex
import scitbx.lbfgs
import scitbx.math
from scitbx import matrix
class function_base(object):
def __call__(self, x_obs):
raise NotImplementedError
def partial_derivatives(self, x_obs):
return [flex.double(g) for g in self.finite_differences(x_obs)]
def finite_differences(self, x_obs, eps=1e-4):
grads = []
for i in range(len(self.params)):
params = flex.double(self.params)
params[i] += eps
f = self.__class__(*params)
qm = matrix.col(f(x_obs))
params[i] -= 2 * eps
f = self.__class__(*params)
qp = matrix.col(f(x_obs))
dq = (qm-qp)/(2*eps)
grads.append(list(dq))
return grads
class univariate_polynomial(function_base):
def __init__(self, *params):
self.params = params
self.n_terms = len(params)
self.degree = self.n_terms - 1
def __call__(self, x_obs):
y_calc = flex.double(x_obs.size())
for n in range(self.n_terms):
y_calc += self.params[n] * flex.pow(x_obs, n)
return y_calc
def partial_derivatives(self, x_obs):
g = []
for n in range(self.n_terms):
g.append(flex.pow(x_obs, n))
return g
class gaussian(function_base):
def __init__(self, a, b, c):
adopt_init_args(self, locals())
self.params = (a, b, c)
def __call__(self, x_obs):
a, b, c = self.params
y_calc = a * flex.exp(-flex.pow2(x_obs - b) / (2 * c**2))
return y_calc
def partial_derivatives(self, x_obs):
a, b, c = self.params
exponential_part = flex.exp(-flex.pow2(x_obs - b) / (2 * c**2))
return(exponential_part,
a * (x_obs - b) / c**2 * exponential_part,
a * flex.pow2(x_obs - b) / c**3 * exponential_part)
@property
def sigma(self):
return abs(self.params[2])
class skew_normal(function_base):
def __init__(self, shape, location, scale):
adopt_init_args(self, locals())
self.params = (shape, location, scale)
def __call__(self, x_obs):
shape, location, scale = self.params
normal_part = (2 / (scale * math.sqrt(2 * math.pi))
* flex.exp(- flex.pow2(x_obs - location)/(2 * scale**2)))
cdf_part = 0.5 * (
1 + scitbx.math.erf(shape * (x_obs - location)/ (math.sqrt(2) * scale)))
y_calc = normal_part * cdf_part
return y_calc
def partial_derivatives(self, x_obs):
shape, location, scale = self.params
exponential_part = (1/(math.sqrt(2 * math.pi))
* flex.exp(- flex.pow2(x_obs - location)/(2 * scale**2)))
normal_part = 2 / scale * exponential_part
cdf_part = 0.5 * (
1 + scitbx.math.erf(shape * (x_obs - location)/ (math.sqrt(2) * scale)))
d_normal_part_d_location = 2 / scale**3 * (x_obs - location) * exponential_part
d_normal_part_d_scale = \
2 / scale**4 * (flex.pow2(x_obs - location) - scale**2) * exponential_part
exponential_part_with_shape = (
1 / (math.sqrt(math.pi)) *
flex.exp(-shape**2 * flex.pow2(x_obs - location)/(2 * scale**2)))
d_cdf_d_shape = \
(x_obs - location) / (math.sqrt(2) * scale) * exponential_part_with_shape
d_cdf_d_location = \
-shape / (math.sqrt(2) * scale) * exponential_part_with_shape
d_cdf_d_scale = (-shape * (x_obs - location) * exponential_part_with_shape /
(math.sqrt(2) * scale**2))
return (d_cdf_d_shape * normal_part,
d_normal_part_d_location * cdf_part + d_cdf_d_location * normal_part,
d_normal_part_d_scale * cdf_part + d_cdf_d_scale * normal_part)
class tanh(function_base):
def __init__(self, *params):
self.params = params
def __call__(self, x_obs):
s = x_obs
r, s0 = self.params
z = (s - s0)/r
return 0.5 * (1 - flex.tanh(z))
class tanh_fit(object):
def __init__(self, x_obs, y_obs, r=1, s0=1,
min_iterations=0,
max_iterations=None):
self.x_obs = x_obs
self.y_obs = y_obs
assert r >= 0
f = tanh(r, s0)
fit = lbfgs_minimiser(
functions=[f],
x_obs=x_obs,
y_obs=self.y_obs,
termination_params=scitbx.lbfgs.termination_parameters(
min_iterations=min_iterations,
max_iterations=max_iterations))
self.params = fit.functions[0].params
class univariate_polynomial_fit(object):
def __init__(self, x_obs, y_obs, degree,
min_iterations=0,
max_iterations=None,
number_of_cycles=1):
self.x_obs = x_obs
self.y_obs = y_obs
assert isinstance(degree, int)
assert degree >= 0
self.degree = degree
self.n_terms = degree + 1
params = flex.double([1] * self.n_terms)
for cycle in xrange(number_of_cycles):
polynomial = univariate_polynomial(*params)
fit = lbfgs_minimiser(
functions=[polynomial],
x_obs=x_obs,
y_obs=self.y_obs,
termination_params=scitbx.lbfgs.termination_parameters(
min_iterations=min_iterations,
max_iterations=max_iterations))
self.params = fit.functions[0].params
params = self.params
class single_gaussian_fit(object):
def __init__(self, x_obs, y_obs):
self.x_obs = x_obs
self.y_obs = y_obs
max_i = flex.max_index(y_obs)
scale = y_obs[max_i]
mu = x_obs[max_i]
sigma = 1
fit = gaussian_fit(x_obs, y_obs, [gaussian(scale, mu, sigma)])
self.a = fit.gaussians[0].a
self.b = fit.gaussians[0].b
self.c = fit.gaussians[0].c
class gaussian_fit(object):
def __init__(self, x_obs, y_obs, starting_gaussians, termination_params=None):
self.n_cycles = 0
self.x_obs = x_obs
self.y_obs = y_obs
self.n_gaussians = len(starting_gaussians)
assert self.n_gaussians > 0
fit = lbfgs_minimiser(
functions=starting_gaussians, x_obs=x_obs, y_obs=self.y_obs)
self.gaussians = fit.functions
def compute_y_calc(self):
y_calc = flex.double(self.x_obs.size())
for i in range(self.n_gaussians):
y_calc += self.gaussians[i](self.x_obs)
return y_calc
def pyplot(self):
from matplotlib import pyplot
pyplot.plot(self.x_obs, self.y_obs)
pyplot.plot(self.x_obs, self.compute_y_calc())
for i in range(self.n_gaussians):
scale, mu, S = tuple(self.x[i*3:i*3+3])
y_calc = scale * flex.exp(-flex.pow2(self.x_obs-mu) * S**2)
pyplot.plot(self.x_obs, y_calc)
pyplot.show()
class minimiser_base(object):
def __init__(self, functions, x_obs, y_obs):
self.n_cycles = 0
self.x_obs = x_obs
self.y_obs = y_obs
self.n_functions = len(functions)
self.functions = functions
def compute_functional(self, params):
self.x = params
y_calc = self.compute_y_calc()
delta_y = self.y_obs - y_calc
f = flex.sum(flex.pow2(delta_y))
return f
def compute_y_calc(self):
y_calc = flex.double(self.x_obs.size())
for f in self.functions:
y_calc += f(self.x_obs)
return y_calc
def compute_functional_and_gradients(self):
y_calc = self.compute_y_calc()
delta_y = self.y_obs - y_calc
f = flex.sum(flex.pow2(delta_y))
g = flex.double()
for funct in self.functions:
partial_ders = funct.partial_derivatives(self.x_obs)
for i, partial in enumerate(partial_ders):
g.append(-2 * flex.sum(delta_y * partial))
return f, g
def callback_after_step(self, minimizer):
self.n_cycles += 1
def pyplot(self):
from matplotlib import pyplot
pyplot.plot(self.x_obs, self.y_obs)
pyplot.plot(self.x_obs, self.compute_y_calc())
for f in self.functions:
y_calc = f(self.x_obs)
pyplot.plot(self.x_obs, y_calc)
pyplot.show()
@property
def functions(self):
x = self.x.deep_copy()
for i, f in enumerate(self._functions):
f = self._functions[i]
self._functions[i] = f.__class__(*x[:len(f.params)])
x = x[len(f.params):]
return self._functions
@functions.setter
def functions(self, functions):
self._functions = functions
x = []
for f in self._functions:
x.extend(f.params)
self.x = flex.double(x)
class lbfgs_minimiser(minimiser_base):
def __init__(self, functions, x_obs, y_obs, termination_params=None):
super(lbfgs_minimiser, self).__init__(functions, x_obs, y_obs)
self.minimizer = scitbx.lbfgs.run(
target_evaluator=self, termination_params=termination_params)
have_cma_es = libtbx.env.has_module("cma_es")
if have_cma_es:
from cma_es import cma_es_interface
class cma_es_minimiser(minimiser_base):
def __init__(self, functions, x_obs, y_obs):
super(cma_es_minimiser, self).__init__(functions, x_obs, y_obs)
sigma = flex.double(self.x.size(), 1)
self.minimizer = cma_es_interface.cma_es_driver(len(self.x), self.x.deep_copy(), sigma, self.compute_functional)
generic_minimiser = lbfgs_minimiser
| true
| true
|
1c3e4cb556f40621a237c070e9f08e895f5000c9
| 193
|
py
|
Python
|
code/util.py
|
unique-chan/YeLU
|
e70c1e7ab8504ff8d22a33b681d0538a0f6e5745
|
[
"MIT"
] | 1
|
2021-07-01T16:00:54.000Z
|
2021-07-01T16:00:54.000Z
|
code/util.py
|
unique-chan/YeLU
|
e70c1e7ab8504ff8d22a33b681d0538a0f6e5745
|
[
"MIT"
] | null | null | null |
code/util.py
|
unique-chan/YeLU
|
e70c1e7ab8504ff8d22a33b681d0538a0f6e5745
|
[
"MIT"
] | null | null | null |
def parsed_arguments_dict(my_args):
    keys = [key for key in dir(my_args) if key[0] != '_']
    args_dict = {}
    for key in keys:
        args_dict[key] = getattr(my_args, key)
    return args_dict
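# Example (hypothetical argparse namespace):
#   args = argparse.Namespace(lr=0.1, epochs=3)
#   parsed_arguments_dict(args)  # {'epochs': 3, 'lr': 0.1}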
| 27.571429
| 57
| 0.585492
|
def parsed_arguments_dict(my_args):
    keys = [key for key in dir(my_args) if key[0] != '_']
    args_dict = {}
    for key in keys:
        args_dict[key] = getattr(my_args, key)
    return args_dict
| true
| true
|
1c3e4d459da0c92c85b9d2f62df84f0f6e5f3f1a
| 3,589
|
py
|
Python
|
telegram_parser_console/link_generator.py
|
flexagoon/telegram_parser
|
7f0e601c5ba03d48d889fe22561ea702db90e7bd
|
[
"Apache-2.0"
] | null | null | null |
telegram_parser_console/link_generator.py
|
flexagoon/telegram_parser
|
7f0e601c5ba03d48d889fe22561ea702db90e7bd
|
[
"Apache-2.0"
] | null | null | null |
telegram_parser_console/link_generator.py
|
flexagoon/telegram_parser
|
7f0e601c5ba03d48d889fe22561ea702db90e7bd
|
[
"Apache-2.0"
] | null | null | null |
import random, string, itertools
def alphabets_generator():
alphabet = ['1', '2', '3','4' , '5', '6', '7', '8', '9', '0', '_']
    for letter in range(97,123):  # lowercase a-z; this alphabet is used for every position except the first
        alphabet.append(chr(letter))
    alphabet1 = []  # alphabet for the first character: lowercase a-z only
for letter in range(97,123):
alphabet1.append(chr(letter))
return alphabet, alphabet1
def random_address_generator(alphabet, alphabet1):
len_link = random.randint(5, 32)
link = ''
for i in range(len_link):
if i == 0:
link += alphabet1[random.randint(0, len(alphabet1)-1)]
else:
link += alphabet[random.randint(0, len(alphabet)-1)]
return link
def last_link_read_linear_address(alphabet, alphabet1):
start_point = open('last_link').read()
linear_letter_link_ids_array = []
for i in range(len(start_point)):
if i == 0:
linear_letter_link_ids_array.append(alphabet1.index(start_point[i]))
elif i == len(start_point)-1:
linear_letter_link_ids_array.append(alphabet.index(start_point[i])+1)
else:
linear_letter_link_ids_array.append(alphabet.index(start_point[i]))
return linear_letter_link_ids_array
def linear_address_generator(alphabet, alphabet1, linear_letter_link_ids_array):
link = ''
for i in range(len(linear_letter_link_ids_array)):
if i == 0:
link += str(alphabet1[linear_letter_link_ids_array[i]])
else:
link += str(alphabet[linear_letter_link_ids_array[i]])
linear_letter_link_ids_array[-1] += 1
for i in range(len(linear_letter_link_ids_array)-1, -1, -1):
if i != 0:
if linear_letter_link_ids_array[i] == len(alphabet):
linear_letter_link_ids_array[i] = 0
linear_letter_link_ids_array[i-1] += 1
else:
if linear_letter_link_ids_array[0] == len(alphabet1):
print('The end of this linear range. Exiting the program.')
break
return link
def mutation_address_generator(link):
mutated_array = []
replacements = """
a=4
b=6
e=3
f=8
g=9
i=1
l=1
o=0
s=5
t=7
z=2
"""
try:
open('mutated', 'r').read()
except FileNotFoundError:
mutated_replacement_set = set()
mutated_array = []
link = ['telegram']
mutations = ['_', 'xxx']
for number_of_connected_mutations in range(1, len(mutations) + 2):
for mutation_tuple in itertools.permutations(link + mutations, number_of_connected_mutations):
mutation_word = ''.join(mutation_tuple)
if link[0] in mutation_word or link[0] == mutation_word:
if len(mutation_word) > 4 and len(mutation_word) < 33:
if mutation_word[0] not in ['0','1', '2', '3','4', '5', '6', '7', '8', '9', '_'] and mutation_word[-1] not in ['_']:
mutated_replacement_set.add(mutation_word)
d = {c:[c] for c in string.printable}
for line in replacements.strip().split("\n"):
c, replacement = line.split("=")
d[c].append(replacement)
for link in mutated_replacement_set:
for letters in itertools.product(*[d[c] for c in link]):
mutated_address = "".join(letters)
if mutated_address[0] not in ['0','1', '2', '3','4', '5', '6', '7', '8', '9', '_']:
mutated_array.append(mutated_address)
return mutated_array
| 37
| 140
| 0.58735
|
import random, string, itertools
def alphabets_generator():
alphabet = ['1', '2', '3','4' , '5', '6', '7', '8', '9', '0', '_']
for letter in range(97,123):
alphabet.append(chr(letter))
alphabet1 = []
for letter in range(97,123):
alphabet1.append(chr(letter))
return alphabet, alphabet1
def random_address_generator(alphabet, alphabet1):
len_link = random.randint(5, 32)
link = ''
for i in range(len_link):
if i == 0:
link += alphabet1[random.randint(0, len(alphabet1)-1)]
else:
link += alphabet[random.randint(0, len(alphabet)-1)]
return link
def last_link_read_linear_address(alphabet, alphabet1):
start_point = open('last_link').read()
linear_letter_link_ids_array = []
for i in range(len(start_point)):
if i == 0:
linear_letter_link_ids_array.append(alphabet1.index(start_point[i]))
elif i == len(start_point)-1:
linear_letter_link_ids_array.append(alphabet.index(start_point[i])+1)
else:
linear_letter_link_ids_array.append(alphabet.index(start_point[i]))
return linear_letter_link_ids_array
def linear_address_generator(alphabet, alphabet1, linear_letter_link_ids_array):
link = ''
for i in range(len(linear_letter_link_ids_array)):
if i == 0:
link += str(alphabet1[linear_letter_link_ids_array[i]])
else:
link += str(alphabet[linear_letter_link_ids_array[i]])
linear_letter_link_ids_array[-1] += 1
for i in range(len(linear_letter_link_ids_array)-1, -1, -1):
if i != 0:
if linear_letter_link_ids_array[i] == len(alphabet):
linear_letter_link_ids_array[i] = 0
linear_letter_link_ids_array[i-1] += 1
else:
if linear_letter_link_ids_array[0] == len(alphabet1):
print('The end of this linear range. Exiting the program.')
break
return link
def mutation_address_generator(link):
mutated_array = []
replacements = """
a=4
b=6
e=3
f=8
g=9
i=1
l=1
o=0
s=5
t=7
z=2
"""
try:
open('mutated', 'r').read()
except FileNotFoundError:
mutated_replacement_set = set()
mutated_array = []
link = ['telegram']
mutations = ['_', 'xxx']
for number_of_connected_mutations in range(1, len(mutations) + 2):
for mutation_tuple in itertools.permutations(link + mutations, number_of_connected_mutations):
mutation_word = ''.join(mutation_tuple)
if link[0] in mutation_word or link[0] == mutation_word:
if len(mutation_word) > 4 and len(mutation_word) < 33:
if mutation_word[0] not in ['0','1', '2', '3','4', '5', '6', '7', '8', '9', '_'] and mutation_word[-1] not in ['_']:
mutated_replacement_set.add(mutation_word)
d = {c:[c] for c in string.printable}
for line in replacements.strip().split("\n"):
c, replacement = line.split("=")
d[c].append(replacement)
for link in mutated_replacement_set:
for letters in itertools.product(*[d[c] for c in link]):
mutated_address = "".join(letters)
if mutated_address[0] not in ['0','1', '2', '3','4', '5', '6', '7', '8', '9', '_']:
mutated_array.append(mutated_address)
return mutated_array
| true
| true
|
1c3e4d48552630c2b67eed26096157fd2ff94ad5
| 9,209
|
py
|
Python
|
get_turk_useful_res.py
|
NinaCalvi/OKBC
|
e25ad0296137ed354593c74509b077a22f60425e
|
[
"MIT"
] | 6
|
2020-07-06T14:31:18.000Z
|
2021-09-13T10:15:14.000Z
|
get_turk_useful_res.py
|
NinaCalvi/OKBC
|
e25ad0296137ed354593c74509b077a22f60425e
|
[
"MIT"
] | 2
|
2021-09-12T17:49:09.000Z
|
2021-09-14T15:28:54.000Z
|
get_turk_useful_res.py
|
NinaCalvi/OKBC
|
e25ad0296137ed354593c74509b077a22f60425e
|
[
"MIT"
] | 1
|
2021-06-07T01:46:44.000Z
|
2021-06-07T01:46:44.000Z
|
# This code is used to generate an analysis html for the results of the mturk batch of project -
# TexKBC useful? (id=1419750).
# It requires the results.csv downloaded from mturk.
# Quality control is done by including known-true facts (data from test.txt, which is known to be true).
# If a turker chooses false for such a fact, that HIT is rejected.
# It then generates an analysis html file if all the HITs are valid; if not, it generates a CSV with a reason for rejecting each HIT.
# Upload that CSV to Mturk to reject the HITs, withhold payment, and republish the HITs for other workers to do.
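# Example invocation (hypothetical file names):
#   python get_turk_useful_res.py -rf Batch_results.csv -op out/ -bf book.html --reason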
import pandas as pd
import numpy as np
import pprint
import argparse
import collections
import string
import os
import bs4 as bs
import itertools
#ANSWER_OPTIONS = ['true','false','na']
ANSWER_OPTIONS = ['true','false']
REASON_OPTIONS = ['know','exp','guess','web']
def get_key_answer(key,id):
return string.Template('Answer.${key}_${id}.on').substitute(key=key,id=id)
def get_key_reason(key,id):
return string.Template('Answer.reason_${id}.${key}').substitute(id=id,key=key)
def get_key_input(key,id):
return string.Template('Input.${key}_${id}').substitute(key=key,id=id)
def valid_row(row,book):
total_sum = 0
for i in range(5):
for opt in ANSWER_OPTIONS:
total_sum += row[get_key_answer(opt,i)]
if(total_sum != 5):
return 'You did not mark any option in some questions'
if(book is None):
return ''
invalid_ct = 0
for i in range(5):
fact = row[get_key_input('fact',i)]
fact_text = bs.BeautifulSoup(fact,'lxml').text
if(str(book[book.fact == fact_text]['true?'].iloc[0]) == 'na'):
continue
elif(float(book[book.fact == fact_text]['true?'].iloc[0]) == 1 and row[get_key_answer('false',i)] == 1):
invalid_ct += 1
elif(float(book[book.fact == fact_text]['true?'].iloc[0]) == 0 and row[get_key_answer('true',i)] == 1):
invalid_ct += 1
            # return 'You did not choose that the fact is false, though the fact was false.'
    if(invalid_ct >= 3):
        return 'You did not choose the correct option in more than 3 facts'
return ''
def get_invalid_hits(df,outfilename,book):
df_new = df.copy()
df = df.fillna(False)
invalid_hits = collections.defaultdict(list)
for index,row in df.iterrows():
message = valid_row(row,book)
if(message!=''):
print('Invalid HIT at {} with message ==> {} '.format(index, message))
df_new['Reject'][index] = message
invalid_hits[row['WorkerId']].append(row['AssignmentId'])
if(len(invalid_hits)!=0):
df_new.to_csv(outfilename,index=False,sep=',')
return invalid_hits
def get_winner(answers):
true_ct = 0
false_ct = 0
for el in answers:
if(el=='true'):
true_ct += 1
elif(el=='false'):
false_ct += 1
if(true_ct > false_ct):
return ['true']
elif (false_ct > true_ct):
return ['false']
else:
return ['na']
def get_book(book_filename,args):
# TODO: Change this to have a clean pipeline
with open(book_filename,'r') as f:
soup = bs.BeautifulSoup(f, 'lxml')
table = soup.find('table')
table_body = table.find('tbody')
rows = table_body.find_all('tr')
data = []
for row in rows:
cols = row.find_all('td')
cols = [ele.text for ele in cols]
data.append([ele for ele in cols if ele])
#
df = pd.DataFrame(data,columns=['fact','exp','true?'])
if args.all_true:
df['true?'] = True
elif args.all_false:
df['true?'] = False
#
return df
def get_results(df,book,reason):
df = df.fillna(False)
results = {}
for index, row in df.iterrows():
for i in range(5):
fact = row[get_key_input('fact',i)]
exp = row[get_key_input('exp',i)]
fact_text = bs.BeautifulSoup(fact,'lxml').text
if(fact not in results):
our_true = 'na' if book is None else book[book.fact == fact_text]['true?'].iloc[0]
results[fact] = {'exp': exp, 'answers' : [],'time_taken': [] ,'reasons':[] ,'row_idx':[], 'fact_no':[],'our_true?': our_true}
# if(row[get_key_answer('true',i)]):
results[fact]['time_taken'].append(float(row['WorkTimeInSeconds'])/5.0)
for opt in ANSWER_OPTIONS:
if(row[get_key_answer(opt,i)]):
results[fact]['answers'].append(opt)
results[fact]['row_idx'].append(index)
results[fact]['fact_no'].append(i)
if reason:
reason_list = []
for opt in REASON_OPTIONS:
if(row[get_key_reason(opt,i)]):
reason_list.append(opt)
results[fact]['reasons'].append('_'.join(reason_list))
for k in results:
winner = get_winner(results[k]['answers'])
results[k]['winner'] = winner
results[k]['avg_time'] = np.mean(results[k]['time_taken'])
return results
def write_results(results,output_file,analysis_str):
results_df = pd.DataFrame.from_dict(results,orient='index')
results_df = results_df.reset_index()
results_df = results_df.drop(['row_idx','fact_no'],axis=1)
with open('css_style.css','r') as css_file:
CSS = css_file.read()
with open(output_file,'w') as f:
f.write(CSS+'\n\n')
analysis_str = analysis_str.replace('\n','<br><br>')
f.write(analysis_str+'\n\n')
pd.set_option('display.max_colwidth', -1)
results_df.to_html(f, escape=False, justify='center')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-rf', '--result_file', help="Name of the result csv downloaded from mturk", required=True)
parser.add_argument('-op', '--output_path', help="Output path for rejected people and results", required=True)
parser.add_argument('-bf', '--book_file', help="Original HTML (Book) written by get_turk_useful_data",required=False,default=None)
parser.add_argument('--reason', action='store_true', required=False,
help='Use the flag to know the reason of users',default=False)
parser.add_argument('--all_true',action='store_true',required=False,default=False)
parser.add_argument('--all_false',action='store_true',required=False,default=False)
args = parser.parse_args()
assert not(args.all_true and args.all_false)
book = None
if(args.book_file is not None):
book = get_book(args.book_file,args)
# if(args.reason):
# ANSWER_OPTIONS = ANSWER_OPTIONS[:-1]
df = pd.read_csv(args.result_file)
df = df[df['AssignmentStatus'] != 'Rejected']
res_file_last_part = os.path.basename(os.path.normpath(args.result_file)).split('.')[0]
invalid_hits = get_invalid_hits(df,os.path.join(args.output_path,res_file_last_part+'_rejected.csv'),book)
if(len(invalid_hits)!=0):
print('There are {} invalid assignments which have id \n{}'.format(len(list(itertools.chain(*list(invalid_hits.values())))),pprint.pformat(invalid_hits)))
# exit(-1)
results = get_results(df,book,args.reason)
#print("------")
#print(results)
#print("------")
answers_list = []
winner_list = []
avg_time_list = []
reason_list = []
accuracy = 0
for k in results:
# print(results[k])
answers_list.extend(results[k]['answers'])
winner_list.extend(results[k]['winner'])
avg_time_list.extend(results[k]['time_taken'])
if book is not None:
if(float(results[k]['our_true?']) == 1 and results[k]['winner'][0] == 'true'):
accuracy +=1
elif(float(results[k]['our_true?']) == 0 and results[k]['winner'][0] == 'false'):
accuracy +=1
if args.reason:
reason_list.extend(list(itertools.chain(*[x.split('_') for x in results[k]['reasons']])))
accuracy = accuracy*100.0/len(results.keys())
ctr_answers = collections.Counter(answers_list)
analysis_str = ''
analysis_str += 'Total number of annotations = {}\n'.format(len(answers_list))
for el in ctr_answers:
ctr_answers[el] /= len(answers_list)*0.01
analysis_str += '{}\n\n'.format(ctr_answers)
ctr_winner = collections.Counter(winner_list)
analysis_str += ('Total number of facts = {}\n'.format(len(results)))
analysis_str += ('Total number of truth determined facts = {}\n'.format(len(winner_list)-winner_list.count('na')))
for el in ctr_winner:
ctr_winner[el] /= len(winner_list)*0.01
analysis_str += '{}\n\n'.format(ctr_winner)
analysis_str += '\nAverage time taken in seconds: {}\n\n'.format(np.mean(avg_time_list))
if book is not None:
analysis_str += 'Turkers Accuracy: {}%\n\n'.format(accuracy)
if args.reason:
analysis_str += '\n\n Workers reason: {}\n'.format(collections.Counter(reason_list))
print(analysis_str)
write_results(results,os.path.join(args.output_path,res_file_last_part+'_analysis.html'),analysis_str)
| 40.03913
| 162
| 0.621349
|
import pandas as pd
import numpy as np
import pprint
import argparse
import collections
import string
import os
import bs4 as bs
import itertools
ANSWER_OPTIONS = ['true','false']
REASON_OPTIONS = ['know','exp','guess','web']
def get_key_answer(key,id):
return string.Template('Answer.${key}_${id}.on').substitute(key=key,id=id)
def get_key_reason(key,id):
return string.Template('Answer.reason_${id}.${key}').substitute(id=id,key=key)
def get_key_input(key,id):
return string.Template('Input.${key}_${id}').substitute(key=key,id=id)
def valid_row(row,book):
total_sum = 0
for i in range(5):
for opt in ANSWER_OPTIONS:
total_sum += row[get_key_answer(opt,i)]
if(total_sum != 5):
return 'You did not mark exactly one option in every question'
if(book is None):
return ''
invalid_ct = 0
for i in range(5):
fact = row[get_key_input('fact',i)]
fact_text = bs.BeautifulSoup(fact,'lxml').text
if(str(book[book.fact == fact_text]['true?'].iloc[0]) == 'na'):
continue
elif(float(book[book.fact == fact_text]['true?'].iloc[0]) == 1 and row[get_key_answer('false',i)] == 1):
invalid_ct += 1
elif(float(book[book.fact == fact_text]['true?'].iloc[0]) == 0 and row[get_key_answer('true',i)] == 1):
invalid_ct += 1
if(invalid_ct >= 3):
return 'You did not choose the correct option in 3 or more facts'
return ''
def get_invalid_hits(df,outfilename,book):
df_new = df.copy()
df = df.fillna(False)
invalid_hits = collections.defaultdict(list)
for index,row in df.iterrows():
message = valid_row(row,book)
if(message!=''):
print('Invalid HIT at {} with message ==> {} '.format(index, message))
df_new.loc[index, 'Reject'] = message
invalid_hits[row['WorkerId']].append(row['AssignmentId'])
if(len(invalid_hits)!=0):
df_new.to_csv(outfilename,index=False,sep=',')
return invalid_hits
def get_winner(answers):
true_ct = 0
false_ct = 0
for el in answers:
if(el=='true'):
true_ct += 1
elif(el=='false'):
false_ct += 1
if(true_ct > false_ct):
return ['true']
elif (false_ct > true_ct):
return ['false']
else:
return ['na']
def get_book(book_filename,args):
with open(book_filename,'r') as f:
soup = bs.BeautifulSoup(f, 'lxml')
table = soup.find('table')
table_body = table.find('tbody')
rows = table_body.find_all('tr')
data = []
for row in rows:
cols = row.find_all('td')
cols = [ele.text for ele in cols]
data.append([ele for ele in cols if ele])
df = pd.DataFrame(data,columns=['fact','exp','true?'])
if args.all_true:
df['true?'] = True
elif args.all_false:
df['true?'] = False
return df
def get_results(df,book,reason):
df = df.fillna(False)
results = {}
for index, row in df.iterrows():
for i in range(5):
fact = row[get_key_input('fact',i)]
exp = row[get_key_input('exp',i)]
fact_text = bs.BeautifulSoup(fact,'lxml').text
if(fact not in results):
our_true = 'na' if book is None else book[book.fact == fact_text]['true?'].iloc[0]
results[fact] = {'exp': exp, 'answers' : [],'time_taken': [] ,'reasons':[] ,'row_idx':[], 'fact_no':[],'our_true?': our_true}
results[fact]['time_taken'].append(float(row['WorkTimeInSeconds'])/5.0)
for opt in ANSWER_OPTIONS:
if(row[get_key_answer(opt,i)]):
results[fact]['answers'].append(opt)
results[fact]['row_idx'].append(index)
results[fact]['fact_no'].append(i)
if reason:
reason_list = []
for opt in REASON_OPTIONS:
if(row[get_key_reason(opt,i)]):
reason_list.append(opt)
results[fact]['reasons'].append('_'.join(reason_list))
for k in results:
winner = get_winner(results[k]['answers'])
results[k]['winner'] = winner
results[k]['avg_time'] = np.mean(results[k]['time_taken'])
return results
def write_results(results,output_file,analysis_str):
results_df = pd.DataFrame.from_dict(results,orient='index')
results_df = results_df.reset_index()
results_df = results_df.drop(['row_idx','fact_no'],axis=1)
with open('css_style.css','r') as css_file:
CSS = css_file.read()
with open(output_file,'w') as f:
f.write(CSS+'\n\n')
analysis_str = analysis_str.replace('\n','<br><br>')
f.write(analysis_str+'\n\n')
pd.set_option('display.max_colwidth', None)
results_df.to_html(f, escape=False, justify='center')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-rf', '--result_file', help="Name of the result csv downloaded from mturk", required=True)
parser.add_argument('-op', '--output_path', help="Output path for rejected people and results", required=True)
parser.add_argument('-bf', '--book_file', help="Original HTML (Book) written by get_turk_useful_data",required=False,default=None)
parser.add_argument('--reason', action='store_true', required=False,
help='Use this flag to collect the reasons workers give',default=False)
parser.add_argument('--all_true',action='store_true',required=False,default=False)
parser.add_argument('--all_false',action='store_true',required=False,default=False)
args = parser.parse_args()
assert not(args.all_true and args.all_false)
book = None
if(args.book_file is not None):
book = get_book(args.book_file,args)
df = pd.read_csv(args.result_file)
df = df[df['AssignmentStatus'] != 'Rejected']
res_file_last_part = os.path.basename(os.path.normpath(args.result_file)).split('.')[0]
invalid_hits = get_invalid_hits(df,os.path.join(args.output_path,res_file_last_part+'_rejected.csv'),book)
if(len(invalid_hits)!=0):
print('There are {} invalid assignments which have id \n{}'.format(len(list(itertools.chain(*list(invalid_hits.values())))),pprint.pformat(invalid_hits)))
results = get_results(df,book,args.reason)
answers_list = []
winner_list = []
avg_time_list = []
reason_list = []
accuracy = 0
for k in results:
answers_list.extend(results[k]['answers'])
winner_list.extend(results[k]['winner'])
avg_time_list.extend(results[k]['time_taken'])
if book is not None:
if(float(results[k]['our_true?']) == 1 and results[k]['winner'][0] == 'true'):
accuracy +=1
elif(float(results[k]['our_true?']) == 0 and results[k]['winner'][0] == 'false'):
accuracy +=1
if args.reason:
reason_list.extend(list(itertools.chain(*[x.split('_') for x in results[k]['reasons']])))
accuracy = accuracy*100.0/len(results.keys())
ctr_answers = collections.Counter(answers_list)
analysis_str = ''
analysis_str += 'Total number of annotations = {}\n'.format(len(answers_list))
for el in ctr_answers:
ctr_answers[el] /= len(answers_list)*0.01
analysis_str += '{}\n\n'.format(ctr_answers)
ctr_winner = collections.Counter(winner_list)
analysis_str += ('Total number of facts = {}\n'.format(len(results)))
analysis_str += ('Total number of truth determined facts = {}\n'.format(len(winner_list)-winner_list.count('na')))
for el in ctr_winner:
ctr_winner[el] /= len(winner_list)*0.01
analysis_str += '{}\n\n'.format(ctr_winner)
analysis_str += '\nAverage time taken in seconds: {}\n\n'.format(np.mean(avg_time_list))
if book is not None:
analysis_str += 'Turkers Accuracy: {}%\n\n'.format(accuracy)
if args.reason:
analysis_str += '\n\n Workers reason: {}\n'.format(collections.Counter(reason_list))
print(analysis_str)
write_results(results,os.path.join(args.output_path,res_file_last_part+'_analysis.html'),analysis_str)
| true
| true
|
1c3e4e3785dd3453a62a10e3b0d8d4dc0d97b925
| 9,759
|
py
|
Python
|
modules/obsolete_modules/modules_spect_mmd.py
|
ravi-0841/spect-pitch-gan
|
ea4b9ea8396df753e25e0b2cb210288f683d3903
|
[
"MIT"
] | null | null | null |
modules/obsolete_modules/modules_spect_mmd.py
|
ravi-0841/spect-pitch-gan
|
ea4b9ea8396df753e25e0b2cb210288f683d3903
|
[
"MIT"
] | null | null | null |
modules/obsolete_modules/modules_spect_mmd.py
|
ravi-0841/spect-pitch-gan
|
ea4b9ea8396df753e25e0b2cb210288f683d3903
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from modules.base_modules_default_init import *
def sampler(input_pitch, input_mfc, final_filters=1, reuse=False, \
scope_name='sampler'):
# Inputs have shape [batch_size, num_features, time]
inputs = tf.concat([input_mfc, input_pitch], axis=1, \
name='sampler_input')
# Convert it to [batch_size, time, num_features] for 1D convolution
inputs_tranposed = tf.transpose(inputs, perm = [0, 2, 1], \
name='sampler_input_transpose')
with tf.variable_scope(scope_name) as scope:
# Variables are reused when the sampler is called again (as in CycleGAN)
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
h1 = conv1d_layer(inputs=inputs_tranposed, filters=64, \
kernel_size=15, strides=1, \
activation=None, name='h1_conv')
h1_gates = conv1d_layer(inputs=inputs_tranposed, filters=64, \
kernel_size=15, strides=1, \
activation=None, name='h1_conv_gates')
h1_glu = gated_linear_layer(inputs=h1, \
gates=h1_gates, name='h1_glu')
# Downsample
d1 = downsample1d_block(inputs=h1_glu, filters=128, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block1_')
d2 = downsample1d_block(inputs=d1, filters=256, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block2_')
# Residual blocks
r1 = residual1d_block(inputs=d2, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block1_')
r2 = residual1d_block(inputs=r1, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block2_')
# r3 = residual1d_block(inputs=r2, filters=512, \
# kernel_size=3, strides=1, \
# name_prefix='residual1d_block3_')
# Upsample
u1 = upsample1d_block(inputs=r2, filters=512, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block1_')
u2 = upsample1d_block(inputs=u1, filters=256, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block2_')
# Dropout for stochasticity
u2 = tf.nn.dropout(u2, keep_prob=0.5)
# Output
o1 = conv1d_layer(inputs=u2, filters=final_filters, \
kernel_size=15, strides=1, \
activation=None, name='o1_conv')
o2 = tf.transpose(o1, perm=[0, 2, 1], name='output_transpose')
return o2
def generator(input_pitch, input_mfc, final_filters=23, reuse=False, \
scope_name='generator'):
# Inputs have shape [batch_size, num_features, time]
inputs = tf.concat([input_mfc, input_pitch], axis=1, \
name='generator_input')
# Convert it to [batch_size, time, num_features] for 1D convolution
inputs_tranposed = tf.transpose(inputs, perm = [0, 2, 1], \
name='generator_input_transpose')
with tf.variable_scope(scope_name) as scope:
# Variables are reused when the generator is called again (as in CycleGAN)
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
h1 = conv1d_layer(inputs=inputs_tranposed, filters=64, \
kernel_size=15, strides=1, \
activation=None, name='h1_conv')
h1_gates = conv1d_layer(inputs=inputs_tranposed, filters=64, \
kernel_size=15, strides=1, \
activation=None, name='h1_conv_gates')
h1_glu = gated_linear_layer(inputs=h1, \
gates=h1_gates, name='h1_glu')
# Downsample
d1 = downsample1d_block(inputs=h1_glu, filters=128, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block1_')
d2 = downsample1d_block(inputs=d1, filters=256, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block2_')
# Residual blocks
r1 = residual1d_block(inputs=d2, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block1_')
r2 = residual1d_block(inputs=r1, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block2_')
r3 = residual1d_block(inputs=r2, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block3_')
# Upsample
u1 = upsample1d_block(inputs=r3, filters=512, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block1_')
u2 = upsample1d_block(inputs=u1, filters=256, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block2_')
# Dropout for stochasticity
u2 = tf.nn.dropout(u2, keep_prob=0.5)
# Output
o1 = conv1d_layer(inputs=u2, filters=final_filters, \
kernel_size=15, strides=1, \
activation=None, name='o1_conv')
o2 = tf.transpose(o1, perm=[0, 2, 1], name='output_transpose')
return o2
def joint_discriminator(input_mfc, input_pitch,
reuse=False, scope_name='joint_discriminator'):
# input_mfc and input_pitch have shape [batch_size, num_features, time]
input_mfc = tf.transpose(input_mfc, perm=[0,2,1],
name='joint_discriminator_mfc_transpose')
input_pitch = tf.transpose(input_pitch, perm=[0,2,1],
name='joint_discriminator_pitch_transpose')
with tf.variable_scope(scope_name) as scope:
# Discriminator would be reused in CycleGAN
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
h1_mfc = conv1d_layer(inputs=input_mfc, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_mfc_conv')
h1_mfc_gates = conv1d_layer(inputs=input_mfc, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_mfc_conv_gates')
h1_mfc_glu = gated_linear_layer(inputs=h1_mfc,
gates=h1_mfc_gates, name='h1_mfc_glu')
h1_pitch = conv1d_layer(inputs=input_pitch, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_pitch_conv')
h1_pitch_gates = conv1d_layer(inputs=input_pitch, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_pitch_conv_gates')
h1_pitch_glu = gated_linear_layer(inputs=h1_pitch,
gates=h1_pitch_gates, name='h1_pitch_glu')
h1_glu = tf.concat([h1_mfc_glu, h1_pitch_glu], axis=-1,
name='concat_inputs')
d1 = downsample1d_block(inputs=h1_glu, filters=128,
kernel_size=3, strides=2,
name_prefix='downsample2d_block1_')
d2 = downsample1d_block(inputs=d1, filters=256,
kernel_size=3, strides=2,
name_prefix='downsample2d_block2_')
d3 = downsample1d_block(inputs=d2, filters=256,
kernel_size=3, strides=2,
name_prefix='downsample2d_block3_')
# Output
o1 = tf.layers.dense(inputs=d3, units=1, \
activation=tf.nn.sigmoid)
return o1
def spect_kernel(input_mfc, reuse=False,
scope_name='spect_kernel'):
# input_mfc has shape [batch_size, num_features, time]
input_mfc = tf.transpose(input_mfc, perm=[0,2,1],
name='spect_kernel_mfc_transpose')
with tf.variable_scope(scope_name) as scope:
# Variables are reused when this kernel is called again (as in CycleGAN)
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
h1 = conv1d_layer(inputs=input_mfc, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_conv')
h1_gates = conv1d_layer(inputs=input_mfc, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_conv_gates')
h1_glu = gated_linear_layer(inputs=h1,
gates=h1_gates, name='h1_glu')
# Downsample
d1 = downsample1d_block(inputs=h1_glu, filters=128, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block1_')
d2 = downsample1d_block(inputs=d1, filters=256, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block2_')
# Residual blocks
r1 = residual1d_block(inputs=d2, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block1_')
r2 = residual1d_block(inputs=r1, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block2_')
# Upsample
u1 = upsample1d_block(inputs=r2, filters=512, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block1_')
u2 = upsample1d_block(inputs=u1, filters=256, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block2_')
# Output
o1 = conv1d_layer(inputs=u2, filters=1, \
kernel_size=15, strides=1, \
activation=None, name='o1_conv')
o2 = tf.transpose(o1, perm=[0, 2, 1], name='output_transpose')
return o2
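# A minimal shape sanity-check for the modules above (a sketch, not part of
# the original file; assumes a TF1 runtime where tf.placeholder exists, and
# hypothetical sizes: 23 MFC coefficients, 1 pitch channel, 128 frames).
if __name__ == "__main__":
    test_pitch = tf.placeholder(tf.float32, [None, 1, 128], name='test_pitch')
    test_mfc = tf.placeholder(tf.float32, [None, 23, 128], name='test_mfc')
    gen_out = generator(test_pitch, test_mfc)   # expected [batch, 23, 128]
    smp_out = sampler(test_pitch, test_mfc)     # expected [batch, 1, 128]
    print(gen_out.shape, smp_out.shape)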
| 39.510121
| 74
| 0.586536
|
import tensorflow as tf
from modules.base_modules_default_init import *
def sampler(input_pitch, input_mfc, final_filters=1, reuse=False, \
scope_name='sampler'):
inputs = tf.concat([input_mfc, input_pitch], axis=1, \
name='sampler_input')
inputs_tranposed = tf.transpose(inputs, perm = [0, 2, 1], \
name='sampler_input_transpose')
with tf.variable_scope(scope_name) as scope:
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
h1 = conv1d_layer(inputs=inputs_tranposed, filters=64, \
kernel_size=15, strides=1, \
activation=None, name='h1_conv')
h1_gates = conv1d_layer(inputs=inputs_tranposed, filters=64, \
kernel_size=15, strides=1, \
activation=None, name='h1_conv_gates')
h1_glu = gated_linear_layer(inputs=h1, \
gates=h1_gates, name='h1_glu')
d1 = downsample1d_block(inputs=h1_glu, filters=128, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block1_')
d2 = downsample1d_block(inputs=d1, filters=256, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block2_')
r1 = residual1d_block(inputs=d2, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block1_')
r2 = residual1d_block(inputs=r1, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block2_')
u1 = upsample1d_block(inputs=r2, filters=512, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block1_')
u2 = upsample1d_block(inputs=u1, filters=256, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block2_')
u2 = tf.nn.dropout(u2, keep_prob=0.5)
o1 = conv1d_layer(inputs=u2, filters=final_filters, \
kernel_size=15, strides=1, \
activation=None, name='o1_conv')
o2 = tf.transpose(o1, perm=[0, 2, 1], name='output_transpose')
return o2
def generator(input_pitch, input_mfc, final_filters=23, reuse=False, \
scope_name='generator'):
inputs = tf.concat([input_mfc, input_pitch], axis=1, \
name='generator_input')
inputs_tranposed = tf.transpose(inputs, perm = [0, 2, 1], \
name='generator_input_transpose')
with tf.variable_scope(scope_name) as scope:
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
h1 = conv1d_layer(inputs=inputs_tranposed, filters=64, \
kernel_size=15, strides=1, \
activation=None, name='h1_conv')
h1_gates = conv1d_layer(inputs=inputs_tranposed, filters=64, \
kernel_size=15, strides=1, \
activation=None, name='h1_conv_gates')
h1_glu = gated_linear_layer(inputs=h1, \
gates=h1_gates, name='h1_glu')
d1 = downsample1d_block(inputs=h1_glu, filters=128, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block1_')
d2 = downsample1d_block(inputs=d1, filters=256, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block2_')
r1 = residual1d_block(inputs=d2, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block1_')
r2 = residual1d_block(inputs=r1, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block2_')
r3 = residual1d_block(inputs=r2, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block3_')
u1 = upsample1d_block(inputs=r3, filters=512, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block1_')
u2 = upsample1d_block(inputs=u1, filters=256, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block2_')
u2 = tf.nn.dropout(u2, keep_prob=0.5)
o1 = conv1d_layer(inputs=u2, filters=final_filters, \
kernel_size=15, strides=1, \
activation=None, name='o1_conv')
o2 = tf.transpose(o1, perm=[0, 2, 1], name='output_transpose')
return o2
def joint_discriminator(input_mfc, input_pitch,
reuse=False, scope_name='joint_discriminator'):
input_mfc = tf.transpose(input_mfc, perm=[0,2,1],
name='joint_discriminator_mfc_transpose')
input_pitch = tf.transpose(input_pitch, perm=[0,2,1],
name='joint_discriminator_pitch_transpose')
with tf.variable_scope(scope_name) as scope:
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
h1_mfc = conv1d_layer(inputs=input_mfc, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_mfc_conv')
h1_mfc_gates = conv1d_layer(inputs=input_mfc, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_mfc_conv_gates')
h1_mfc_glu = gated_linear_layer(inputs=h1_mfc,
gates=h1_mfc_gates, name='h1_mfc_glu')
h1_pitch = conv1d_layer(inputs=input_pitch, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_pitch_conv')
h1_pitch_gates = conv1d_layer(inputs=input_pitch, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_pitch_conv_gates')
h1_pitch_glu = gated_linear_layer(inputs=h1_pitch,
gates=h1_pitch_gates, name='h1_pitch_glu')
h1_glu = tf.concat([h1_mfc_glu, h1_pitch_glu], axis=-1,
name='concat_inputs')
d1 = downsample1d_block(inputs=h1_glu, filters=128,
kernel_size=3, strides=2,
name_prefix='downsample2d_block1_')
d2 = downsample1d_block(inputs=d1, filters=256,
kernel_size=3, strides=2,
name_prefix='downsample2d_block2_')
d3 = downsample1d_block(inputs=d2, filters=256,
kernel_size=3, strides=2,
name_prefix='downsample2d_block3_')
o1 = tf.layers.dense(inputs=d3, units=1, \
activation=tf.nn.sigmoid)
return o1
def spect_kernel(input_mfc, reuse=False,
scope_name='spect_kernel'):
input_mfc = tf.transpose(input_mfc, perm=[0,2,1],
name='spect_kernel_mfc_transpose')
with tf.variable_scope(scope_name) as scope:
if reuse:
scope.reuse_variables()
else:
assert scope.reuse is False
h1 = conv1d_layer(inputs=input_mfc, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_conv')
h1_gates = conv1d_layer(inputs=input_mfc, filters=64,
kernel_size=3, strides=1,
activation=None, name='h1_conv_gates')
h1_glu = gated_linear_layer(inputs=h1,
gates=h1_gates, name='h1_glu')
d1 = downsample1d_block(inputs=h1_glu, filters=128, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block1_')
d2 = downsample1d_block(inputs=d1, filters=256, \
kernel_size=5, strides=2, \
name_prefix='downsample1d_block2_')
r1 = residual1d_block(inputs=d2, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block1_')
r2 = residual1d_block(inputs=r1, filters=512, \
kernel_size=3, strides=1, \
name_prefix='residual1d_block2_')
u1 = upsample1d_block(inputs=r2, filters=512, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block1_')
u2 = upsample1d_block(inputs=u1, filters=256, \
kernel_size=5, strides=1, \
shuffle_size=2, name_prefix='upsample1d_block2_')
o1 = conv1d_layer(inputs=u2, filters=1, \
kernel_size=15, strides=1, \
activation=None, name='o1_conv')
o2 = tf.transpose(o1, perm=[0, 2, 1], name='output_transpose')
return o2
| true
| true
|
1c3e4e762da2a3ebd8df6777b090dcb9b9a5eb3e
| 251
|
py
|
Python
|
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-004/pg-4.4-grade-calculator.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-004/pg-4.4-grade-calculator.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
Python/Books/Learning-Programming-with-Python.Tamim-Shahriar-Subeen/chapter-004/pg-4.4-grade-calculator.py
|
shihab4t/Books-Code
|
b637b6b2ad42e11faf87d29047311160fe3b2490
|
[
"Unlicense"
] | null | null | null |
marks = input("Plase Enter your Marks: ")
marks = int(marks)
if marks >= 80:
grade = "A+"
elif marks >= 70:
grade = "A"
elif marks >= 60:
grade = "A-"
elif marks >= 50:
grade = "B"
else:
grade = "F"
print("Your Grade is", grade)
| 15.6875
| 41
| 0.553785
|
marks = input("Plase Enter your Marks: ")
marks = int(marks)
if marks >= 80:
grade = "A+"
elif marks >= 70:
grade = "A"
elif marks >= 60:
grade = "A-"
elif marks >= 50:
grade = "B"
else:
grade = "F"
print("Your Grade is", grade)
| true
| true
|
1c3e4f66c4687b21cb3f34b0350b3438ab41ebc9
| 1,431
|
py
|
Python
|
nsd1904/py02/day04/pymysql_crud.py
|
MrWangwf/nsd2019
|
5e859b4b1926dc098d236be3720779c50d0a55fc
|
[
"Apache-2.0"
] | 1
|
2019-09-19T04:53:22.000Z
|
2019-09-19T04:53:22.000Z
|
nsd1904/py02/day04/pymysql_crud.py
|
MrWangwf/nsd2019
|
5e859b4b1926dc098d236be3720779c50d0a55fc
|
[
"Apache-2.0"
] | null | null | null |
nsd1904/py02/day04/pymysql_crud.py
|
MrWangwf/nsd2019
|
5e859b4b1926dc098d236be3720779c50d0a55fc
|
[
"Apache-2.0"
] | 1
|
2021-12-28T04:26:02.000Z
|
2021-12-28T04:26:02.000Z
|
import pymysql
# Create a connection to the database
conn = pymysql.connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='tedu.cn',
db='nsd1904',
charset='utf8'
)
cur = conn.cursor()  # Create a cursor, analogous to a file object
###################################
# Add departments
# insert_dep = 'INSERT INTO departments(dep_id, dep_name) VALUES(%s, %s)'
# hr = [(1, '人事部')]
# deps = [(2, '财务部'), (3, '运维部'), (4, '开发部'), (5, '测试部'), (6, '市场部')]
# cur.executemany(insert_dep, hr)
# cur.executemany(insert_dep, deps)
###################################
# Query
# select1 = 'SELECT * FROM departments ORDER BY dep_id'
# cur.execute(select1)
# result1 = cur.fetchone()  # fetch one row
# result2 = cur.fetchmany(2)  # fetch two rows
# result3 = cur.fetchall()  # fetch all remaining rows
# print(result1)
# print('*' * 30)
# print(result2)
# print('*' * 30)
# print(result3)
###################################
# Move the cursor
# select1 = 'SELECT * FROM departments ORDER BY dep_id'
# cur.execute(select1)
# cur.scroll(2)  # moves relative to the current position by default
# result1 = cur.fetchone()
# print(result1)
# print('*' * 30)
# cur.scroll(0, mode='absolute')
# result2 = cur.fetchone()
# print(result2)
###################################
# Update
# update1 = 'UPDATE departments SET dep_name=%s WHERE dep_name=%s'
# cur.execute(update1, ('人力资源部', '人事部'))
###################################
# Delete
delete1 = 'DELETE FROM departments WHERE dep_id=%s'
cur.execute(delete1, (6,))
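# Note: execute() expects a sequence of parameters, so a single value must be
# wrapped in a one-element tuple -- (6,) rather than (6); conn.commit() below
# is required to make the INSERT/UPDATE/DELETE statements permanent.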
###################################
conn.commit()
cur.close()
conn.close()
| 25.105263
| 73
| 0.540881
|
import pymysql
conn = pymysql.connect(
host='127.0.0.1',
port=3306,
user='root',
passwd='tedu.cn',
db='nsd1904',
charset='utf8'
)
cur = conn.cursor()
| true
| true
|
1c3e506c7cd9bffe9f51f40d44ca105d09573419
| 663
|
py
|
Python
|
daeungram/notifications/migrations/0002_auto_20190513_2141.py
|
daeunii94/daeungram
|
7adea6bce03e2ff45cb8a6587c0a7612a0b855aa
|
[
"MIT"
] | null | null | null |
daeungram/notifications/migrations/0002_auto_20190513_2141.py
|
daeunii94/daeungram
|
7adea6bce03e2ff45cb8a6587c0a7612a0b855aa
|
[
"MIT"
] | 6
|
2020-09-04T21:25:37.000Z
|
2022-02-26T10:47:20.000Z
|
daeungram/notifications/migrations/0002_auto_20190513_2141.py
|
daeunii94/daeungram
|
7adea6bce03e2ff45cb8a6587c0a7612a0b855aa
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.0.13 on 2019-05-13 12:41
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('notifications', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='notification',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='notification',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
]
| 25.5
| 93
| 0.615385
|
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('notifications', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='notification',
name='created_at',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='notification',
name='updated_at',
field=models.DateTimeField(auto_now=True),
),
]
| true
| true
|
1c3e50c250c7df75e0b0d02658e0878ba7b1ece9
| 506
|
py
|
Python
|
test/test.py
|
ShivanshMishra/beginners-tutorial
|
219b58bc6460d481b76cc8e92775a720a72f2d55
|
[
"MIT"
] | 4
|
2018-12-24T16:35:07.000Z
|
2021-08-29T08:59:58.000Z
|
test/test.py
|
ShivanshMishra/beginners-tutorial
|
219b58bc6460d481b76cc8e92775a720a72f2d55
|
[
"MIT"
] | 1
|
2019-01-15T18:04:07.000Z
|
2019-01-15T18:04:07.000Z
|
test/test.py
|
ShivanshMishra/beginners-tutorial
|
219b58bc6460d481b76cc8e92775a720a72f2d55
|
[
"MIT"
] | 19
|
2018-10-10T10:41:40.000Z
|
2022-02-22T19:39:15.000Z
|
from src.search import main
def test():
if len(main("", "PineaPple")) > 0 and main("", "PineaPple")[
0][2] == 'slice of pineapple upside-down cake.':
print("1. Case insensitive query working :)")
else:
print("1. Case insensitive query not working")
if main("", "miles at") == [
['bulolli2.txt', 1, ' "Land Ho! Four MILES AT starboard! Land-Ho!"']]:
print("2. Zero index bug fixed :D")
else:
print("2. Zero index bug present")
| 29.764706
| 86
| 0.55336
|
from src.search import main
def test():
if len(main("", "PineaPple")) > 0 and main("", "PineaPple")[
0][2] == 'slice of pineapple upside-down cake.':
print("1. Case insensitive query working :)")
else:
print("1. Case insensitive query not working")
if main("", "miles at") == [
['bulolli2.txt', 1, ' "Land Ho! Four MILES AT starboard! Land-Ho!"']]:
print("2. Zero index bug fixed :D")
else:
print("2. Zero index bug present")
| true
| true
|
1c3e5273f4da456a4adae683289a7a58bb42048b
| 857
|
py
|
Python
|
mdf.py
|
ferdielik/mdf
|
cf8cb4bb1ef55158f5e431ca8a8027a99a6c7f0e
|
[
"MIT"
] | null | null | null |
mdf.py
|
ferdielik/mdf
|
cf8cb4bb1ef55158f5e431ca8a8027a99a6c7f0e
|
[
"MIT"
] | null | null | null |
mdf.py
|
ferdielik/mdf
|
cf8cb4bb1ef55158f5e431ca8a8027a99a6c7f0e
|
[
"MIT"
] | null | null | null |
# mdf: mit document fetcher
import os, argparse, re, wget
import urllib.request as urllib2
from urllib.parse import urlparse
from bs4 import BeautifulSoup
parser = argparse.ArgumentParser(description='Fetch MIT lecture notes.')
parser.add_argument('--url', type=str, help='a URL to fetch documents from', required=True)
parser.add_argument('--output', type=str, help='download path of documents', default=os.getcwd())
args = parser.parse_args()
base_url = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(args.url))
soup = BeautifulSoup(urllib2.urlopen(args.url).read(), features="html.parser")
def is_pdf_link(href):
return href and re.compile("pdf").search(href)
pdf_files = soup.find_all(href=is_pdf_link)
for pdf_url in pdf_files:
download_url = "%s%s" % (base_url, pdf_url.get('href'))
wget.download(download_url, out=args.output)
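# base_url keeps only the scheme and host, e.g. a --url of
# https://ocw.mit.edu/courses/some-course/lecture-notes/index.htm yields
# base_url == 'https://ocw.mit.edu/', so site-relative PDF hrefs resolve
# against the host root.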
| 34.28
| 97
| 0.746791
|
import os, argparse, re, wget
import urllib.request as urllib2
from urllib.parse import urlparse
from bs4 import BeautifulSoup
parser = argparse.ArgumentParser(description='Fetch MIT lecture notes.')
parser.add_argument('--url', type=str, help='a URL to fetch documents from', required=True)
parser.add_argument('--output', type=str, help='download path of documents', default=os.getcwd())
args = parser.parse_args()
base_url = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(args.url))
soup = BeautifulSoup(urllib2.urlopen(args.url).read(), features="html.parser")
def is_pdf_link(href):
return href and re.compile("pdf").search(href)
pdf_files = soup.find_all(href=is_pdf_link)
for pdf_url in pdf_files:
download_url = "%s%s" % (base_url, pdf_url.get('href'))
wget.download(download_url, out=args.output)
| true
| true
|
1c3e52fc15eba82355db186e7615fceca4b2570d
| 387
|
py
|
Python
|
Chapter13/educa/educa/asgi.py
|
sabin-web/Django-3-by-Example
|
a0239c954d66fee190014fbd3fa975ddb6eeba17
|
[
"MIT"
] | 628
|
2019-11-13T14:13:40.000Z
|
2022-03-30T19:02:05.000Z
|
Chapter13/educa/educa/asgi.py
|
HAKN1999/Django-3-by-Example
|
a0239c954d66fee190014fbd3fa975ddb6eeba17
|
[
"MIT"
] | 96
|
2020-04-17T17:35:33.000Z
|
2022-02-17T09:25:06.000Z
|
Chapter13/educa/educa/asgi.py
|
HAKN1999/Django-3-by-Example
|
a0239c954d66fee190014fbd3fa975ddb6eeba17
|
[
"MIT"
] | 782
|
2019-10-15T07:29:27.000Z
|
2022-03-30T17:25:08.000Z
|
"""
ASGI config for educa project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'educa.settings')
application = get_asgi_application()
| 22.764706
| 78
| 0.782946
|
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'educa.settings')
application = get_asgi_application()
| true
| true
|
1c3e5369d9f22afc39fa65ce87df45fec511d081
| 904
|
py
|
Python
|
Lib/objc/_DataDetectorsCore.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 701
|
2018-10-22T11:54:09.000Z
|
2022-03-31T14:39:30.000Z
|
Lib/objc/_DataDetectorsCore.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 229
|
2018-10-24T09:15:31.000Z
|
2021-12-24T16:51:37.000Z
|
Lib/objc/_DataDetectorsCore.py
|
snazari/Pyto
|
bcea7bbef35cab21ce73087b1a0c00a07d07ec72
|
[
"MIT"
] | 131
|
2018-11-25T18:33:03.000Z
|
2022-03-24T03:18:07.000Z
|
"""
Classes from the 'DataDetectorsCore' framework.
"""
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
DDScannerResult = _Class("DDScannerResult")
DDMessageCache = _Class("DDMessageCache")
DDMessageCacheElement = _Class("DDMessageCacheElement")
DataDetectorsSourceAccess = _Class("DataDetectorsSourceAccess")
DDURLMatch = _Class("DDURLMatch")
DDURLifier = _Class("DDURLifier")
DDScannerService = _Class("DDScannerService")
DDScanServer = _Class("DDScanServer")
DDScanServerDispatcher = _Class("DDScanServerDispatcher")
DDScannerList = _Class("DDScannerList")
DDScanStepBlockContainer = _Class("DDScanStepBlockContainer")
DDScannerObject = _Class("DDScannerObject")
DDScannerServiceConfiguration = _Class("DDScannerServiceConfiguration")
| 27.393939
| 71
| 0.779867
|
try:
from rubicon.objc import ObjCClass
except ValueError:
def ObjCClass(name):
return None
def _Class(name):
try:
return ObjCClass(name)
except NameError:
return None
DDScannerResult = _Class("DDScannerResult")
DDMessageCache = _Class("DDMessageCache")
DDMessageCacheElement = _Class("DDMessageCacheElement")
DataDetectorsSourceAccess = _Class("DataDetectorsSourceAccess")
DDURLMatch = _Class("DDURLMatch")
DDURLifier = _Class("DDURLifier")
DDScannerService = _Class("DDScannerService")
DDScanServer = _Class("DDScanServer")
DDScanServerDispatcher = _Class("DDScanServerDispatcher")
DDScannerList = _Class("DDScannerList")
DDScanStepBlockContainer = _Class("DDScanStepBlockContainer")
DDScannerObject = _Class("DDScannerObject")
DDScannerServiceConfiguration = _Class("DDScannerServiceConfiguration")
| true
| true
|
1c3e54391ff27af20fc3bcfa1e1f3e00e98acf7f
| 2,558
|
py
|
Python
|
tests/ext/django/test_db.py
|
musicinmybrain/aws-xray-sdk-python
|
b8e59423f1891351ceb1a0bd585603e0cd46c74c
|
[
"Apache-2.0"
] | 294
|
2017-10-10T19:01:04.000Z
|
2022-03-18T15:52:19.000Z
|
tests/ext/django/test_db.py
|
musicinmybrain/aws-xray-sdk-python
|
b8e59423f1891351ceb1a0bd585603e0cd46c74c
|
[
"Apache-2.0"
] | 285
|
2017-10-20T09:27:21.000Z
|
2022-03-29T15:33:45.000Z
|
tests/ext/django/test_db.py
|
musicinmybrain/aws-xray-sdk-python
|
b8e59423f1891351ceb1a0bd585603e0cd46c74c
|
[
"Apache-2.0"
] | 134
|
2017-10-11T13:55:17.000Z
|
2022-03-23T07:21:17.000Z
|
import django
import pytest
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core.context import Context
from aws_xray_sdk.ext.django.db import patch_db
@pytest.fixture(scope='module', autouse=True)
def setup():
django.setup()
xray_recorder.configure(context=Context(),
context_missing='LOG_ERROR')
patch_db()
@pytest.fixture(scope='module')
def user_class(setup):
from django.db import models
from django_fake_model import models as f
class User(f.FakeModel):
name = models.CharField(max_length=255)
password = models.CharField(max_length=255)
return User
@pytest.fixture(
autouse=True,
params=[
False,
True,
]
)
@pytest.mark.django_db
def func_setup(request, user_class):
xray_recorder.stream_sql = request.param
xray_recorder.clear_trace_entities()
xray_recorder.begin_segment('name')
try:
user_class.create_table()
yield
finally:
xray_recorder.clear_trace_entities()
try:
user_class.delete_table()
finally:
xray_recorder.end_segment()
def _assert_query(sql_meta):
if xray_recorder.stream_sql:
assert 'sanitized_query' in sql_meta
assert sql_meta['sanitized_query']
assert sql_meta['sanitized_query'].startswith('SELECT')
else:
if 'sanitized_query' in sql_meta:
assert sql_meta['sanitized_query']
# Django internally executes queries for table checks, ignore those
assert not sql_meta['sanitized_query'].startswith('SELECT')
def test_all(user_class):
""" Test calling all() on get all records.
Verify we run the query and return the SQL as metadata"""
# Materialising the query executes the SQL
list(user_class.objects.all())
subsegment = xray_recorder.current_segment().subsegments[-1]
sql = subsegment.sql
assert sql['database_type'] == 'sqlite'
_assert_query(sql)
def test_filter(user_class):
""" Test calling filter() to get filtered records.
Verify we run the query and return the SQL as metadata"""
# Materialising the query executes the SQL
list(user_class.objects.filter(password='mypassword!').all())
subsegment = xray_recorder.current_segment().subsegments[-1]
sql = subsegment.sql
assert sql['database_type'] == 'sqlite'
_assert_query(sql)
if xray_recorder.stream_sql:
assert 'mypassword!' not in sql['sanitized_query']
assert '"password" = %s' in sql['sanitized_query']
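# e.g. with stream_sql enabled the recorded query reads
# 'SELECT ... WHERE "password" = %s': the literal 'mypassword!' is replaced
# by a placeholder, which is exactly what the two asserts above verify.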
| 29.068182
| 79
| 0.686083
|
import django
import pytest
from aws_xray_sdk.core import xray_recorder
from aws_xray_sdk.core.context import Context
from aws_xray_sdk.ext.django.db import patch_db
@pytest.fixture(scope='module', autouse=True)
def setup():
django.setup()
xray_recorder.configure(context=Context(),
context_missing='LOG_ERROR')
patch_db()
@pytest.fixture(scope='module')
def user_class(setup):
from django.db import models
from django_fake_model import models as f
class User(f.FakeModel):
name = models.CharField(max_length=255)
password = models.CharField(max_length=255)
return User
@pytest.fixture(
autouse=True,
params=[
False,
True,
]
)
@pytest.mark.django_db
def func_setup(request, user_class):
xray_recorder.stream_sql = request.param
xray_recorder.clear_trace_entities()
xray_recorder.begin_segment('name')
try:
user_class.create_table()
yield
finally:
xray_recorder.clear_trace_entities()
try:
user_class.delete_table()
finally:
xray_recorder.end_segment()
def _assert_query(sql_meta):
if xray_recorder.stream_sql:
assert 'sanitized_query' in sql_meta
assert sql_meta['sanitized_query']
assert sql_meta['sanitized_query'].startswith('SELECT')
else:
if 'sanitized_query' in sql_meta:
assert sql_meta['sanitized_query']
assert not sql_meta['sanitized_query'].startswith('SELECT')
def test_all(user_class):
list(user_class.objects.all())
subsegment = xray_recorder.current_segment().subsegments[-1]
sql = subsegment.sql
assert sql['database_type'] == 'sqlite'
_assert_query(sql)
def test_filter(user_class):
list(user_class.objects.filter(password='mypassword!').all())
subsegment = xray_recorder.current_segment().subsegments[-1]
sql = subsegment.sql
assert sql['database_type'] == 'sqlite'
_assert_query(sql)
if xray_recorder.stream_sql:
assert 'mypassword!' not in sql['sanitized_query']
assert '"password" = %s' in sql['sanitized_query']
| true
| true
|
1c3e546477e59e8ded61d921ed350c2e11799802
| 228
|
py
|
Python
|
accounts/admin.py
|
zizoneleesu/do_it_django_a_to_z
|
0b2e70bd9aa684016d080b89f15649b05643b865
|
[
"MIT"
] | null | null | null |
accounts/admin.py
|
zizoneleesu/do_it_django_a_to_z
|
0b2e70bd9aa684016d080b89f15649b05643b865
|
[
"MIT"
] | null | null | null |
accounts/admin.py
|
zizoneleesu/do_it_django_a_to_z
|
0b2e70bd9aa684016d080b89f15649b05643b865
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import User
# Register your models here.
class UserAdmin(admin.ModelAdmin):
list_display = ('username',
'password')
admin.site.register(User, UserAdmin)
| 17.538462
| 36
| 0.688596
|
from django.contrib import admin
from .models import User
class UserAdmin(admin.ModelAdmin):
list_display = ('username',
'password')
admin.site.register(User, UserAdmin)
| true
| true
|
1c3e5537f56c529cc6e68fecdfa6ff529fa172fb
| 2,238
|
py
|
Python
|
python/runtime/explainer.py
|
Smirenost/sqlflow
|
fe9da6995fe2625c9ebeb4ee108ada6bf1329ac2
|
[
"Apache-2.0"
] | 2
|
2020-08-09T14:30:15.000Z
|
2020-09-20T16:33:30.000Z
|
python/runtime/explainer.py
|
vmnet04/sqlflow
|
244366196e71834ea2a3a67b90406f7e99e4bcf0
|
[
"Apache-2.0"
] | 9
|
2020-08-09T11:12:05.000Z
|
2020-10-14T00:19:57.000Z
|
python/runtime/explainer.py
|
vmnet04/sqlflow
|
244366196e71834ea2a3a67b90406f7e99e4bcf0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 The SQLFlow Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import matplotlib
# The default backend
import matplotlib.pyplot as plt
from runtime.oss import copyfileobj
# TODO(shendiaomo): extract common code from tensorflow/explain.py
# and xgboost/explain.py
# TODO(shendiaomo): add a unit test for this file later
def plot_and_save(plotfunc,
oss_dest=None,
oss_ak=None,
oss_sk=None,
oss_endpoint=None,
oss_bucket_name=None,
filename='summary'):
'''
plot_and_save plots and saves matplotlib figures using different backends
Args:
plotfunc: A callable that plot the figures
oss_dest: The oss path to save the figures
oss_ak: The access key of the oss service
oss_sk: The security key of the oss service
oss_endpoint: The endpoint of the oss service
oss_bucket_name: The bucket name of the oss service
filename: The prefix of the figure files to be saved
Return:
None
'''
plotfunc()
plt.savefig(filename, bbox_inches='tight')
if oss_dest:
copyfileobj(filename + '.png', oss_dest, oss_ak, oss_sk, oss_endpoint,
oss_bucket_name)
else:
# NOTE(weiguoz), I failed test on the PAI platform here.
# If we plan to support plotille_text_backend on PAI, please test it.
# The plotille text backend
matplotlib.use('module://plotille_text_backend')
import matplotlib.pyplot as plt_text_backend
sys.stdout.isatty = lambda: True
plotfunc()
plt_text_backend.savefig(filename, bbox_inches='tight')
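# Example (a sketch; with no OSS arguments the figure is saved locally and
# then re-rendered through the plotille text backend, which must be
# installed for the else-branch above to work):
# import numpy as np
# plot_and_save(lambda: plt.plot(np.arange(10)), filename='demo')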
| 36.096774
| 78
| 0.682306
|
import sys
import matplotlib
import matplotlib.pyplot as plt
from runtime.oss import copyfileobj
def plot_and_save(plotfunc,
oss_dest=None,
oss_ak=None,
oss_sk=None,
oss_endpoint=None,
oss_bucket_name=None,
filename='summary'):
plotfunc()
plt.savefig(filename, bbox_inches='tight')
if oss_dest:
copyfileobj(filename + '.png', oss_dest, oss_ak, oss_sk, oss_endpoint,
oss_bucket_name)
else:
matplotlib.use('module://plotille_text_backend')
import matplotlib.pyplot as plt_text_backend
sys.stdout.isatty = lambda: True
plotfunc()
plt_text_backend.savefig(filename, bbox_inches='tight')
| true
| true
|
1c3e55d605c6247301920b6c2d2be5324e789cb5
| 5,160
|
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_azure_firewall_fqdn_tags_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 2,728
|
2015-01-09T10:19:32.000Z
|
2022-03-31T14:50:33.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_azure_firewall_fqdn_tags_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 17,773
|
2015-01-05T15:57:17.000Z
|
2022-03-31T23:50:25.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_02_01/operations/_azure_firewall_fqdn_tags_operations.py
|
rsdoherty/azure-sdk-for-python
|
6bba5326677468e6660845a703686327178bb7b1
|
[
"MIT"
] | 1,916
|
2015-01-19T05:05:41.000Z
|
2022-03-31T19:36:44.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallFqdnTagsOperations(object):
"""AzureFirewallFqdnTagsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.AzureFirewallFqdnTagListResult"]
"""Gets all the Azure Firewall FQDN Tags in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AzureFirewallFqdnTagListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_02_01.models.AzureFirewallFqdnTagListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.AzureFirewallFqdnTagListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallFqdnTagListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewallFqdnTags'} # type: ignore
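# A usage sketch (hypothetical setup; this operations class is normally
# reached through a NetworkManagementClient rather than instantiated
# directly, and the credential/subscription values are placeholders):
# from azure.identity import DefaultAzureCredential
# from azure.mgmt.network import NetworkManagementClient
# client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
# for tag in client.azure_firewall_fqdn_tags.list_all():
#     print(tag.fqdn_tag_name)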
| 45.263158
| 133
| 0.661628
|
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
if TYPE_CHECKING:
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class AzureFirewallFqdnTagsOperations(object):
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list_all(
self,
**kwargs
):
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-02-01"
accept = "application/json"
def prepare_request(next_link=None):
header_parameters = {}
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
url = self.list_all.metadata['url']
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {}
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('AzureFirewallFqdnTagListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/azureFirewallFqdnTags'}
| true
| true
|
1c3e55d9cf8f0c9f568d667c30c0e10e59977b55
| 9,951
|
py
|
Python
|
meeko/preparation.py
|
forlilab/Meeko
|
39518e4215eeb20a6498751d890dbfb09dc5f37a
|
[
"Apache-2.0"
] | 19
|
2021-11-02T17:56:04.000Z
|
2022-03-30T18:05:20.000Z
|
meeko/preparation.py
|
forlilab/Meeko
|
39518e4215eeb20a6498751d890dbfb09dc5f37a
|
[
"Apache-2.0"
] | 6
|
2021-12-25T04:42:09.000Z
|
2022-03-14T17:49:06.000Z
|
meeko/preparation.py
|
forlilab/Meeko
|
39518e4215eeb20a6498751d890dbfb09dc5f37a
|
[
"Apache-2.0"
] | 5
|
2021-12-08T12:30:40.000Z
|
2022-01-28T06:30:03.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Meeko preparation
#
import os
import sys
from collections import OrderedDict
import warnings
from rdkit import Chem
from .molsetup import OBMoleculeSetup
from .molsetup import RDKitMoleculeSetup
from .atomtyper import AtomTyper
from .bondtyper import BondTyperLegacy
from .hydrate import HydrateMoleculeLegacy
from .macrocycle import FlexMacrocycle
from .flexibility import FlexibilityBuilder
from .writer import PDBQTWriterLegacy
try:
from openbabel import openbabel as ob
except ImportError:
_has_openbabel = False
else:
_has_openbabel = True
class MoleculePreparation:
def __init__(self, keep_nonpolar_hydrogens=False,
hydrate=False, flexible_amides=False,
rigid_macrocycles=False, min_ring_size=7, max_ring_size=33,
keep_chorded_rings=False, keep_equivalent_rings=False,
rigidify_bonds_smarts=[], rigidify_bonds_indices=[],
double_bond_penalty=50, atom_type_smarts={},
add_index_map=False,
stop_at_defaults=False, remove_smiles=False):
self.keep_nonpolar_hydrogens = keep_nonpolar_hydrogens
self.hydrate = hydrate
self.flexible_amides = flexible_amides
self.rigid_macrocycles = rigid_macrocycles
self.min_ring_size = min_ring_size
self.max_ring_size = max_ring_size
self.keep_chorded_rings = keep_chorded_rings
self.keep_equivalent_rings = keep_equivalent_rings
self.rigidify_bonds_smarts = rigidify_bonds_smarts
self.rigidify_bonds_indices = rigidify_bonds_indices
self.double_bond_penalty = double_bond_penalty
self.atom_type_smarts = atom_type_smarts
self.add_index_map = add_index_map
self.remove_smiles = remove_smiles
if stop_at_defaults: return # create an object to show just the defaults (e.g. to argparse)
self.setup = None
self._atom_typer = AtomTyper(self.atom_type_smarts)
self._bond_typer = BondTyperLegacy()
self._macrocycle_typer = FlexMacrocycle(
self.min_ring_size, self.max_ring_size, self.double_bond_penalty)
self._flex_builder = FlexibilityBuilder()
self._water_builder = HydrateMoleculeLegacy()
self._writer = PDBQTWriterLegacy()
self.is_ok = None
self.log = None
self._classes_setup = {Chem.rdchem.Mol: RDKitMoleculeSetup}
if _has_openbabel:
self._classes_setup[ob.OBMol] = OBMoleculeSetup
if keep_chorded_rings and keep_equivalent_rings==False:
warnings.warn("keep_equivalent_rings=False ignored because keep_chorded_rings=True", RuntimeWarning)
@classmethod
def init_just_defaults(cls):
return cls(stop_at_defaults=True)
@classmethod
def from_config(cls, config):
expected_keys = cls.init_just_defaults().__dict__.keys()
bad_keys = [k for k in config if k not in expected_keys]
for key in bad_keys:
print("ERROR: unexpected key \"%s\" in MoleculePreparation.from_config()" % key, file=sys.stderr)
if len(bad_keys) > 0:
raise ValueError
p = cls(**config)
return p
def prepare(self, mol, root_atom_index=None, not_terminal_atoms=[]):
""" if protein_sidechain, C H N O will be removed,
root will be CA, and BEGIN/END_RES will be added.
"""
mol_type = type(mol)
if not mol_type in self._classes_setup:
raise TypeError("Molecule is not an instance of supported types: %s" % type(mol))
setup_class = self._classes_setup[mol_type]
setup = setup_class(mol,
keep_chorded_rings=self.keep_chorded_rings,
keep_equivalent_rings=self.keep_equivalent_rings)
self.setup = setup
# 1. assign atom types (including HB types, vectors and stuff)
# DISABLED TODO self.atom_typer.set_parm(mol)
self._atom_typer(setup)
# 2a. add pi-model + merge_h_pi (THIS CHANGE SOME ATOM TYPES)
# disabled
# 2b. merge_h_classic
if not self.keep_nonpolar_hydrogens:
setup.merge_hydrogen()
# 3. assign bond types by using SMARTS...
# - bonds should be typed even in rings (but set as non-rotatable)
# - if macrocycle is selected, they will be enabled (so they must be typed already!)
self._bond_typer(setup, self.flexible_amides, self.rigidify_bonds_smarts, self.rigidify_bonds_indices, not_terminal_atoms)
# 4. hydrate molecule
if self.hydrate:
self._water_builder.hydrate(setup)
# 5. break macrocycles into open/linear form
if self.rigid_macrocycles:
break_combo_data = None
bonds_in_rigid_rings = None # not true, but this is only needed when breaking macrocycles
else:
break_combo_data, bonds_in_rigid_rings = self._macrocycle_typer.search_macrocycle(setup)
# 6. build flexibility...
# 6.1 if macrocycles typed:
# - walk the setup graph by skipping proposed closures
# and score resulting flex_trees basing on the lenght
# of the branches generated
# - actually break the best closure bond (THIS CHANGES SOME ATOM TYPES)
# 6.2 - walk the graph and build the flextree
# 7. but disable all bonds that are in rings and not
# in flexible macrocycles
# TODO restore legacy AD types for PDBQT
#self._atom_typer.set_param_legacy(mol)
new_setup = self._flex_builder(setup,
root_atom_index=root_atom_index,
break_combo_data=break_combo_data,
bonds_in_rigid_rings=bonds_in_rigid_rings)
self.setup = new_setup
# TODO re-run typing after breaking bonds
# self.bond_typer.set_types_legacy(mol, exclude=[macrocycle_bonds])
self.is_ok = self._check()
def _check(self):
# verify that all atoms have been typed
is_ok = True
msg = ""
for idx in self.setup.atom_type:
atom_type = self.setup.atom_type[idx]
if atom_type is None:
msg += 'atom number %d has None type, mol name: %s' % (idx, self.setup.get_mol_name())
is_ok = False
self.log = msg
return is_ok
def show_setup(self):
if self.setup is not None:
tot_charge = 0
print("Molecule setup\n")
print("==============[ ATOMS ]===================================================")
print("idx | coords | charge |ign| atype | connections")
print("-----+----------------------------+--------+---+----------+--------------- . . . ")
for k, v in list(self.setup.coord.items()):
print("% 4d | % 8.3f % 8.3f % 8.3f | % 1.3f | %d" % (k, v[0], v[1], v[2],
self.setup.charge[k], self.setup.atom_ignore[k]),
"| % -8s |" % self.setup.atom_type[k],
self.setup.graph[k])
tot_charge += self.setup.charge[k]
print("-----+----------------------------+--------+---+----------+--------------- . . . ")
print(" TOT CHARGE: %3.3f" % tot_charge)
print("\n======[ DIRECTIONAL VECTORS ]==========")
for k, v in list(self.setup.coord.items()):
if k in self.setup.interaction_vector:
print("% 4d " % k, self.setup.atom_type[k], end=' ')
print("\n==============[ BONDS ]================")
# For readability, these keys are not shown for now
keys_to_not_show = ['bond_order', 'type']
for k, v in list(self.setup.bond.items()):
t = ', '.join('%s: %s' % (i, j) for i, j in v.items() if i not in keys_to_not_show)
print("% 8s - " % str(k), t)
self._macrocycle_typer.show_macrocycle_scores(self.setup)
print('')
def write_pdbqt_string(self, add_index_map=None, remove_smiles=None):
if self.is_ok is False:
raise RuntimeError("Molecule not OK, refusing to write PDBQT\n\nLOG:\n%s" % self.log)
if add_index_map is None: add_index_map = self.add_index_map
if remove_smiles is None: remove_smiles = self.remove_smiles
if self.setup is not None:
return self._writer.write_string(self.setup, add_index_map, remove_smiles)
else:
raise RuntimeError('Cannot generate PDBQT file, the molecule is not prepared.')
def write_pdbqt_file(self, pdbqt_filename, add_index_map=None, remove_smiles=None):
with open(pdbqt_filename,'w') as w:
w.write(self.write_pdbqt_string(add_index_map, remove_smiles))
def adapt_pdbqt_for_autodock4_flexres(self, pdbqt_string, res, chain, num):
""" adapt pdbqt_string to be compatible with AutoDock4 requirements:
- first and second atoms named CA and CB
- write BEGIN_RES / END_RES
- remove TORSDOF
this is for covalent docking (tethered)
"""
new_string = "BEGIN_RES %s %s %s\n" % (res, chain, num)
atom_number = 0
for line in pdbqt_string.split("\n"):
if line == "":
continue
if line.startswith("TORSDOF"):
continue
if line.startswith("ATOM"):
atom_number+=1
if atom_number == 1:
line = line[:13] + 'CA' + line[15:]
elif atom_number == 2:
line = line[:13] + 'CB' + line[15:]
new_string += line + '\n'
continue
new_string += line + '\n'
new_string += "END_RES %s %s %s\n" % (res, chain, num)
return new_string
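A minimal usage sketch for the class above, assuming RDKit is available; the SMILES string and output filename are illustrative and not taken from this file:
from rdkit import Chem
from rdkit.Chem import AllChem
mol = Chem.MolFromSmiles("CC(=O)Oc1ccccc1C(=O)O")  # illustrative ligand (aspirin)
mol = Chem.AddHs(mol)
AllChem.EmbedMolecule(mol)                 # generate 3D coordinates
preparator = MoleculePreparation()         # defaults: merge nonpolar H, no hydration
preparator.prepare(mol)
preparator.show_setup()                    # inspect atoms, charges, and bonds
preparator.write_pdbqt_file("ligand.pdbqt")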
| 42.892241
| 130
| 0.60195
|
import os
import sys
from collections import OrderedDict
import warnings
from rdkit import Chem
from .molsetup import OBMoleculeSetup
from .molsetup import RDKitMoleculeSetup
from .atomtyper import AtomTyper
from .bondtyper import BondTyperLegacy
from .hydrate import HydrateMoleculeLegacy
from .macrocycle import FlexMacrocycle
from .flexibility import FlexibilityBuilder
from .writer import PDBQTWriterLegacy
try:
from openbabel import openbabel as ob
except ImportError:
_has_openbabel = False
else:
_has_openbabel = True
class MoleculePreparation:
def __init__(self, keep_nonpolar_hydrogens=False,
hydrate=False, flexible_amides=False,
rigid_macrocycles=False, min_ring_size=7, max_ring_size=33,
keep_chorded_rings=False, keep_equivalent_rings=False,
rigidify_bonds_smarts=[], rigidify_bonds_indices=[],
double_bond_penalty=50, atom_type_smarts={},
add_index_map=False,
stop_at_defaults=False, remove_smiles=False):
self.keep_nonpolar_hydrogens = keep_nonpolar_hydrogens
self.hydrate = hydrate
self.flexible_amides = flexible_amides
self.rigid_macrocycles = rigid_macrocycles
self.min_ring_size = min_ring_size
self.max_ring_size = max_ring_size
self.keep_chorded_rings = keep_chorded_rings
self.keep_equivalent_rings = keep_equivalent_rings
self.rigidify_bonds_smarts = rigidify_bonds_smarts
self.rigidify_bonds_indices = rigidify_bonds_indices
self.double_bond_penalty = double_bond_penalty
self.atom_type_smarts = atom_type_smarts
self.add_index_map = add_index_map
self.remove_smiles = remove_smiles
if stop_at_defaults: return
self.setup = None
self._atom_typer = AtomTyper(self.atom_type_smarts)
self._bond_typer = BondTyperLegacy()
self._macrocycle_typer = FlexMacrocycle(
self.min_ring_size, self.max_ring_size, self.double_bond_penalty)
self._flex_builder = FlexibilityBuilder()
self._water_builder = HydrateMoleculeLegacy()
self._writer = PDBQTWriterLegacy()
self.is_ok = None
self.log = None
self._classes_setup = {Chem.rdchem.Mol: RDKitMoleculeSetup}
if _has_openbabel:
self._classes_setup[ob.OBMol] = OBMoleculeSetup
if keep_chorded_rings and keep_equivalent_rings==False:
warnings.warn("keep_equivalent_rings=False ignored because keep_chorded_rings=True", RuntimeWarning)
@classmethod
def init_just_defaults(cls):
return cls(stop_at_defaults=True)
@classmethod
def from_config(cls, config):
expected_keys = cls.init_just_defaults().__dict__.keys()
bad_keys = [k for k in config if k not in expected_keys]
for key in bad_keys:
print("ERROR: unexpected key \"%s\" in MoleculePreparation.from_config()" % key, file=sys.stderr)
if len(bad_keys) > 0:
raise ValueError("unexpected keys in config: %s" % bad_keys)
p = cls(**config)
return p
def prepare(self, mol, root_atom_index=None, not_terminal_atoms=[]):
mol_type = type(mol)
if mol_type not in self._classes_setup:
raise TypeError("Molecule is not an instance of supported types: %s" % type(mol))
setup_class = self._classes_setup[mol_type]
setup = setup_class(mol,
keep_chorded_rings=self.keep_chorded_rings,
keep_equivalent_rings=self.keep_equivalent_rings)
self.setup = setup
self._atom_typer(setup)
if not self.keep_nonpolar_hydrogens:
setup.merge_hydrogen()
self._bond_typer(setup, self.flexible_amides, self.rigidify_bonds_smarts, self.rigidify_bonds_indices, not_terminal_atoms)
if self.hydrate:
self._water_builder.hydrate(setup)
if self.rigid_macrocycles:
break_combo_data = None
bonds_in_rigid_rings = None
else:
break_combo_data, bonds_in_rigid_rings = self._macrocycle_typer.search_macrocycle(setup)
new_setup = self._flex_builder(setup,
root_atom_index=root_atom_index,
break_combo_data=break_combo_data,
bonds_in_rigid_rings=bonds_in_rigid_rings)
self.setup = new_setup
self.is_ok = self._check()
def _check(self):
is_ok = True
msg = ""
for idx in self.setup.atom_type:
atom_type = self.setup.atom_type[idx]
if atom_type is None:
msg += 'atom number %d has None type, mol name: %s' % (idx, self.setup.get_mol_name())
is_ok = False
self.log = msg
return is_ok
def show_setup(self):
if self.setup is not None:
tot_charge = 0
print("Molecule setup\n")
print("==============[ ATOMS ]===================================================")
print("idx | coords | charge |ign| atype | connections")
print("-----+----------------------------+--------+---+----------+--------------- . . . ")
for k, v in list(self.setup.coord.items()):
print("% 4d | % 8.3f % 8.3f % 8.3f | % 1.3f | %d" % (k, v[0], v[1], v[2],
self.setup.charge[k], self.setup.atom_ignore[k]),
"| % -8s |" % self.setup.atom_type[k],
self.setup.graph[k])
tot_charge += self.setup.charge[k]
print("-----+----------------------------+--------+---+----------+--------------- . . . ")
print(" TOT CHARGE: %3.3f" % tot_charge)
print("\n======[ DIRECTIONAL VECTORS ]==========")
for k, v in list(self.setup.coord.items()):
if k in self.setup.interaction_vector:
print("% 4d " % k, self.setup.atom_type[k], end=' ')
print("\n==============[ BONDS ]================")
keys_to_not_show = ['bond_order', 'type']
for k, v in list(self.setup.bond.items()):
t = ', '.join('%s: %s' % (i, j) for i, j in v.items() if i not in keys_to_not_show)
print("% 8s - " % str(k), t)
self._macrocycle_typer.show_macrocycle_scores(self.setup)
print('')
def write_pdbqt_string(self, add_index_map=None, remove_smiles=None):
if self.is_ok is False:
raise RuntimeError("Molecule not OK, refusing to write PDBQT\n\nLOG:\n%s" % self.log)
if add_index_map is None: add_index_map = self.add_index_map
if remove_smiles is None: remove_smiles = self.remove_smiles
if self.setup is not None:
return self._writer.write_string(self.setup, add_index_map, remove_smiles)
else:
raise RuntimeError('Cannot generate PDBQT file, the molecule is not prepared.')
def write_pdbqt_file(self, pdbqt_filename, add_index_map=None, remove_smiles=None):
with open(pdbqt_filename,'w') as w:
w.write(self.write_pdbqt_string(add_index_map, remove_smiles))
def adapt_pdbqt_for_autodock4_flexres(self, pdbqt_string, res, chain, num):
new_string = "BEGIN_RES %s %s %s\n" % (res, chain, num)
atom_number = 0
for line in pdbqt_string.split("\n"):
if line == "":
continue
if line.startswith("TORSDOF"):
continue
if line.startswith("ATOM"):
atom_number+=1
if atom_number == 1:
line = line[:13] + 'CA' + line[15:]
elif atom_number == 2:
line = line[:13] + 'CB' + line[15:]
new_string += line + '\n'
continue
new_string += line + '\n'
new_string += "END_RES %s %s %s\n" % (res, chain, num)
return new_string
| true
| true
|
1c3e55e808e02330d778b3785ff224fbb576fff4
| 1,017
|
py
|
Python
|
Leetcode/Python/_1403.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | 1
|
2021-11-28T15:03:32.000Z
|
2021-11-28T15:03:32.000Z
|
Leetcode/Python/_1403.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
Leetcode/Python/_1403.py
|
Xrenya/algorithms
|
aded82cacde2f4f2114241907861251e0e2e5638
|
[
"MIT"
] | null | null | null |
from typing import List
class Solution:
def minSubsequence(self, nums: List[int]) -> List[int]:
def sortInplace(array):
for j in range(1, len(array)):
key = array[j]
i = j - 1
while i>-1 and key<array[i]:
array[i+1] = array[i]
i -= 1
array[i+1] = key
return array
nums = sortInplace(nums)
acc = 0
for num in nums:
acc += num
greater = 0
array = []
while greater<acc+1:
num = nums.pop()
array.append(num)
greater += num
acc -= num
return array
class Solution:
def minSubsequence(self, nums: List[int]) -> List[int]:
nums = sorted(nums)
acc = sum(nums)
greater = 0
array = []
while greater<acc+1:
num = nums.pop()
array.append(num)
greater += num
acc -= num
return array
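As a quick sanity check, using the example from the problem statement (the chosen subsequence must have a strictly greater sum than the remaining elements, be of minimum size, and be returned in non-increasing order). Note that the second Solution definition shadows the first, so these asserts exercise the built-in-sort variant:
assert Solution().minSubsequence([4, 3, 10, 9, 8]) == [10, 9]  # 10 + 9 > 4 + 3 + 8
assert Solution().minSubsequence([6]) == [6]                   # a single element trivially dominates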
| 26.763158
| 65
| 0.427729
|
from typing import List
class Solution:
def minSubsequence(self, nums: List[int]) -> List[int]:
def sortInplace(array):
for j in range(1, len(array)):
key = array[j]
i = j - 1
while i>-1 and key<array[i]:
array[i+1] = array[i]
i -= 1
array[i+1] = key
return array
nums = sortInplace(nums)
acc = 0
for num in nums:
acc += num
greater = 0
array = []
while greater<acc+1:
num = nums.pop()
array.append(num)
greater += num
acc -= num
return array
class Solution:
def minSubsequence(self, nums: List[int]) -> List[int]:
nums = sorted(nums)
acc = sum(nums)
greater = 0
array = []
while greater<acc+1:
num = nums.pop()
array.append(num)
greater += num
acc -= num
return array
| true
| true
|
1c3e5785efc594605381dfaf666e99e54412cda7
| 558
|
py
|
Python
|
cgn/regop/operators/identity_operator.py
|
FabianKP/cgn
|
9963e60c4a4bf4f3869e43d1dfbe11da74887ba5
|
[
"MIT"
] | 1
|
2022-03-21T00:40:23.000Z
|
2022-03-21T00:40:23.000Z
|
cgn/regop/operators/identity_operator.py
|
FabianKP/cgn
|
9963e60c4a4bf4f3869e43d1dfbe11da74887ba5
|
[
"MIT"
] | null | null | null |
cgn/regop/operators/identity_operator.py
|
FabianKP/cgn
|
9963e60c4a4bf4f3869e43d1dfbe11da74887ba5
|
[
"MIT"
] | null | null | null |
import numpy as np
from ..regularization_operator import RegularizationOperator
class IdentityOperator(RegularizationOperator):
"""
Corresponds to the identity operator :math:`I(v) = v`.
"""
def __init__(self, dim):
self._mat = np.identity(dim)
def fwd(self, v: np.ndarray) -> np.ndarray:
"""
See :py:attr:`RegularizationOperator.fwd`.
"""
return v
def adj(self, v: np.ndarray) -> np.ndarray:
"""
See :py:attr:`RegularizationOperator.adj`.
"""
return v
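A short usage sketch of the operator above (the dimension and vector are arbitrary):
import numpy as np
op = IdentityOperator(dim=3)
v = np.array([1.0, -2.0, 0.5])
assert np.array_equal(op.fwd(v), v)  # forward map is a no-op
assert np.array_equal(op.adj(v), v)  # the identity is self-adjoint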
| 22.32
| 61
| 0.596774
|
import numpy as np
from ..regularization_operator import RegularizationOperator
class IdentityOperator(RegularizationOperator):
def __init__(self, dim):
self._mat = np.identity(dim)
def fwd(self, v: np.ndarray) -> np.ndarray:
return v
def adj(self, v: np.ndarray) -> np.ndarray:
return v
| true
| true
|
1c3e58d667c0b70d47852665e13afc344c61b3be
| 1,524
|
py
|
Python
|
var/spack/repos/builtin/packages/py-ford/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-ford/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null |
var/spack/repos/builtin/packages/py-ford/package.py
|
renjithravindrankannath/spack
|
043b2cbb7c99d69a373f3ecbf35bc3b4638bcf85
|
[
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2
|
2019-02-08T20:37:20.000Z
|
2019-03-31T15:19:26.000Z
|
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack.package import *
class PyFord(PythonPackage):
"""FORD, standing for FORtran Documenter, is an automatic documentation generator
for modern Fortran programs."""
pypi = "FORD/FORD-6.1.11.tar.gz"
maintainers = ['wscullin']
version('6.1.12', sha256='101191e1aa33cfe780ea5b2d66d02c7281b9b314e82bb138d76809a49c08506a')
version('6.1.11', sha256='feb9a88040e717e84c632e4b023904ab36a463fc9a8ff80c8c7f86454e5d8043')
depends_on('py-wheel@0.29:', type='build')
depends_on('py-setuptools@48:', type='build')
depends_on('py-setuptools-scm@4:5+toml', type='build')
depends_on('py-setuptools-scm-git-archive', type='build')
depends_on('py-markdown', type=('build', 'run'))
depends_on('py-markdown-include@0.5.1:', type='run')
depends_on('py-md-environ', type=('build', 'run'), when='@:6.1.8')
depends_on('py-python-markdown-math@0.8:0', type='run')
depends_on('py-toposort', type=('build', 'run'))
depends_on('py-jinja2@2.1:', type=('build', 'run'))
depends_on('py-pygments', type=('build', 'run'))
depends_on('py-beautifulsoup4@4.5.1:', type=('build', 'run'))
depends_on('py-graphviz', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-importlib-metadata', when='^python@:3.7', type=('build', 'run'))
| 41.189189
| 96
| 0.681102
|
from spack.package import *
class PyFord(PythonPackage):
pypi = "FORD/FORD-6.1.11.tar.gz"
maintainers = ['wscullin']
version('6.1.12', sha256='101191e1aa33cfe780ea5b2d66d02c7281b9b314e82bb138d76809a49c08506a')
version('6.1.11', sha256='feb9a88040e717e84c632e4b023904ab36a463fc9a8ff80c8c7f86454e5d8043')
depends_on('py-wheel@0.29:', type='build')
depends_on('py-setuptools@48:', type='build')
depends_on('py-setuptools-scm@4:5+toml', type='build')
depends_on('py-setuptools-scm-git-archive', type='build')
depends_on('py-markdown', type=('build', 'run'))
depends_on('py-markdown-include@0.5.1:', type='run')
depends_on('py-md-environ', type=('build', 'run'), when='@:6.1.8')
depends_on('py-python-markdown-math@0.8:0', type='run')
depends_on('py-toposort', type=('build', 'run'))
depends_on('py-jinja2@2.1:', type=('build', 'run'))
depends_on('py-pygments', type=('build', 'run'))
depends_on('py-beautifulsoup4@4.5.1:', type=('build', 'run'))
depends_on('py-graphviz', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-importlib-metadata', when='^python@:3.7', type=('build', 'run'))
| true
| true
|
1c3e59634d8299d2a8cbde37093eaae92000aa89
| 1,686
|
py
|
Python
|
process_data.py
|
aaron-zou/messenger-analytics
|
ef44df54d3b5851236e296d973ba0c62aabefcf9
|
[
"MIT"
] | null | null | null |
process_data.py
|
aaron-zou/messenger-analytics
|
ef44df54d3b5851236e296d973ba0c62aabefcf9
|
[
"MIT"
] | null | null | null |
process_data.py
|
aaron-zou/messenger-analytics
|
ef44df54d3b5851236e296d973ba0c62aabefcf9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
from collections import Counter
import click
import msgpack # TODO: add cache saving and loading
import util
@click.command()
@click.option('--data-path',
required=True,
type=click.Path(writable=False),
help="Path to messages subfolder inside downloaded data folder")
@click.option('-n', default=50, type=int, help="Number of friends to output")
@click.option('--output-path',
default='./data/cache.bin',
type=click.Path(writable=True),
help="Path to where to save object cache")
def main(data_path, n, output_path):
message_threads = []
# Walk the downloaded data directory and construct MessageThread objects
for root, dirs, files in os.walk(data_path):
if dirs or "message.json" not in files:
continue
message_threads.append(util.MessageThread(
os.path.join(root, "message.json")))
click.echo("Total number of Messenger conversations: {}".format(
len(message_threads)))
# Print descending-sorted list of highest-event conversations
click.echo("Displaying top {}".format(n))
counter = Counter()
for message_thread in message_threads:
counter += message_thread.message_counts()
most_common = counter.most_common(n)
max_name_len = max(len(item[0]) for item in most_common)
max_count_len = len(str(most_common[0][1]))
for item in most_common:
name, count = item[0], str(item[1])
print('{} {} messages'.format(name.ljust(
max_name_len), count.ljust(max_count_len)))
if __name__ == '__main__':
main() # pylint: disable=E1120
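A hypothetical invocation of the script above; the data path is illustrative and should point at the messages subfolder of a downloaded Facebook data export:
# python process_data.py --data-path ~/facebook-data/messages -n 25 --output-path ./data/cache.bin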
| 33.72
| 78
| 0.657177
|
import os
from collections import Counter
import click
import msgpack
import util
@click.command()
@click.option('--data-path',
required=True,
type=click.Path(writable=False),
help="Path to messages subfolder inside downloaded data folder")
@click.option('-n', default=50, type=int, help="Number of friends to output")
@click.option('--output-path',
default='./data/cache.bin',
type=click.Path(writable=True),
help="Path to where to save object cache")
def main(data_path, n, output_path):
message_threads = []
for root, dirs, files in os.walk(data_path):
if dirs or "message.json" not in files:
continue
message_threads.append(util.MessageThread(
os.path.join(root, "message.json")))
click.echo("Total number of Messenger conversations: {}".format(
len(message_threads)))
click.echo("Displaying top {}".format(n))
counter = Counter()
for message_thread in message_threads:
counter += message_thread.message_counts()
most_common = counter.most_common(n)
max_name_len = max(len(item[0]) for item in most_common)
max_count_len = len(str(most_common[0][1]))
for item in most_common:
name, count = item[0], str(item[1])
print('{} {} messages'.format(name.ljust(
max_name_len), count.ljust(max_count_len)))
if __name__ == '__main__':
main()
| true
| true
|
1c3e5e5858537d5973709b737be108ceedebf419
| 1,435
|
py
|
Python
|
cogs/war-reporter.py
|
BeyondBoy1/WookieForce
|
9c47bc60d0672e929ce9bcbd47931e235ff81036
|
[
"MIT"
] | 1
|
2021-05-31T11:45:48.000Z
|
2021-05-31T11:45:48.000Z
|
cogs/war-reporter.py
|
BeyondBoy1/WookieForce
|
9c47bc60d0672e929ce9bcbd47931e235ff81036
|
[
"MIT"
] | null | null | null |
cogs/war-reporter.py
|
BeyondBoy1/WookieForce
|
9c47bc60d0672e929ce9bcbd47931e235ff81036
|
[
"MIT"
] | null | null | null |
import coc
import creds
from discord.ext import commands
CLAN_TAG = creds.clan_tag
WAR_REPORT_CHANNEL_ID = creds.war_channel
REPORT_STYLE = """
{att.attacker.name} (No. {att.attacker.map_position}, TH{att.attacker.town_hall}) just {verb} {att.defender.name}
(No. {att.defender.map_position}, TH{att.defender.town_hall}) for {att.stars} stars and {att.destruction}%.
"""
class WarReporter(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.bot.coc.add_events(
self.on_war_attack,
self.on_war_state_change
)
self.bot.coc.add_war_updates(CLAN_TAG)
def cog_unload(self):
self.bot.coc.remove_events(
self.on_war_attack,
self.on_war_state_change
)
self.bot.coc.stop_updates("war")
@property
def report_channel(self):
return self.bot.get_channel(WAR_REPORT_CHANNEL_ID)
@coc.WarEvents.war_attack()
async def on_war_attack(self, attack, war):
if attack.attacker.is_opponent:
verb = "defended"
else:
verb = "attacked"
await self.report_channel.send(REPORT_STYLE.format(att=attack, verb=verb))
@coc.WarEvents.state()
async def on_war_state_change(self, current_state, war):
await self.report_channel.send("{0.clan.name} just entered {1} state!".format(war, current_state))
def setup(bot):
bot.add_cog(WarReporter(bot))
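A hedged wiring sketch for the cog above: it expects a coc.py events client attached to the bot as bot.coc, and assumes creds provides clan_tag and war_channel as in the imports. The coc.login signature follows the older coc.py API and the credentials are placeholders, so treat the details as assumptions:
import coc
from discord.ext import commands
bot = commands.Bot(command_prefix="!")
bot.coc = coc.login("email@example.com", "password", client=coc.EventsClient)  # placeholder credentials
bot.load_extension("cogs.war-reporter")
bot.run("DISCORD_TOKEN")  # placeholder token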
| 27.596154
| 114
| 0.666899
|
import coc
import creds
from discord.ext import commands
CLAN_TAG = creds.clan_tag
WAR_REPORT_CHANNEL_ID = creds.war_channel
REPORT_STYLE = """
{att.attacker.name} (No. {att.attacker.map_position}, TH{att.attacker.town_hall}) just {verb} {att.defender.name}
(No. {att.defender.map_position}, TH{att.defender.town_hall}) for {att.stars} stars and {att.destruction}%.
"""
class WarReporter(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.bot.coc.add_events(
self.on_war_attack,
self.on_war_state_change
)
self.bot.coc.add_war_updates(CLAN_TAG)
def cog_unload(self):
self.bot.coc.remove_events(
self.on_war_attack,
self.on_war_state_change
)
self.bot.coc.stop_updates("war")
@property
def report_channel(self):
return self.bot.get_channel(WAR_REPORT_CHANNEL_ID)
@coc.WarEvents.war_attack()
async def on_war_attack(self, attack, war):
if attack.attacker.is_opponent:
verb = "defended"
else:
verb = "attacked"
await self.report_channel.send(REPORT_STYLE.format(att=attack, verb=verb))
@coc.WarEvents.state()
async def on_war_state_change(self, current_state, war):
await self.report_channel.send("{0.clan.name} just entered {1} state!".format(war, current_state))
def setup(bot):
bot.add_cog(WarReporter(bot))
| true
| true
|
1c3e5e95c0929efda0ad6e413536850bdd6dd9bb
| 1,340
|
py
|
Python
|
savu/test/travis/plugin_tests/saver_tests/hdf5_saver_test.py
|
jacob720/Savu
|
7afc9e10ea4944ceb39a83574f3142f025cf81e1
|
[
"Apache-2.0"
] | 39
|
2015-03-30T14:03:42.000Z
|
2022-03-16T16:50:33.000Z
|
savu/test/travis/plugin_tests/saver_tests/hdf5_saver_test.py
|
jacob720/Savu
|
7afc9e10ea4944ceb39a83574f3142f025cf81e1
|
[
"Apache-2.0"
] | 670
|
2015-02-11T11:08:09.000Z
|
2022-03-21T09:27:57.000Z
|
savu/test/travis/plugin_tests/saver_tests/hdf5_saver_test.py
|
jacob720/Savu
|
7afc9e10ea4944ceb39a83574f3142f025cf81e1
|
[
"Apache-2.0"
] | 54
|
2015-02-13T14:09:52.000Z
|
2022-01-24T13:57:09.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: hdf5_saver_test
:platform: Unix
:synopsis: unittest for hdf5 saver
.. moduleauthor:: Jessica Verschoyle <jessica.verschoyle@diamond.ac.uk>
"""
import unittest
import savu.test.test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class Hdf5SaverTest(unittest.TestCase):
global data_file, experiment
data_file = '24737.nxs'
experiment = None
def test_hdf5_saver(self):
process_list = 'savers/hdf5_saver_test.nxs'
options = tu.initialise_options(data_file, experiment, process_list)
run_protected_plugin_runner(options)
tu.cleanup(options)
if __name__ == "__main__":
unittest.main()
| 30.454545
| 76
| 0.73806
|
import unittest
import savu.test.test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class Hdf5SaverTest(unittest.TestCase):
global data_file, experiment
data_file = '24737.nxs'
experiment = None
def test_hdf5_saver(self):
process_list = 'savers/hdf5_saver_test.nxs'
options = tu.initialise_options(data_file, experiment, process_list)
run_protected_plugin_runner(options)
tu.cleanup(options)
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c3e5f127a35de9cd737fa25f47565a78e3c6411
| 5,190
|
py
|
Python
|
PaddleCV/image_classification/legacy/models/mobilenet.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 5
|
2021-09-28T13:28:01.000Z
|
2021-12-21T07:25:44.000Z
|
PaddleCV/image_classification/legacy/models/mobilenet.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 1
|
2019-11-18T03:03:37.000Z
|
2019-11-18T03:03:37.000Z
|
PaddleCV/image_classification/legacy/models/mobilenet.py
|
suytingwan/models
|
ccdbfe77d071cc19b55fb9f4b738912e35d982ef
|
[
"Apache-2.0"
] | 4
|
2021-08-11T08:25:10.000Z
|
2021-10-16T07:41:59.000Z
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
__all__ = ['MobileNet']
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class MobileNet():
def __init__(self):
self.params = train_parameters
def net(self, input, class_dim=1000, scale=1.0):
# conv1: 112x112
input = self.conv_bn_layer(
input,
filter_size=3,
channels=3,
num_filters=int(32 * scale),
stride=2,
padding=1)
# 56x56
input = self.depthwise_separable(
input,
num_filters1=32,
num_filters2=64,
num_groups=32,
stride=1,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=64,
num_filters2=128,
num_groups=64,
stride=2,
scale=scale)
# 28x28
input = self.depthwise_separable(
input,
num_filters1=128,
num_filters2=128,
num_groups=128,
stride=1,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=128,
num_filters2=256,
num_groups=128,
stride=2,
scale=scale)
# 14x14
input = self.depthwise_separable(
input,
num_filters1=256,
num_filters2=256,
num_groups=256,
stride=1,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=256,
num_filters2=512,
num_groups=256,
stride=2,
scale=scale)
# 14x14
for _ in range(5):
input = self.depthwise_separable(
input,
num_filters1=512,
num_filters2=512,
num_groups=512,
stride=1,
scale=scale)
# 7x7
input = self.depthwise_separable(
input,
num_filters1=512,
num_filters2=1024,
num_groups=512,
stride=2,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=1024,
num_filters2=1024,
num_groups=1024,
stride=1,
scale=scale)
input = fluid.layers.pool2d(
input=input,
pool_size=0,
pool_stride=1,
pool_type='avg',
global_pooling=True)
output = fluid.layers.fc(input=input,
size=class_dim,
param_attr=ParamAttr(initializer=MSRA()))
return output
def conv_bn_layer(self,
input,
filter_size,
num_filters,
stride,
padding,
channels=None,
num_groups=1,
act='relu',
use_cudnn=True):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=ParamAttr(initializer=MSRA()),
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def depthwise_separable(self, input, num_filters1, num_filters2, num_groups,
stride, scale):
depthwise_conv = self.conv_bn_layer(
input=input,
filter_size=3,
num_filters=int(num_filters1 * scale),
stride=stride,
padding=1,
num_groups=int(num_groups * scale),
use_cudnn=False)
pointwise_conv = self.conv_bn_layer(
input=depthwise_conv,
filter_size=1,
num_filters=int(num_filters2 * scale),
stride=1,
padding=0)
return pointwise_conv
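The depthwise_separable helper factors a standard convolution into a depthwise 3x3 followed by a pointwise 1x1 convolution, which is the core MobileNet efficiency trick. A minimal construction sketch using the legacy fluid API this file targets; the input shape and class count are illustrative:
import paddle.fluid as fluid
image = fluid.layers.data(name='image', shape=[3, 224, 224], dtype='float32')
model = MobileNet()
logits = model.net(image, class_dim=1000, scale=1.0)  # output of the final FC layer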
| 28.833333
| 80
| 0.529672
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import paddle.fluid as fluid
from paddle.fluid.initializer import MSRA
from paddle.fluid.param_attr import ParamAttr
__all__ = ['MobileNet']
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"batch_size": 256,
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class MobileNet():
def __init__(self):
self.params = train_parameters
def net(self, input, class_dim=1000, scale=1.0):
input = self.conv_bn_layer(
input,
filter_size=3,
channels=3,
num_filters=int(32 * scale),
stride=2,
padding=1)
input = self.depthwise_separable(
input,
num_filters1=32,
num_filters2=64,
num_groups=32,
stride=1,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=64,
num_filters2=128,
num_groups=64,
stride=2,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=128,
num_filters2=128,
num_groups=128,
stride=1,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=128,
num_filters2=256,
num_groups=128,
stride=2,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=256,
num_filters2=256,
num_groups=256,
stride=1,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=256,
num_filters2=512,
num_groups=256,
stride=2,
scale=scale)
for _ in range(5):
input = self.depthwise_separable(
input,
num_filters1=512,
num_filters2=512,
num_groups=512,
stride=1,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=512,
num_filters2=1024,
num_groups=512,
stride=2,
scale=scale)
input = self.depthwise_separable(
input,
num_filters1=1024,
num_filters2=1024,
num_groups=1024,
stride=1,
scale=scale)
input = fluid.layers.pool2d(
input=input,
pool_size=0,
pool_stride=1,
pool_type='avg',
global_pooling=True)
output = fluid.layers.fc(input=input,
size=class_dim,
param_attr=ParamAttr(initializer=MSRA()))
return output
def conv_bn_layer(self,
input,
filter_size,
num_filters,
stride,
padding,
channels=None,
num_groups=1,
act='relu',
use_cudnn=True):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=padding,
groups=num_groups,
act=None,
use_cudnn=use_cudnn,
param_attr=ParamAttr(initializer=MSRA()),
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
def depthwise_separable(self, input, num_filters1, num_filters2, num_groups,
stride, scale):
depthwise_conv = self.conv_bn_layer(
input=input,
filter_size=3,
num_filters=int(num_filters1 * scale),
stride=stride,
padding=1,
num_groups=int(num_groups * scale),
use_cudnn=False)
pointwise_conv = self.conv_bn_layer(
input=depthwise_conv,
filter_size=1,
num_filters=int(num_filters2 * scale),
stride=1,
padding=0)
return pointwise_conv
| true
| true
|
1c3e60118f62a2af01266746e8cb66503874e06b
| 810
|
py
|
Python
|
backend/app/deployments/info.py
|
ComplexData-MILA/HTUI
|
2aa7b2c83a2deb7f6fd79d9604913fc85dc25f91
|
[
"Apache-2.0"
] | null | null | null |
backend/app/deployments/info.py
|
ComplexData-MILA/HTUI
|
2aa7b2c83a2deb7f6fd79d9604913fc85dc25f91
|
[
"Apache-2.0"
] | 20
|
2021-11-22T15:16:53.000Z
|
2022-01-04T16:55:26.000Z
|
backend/app/deployments/info.py
|
ComplexData-MILA/HTUI
|
2aa7b2c83a2deb7f6fd79d9604913fc85dc25f91
|
[
"Apache-2.0"
] | null | null | null |
import ray
from ray import serve
import urllib.parse
from ..app import app
def get_endpoint(deployment_name: str):
return urllib.parse.urlparse(serve.get_deployment(deployment_name).url).path
@serve.deployment(name='info', route_prefix="/", ray_actor_options={"num_cpus": 0.1})
@serve.ingress(app)
class APIInfoDeployment:
@app.get("/")
async def index(self):
return 'Hello!'
@app.get("/graph")
async def graphs(self):
return ['pole']
@app.get("/provider")
async def providers(self):
return {
'Random' : { 'name': 'Random', 'description': '', 'endpoint': get_endpoint('provider.random') },
'PageRank' : { 'name': 'PageRank', 'description': '', 'endpoint': get_endpoint('provider.pagerank') }
}
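A hedged deployment sketch matching the Serve 1.x decorators above; note that the /provider route additionally assumes deployments named provider.random and provider.pagerank are already running:
import ray
from ray import serve
ray.init()
serve.start(detached=True)
APIInfoDeployment.deploy()  # serves "/", "/graph", and "/provider"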
| 28.928571
| 113
| 0.644444
|
import ray
from ray import serve
import urllib.parse
from ..app import app
def get_endpoint(deployment_name: str):
return urllib.parse.urlparse(serve.get_deployment(deployment_name).url).path
@serve.deployment(name='info', route_prefix="/", ray_actor_options={"num_cpus": 0.1})
@serve.ingress(app)
class APIInfoDeployment:
@app.get("/")
async def index(self):
return 'Hello!'
@app.get("/graph")
async def graphs(self):
return ['pole']
@app.get("/provider")
async def providers(self):
return {
'Random' : { 'name': 'Random', 'description': '', 'endpoint': get_endpoint('provider.random') },
'PageRank' : { 'name': 'PageRank', 'description': '', 'endpoint': get_endpoint('provider.pagerank') }
}
| true
| true
|
1c3e6117bda03f7d7b432c1399b91fe766ad7fa8
| 1,105
|
py
|
Python
|
psp/cainfo.py
|
ZLLentz/pyca
|
5e18525fcd2b39c68c5d8dd4b543d4d8e12dd5f2
|
[
"BSD-3-Clause-LBNL"
] | 5
|
2017-04-11T17:47:35.000Z
|
2021-08-06T17:38:47.000Z
|
psp/cainfo.py
|
ZLLentz/pyca
|
5e18525fcd2b39c68c5d8dd4b543d4d8e12dd5f2
|
[
"BSD-3-Clause-LBNL"
] | 11
|
2017-09-25T23:32:59.000Z
|
2018-06-25T23:38:44.000Z
|
psp/cainfo.py
|
ZLLentz/pyca
|
5e18525fcd2b39c68c5d8dd4b543d4d8e12dd5f2
|
[
"BSD-3-Clause-LBNL"
] | 7
|
2017-09-23T01:56:14.000Z
|
2020-12-18T02:24:11.000Z
|
#!/usr/bin/env python
import sys
import pyca
from options import Options
from Pv import Pv
if __name__ == '__main__':
options = Options(['pvnames'], ['timeout'], [])
try:
options.parse()
except Exception as msg:
options.usage(str(msg))
sys.exit()
pvnames = options.pvnames.split()
if options.timeout is not None:
timeout = float(options.timeout)
else:
timeout = 1.0
states = ["never connected", "previously connected", "connected", "closed"]
access = ['none', 'read only', 'write only', 'read-write']
for pvname in pvnames:
try:
pv = Pv(pvname)
pv.connect(timeout)
print(pv.name)
print(' State: ', states[pv.state()])
print(' Host: ', pv.host())
print(' Access:', access[pv.rwaccess()])
print(' Type: ', pv.type())
print(' Count: ', pv.count())
except pyca.pyexc as e:
print('pyca exception: %s' % (e))
except pyca.caexc as e:
print('channel access exception: %s' % (e))
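A hypothetical invocation, assuming the Options helper takes the space-separated PV names as one positional argument and an optional --timeout flag (inferred from the code above, not confirmed):
# python cainfo.py "SOME:PV:NAME OTHER:PV:NAME" --timeout 2.0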
| 26.95122
| 79
| 0.541176
|
import sys
import pyca
from options import Options
from Pv import Pv
if __name__ == '__main__':
options = Options(['pvnames'], ['timeout'], [])
try:
options.parse()
except Exception as msg:
options.usage(str(msg))
sys.exit()
pvnames = options.pvnames.split()
if options.timeout is not None:
timeout = float(options.timeout)
else:
timeout = 1.0
states = ["never connected", "previously connected", "connected", "closed"]
access = ['none', 'read only', 'write only', 'read-write']
for pvname in pvnames:
try:
pv = Pv(pvname)
pv.connect(timeout)
print(pv.name)
print(' State: ', states[pv.state()])
print(' Host: ', pv.host())
print(' Access:', access[pv.rwaccess()])
print(' Type: ', pv.type())
print(' Count: ', pv.count())
except pyca.pyexc as e:
print('pyca exception: %s' % (e))
except pyca.caexc as e:
print('channel access exception: %s' % (e))
| true
| true
|
1c3e6226c1833759f2f38ad6e6e1d37b078e47a9
| 13,351
|
py
|
Python
|
AutomatedTesting/Gem/PythonTests/Atom/tests/hydra_GPUTest_LightComponent.py
|
pollend/o3de
|
02b6b1dbf4d9889b55d4c11e049aa5b1804c9897
|
[
"Apache-2.0",
"MIT"
] | 8
|
2021-08-31T02:14:19.000Z
|
2021-12-28T19:20:59.000Z
|
AutomatedTesting/Gem/PythonTests/Atom/tests/hydra_GPUTest_LightComponent.py
|
pollend/o3de
|
02b6b1dbf4d9889b55d4c11e049aa5b1804c9897
|
[
"Apache-2.0",
"MIT"
] | 8
|
2021-07-12T13:55:00.000Z
|
2021-10-04T14:53:21.000Z
|
AutomatedTesting/Gem/PythonTests/Atom/tests/hydra_GPUTest_LightComponent.py
|
pollend/o3de
|
02b6b1dbf4d9889b55d4c11e049aa5b1804c9897
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-09-16T05:06:18.000Z
|
2021-09-16T05:06:18.000Z
|
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
"""
import os
import sys
import azlmbr.asset as asset
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.math as math
import azlmbr.paths
import azlmbr.legacy.general as general
sys.path.append(os.path.join(azlmbr.paths.projectroot, "Gem", "PythonTests"))
import editor_python_test_tools.hydra_editor_utils as hydra
from Atom.atom_utils import atom_component_helper, atom_constants, screenshot_utils
from editor_python_test_tools.editor_test_helper import EditorTestHelper
helper = EditorTestHelper(log_prefix="Atom_EditorTestHelper")
LEVEL_NAME = "auto_test"
LIGHT_COMPONENT = "Light"
LIGHT_TYPE_PROPERTY = 'Controller|Configuration|Light type'
DEGREE_RADIAN_FACTOR = 0.0174533
def run():
"""
Sets up the tests by making sure the required level is created & setup correctly.
It then executes 2 test cases - see each associated test function's docstring for more info.
Finally, prints the string "Light component tests completed" upon completion.
Tests will fail immediately if any of these log lines are found:
1. Trace::Assert
2. Trace::Error
3. Traceback (most recent call last):
:return: None
"""
atom_component_helper.create_basic_atom_level(level_name=LEVEL_NAME)
# Run tests.
area_light_test()
spot_light_test()
general.log("Light component tests completed.")
def area_light_test():
"""
Basic test for the "Light" component attached to an "area_light" entity.
Test Case - Light Component: Capsule, Spot (disk), and Point (sphere):
1. Creates "area_light" entity w/ a Light component that has a Capsule Light type w/ the color set to 255, 0, 0
2. Enters game mode to take a screenshot for comparison, then exits game mode.
3. Sets the Light component Intensity Mode to Lumens (default).
4. Ensures the Light component Mode is Automatic (default).
5. Sets the Intensity value of the Light component to 0.0
6. Enters game mode again, takes another screenshot for comparison, then exits game mode.
7. Updates the Intensity value of the Light component to 1000.0
8. Enters game mode again, takes another screenshot for comparison, then exits game mode.
9. Swaps the Capsule light type option to Spot (disk) light type on the Light component
10. Updates "area_light" entity Transform rotate value to x: 90.0, y:0.0, z:0.0
11. Enters game mode again, takes another screenshot for comparison, then exits game mode.
12. Swaps the Spot (disk) light type for the Point (sphere) light type in the Light component.
13. Enters game mode again, takes another screenshot for comparison, then exits game mode.
14. Deletes the Light component from the "area_light" entity and verifies its successful.
"""
# Create an "area_light" entity with "Light" component using Light type of "Capsule"
area_light_entity_name = "area_light"
area_light = hydra.Entity(area_light_entity_name)
area_light.create_entity(math.Vector3(-1.0, -2.0, 3.0), [LIGHT_COMPONENT])
general.log(
f"{area_light_entity_name}_test: Component added to the entity: "
f"{hydra.has_components(area_light.id, [LIGHT_COMPONENT])}")
light_component_id_pair = hydra.attach_component_to_entity(area_light.id, LIGHT_COMPONENT)
# Select the "Capsule" light type option.
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_id_pair,
LIGHT_TYPE_PROPERTY,
atom_constants.LIGHT_TYPES['capsule']
)
# Update color and take screenshot in game mode
color = math.Color(255.0, 0.0, 0.0, 0.0)
area_light.get_set_test(0, "Controller|Configuration|Color", color)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_1", area_light_entity_name)
# Update intensity value to 0.0 and take screenshot in game mode
area_light.get_set_test(0, "Controller|Configuration|Attenuation Radius|Mode", 1)
area_light.get_set_test(0, "Controller|Configuration|Intensity", 0.0)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_2", area_light_entity_name)
# Update intensity value to 1000.0 and take screenshot in game mode
area_light.get_set_test(0, "Controller|Configuration|Intensity", 1000.0)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_3", area_light_entity_name)
# Swap the "Capsule" light type option to "Spot (disk)" light type
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_id_pair,
LIGHT_TYPE_PROPERTY,
atom_constants.LIGHT_TYPES['spot_disk']
)
area_light_rotation = math.Vector3(DEGREE_RADIAN_FACTOR * 90.0, 0.0, 0.0)
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetLocalRotation", area_light.id, area_light_rotation)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_4", area_light_entity_name)
# Swap the "Spot (disk)" light type to the "Point (sphere)" light type and take screenshot.
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_id_pair,
LIGHT_TYPE_PROPERTY,
atom_constants.LIGHT_TYPES['sphere']
)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_5", area_light_entity_name)
editor.ToolsApplicationRequestBus(bus.Broadcast, "DeleteEntityById", area_light.id)
def spot_light_test():
"""
Basic test for the Light component attached to a "spot_light" entity.
Test Case - Light Component: Spot (disk) with shadows & colors:
1. Creates "spot_light" entity w/ a Light component attached to it.
2. Selects the "directional_light" entity already present in the level and disables it.
3. Selects the "global_skylight" entity already present in the level and disables the HDRi Skybox component,
as well as the Global Skylight (IBL) component.
4. Enters game mode to take a screenshot for comparison, then exits game mode.
5. Selects the "ground_plane" entity and updates its material to a new material.
6. Enters game mode to take a screenshot for comparison, then exits game mode.
7. Selects the "spot_light" entity and increases the Light component Intensity to 800 lm
8. Enters game mode to take a screenshot for comparison, then exits game mode.
9. Selects the "spot_light" entity and sets the Light component Color to 47, 75, 37
10. Enters game mode to take a screenshot for comparison, then exits game mode.
11. Selects the "spot_light" entity and modifies the Shutter controls to the following values:
- Enable shutters: True
- Inner Angle: 60.0
- Outer Angle: 75.0
12. Enters game mode to take a screenshot for comparison, then exits game mode.
13. Selects the "spot_light" entity and modifies the Shadow controls to the following values:
- Enable Shadow: True
- ShadowmapSize: 256
14. Modifies the world translate position of the "spot_light" entity to 0.7, -2.0, 1.9 (for casting shadows better)
15. Enters game mode to take a screenshot for comparison, then exits game mode.
"""
# Disable "Directional Light" component for the "directional_light" entity
# "directional_light" entity is created by the create_basic_atom_level() function by default.
directional_light_entity_id = hydra.find_entity_by_name("directional_light")
directional_light = hydra.Entity(name='directional_light', id=directional_light_entity_id)
directional_light_component_type = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["Directional Light"], 0)[0]
directional_light_component = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'GetComponentOfType', directional_light.id, directional_light_component_type
).GetValue()
editor.EditorComponentAPIBus(bus.Broadcast, "DisableComponents", [directional_light_component])
general.idle_wait(0.5)
# Disable "Global Skylight (IBL)" and "HDRi Skybox" components for the "global_skylight" entity
global_skylight_entity_id = hydra.find_entity_by_name("global_skylight")
global_skylight = hydra.Entity(name='global_skylight', id=global_skylight_entity_id)
global_skylight_component_type = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["Global Skylight (IBL)"], 0)[0]
global_skylight_component = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'GetComponentOfType', global_skylight.id, global_skylight_component_type
).GetValue()
editor.EditorComponentAPIBus(bus.Broadcast, "DisableComponents", [global_skylight_component])
hdri_skybox_component_type = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["HDRi Skybox"], 0)[0]
hdri_skybox_component = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'GetComponentOfType', global_skylight.id, hdri_skybox_component_type
).GetValue()
editor.EditorComponentAPIBus(bus.Broadcast, "DisableComponents", [hdri_skybox_component])
general.idle_wait(0.5)
# Create a "spot_light" entity with "Light" component using Light Type of "Spot (disk)"
spot_light_entity_name = "spot_light"
spot_light = hydra.Entity(spot_light_entity_name)
spot_light.create_entity(math.Vector3(0.7, -2.0, 1.0), [LIGHT_COMPONENT])
general.log(
f"{spot_light_entity_name}_test: Component added to the entity: "
f"{hydra.has_components(spot_light.id, [LIGHT_COMPONENT])}")
rotation = math.Vector3(DEGREE_RADIAN_FACTOR * 300.0, 0.0, 0.0)
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetLocalRotation", spot_light.id, rotation)
light_component_type = hydra.attach_component_to_entity(spot_light.id, LIGHT_COMPONENT)
editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_type,
LIGHT_TYPE_PROPERTY,
atom_constants.LIGHT_TYPES['spot_disk']
)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_1", spot_light_entity_name)
# Change default material of ground plane entity and take screenshot
ground_plane_entity_id = hydra.find_entity_by_name("ground_plane")
ground_plane = hydra.Entity(name='ground_plane', id=ground_plane_entity_id)
ground_plane_asset_path = os.path.join("Materials", "Presets", "MacBeth", "22_neutral_5-0_0-70d.azmaterial")
ground_plane_asset_value = asset.AssetCatalogRequestBus(
bus.Broadcast, "GetAssetIdByPath", ground_plane_asset_path, math.Uuid(), False)
material_property_path = "Default Material|Material Asset"
material_component_type = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["Material"], 0)[0]
material_component = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'GetComponentOfType', ground_plane.id, material_component_type).GetValue()
editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
material_component,
material_property_path,
ground_plane_asset_value
)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_2", spot_light_entity_name)
# Increase intensity value of the Spot light and take screenshot in game mode
spot_light.get_set_test(0, "Controller|Configuration|Intensity", 800.0)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_3", spot_light_entity_name)
# Update the Spot light color and take screenshot in game mode
color_value = math.Color(47.0 / 255.0, 75.0 / 255.0, 37.0 / 255.0, 255.0 / 255.0)
spot_light.get_set_test(0, "Controller|Configuration|Color", color_value)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_4", spot_light_entity_name)
# Update the Shutter controls of the Light component and take screenshot
spot_light.get_set_test(0, "Controller|Configuration|Shutters|Enable shutters", True)
spot_light.get_set_test(0, "Controller|Configuration|Shutters|Inner angle", 60.0)
spot_light.get_set_test(0, "Controller|Configuration|Shutters|Outer angle", 75.0)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_5", spot_light_entity_name)
# Update the Shadow controls, move the spot_light entity world translate position and take screenshot
spot_light.get_set_test(0, "Controller|Configuration|Shadows|Enable shadow", True)
spot_light.get_set_test(0, "Controller|Configuration|Shadows|Shadowmap size", 256.0)
azlmbr.components.TransformBus(
azlmbr.bus.Event, "SetWorldTranslation", spot_light.id, math.Vector3(0.7, -2.0, 1.9))
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_6", spot_light_entity_name)
if __name__ == "__main__":
run()
| 50.958015
| 119
| 0.752453
|
import os
import sys
import azlmbr.asset as asset
import azlmbr.bus as bus
import azlmbr.editor as editor
import azlmbr.math as math
import azlmbr.paths
import azlmbr.legacy.general as general
sys.path.append(os.path.join(azlmbr.paths.projectroot, "Gem", "PythonTests"))
import editor_python_test_tools.hydra_editor_utils as hydra
from Atom.atom_utils import atom_component_helper, atom_constants, screenshot_utils
from editor_python_test_tools.editor_test_helper import EditorTestHelper
helper = EditorTestHelper(log_prefix="Atom_EditorTestHelper")
LEVEL_NAME = "auto_test"
LIGHT_COMPONENT = "Light"
LIGHT_TYPE_PROPERTY = 'Controller|Configuration|Light type'
DEGREE_RADIAN_FACTOR = 0.0174533
def run():
atom_component_helper.create_basic_atom_level(level_name=LEVEL_NAME)
area_light_test()
spot_light_test()
general.log("Light component tests completed.")
def area_light_test():
area_light_entity_name = "area_light"
area_light = hydra.Entity(area_light_entity_name)
area_light.create_entity(math.Vector3(-1.0, -2.0, 3.0), [LIGHT_COMPONENT])
general.log(
f"{area_light_entity_name}_test: Component added to the entity: "
f"{hydra.has_components(area_light.id, [LIGHT_COMPONENT])}")
light_component_id_pair = hydra.attach_component_to_entity(area_light.id, LIGHT_COMPONENT)
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_id_pair,
LIGHT_TYPE_PROPERTY,
atom_constants.LIGHT_TYPES['capsule']
)
color = math.Color(255.0, 0.0, 0.0, 0.0)
area_light.get_set_test(0, "Controller|Configuration|Color", color)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_1", area_light_entity_name)
area_light.get_set_test(0, "Controller|Configuration|Attenuation Radius|Mode", 1)
area_light.get_set_test(0, "Controller|Configuration|Intensity", 0.0)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_2", area_light_entity_name)
area_light.get_set_test(0, "Controller|Configuration|Intensity", 1000.0)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_3", area_light_entity_name)
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_id_pair,
LIGHT_TYPE_PROPERTY,
atom_constants.LIGHT_TYPES['spot_disk']
)
area_light_rotation = math.Vector3(DEGREE_RADIAN_FACTOR * 90.0, 0.0, 0.0)
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetLocalRotation", area_light.id, area_light_rotation)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_4", area_light_entity_name)
azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_id_pair,
LIGHT_TYPE_PROPERTY,
atom_constants.LIGHT_TYPES['sphere']
)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("AreaLight_5", area_light_entity_name)
editor.ToolsApplicationRequestBus(bus.Broadcast, "DeleteEntityById", area_light.id)
def spot_light_test():
directional_light_entity_id = hydra.find_entity_by_name("directional_light")
directional_light = hydra.Entity(name='directional_light', id=directional_light_entity_id)
directional_light_component_type = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["Directional Light"], 0)[0]
directional_light_component = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'GetComponentOfType', directional_light.id, directional_light_component_type
).GetValue()
editor.EditorComponentAPIBus(bus.Broadcast, "DisableComponents", [directional_light_component])
general.idle_wait(0.5)
global_skylight_entity_id = hydra.find_entity_by_name("global_skylight")
global_skylight = hydra.Entity(name='global_skylight', id=global_skylight_entity_id)
global_skylight_component_type = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["Global Skylight (IBL)"], 0)[0]
global_skylight_component = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'GetComponentOfType', global_skylight.id, global_skylight_component_type
).GetValue()
editor.EditorComponentAPIBus(bus.Broadcast, "DisableComponents", [global_skylight_component])
hdri_skybox_component_type = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["HDRi Skybox"], 0)[0]
hdri_skybox_component = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'GetComponentOfType', global_skylight.id, hdri_skybox_component_type
).GetValue()
editor.EditorComponentAPIBus(bus.Broadcast, "DisableComponents", [hdri_skybox_component])
general.idle_wait(0.5)
spot_light_entity_name = "spot_light"
spot_light = hydra.Entity(spot_light_entity_name)
spot_light.create_entity(math.Vector3(0.7, -2.0, 1.0), [LIGHT_COMPONENT])
general.log(
f"{spot_light_entity_name}_test: Component added to the entity: "
f"{hydra.has_components(spot_light.id, [LIGHT_COMPONENT])}")
rotation = math.Vector3(DEGREE_RADIAN_FACTOR * 300.0, 0.0, 0.0)
azlmbr.components.TransformBus(azlmbr.bus.Event, "SetLocalRotation", spot_light.id, rotation)
light_component_type = hydra.attach_component_to_entity(spot_light.id, LIGHT_COMPONENT)
editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
light_component_type,
LIGHT_TYPE_PROPERTY,
atom_constants.LIGHT_TYPES['spot_disk']
)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_1", spot_light_entity_name)
ground_plane_entity_id = hydra.find_entity_by_name("ground_plane")
ground_plane = hydra.Entity(name='ground_plane', id=ground_plane_entity_id)
ground_plane_asset_path = os.path.join("Materials", "Presets", "MacBeth", "22_neutral_5-0_0-70d.azmaterial")
ground_plane_asset_value = asset.AssetCatalogRequestBus(
bus.Broadcast, "GetAssetIdByPath", ground_plane_asset_path, math.Uuid(), False)
material_property_path = "Default Material|Material Asset"
material_component_type = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'FindComponentTypeIdsByEntityType', ["Material"], 0)[0]
material_component = azlmbr.editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast, 'GetComponentOfType', ground_plane.id, material_component_type).GetValue()
editor.EditorComponentAPIBus(
azlmbr.bus.Broadcast,
'SetComponentProperty',
material_component,
material_property_path,
ground_plane_asset_value
)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_2", spot_light_entity_name)
spot_light.get_set_test(0, "Controller|Configuration|Intensity", 800.0)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_3", spot_light_entity_name)
color_value = math.Color(47.0 / 255.0, 75.0 / 255.0, 37.0 / 255.0, 255.0 / 255.0)
spot_light.get_set_test(0, "Controller|Configuration|Color", color_value)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_4", spot_light_entity_name)
spot_light.get_set_test(0, "Controller|Configuration|Shutters|Enable shutters", True)
spot_light.get_set_test(0, "Controller|Configuration|Shutters|Inner angle", 60.0)
spot_light.get_set_test(0, "Controller|Configuration|Shutters|Outer angle", 75.0)
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_5", spot_light_entity_name)
spot_light.get_set_test(0, "Controller|Configuration|Shadows|Enable shadow", True)
spot_light.get_set_test(0, "Controller|Configuration|Shadows|Shadowmap size", 256.0)
azlmbr.components.TransformBus(
azlmbr.bus.Event, "SetWorldTranslation", spot_light.id, math.Vector3(0.7, -2.0, 1.9))
general.idle_wait(1.0)
screenshot_utils.take_screenshot_game_mode("SpotLight_6", spot_light_entity_name)
if __name__ == "__main__":
run()
| true
| true
|
1c3e62dc38ce4440d5e4870548943f939964bef3
| 8,786
|
py
|
Python
|
tests/arxml/ar4_portinterface_test.py
|
zhuhaijun753/autosar-2
|
c99e48128cb55dfcde0f1030806977dde4d23218
|
[
"MIT"
] | null | null | null |
tests/arxml/ar4_portinterface_test.py
|
zhuhaijun753/autosar-2
|
c99e48128cb55dfcde0f1030806977dde4d23218
|
[
"MIT"
] | null | null | null |
tests/arxml/ar4_portinterface_test.py
|
zhuhaijun753/autosar-2
|
c99e48128cb55dfcde0f1030806977dde4d23218
|
[
"MIT"
] | 1
|
2020-03-15T15:05:40.000Z
|
2020-03-15T15:05:40.000Z
|
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
import autosar
from tests.arxml.common import ARXMLTestClass
import unittest
def _create_packages(ws):
package=ws.createPackage('DataTypes', role='DataType')
package.createSubPackage('CompuMethods', role='CompuMethod')
package.createSubPackage('DataConstrs', role='DataConstraint')
package.createSubPackage('Units', role='Unit')
package.createSubPackage('BaseTypes')
ws.createPackage('ModeDclrGroups', role = 'ModeDclrGroup')
ws.createPackage('Constants', role='Constant')
ws.createPackage('ComponentTypes', role='ComponentType')
ws.createPackage('PortInterfaces', role="PortInterface")
def _create_data_types(ws):
basetypes = ws.find('/DataTypes/BaseTypes')
basetypes.createSwBaseType('boolean', 1, 'BOOLEAN')
basetypes.createSwBaseType('uint8', 8, nativeDeclaration='uint8')
basetypes.createSwBaseType('uint16', 16, nativeDeclaration='uint16')
basetypes.createSwBaseType('uint32', 32, nativeDeclaration='uint32')
basetypes.createSwBaseType('float32', 32, encoding='IEEE754')
package = ws.find('DataTypes')
package.createImplementationDataType('boolean', valueTable=['FALSE','TRUE'], baseTypeRef='/DataTypes/BaseTypes/boolean', typeEmitter='Platform_Type')
package.createImplementationDataType('uint8', lowerLimit=0, upperLimit=255, baseTypeRef='/DataTypes/BaseTypes/uint8', typeEmitter='Platform_Type')
package.createImplementationDataType('uint16', lowerLimit=0, upperLimit=65535, baseTypeRef='/DataTypes/BaseTypes/uint16', typeEmitter='Platform_Type')
package.createImplementationDataType('uint32', lowerLimit=0, upperLimit=4294967295, baseTypeRef='/DataTypes/BaseTypes/uint32', typeEmitter='Platform_Type')
package.createImplementationDataTypeRef('OffOn_T', implementationTypeRef = '/DataTypes/uint8',
valueTable = ['OffOn_Off',
'OffOn_On',
'OffOn_Error',
'OffOn_NotAvailable'
])
package.createImplementationDataTypeRef('Seconds_T', '/DataTypes/uint8', lowerLimit=0, upperLimit=63)
package.createImplementationDataTypeRef('Minutes_T', '/DataTypes/uint8', lowerLimit=0, upperLimit=63)
package.createImplementationDataTypeRef('Hours_T', '/DataTypes/uint8', lowerLimit=0, upperLimit=31)
def _create_mode_declarations(ws):
package = ws.find('ModeDclrGroups')
package.createModeDeclarationGroup('VehicleMode', ["OFF",
"ACCESSORY",
"RUNNING",
"CRANKING",
], "OFF")
def _init_ws(ws):
_create_packages(ws)
_create_data_types(ws)
_create_mode_declarations(ws)
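# A minimal end-to-end sketch of how the helpers above fit together
# (illustrative only; 'demo.arxml' is a hypothetical output path, and
# ws.saveXML is assumed to be the workspace save call):
#
#   ws = autosar.workspace(version="4.2.2")
#   _init_ws(ws)
#   package = ws.find('/PortInterfaces')
#   package.createSenderReceiverInterface(
#       'HeaterPwrStat_I',
#       autosar.element.DataElement('HeaterPwrStat', 'OffOn_T'))
#   ws.saveXML('demo.arxml')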
class ARXML4PortInterfaceTest(ARXMLTestClass):
def test_create_sender_receiver_interface_single_element(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1 = package.createSenderReceiverInterface('HeaterPwrStat_I', autosar.element.DataElement('HeaterPwrStat', 'OffOn_T'))
self.assertEqual(pif1.dataElements[0].typeRef, '/DataTypes/OffOn_T')
file_name = 'ar4_sender_receiver_interface_single_element.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.SenderReceiverInterface)
self.assertEqual(len(pif2.dataElements), 1)
def test_create_sender_receiver_interface_multiple_elements(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1 = package.createSenderReceiverInterface('SystemTime_I', [
autosar.element.DataElement('Seconds', '/DataTypes/Seconds_T'),
autosar.element.DataElement('Minutes', '/DataTypes/Minutes_T'),
autosar.element.DataElement('Hours', '/DataTypes/Hours_T')
])
file_name = 'ar4_sender_receiver_interface_multiple_elements_explicit.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.SenderReceiverInterface)
self.assertEqual(len(pif2.dataElements), 3)
def test_create_client_server_interface_single_operation_no_return_no_service(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1=package.createClientServerInterface('FreeRunningTimer_I', ['GetTimeStamp'] )
pif1['GetTimeStamp'].createOutArgument('value', '/DataTypes/uint32')
file_name = 'ar4_client_server_interface_single_operation_no_return_no_service.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.ClientServerInterface)
self.assertEqual(pif2.isService, False)
self.assertEqual(len(pif2.operations), 1)
operation = pif2['GetTimeStamp']
self.assertIsInstance(operation, autosar.portinterface.Operation)
def test_create_client_server_interface_single_operation_no_return_is_service(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1=package.createClientServerInterface('FreeRunningTimer_I', ['GetTimeStamp'], isService=True )
arg = pif1['GetTimeStamp'].createOutArgument('value', '/DataTypes/uint32', 'NOT-ACCESSIBLE', 'USE-ARGUMENT-TYPE')
file_name = 'ar4_client_server_interface_single_operation_no_return_is_service.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.ClientServerInterface)
self.assertEqual(pif2.isService, True)
self.assertEqual(len(pif2.operations), 1)
operation = pif2['GetTimeStamp']
self.assertIsInstance(operation, autosar.portinterface.Operation)
def test_create_mode_switch_interface(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1 = package.createModeSwitchInterface('VehicleMode_I', autosar.mode.ModeGroup('mode', 'VehicleMode'))
self.assertEqual(pif1.modeGroup.typeRef, '/ModeDclrGroups/VehicleMode')
file_name = 'ar4_create_mode_switch_interface.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.ModeSwitchInterface)
self.assertEqual(pif1.modeGroup.name, pif2.modeGroup.name)
self.assertEqual(pif1.modeGroup.typeRef, pif2.modeGroup.typeRef)
if __name__ == '__main__':
unittest.main()
| 56.683871
| 160
| 0.674596
|
import os, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
import autosar
from tests.arxml.common import ARXMLTestClass
import unittest
def _create_packages(ws):
package=ws.createPackage('DataTypes', role='DataType')
package.createSubPackage('CompuMethods', role='CompuMethod')
package.createSubPackage('DataConstrs', role='DataConstraint')
package.createSubPackage('Units', role='Unit')
package.createSubPackage('BaseTypes')
ws.createPackage('ModeDclrGroups', role = 'ModeDclrGroup')
ws.createPackage('Constants', role='Constant')
ws.createPackage('ComponentTypes', role='ComponentType')
ws.createPackage('PortInterfaces', role="PortInterface")
def _create_data_types(ws):
basetypes = ws.find('/DataTypes/BaseTypes')
basetypes.createSwBaseType('boolean', 1, 'BOOLEAN')
basetypes.createSwBaseType('uint8', 8, nativeDeclaration='uint8')
basetypes.createSwBaseType('uint16', 16, nativeDeclaration='uint16')
basetypes.createSwBaseType('uint32', 32, nativeDeclaration='uint32')
basetypes.createSwBaseType('float32', 32, encoding='IEEE754')
package = ws.find('DataTypes')
package.createImplementationDataType('boolean', valueTable=['FALSE','TRUE'], baseTypeRef='/DataTypes/BaseTypes/boolean', typeEmitter='Platform_Type')
package.createImplementationDataType('uint8', lowerLimit=0, upperLimit=255, baseTypeRef='/DataTypes/BaseTypes/uint8', typeEmitter='Platform_Type')
package.createImplementationDataType('uint16', lowerLimit=0, upperLimit=65535, baseTypeRef='/DataTypes/BaseTypes/uint16', typeEmitter='Platform_Type')
package.createImplementationDataType('uint32', lowerLimit=0, upperLimit=4294967295, baseTypeRef='/DataTypes/BaseTypes/uint32', typeEmitter='Platform_Type')
package.createImplementationDataTypeRef('OffOn_T', implementationTypeRef = '/DataTypes/uint8',
valueTable = ['OffOn_Off',
'OffOn_On',
'OffOn_Error',
'OffOn_NotAvailable'
])
package.createImplementationDataTypeRef('Seconds_T', '/DataTypes/uint8', lowerLimit=0, upperLimit=63)
package.createImplementationDataTypeRef('Minutes_T', '/DataTypes/uint8', lowerLimit=0, upperLimit=63)
package.createImplementationDataTypeRef('Hours_T', '/DataTypes/uint8', lowerLimit=0, upperLimit=31)
def _create_mode_declarations(ws):
package = ws.find('ModeDclrGroups')
package.createModeDeclarationGroup('VehicleMode', ["OFF",
"ACCESSORY",
"RUNNING",
"CRANKING",
], "OFF")
def _init_ws(ws):
_create_packages(ws)
_create_data_types(ws)
_create_mode_declarations(ws)
class ARXML4PortInterfaceTest(ARXMLTestClass):
def test_create_sender_receiver_interface_single_element(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1 = package.createSenderReceiverInterface('HeaterPwrStat_I', autosar.element.DataElement('HeaterPwrStat', 'OffOn_T'))
self.assertEqual(pif1.dataElements[0].typeRef, '/DataTypes/OffOn_T')
file_name = 'ar4_sender_receiver_interface_single_element.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.SenderReceiverInterface)
self.assertEqual(len(pif2.dataElements), 1)
def test_create_sender_receiver_interface_multiple_elements(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1 = package.createSenderReceiverInterface('SystemTime_I', [
autosar.element.DataElement('Seconds', '/DataTypes/Seconds_T'),
autosar.element.DataElement('Minutes', '/DataTypes/Minutes_T'),
autosar.element.DataElement('Hours', '/DataTypes/Hours_T')
])
file_name = 'ar4_sender_receiver_interface_multiple_elements_explicit.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.SenderReceiverInterface)
self.assertEqual(len(pif2.dataElements), 3)
def test_create_client_server_interface_single_operation_no_return_no_service(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1=package.createClientServerInterface('FreeRunningTimer_I', ['GetTimeStamp'] )
pif1['GetTimeStamp'].createOutArgument('value', '/DataTypes/uint32')
file_name = 'ar4_client_server_interface_single_operation_no_return_no_service.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.ClientServerInterface)
self.assertEqual(pif2.isService, False)
self.assertEqual(len(pif2.operations), 1)
operation = pif2['GetTimeStamp']
self.assertIsInstance(operation, autosar.portinterface.Operation)
def test_create_client_server_interface_single_operation_no_return_is_service(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1=package.createClientServerInterface('FreeRunningTimer_I', ['GetTimeStamp'], isService=True )
arg = pif1['GetTimeStamp'].createOutArgument('value', '/DataTypes/uint32', 'NOT-ACCESSIBLE', 'USE-ARGUMENT-TYPE')
file_name = 'ar4_client_server_interface_single_operation_no_return_is_service.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.ClientServerInterface)
self.assertEqual(pif2.isService, True)
self.assertEqual(len(pif2.operations), 1)
operation = pif2['GetTimeStamp']
self.assertIsInstance(operation, autosar.portinterface.Operation)
def test_create_mode_switch_interface(self):
ws = autosar.workspace(version="4.2.2")
_init_ws(ws)
package = ws.find('/PortInterfaces')
pif1 = package.createModeSwitchInterface('VehicleMode_I', autosar.mode.ModeGroup('mode', 'VehicleMode'))
self.assertEqual(pif1.modeGroup.typeRef, '/ModeDclrGroups/VehicleMode')
file_name = 'ar4_create_mode_switch_interface.arxml'
generated_file = os.path.join(self.output_dir, file_name)
expected_file = os.path.join( 'expected_gen', 'portinterface', file_name)
self.save_and_check(ws, expected_file, generated_file)
ws2 = autosar.workspace(version="4.2.2")
ws2.loadXML(os.path.join(os.path.dirname(__file__), expected_file))
        pif2 = ws2.find(pif1.ref)
self.assertIsInstance(pif2, autosar.portinterface.ModeSwitchInterface)
self.assertEqual(pif1.modeGroup.name, pif2.modeGroup.name)
self.assertEqual(pif1.modeGroup.typeRef, pif2.modeGroup.typeRef)
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c3e630fd839b16105479f88dbb3ae9a39a1958a
| 3,059
|
py
|
Python
|
admissions/grades.py
|
dsavransky/admissions
|
ffdba2c93f5a02667f7506965313b8ed7dd9381b
|
[
"MIT"
] | null | null | null |
admissions/grades.py
|
dsavransky/admissions
|
ffdba2c93f5a02667f7506965313b8ed7dd9381b
|
[
"MIT"
] | null | null | null |
admissions/grades.py
|
dsavransky/admissions
|
ffdba2c93f5a02667f7506965313b8ed7dd9381b
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas
import scipy.interpolate
import requests
from html.parser import HTMLParser
def scrapegradedata(URL="http://gpa.eng.uci.edu/"):
page = requests.get(URL)
class GradeHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.intr = False
self.intd = False
self.ina = False
self.currattr = ""
self.titles = []
self.countries = []
self.intlgpas = []
self.usgpas = []
def handle_starttag(self, tag, attrs):
if tag == "tr":
self.intr = True
if tag == "td":
self.intd = True
self.currattr = attrs[0][1]
if tag == "a":
self.ina = True
def handle_endtag(self, tag):
if tag == "tr":
self.intr = False
if tag == "td":
self.intd = False
if tag == "a":
self.ina = False
def handle_data(self, data):
if self.intd:
if self.currattr == "views-field views-field-title":
if self.ina:
self.titles.append(data.strip())
elif self.currattr == "views-field views-field-field-country":
self.countries.append(data.strip())
elif self.currattr == "views-field views-field-field-intl-gpa":
self.intlgpas.append(data.strip())
elif self.currattr == "views-field views-field-field-us-gpa":
self.usgpas.append(data.strip())
else:
pass
# end HTMLParser
parser = GradeHTMLParser()
_ = parser.feed(page.text)
np.savez(
"grade_data",
titles=parser.titles,
countries=parser.countries,
intlgpas=parser.intlgpas,
usgpas=parser.usgpas,
)
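# A short sketch of reading the archive written above (np.savez appends the
# '.npz' suffix, so the file is 'grade_data.npz'; illustrative only):
#
#   data = np.load('grade_data.npz')
#   for title, country, gpa in zip(data['titles'], data['countries'],
#                                  data['intlgpas']):
#       print(title, country, gpa)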
def gengradedicts(grade_data="grade_data.xlsx"):
# generate school and country grade dictionaries
tmp = pandas.ExcelFile(grade_data, engine="openpyxl")
grades = tmp.parse("grades")
tmp.close()
defaults = np.array(["DEFAULT" in n for n in grades["Name"].values])
countrygrades = {}
for row in grades[defaults].iterrows():
countrygrades[
"{} - {}".format(row[1]["Country"], row[1]["GPAScale"])
] = scipy.interpolate.interp1d(
np.array(row[1]["SchoolGPA"].split("/")).astype(float),
np.array(row[1]["4ptGPA"].split("/")).astype(float),
kind="linear",
)
schoolgrades = {}
for row in grades[~defaults].iterrows():
schoolgrades[
"{} - {} - {}".format(row[1]["Name"], row[1]["Country"], row[1]["GPAScale"])
] = scipy.interpolate.interp1d(
np.array(row[1]["SchoolGPA"].split("/")).astype(float),
np.array(row[1]["4ptGPA"].split("/")).astype(float),
kind="linear",
)
return countrygrades, schoolgrades
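# A hedged usage sketch: each dictionary value is a scipy interp1d callable
# mapping a school-scale GPA onto the 4-point scale. The key format follows
# the construction above; the specific key below is hypothetical:
#
#   countrygrades, schoolgrades = gengradedicts()
#   us_gpa = float(countrygrades['India - 10.0'](8.2))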
| 30.287129
| 88
| 0.520758
|
import numpy as np
import pandas
import scipy.interpolate
import requests
from html.parser import HTMLParser
def scrapegradedata(URL="http://gpa.eng.uci.edu/"):
page = requests.get(URL)
class GradeHTMLParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.intr = False
self.intd = False
self.ina = False
self.currattr = ""
self.titles = []
self.countries = []
self.intlgpas = []
self.usgpas = []
def handle_starttag(self, tag, attrs):
if tag == "tr":
self.intr = True
if tag == "td":
self.intd = True
self.currattr = attrs[0][1]
if tag == "a":
self.ina = True
def handle_endtag(self, tag):
if tag == "tr":
self.intr = False
if tag == "td":
self.intd = False
if tag == "a":
self.ina = False
def handle_data(self, data):
if self.intd:
if self.currattr == "views-field views-field-title":
if self.ina:
self.titles.append(data.strip())
elif self.currattr == "views-field views-field-field-country":
self.countries.append(data.strip())
elif self.currattr == "views-field views-field-field-intl-gpa":
self.intlgpas.append(data.strip())
elif self.currattr == "views-field views-field-field-us-gpa":
self.usgpas.append(data.strip())
else:
pass
parser = GradeHTMLParser()
_ = parser.feed(page.text)
np.savez(
"grade_data",
titles=parser.titles,
countries=parser.countries,
intlgpas=parser.intlgpas,
usgpas=parser.usgpas,
)
def gengradedicts(grade_data="grade_data.xlsx"):
tmp = pandas.ExcelFile(grade_data, engine="openpyxl")
grades = tmp.parse("grades")
tmp.close()
defaults = np.array(["DEFAULT" in n for n in grades["Name"].values])
countrygrades = {}
for row in grades[defaults].iterrows():
countrygrades[
"{} - {}".format(row[1]["Country"], row[1]["GPAScale"])
] = scipy.interpolate.interp1d(
np.array(row[1]["SchoolGPA"].split("/")).astype(float),
np.array(row[1]["4ptGPA"].split("/")).astype(float),
kind="linear",
)
schoolgrades = {}
for row in grades[~defaults].iterrows():
schoolgrades[
"{} - {} - {}".format(row[1]["Name"], row[1]["Country"], row[1]["GPAScale"])
] = scipy.interpolate.interp1d(
np.array(row[1]["SchoolGPA"].split("/")).astype(float),
np.array(row[1]["4ptGPA"].split("/")).astype(float),
kind="linear",
)
return countrygrades, schoolgrades
| true
| true
|
1c3e633bf644ebaf5a7f7c2a5e95432142f3c7c9
| 1,004
|
py
|
Python
|
migrations/versions/4a9fa306c69c_.py
|
Anioko/frontcms
|
3a39c57881ae4d97b3aa65d033e30719aca648ac
|
[
"MIT"
] | 1
|
2020-06-26T05:03:48.000Z
|
2020-06-26T05:03:48.000Z
|
migrations/versions/4a9fa306c69c_.py
|
Anioko/frontcms
|
3a39c57881ae4d97b3aa65d033e30719aca648ac
|
[
"MIT"
] | 1
|
2021-06-02T02:15:07.000Z
|
2021-06-02T02:15:07.000Z
|
migrations/versions/4a9fa306c69c_.py
|
Anioko/frontcms
|
3a39c57881ae4d97b3aa65d033e30719aca648ac
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: 4a9fa306c69c
Revises: 3d2cc737a6c7
Create Date: 2020-07-24 12:48:22.326512
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4a9fa306c69c'
down_revision = '3d2cc737a6c7'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('payment_settings',
sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
sa.Column('name', sa.String(), nullable=True),
sa.Column('display_name', sa.String(), nullable=True),
sa.Column('value', sa.String(), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('payment_settings')
# ### end Alembic commands ###
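# Typical Alembic CLI usage for this revision (illustrative; assumes a
# standard alembic.ini configuration):
#   alembic upgrade 4a9fa306c69c     # apply this migration
#   alembic downgrade 3d2cc737a6c7   # revert to the previous revision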
| 27.135135
| 70
| 0.685259
|
from alembic import op
import sqlalchemy as sa
revision = '4a9fa306c69c'
down_revision = '3d2cc737a6c7'
branch_labels = None
depends_on = None
def upgrade():
    op.create_table('payment_settings',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('name', sa.String(), nullable=True),
    sa.Column('display_name', sa.String(), nullable=True),
    sa.Column('value', sa.String(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('updated_at', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
def downgrade():
    op.drop_table('payment_settings')
| true
| true
|
1c3e638e76c10ae20b3ae3c7635e42736dec7735
| 6,727
|
py
|
Python
|
viabel/convenience.py
|
SyanneLiu/viabel
|
947d7389184f4c5e58e762d7ef771ce1aae19dd4
|
[
"MIT"
] | 29
|
2019-10-20T21:10:35.000Z
|
2022-02-15T23:43:30.000Z
|
viabel/convenience.py
|
SyanneLiu/viabel
|
947d7389184f4c5e58e762d7ef771ce1aae19dd4
|
[
"MIT"
] | 29
|
2020-10-30T00:53:45.000Z
|
2021-03-11T07:41:08.000Z
|
viabel/convenience.py
|
SyanneLiu/viabel
|
947d7389184f4c5e58e762d7ef771ce1aae19dd4
|
[
"MIT"
] | 8
|
2019-10-22T13:08:54.000Z
|
2021-07-28T15:28:49.000Z
|
from viabel._psis import psislw
from viabel.approximations import MFGaussian
from viabel.diagnostics import all_diagnostics
from viabel.models import Model, StanModel
from viabel.objectives import ExclusiveKL
from viabel.optimization import FASO, RMSProp
__all__ = [
'bbvi',
'vi_diagnostics',
]
def bbvi(dimension, *, n_iters=10000, num_mc_samples=10, log_density=None,
approx=None, objective=None, fit=None, adaptive=True,
init_var_param=None, learning_rate=0.01,
RMS_kwargs=dict(), FASO_kwargs=dict()):
"""Fit a model using black-box variational inference.
Currently the objective is optimized using ``viabel.optimization.FASO``.
Parameters
----------
dimension : `int`
Dimension of the model parameter.
n_iters : `int`, optional
Number of iterations of the optimization.
num_mc_samples : `int`, optional
Number of Monte Carlo samples to use for estimating the gradient of
the objective.
log_density : `function`, optional
(Unnormalized) log density of the model. Must support automatic
differentiation with ``autograd``. Either ``log_density`` or ``fit``
must be provided.
approx : `ApproximationFamily` object, optional
The approximation family. The default is to use ``viabel.approximations.MFGaussian``.
objective : `VariationalObjective` class
The default is to use ``viabel.objectives.ExclusiveKL``.
fit : `StanFit4model` object, optional
If provided, a ``StanModel`` will be used. Both ``fit`` and
``log_density`` cannot be given.
init_var_param, optional
Initial variational parameter.
adaptive : `bool`, optional
If ``True``, use ``FASO`` with ``RMSProp``. Otherwise use ``RMSProp``.
learning_rate : `float`
Tuning parameter that determines the step size.
RMS_kwargs : `dict`, optional
Dictionary of keyword arguments to pass to ``RMSProp``.
FASO_kwargs : `dict`, optional
Dictionary of keyword arguments to pass to ``FASO``.
Returns
-------
results : `dict`
Contains the following entries: `objective` and results from optimizer
"""
if objective is not None:
if fit is not None or log_density is not None or approx is not None:
raise ValueError(
'if objective is specified, cannot specify fit, log_density, or approx')
approx = objective.approx
model = objective.model
else:
if log_density is None:
if fit is None:
raise ValueError(
'either log_density or fit must be specified if objective not given')
model = StanModel(fit)
elif fit is None:
model = Model(log_density)
else:
raise ValueError('log_density and fit cannot both be specified')
if approx is None:
approx = MFGaussian(dimension)
objective = ExclusiveKL(approx, model, num_mc_samples)
if init_var_param is None:
init_var_param = approx.init_param()
base_opt = RMSProp(learning_rate, **RMS_kwargs)
if adaptive:
opt = FASO(base_opt, **FASO_kwargs)
else:
opt = base_opt
opt_results = opt.optimize(n_iters, objective, init_var_param)
opt_results['objective'] = objective
return opt_results
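# A minimal sketch of calling bbvi on a toy target (the standard-normal
# log density is illustrative; the log density must support automatic
# differentiation with autograd, as documented above):
#
#   import autograd.numpy as anp
#
#   def log_density(x):
#       return -0.5 * anp.sum(x ** 2, axis=-1)
#
#   results = bbvi(2, log_density=log_density, n_iters=5000)
#   # results holds 'objective' plus whatever the optimizer returns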
def vi_diagnostics(var_param, *, objective=None, model=None, approx=None, n_samples=100000):
"""Check variational inference diagnostics.
Check Pareto k and 2-divergence diagnostics. Return additional diagnostics
with mean, standard deviation, and covariance error bounds.
Parameters
----------
var_param : `numpy.ndarray`, shape (var_param_dim,)
The variational parameter.
objective : `function`
model : `Model` object
approx : `ApproximationFamily` object
n_samples : `int`
The number of samples to use for the diagnostics.
Returns
-------
diagnostics : `dict`
Also includes samples and smoothed log weights.
See Also
--------
    diagnostics.all_diagnostics : Compute all diagnostics.
"""
if objective is None:
if model is None or approx is None:
raise ValueError('either objective or both model and approx must be specified')
elif model is not None or approx is not None:
raise ValueError('model and/or approx cannot be specified if objective is')
else:
model = objective.model
approx = objective.approx
if n_samples <= 0:
raise ValueError('n_samples must be positive')
return _vi_diagnostics(var_param, model, approx, n_samples)
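# A hedged sketch of running the diagnostics on a fitted parameter
# (var_param is assumed to be the optimum returned by the optimizer, and
# results the dict returned by bbvi above):
#
#   diag = vi_diagnostics(var_param, objective=results['objective'])
#   if diag['khat'] <= 0.7:
#       smoothed_log_weights = diag['smoothed_log_weights']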
def _vi_diagnostics(var_param, model, approx, n_samples):
# first check Pareto k-hat
samples, smoothed_log_weights, khat = psis_correction(var_param, model, approx, n_samples)
results = dict(samples=samples,
smoothed_log_weights=smoothed_log_weights,
khat=khat)
print('Pareto k is estimated to be khat = {:.2f}'.format(results['khat']))
if results['khat'] > 0.7:
print('WARNING: khat > 0.7 means importance sampling is not feasible.')
print('WARNING: not running further diagnostics')
return results
print()
# if k-hat looks good, check other diagnostics
if approx.supports_pth_moment(2) and approx.supports_pth_moment(4):
def moment_bound_fn(p):
return approx.pth_moment(var_param, p)
else:
moment_bound_fn = None
_, q_var = approx.mean_and_cov(var_param)
results.update(all_diagnostics(smoothed_log_weights,
samples=samples,
moment_bound_fn=moment_bound_fn,
q_var=q_var))
print('The 2-divergence is estimated to be d2 = {:.2g}'.format(results['d2']))
if results['d2'] > 4.6: # pragma: no cover
print('WARNING: d2 > 4.6 means the approximation is very inaccurate')
elif results['d2'] > 0.1:
print('WARNING: 0.1 < d2 < 4.6 means the approximation is somewhat '
'inaccurate. Use importance sampling to decrease error.')
else:
print('\nAll diagnostics pass.')
return results
def psis_correction(var_param, model, approx, n_samples):
samples, log_weights = samples_and_log_weights(var_param, model, approx, n_samples)
smoothed_log_weights, khat = psislw(log_weights, overwrite_lw=True)
return samples.T, smoothed_log_weights, khat
def samples_and_log_weights(var_param, model, approx, n_samples):
samples = approx.sample(var_param, n_samples)
log_weights = model(samples) - approx.log_density(var_param, samples)
return samples, log_weights
| 39.110465
| 94
| 0.662405
|
from viabel._psis import psislw
from viabel.approximations import MFGaussian
from viabel.diagnostics import all_diagnostics
from viabel.models import Model, StanModel
from viabel.objectives import ExclusiveKL
from viabel.optimization import FASO, RMSProp
__all__ = [
'bbvi',
'vi_diagnostics',
]
def bbvi(dimension, *, n_iters=10000, num_mc_samples=10, log_density=None,
approx=None, objective=None, fit=None, adaptive=True,
init_var_param=None, learning_rate=0.01,
RMS_kwargs=dict(), FASO_kwargs=dict()):
if objective is not None:
if fit is not None or log_density is not None or approx is not None:
raise ValueError(
'if objective is specified, cannot specify fit, log_density, or approx')
approx = objective.approx
model = objective.model
else:
if log_density is None:
if fit is None:
raise ValueError(
'either log_density or fit must be specified if objective not given')
model = StanModel(fit)
elif fit is None:
model = Model(log_density)
else:
raise ValueError('log_density and fit cannot both be specified')
if approx is None:
approx = MFGaussian(dimension)
objective = ExclusiveKL(approx, model, num_mc_samples)
if init_var_param is None:
init_var_param = approx.init_param()
base_opt = RMSProp(learning_rate, **RMS_kwargs)
if adaptive:
opt = FASO(base_opt, **FASO_kwargs)
else:
opt = base_opt
opt_results = opt.optimize(n_iters, objective, init_var_param)
opt_results['objective'] = objective
return opt_results
def vi_diagnostics(var_param, *, objective=None, model=None, approx=None, n_samples=100000):
if objective is None:
if model is None or approx is None:
raise ValueError('either objective or both model and approx must be specified')
elif model is not None or approx is not None:
raise ValueError('model and/or approx cannot be specified if objective is')
else:
model = objective.model
approx = objective.approx
if n_samples <= 0:
raise ValueError('n_samples must be positive')
return _vi_diagnostics(var_param, model, approx, n_samples)
def _vi_diagnostics(var_param, model, approx, n_samples):
samples, smoothed_log_weights, khat = psis_correction(var_param, model, approx, n_samples)
results = dict(samples=samples,
smoothed_log_weights=smoothed_log_weights,
khat=khat)
print('Pareto k is estimated to be khat = {:.2f}'.format(results['khat']))
if results['khat'] > 0.7:
print('WARNING: khat > 0.7 means importance sampling is not feasible.')
print('WARNING: not running further diagnostics')
return results
print()
if approx.supports_pth_moment(2) and approx.supports_pth_moment(4):
def moment_bound_fn(p):
return approx.pth_moment(var_param, p)
else:
moment_bound_fn = None
_, q_var = approx.mean_and_cov(var_param)
results.update(all_diagnostics(smoothed_log_weights,
samples=samples,
moment_bound_fn=moment_bound_fn,
q_var=q_var))
print('The 2-divergence is estimated to be d2 = {:.2g}'.format(results['d2']))
if results['d2'] > 4.6:
print('WARNING: d2 > 4.6 means the approximation is very inaccurate')
elif results['d2'] > 0.1:
print('WARNING: 0.1 < d2 < 4.6 means the approximation is somewhat '
'inaccurate. Use importance sampling to decrease error.')
else:
print('\nAll diagnostics pass.')
return results
def psis_correction(var_param, model, approx, n_samples):
samples, log_weights = samples_and_log_weights(var_param, model, approx, n_samples)
smoothed_log_weights, khat = psislw(log_weights, overwrite_lw=True)
return samples.T, smoothed_log_weights, khat
def samples_and_log_weights(var_param, model, approx, n_samples):
samples = approx.sample(var_param, n_samples)
log_weights = model(samples) - approx.log_density(var_param, samples)
return samples, log_weights
| true
| true
|
1c3e64579dc9a5b29cd306056b4a6227f1271f5c
| 6,026
|
py
|
Python
|
monai/metrics/froc.py
|
benduffy1/MONAI
|
046e625b09262261373d7b8039fb652547201368
|
[
"Apache-2.0"
] | 3
|
2020-06-22T20:59:14.000Z
|
2021-04-09T21:24:45.000Z
|
monai/metrics/froc.py
|
Borda/MONAI
|
e0db5a564225a7cb62e7a23df97267019006302f
|
[
"Apache-2.0"
] | null | null | null |
monai/metrics/froc.py
|
Borda/MONAI
|
e0db5a564225a7cb62e7a23df97267019006302f
|
[
"Apache-2.0"
] | 1
|
2020-06-22T19:22:59.000Z
|
2020-06-22T19:22:59.000Z
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
def compute_fp_tp_probs(
probs: Union[np.ndarray, torch.Tensor],
y_coord: Union[np.ndarray, torch.Tensor],
x_coord: Union[np.ndarray, torch.Tensor],
evaluation_mask: Union[np.ndarray, torch.Tensor],
labels_to_exclude: Optional[List] = None,
resolution_level: int = 0,
):
"""
This function is modified from the official evaluation code of
`CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to distinguish
true positive and false positive predictions. A true positive prediction is defined when
the detection point is within the annotated ground truth region.
Args:
probs: an array with shape (n,) that represents the probabilities of the detections.
Where, n is the number of predicted detections.
y_coord: an array with shape (n,) that represents the Y-coordinates of the detections.
x_coord: an array with shape (n,) that represents the X-coordinates of the detections.
evaluation_mask: the ground truth mask for evaluation.
labels_to_exclude: labels in this list will not be counted for metric calculation.
resolution_level: the level at which the evaluation mask is made.
Returns:
fp_probs: an array that contains the probabilities of the false positive detections.
tp_probs: an array that contains the probabilities of the True positive detections.
num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation.
"""
if not (probs.shape == y_coord.shape == x_coord.shape):
raise AssertionError("the shapes for coordinates and probabilities should be the same.")
if isinstance(probs, torch.Tensor):
probs = probs.detach().cpu().numpy()
if isinstance(y_coord, torch.Tensor):
y_coord = y_coord.detach().cpu().numpy()
if isinstance(x_coord, torch.Tensor):
x_coord = x_coord.detach().cpu().numpy()
if isinstance(evaluation_mask, torch.Tensor):
evaluation_mask = evaluation_mask.detach().cpu().numpy()
if labels_to_exclude is None:
labels_to_exclude = []
max_label = np.max(evaluation_mask)
tp_probs = np.zeros((max_label,), dtype=np.float32)
y_coord = (y_coord / pow(2, resolution_level)).astype(int)
x_coord = (x_coord / pow(2, resolution_level)).astype(int)
hittedlabel = evaluation_mask[y_coord, x_coord]
fp_probs = probs[np.where(hittedlabel == 0)]
for i in range(1, max_label + 1):
if i not in labels_to_exclude and i in hittedlabel:
tp_probs[i - 1] = probs[np.where(hittedlabel == i)].max()
num_targets = max_label - len(labels_to_exclude)
return fp_probs, tp_probs, num_targets
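# A small synthetic example of the function above (values illustrative):
#
#   mask = np.zeros((100, 100), dtype=np.int32)
#   mask[10:20, 10:20] = 1                # one annotated target, label 1
#   probs = np.array([0.9, 0.3])
#   y = np.array([15, 80])                # first detection hits the target
#   x = np.array([15, 80])                # second detection is a false positive
#   fp, tp, n = compute_fp_tp_probs(probs, y, x, mask)
#   # fp == [0.3], tp == [0.9], n == 1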
def compute_froc_curve_data(
fp_probs: Union[np.ndarray, torch.Tensor],
tp_probs: Union[np.ndarray, torch.Tensor],
num_targets: int,
num_images: int,
):
"""
This function is modified from the official evaluation code of
`CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to compute
the required data for plotting the Free Response Operating Characteristic (FROC) curve.
Args:
fp_probs: an array that contains the probabilities of the false positive detections for all
images under evaluation.
tp_probs: an array that contains the probabilities of the True positive detections for all
images under evaluation.
num_targets: the total number of targets (excluding `labels_to_exclude`) for all images under evaluation.
num_images: the number of images under evaluation.
"""
if not isinstance(fp_probs, type(tp_probs)):
raise AssertionError("fp and tp probs should have same type.")
if isinstance(fp_probs, torch.Tensor):
fp_probs = fp_probs.detach().cpu().numpy()
if isinstance(tp_probs, torch.Tensor):
tp_probs = tp_probs.detach().cpu().numpy()
total_fps, total_tps = [], []
all_probs = sorted(set(list(fp_probs) + list(tp_probs)))
for thresh in all_probs[1:]:
total_fps.append((fp_probs >= thresh).sum())
total_tps.append((tp_probs >= thresh).sum())
total_fps.append(0)
total_tps.append(0)
fps_per_image = np.asarray(total_fps) / float(num_images)
total_sensitivity = np.asarray(total_tps) / float(num_targets)
return fps_per_image, total_sensitivity
def compute_froc_score(
fps_per_image: np.ndarray, total_sensitivity: np.ndarray, eval_thresholds: Tuple = (0.25, 0.5, 1, 2, 4, 8)
):
"""
This function is modified from the official evaluation code of
`CAMELYON 16 Challenge <https://camelyon16.grand-challenge.org/>`_, and used to compute
the challenge's second evaluation metric, which is defined as the average sensitivity at
the predefined false positive rates per whole slide image.
Args:
fps_per_image: the average number of false positives per image for different thresholds.
total_sensitivity: sensitivities (true positive rates) for different thresholds.
eval_thresholds: the false positive rates for calculating the average sensitivity. Defaults
to (0.25, 0.5, 1, 2, 4, 8) which is the same as the CAMELYON 16 Challenge.
"""
interp_sens = np.interp(eval_thresholds, fps_per_image[::-1], total_sensitivity[::-1])
return np.mean(interp_sens)
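# A hedged end-to-end sketch chaining the helpers above (fp, tp, n as in the
# compute_fp_tp_probs example; num_images is illustrative):
#
#   fps, sens = compute_froc_curve_data(fp, tp, num_targets=n, num_images=1)
#   score = compute_froc_score(fps, sens)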
| 44.308824
| 113
| 0.710919
|
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
def compute_fp_tp_probs(
probs: Union[np.ndarray, torch.Tensor],
y_coord: Union[np.ndarray, torch.Tensor],
x_coord: Union[np.ndarray, torch.Tensor],
evaluation_mask: Union[np.ndarray, torch.Tensor],
labels_to_exclude: Optional[List] = None,
resolution_level: int = 0,
):
if not (probs.shape == y_coord.shape == x_coord.shape):
raise AssertionError("the shapes for coordinates and probabilities should be the same.")
if isinstance(probs, torch.Tensor):
probs = probs.detach().cpu().numpy()
if isinstance(y_coord, torch.Tensor):
y_coord = y_coord.detach().cpu().numpy()
if isinstance(x_coord, torch.Tensor):
x_coord = x_coord.detach().cpu().numpy()
if isinstance(evaluation_mask, torch.Tensor):
evaluation_mask = evaluation_mask.detach().cpu().numpy()
if labels_to_exclude is None:
labels_to_exclude = []
max_label = np.max(evaluation_mask)
tp_probs = np.zeros((max_label,), dtype=np.float32)
y_coord = (y_coord / pow(2, resolution_level)).astype(int)
x_coord = (x_coord / pow(2, resolution_level)).astype(int)
hittedlabel = evaluation_mask[y_coord, x_coord]
fp_probs = probs[np.where(hittedlabel == 0)]
for i in range(1, max_label + 1):
if i not in labels_to_exclude and i in hittedlabel:
tp_probs[i - 1] = probs[np.where(hittedlabel == i)].max()
num_targets = max_label - len(labels_to_exclude)
return fp_probs, tp_probs, num_targets
def compute_froc_curve_data(
fp_probs: Union[np.ndarray, torch.Tensor],
tp_probs: Union[np.ndarray, torch.Tensor],
num_targets: int,
num_images: int,
):
if not isinstance(fp_probs, type(tp_probs)):
raise AssertionError("fp and tp probs should have same type.")
if isinstance(fp_probs, torch.Tensor):
fp_probs = fp_probs.detach().cpu().numpy()
if isinstance(tp_probs, torch.Tensor):
tp_probs = tp_probs.detach().cpu().numpy()
total_fps, total_tps = [], []
all_probs = sorted(set(list(fp_probs) + list(tp_probs)))
for thresh in all_probs[1:]:
total_fps.append((fp_probs >= thresh).sum())
total_tps.append((tp_probs >= thresh).sum())
total_fps.append(0)
total_tps.append(0)
fps_per_image = np.asarray(total_fps) / float(num_images)
total_sensitivity = np.asarray(total_tps) / float(num_targets)
return fps_per_image, total_sensitivity
def compute_froc_score(
fps_per_image: np.ndarray, total_sensitivity: np.ndarray, eval_thresholds: Tuple = (0.25, 0.5, 1, 2, 4, 8)
):
interp_sens = np.interp(eval_thresholds, fps_per_image[::-1], total_sensitivity[::-1])
return np.mean(interp_sens)
| true
| true
|
1c3e64e83644603f93bba54d178f8945f101db1b
| 821
|
py
|
Python
|
migrations/versions/2021_112212_d0ccd9d7ac0c_.py
|
fareszr/app
|
1f3bc693eccb307234e53653f6fa2cb25ddc0647
|
[
"MIT"
] | null | null | null |
migrations/versions/2021_112212_d0ccd9d7ac0c_.py
|
fareszr/app
|
1f3bc693eccb307234e53653f6fa2cb25ddc0647
|
[
"MIT"
] | null | null | null |
migrations/versions/2021_112212_d0ccd9d7ac0c_.py
|
fareszr/app
|
1f3bc693eccb307234e53653f6fa2cb25ddc0647
|
[
"MIT"
] | null | null | null |
"""empty message
Revision ID: d0ccd9d7ac0c
Revises: e7d7ebcea26c
Create Date: 2021-11-22 12:05:31.814178
"""
import sqlalchemy_utils
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd0ccd9d7ac0c'
down_revision = 'e7d7ebcea26c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('fido', sa.Column('user_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'fido', 'users', ['user_id'], ['id'], ondelete='cascade')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'fido', type_='foreignkey')
op.drop_column('fido', 'user_id')
# ### end Alembic commands ###
| 25.65625
| 89
| 0.690621
|
import sqlalchemy_utils
from alembic import op
import sqlalchemy as sa
revision = 'd0ccd9d7ac0c'
down_revision = 'e7d7ebcea26c'
branch_labels = None
depends_on = None
def upgrade():
    op.add_column('fido', sa.Column('user_id', sa.Integer(), nullable=True))
    op.create_foreign_key(None, 'fido', 'users', ['user_id'], ['id'], ondelete='cascade')
def downgrade():
    op.drop_constraint(None, 'fido', type_='foreignkey')
    op.drop_column('fido', 'user_id')
| true
| true
|
1c3e66fb87abebcf535ca1b30284269ed5319bb3
| 11,738
|
py
|
Python
|
yolov5/utils/augmentations.py
|
tstls/TSB_AI_Vision
|
f11a2f6c6ee6f275d950c95f8c2fbf519aadcce6
|
[
"MIT"
] | null | null | null |
yolov5/utils/augmentations.py
|
tstls/TSB_AI_Vision
|
f11a2f6c6ee6f275d950c95f8c2fbf519aadcce6
|
[
"MIT"
] | null | null | null |
yolov5/utils/augmentations.py
|
tstls/TSB_AI_Vision
|
f11a2f6c6ee6f275d950c95f8c2fbf519aadcce6
|
[
"MIT"
] | null | null | null |
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Image augmentation functions
"""
import logging
import math
import random
import cv2
import numpy as np
from utils.general import check_version, colorstr, resample_segments, segment2box
from utils.metrics import bbox_ioa
class Albumentations:
# YOLOv5 Albumentations class (optional, only used if package is installed)
def __init__(self):
self.transform = None
try:
import albumentations as A
check_version(A.__version__, '1.0.3', hard=True) # version requirement
self.transform = A.Compose([
A.Blur(p=0.01),
A.MedianBlur(p=0.01),
A.ToGray(p=0.01),
A.CLAHE(p=0.01),
A.RandomBrightnessContrast(p=0.0),
A.RandomGamma(p=0.0),
A.ImageCompression(quality_lower=75, p=0.0)],
bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
except ImportError: # package not installed, skip
pass
except Exception as e:
logging.info(colorstr('albumentations: ') + f'{e}')
def __call__(self, im, labels, p=1.0):
if self.transform and random.random() < p:
new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0]) # transformed
im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
return im, labels
def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
# HSV color-space augmentation
if hgain or sgain or vgain:
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
dtype = im.dtype # uint8
x = np.arange(0, 256, dtype=r.dtype)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im) # no return needed
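# Hedged usage note: augment_hsv modifies the image in place, which is why it
# returns nothing. A minimal call (path and gains illustrative):
#
#   img = cv2.imread('image.jpg')
#   augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4)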
def hist_equalize(im, clahe=True, bgr=False):
# Equalize histogram on BGR image 'im' with im.shape(n,m,3) and range 0-255
yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0]) # equalize Y channel histogram
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB) # convert YUV image to RGB
def replicate(im, labels):
# Replicate labels
h, w = im.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b] # im4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return im, labels
def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = im.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better val mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, stride), np.mod(dh, stride) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return im, ratio, (dw, dh)
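# A worked example of the padding arithmetic above (numbers illustrative):
# letterboxing a 720x1280 image to 640x640 with auto=True, stride=32 gives
# r = min(640/720, 640/1280) = 0.5, new_unpad = (640, 360), dw = 0 and
# dh = 640 - 360 = 280, reduced mod 32 to 24, i.e. 12 px of padding on top
# and bottom and a final shape of (384, 640).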
def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(0.1, 0.1), scale=(0.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = im.shape[0] + border[0] * 2 # shape(h,w,c)
width = im.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -im.shape[1] / 2 # x translation (pixels)
C[1, 2] = -im.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(im[:, :, ::-1]) # base
# ax[1].imshow(im2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments: # warp segments
segments = resample_segments(segments) # upsample
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T # transform
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2] # perspective rescale or affine
# clip
new[i] = segment2box(xy, width, height)
else: # warp boxes
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8) # perspective rescale or affine
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# clip
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return im, targets
def copy_paste(im, labels, segments, p=0.5):
# Implement Copy-Paste augmentation https://arxiv.org/abs/2012.07177, labels as nx5 np.array(cls, xyxy)
n = len(segments)
if p and n:
h, w, c = im.shape # height, width, channels
im_new = np.zeros(im.shape, np.uint8)
for j in random.sample(range(n), k=round(p * n)):
l, s = labels[j], segments[j]
box = w - l[3], l[2], w - l[1], l[4]
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
if (ioa < 0.30).all(): # allow 30% obscuration of existing labels
labels = np.concatenate((labels, [[l[0], *box]]), 0)
segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
result = cv2.bitwise_and(src1=im, src2=im_new)
result = cv2.flip(result, 1) # augment segments (flip left-right)
i = result > 0 # pixels to replace
# i[:, :] = result.max(2).reshape(h, w, 1) # act over ch
im[i] = result[i] # cv2.imwrite('debug.jpg', im) # debug
return im, labels, segments
def cutout(im, labels, p=0.5):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
if random.random() < p:
h, w = im.shape[:2]
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s)) # create random masks
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
def mixup(im, labels, im2, labels2):
# Applies MixUp augmentation https://arxiv.org/pdf/1710.09412.pdf
r = np.random.beta(32.0, 32.0) # mixup ratio, alpha=beta=32.0
im = (im * r + im2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
return im, labels
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr) # candidates
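# A small numeric check of box_candidates (hedged smoke test; values
# illustrative, shapes follow the (4, n) convention noted above):
if __name__ == "__main__":
    b1 = np.array([[0.0], [0.0], [10.0], [10.0]])  # one box before augment
    b2 = np.array([[0.0], [0.0], [8.0], [8.0]])    # same box after augment
    print(box_candidates(b1, b2))  # area ratio 0.64 > 0.1 -> [ True]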
| 42.071685
| 118
| 0.567388
|
import logging
import math
import random
import cv2
import numpy as np
from utils.general import check_version, colorstr, resample_segments, segment2box
from utils.metrics import bbox_ioa
class Albumentations:
def __init__(self):
self.transform = None
try:
import albumentations as A
check_version(A.__version__, '1.0.3', hard=True)
self.transform = A.Compose([
A.Blur(p=0.01),
A.MedianBlur(p=0.01),
A.ToGray(p=0.01),
A.CLAHE(p=0.01),
A.RandomBrightnessContrast(p=0.0),
A.RandomGamma(p=0.0),
A.ImageCompression(quality_lower=75, p=0.0)],
bbox_params=A.BboxParams(format='yolo', label_fields=['class_labels']))
logging.info(colorstr('albumentations: ') + ', '.join(f'{x}' for x in self.transform.transforms if x.p))
except ImportError:
pass
except Exception as e:
logging.info(colorstr('albumentations: ') + f'{e}')
def __call__(self, im, labels, p=1.0):
if self.transform and random.random() < p:
new = self.transform(image=im, bboxes=labels[:, 1:], class_labels=labels[:, 0])
im, labels = new['image'], np.array([[c, *b] for c, b in zip(new['class_labels'], new['bboxes'])])
return im, labels
def augment_hsv(im, hgain=0.5, sgain=0.5, vgain=0.5):
if hgain or sgain or vgain:
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1
hue, sat, val = cv2.split(cv2.cvtColor(im, cv2.COLOR_BGR2HSV))
dtype = im.dtype
x = np.arange(0, 256, dtype=r.dtype)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
im_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val)))
cv2.cvtColor(im_hsv, cv2.COLOR_HSV2BGR, dst=im)
def hist_equalize(im, clahe=True, bgr=False):
yuv = cv2.cvtColor(im, cv2.COLOR_BGR2YUV if bgr else cv2.COLOR_RGB2YUV)
if clahe:
c = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
yuv[:, :, 0] = c.apply(yuv[:, :, 0])
else:
yuv[:, :, 0] = cv2.equalizeHist(yuv[:, :, 0])
return cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR if bgr else cv2.COLOR_YUV2RGB)
def replicate(im, labels):
h, w = im.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2
for i in s.argsort()[:round(s.size * 0.5)]:
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw))
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
im[y1a:y2a, x1a:x2a] = im[y1b:y2b, x1b:x2b]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return im, labels
def letterbox(im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
shape = im.shape[:2]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup:
r = min(r, 1.0)
ratio = r, r
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]
if auto:
dw, dh = np.mod(dw, stride), np.mod(dh, stride)
elif scaleFill:
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]
dw /= 2
dh /= 2
if shape[::-1] != new_unpad:
im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)
return im, ratio, (dw, dh)
def random_perspective(im, targets=(), segments=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0,
border=(0, 0)):
height = im.shape[0] + border[0] * 2
width = im.shape[1] + border[1] * 2
C = np.eye(3)
C[0, 2] = -im.shape[1] / 2
C[1, 2] = -im.shape[0] / 2
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective)
P[2, 1] = random.uniform(-perspective, perspective)
R = np.eye(3)
a = random.uniform(-degrees, degrees)
    s = random.uniform(1 - scale, 1 + scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180)
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height
M = T @ S @ R @ P @ C
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any():
if perspective:
im = cv2.warpPerspective(im, M, dsize=(width, height), borderValue=(114, 114, 114))
else:
im = cv2.warpAffine(im, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
n = len(targets)
if n:
use_segments = any(x.any() for x in segments)
new = np.zeros((n, 4))
if use_segments:
segments = resample_segments(segments)
for i, segment in enumerate(segments):
xy = np.ones((len(segment), 3))
xy[:, :2] = segment
xy = xy @ M.T
xy = xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]
new[i] = segment2box(xy, width, height)
else:
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2)
xy = xy @ M.T
xy = (xy[:, :2] / xy[:, 2:3] if perspective else xy[:, :2]).reshape(n, 8)
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
new = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
new[:, [0, 2]] = new[:, [0, 2]].clip(0, width)
new[:, [1, 3]] = new[:, [1, 3]].clip(0, height)
i = box_candidates(box1=targets[:, 1:5].T * s, box2=new.T, area_thr=0.01 if use_segments else 0.10)
targets = targets[i]
targets[:, 1:5] = new[i]
return im, targets
def copy_paste(im, labels, segments, p=0.5):
n = len(segments)
if p and n:
h, w, c = im.shape
im_new = np.zeros(im.shape, np.uint8)
for j in random.sample(range(n), k=round(p * n)):
l, s = labels[j], segments[j]
box = w - l[3], l[2], w - l[1], l[4]
ioa = bbox_ioa(box, labels[:, 1:5])
if (ioa < 0.30).all():
labels = np.concatenate((labels, [[l[0], *box]]), 0)
segments.append(np.concatenate((w - s[:, 0:1], s[:, 1:2]), 1))
cv2.drawContours(im_new, [segments[j].astype(np.int32)], -1, (255, 255, 255), cv2.FILLED)
result = cv2.bitwise_and(src1=im, src2=im_new)
result = cv2.flip(result, 1)
i = result > 0
        im[i] = result[i]
    return im, labels, segments
def cutout(im, labels, p=0.5):
if random.random() < p:
h, w = im.shape[:2]
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
im[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5])
labels = labels[ioa < 0.60]
return labels
def mixup(im, labels, im2, labels2):
r = np.random.beta(32.0, 32.0)
im = (im * r + im2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
return im, labels
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.1, eps=1e-16):
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + eps), h2 / (w2 + eps))
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + eps) > area_thr) & (ar < ar_thr)
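# A hedged demo of mixup() above (added sketch, not original content): blend
# two dummy frames with a Beta(32, 32) weight and stack their label rows.
_im_a = np.full((64, 64, 3), 255, dtype=np.uint8)
_im_b = np.zeros((64, 64, 3), dtype=np.uint8)
_lab_a = np.array([[0., 10., 10., 30., 30.]])  # class, x1, y1, x2, y2
_lab_b = np.array([[1., 5., 5., 20., 20.]])
_im_mix, _lab_mix = mixup(_im_a, _lab_a, _im_b, _lab_b)
print(_im_mix.mean(), _lab_mix.shape)  # roughly 127 on average, and (2, 5)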
| true
| true
|
1c3e67025a83965a9ac3f68cba1fe13e7efbd30d
| 1,031
|
py
|
Python
|
exercicio 068.py
|
rayanesousa31/Python-Curso-em-video-Mundo-2
|
9f962557b5a373bd2b45509d8990d0658effce9c
|
[
"MIT"
] | null | null | null |
exercicio 068.py
|
rayanesousa31/Python-Curso-em-video-Mundo-2
|
9f962557b5a373bd2b45509d8990d0658effce9c
|
[
"MIT"
] | null | null | null |
exercicio 068.py
|
rayanesousa31/Python-Curso-em-video-Mundo-2
|
9f962557b5a373bd2b45509d8990d0658effce9c
|
[
"MIT"
] | null | null | null |
#Write a program that plays EVEN or ODD against the computer.
#The game ends as soon as the player LOSES, showing the
#total number of consecutive wins achieved by the
#end of the game.
from random import randint
from time import sleep
print('Vamos jogar IMPAR ou par'),sleep(2)
cont = 0
while True:
jogador = int(input('Digite um número '))
comp = randint(1,10)
jogo = comp + jogador
opção = ' '
while opção not in 'PI':
opção = str(input('Você escolhe impar ou par? [I / P] ')).upper().split()[0]
print(f'Você jogou {jogador} e o computador {comp}. Total de {jogo}')
    if opção == 'P':
if jogo % 2 == 0:
print('Parabéns, você venceu')
cont += 1
else:
print('Você perdeu')
break
elif opção == 'I':
if jogo % 2 == 1:
print('Você venceu')
cont += 1
else:
print('Você perdeu')
break
print('Vamos jogar novamente')
print(f'Você venceu {cont} vezes.')
| 27.864865
| 85
| 0.57711
|
from random import randint
from time import sleep
print('Vamos jogar IMPAR ou par'),sleep(2)
cont = 0
while True:
jogador = int(input('Digite um número '))
comp = randint(1,10)
jogo = comp + jogador
opção = ' '
while opção not in 'PI':
opção = str(input('Você escolhe impar ou par? [I / P] ')).upper().split()[0]
print(f'Você jogou {jogador} e o computador {comp}. Total de {jogo}')
    if opção == 'P':
if jogo % 2 == 0:
print('Parabéns, você venceu')
cont += 1
else:
print('Você perdeu')
break
elif opção == 'I':
if jogo % 2 == 1:
print('Você venceu')
cont += 1
else:
print('Você perdeu')
break
print('Vamos jogar novamente')
print(f'Você venceu {cont} vezes.')
| true
| true
|
1c3e6793967ef5707643ead3c0be75ed24cd0414
| 10,245
|
py
|
Python
|
api_tests/nodes/views/test_node_links_detail.py
|
hmoco/osf.io
|
a02869f9b5c198bafae7cea0c216674bbcba62f7
|
[
"Apache-2.0"
] | 1
|
2015-10-02T18:35:53.000Z
|
2015-10-02T18:35:53.000Z
|
api_tests/nodes/views/test_node_links_detail.py
|
hmoco/osf.io
|
a02869f9b5c198bafae7cea0c216674bbcba62f7
|
[
"Apache-2.0"
] | 4
|
2016-05-13T14:24:16.000Z
|
2017-03-30T15:28:31.000Z
|
api_tests/nodes/views/test_node_links_detail.py
|
hmoco/osf.io
|
a02869f9b5c198bafae7cea0c216674bbcba62f7
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from nose.tools import * # flake8: noqa
from urlparse import urlparse
from framework.auth.core import Auth
from website.models import NodeLog
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
)
from tests.utils import assert_logs
node_url_for = lambda n_id: '/{}nodes/{}/'.format(API_BASE, n_id)
class TestNodeLinkDetail(ApiTestCase):
def setUp(self):
super(TestNodeLinkDetail, self).setUp()
self.user = AuthUserFactory()
self.private_project = ProjectFactory(creator=self.user, is_public=False)
self.pointer_project = ProjectFactory(creator=self.user, is_public=False)
self.pointer = self.private_project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
self.private_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.private_project._id, self.pointer._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(creator=self.user, is_public=True)
self.public_pointer_project = ProjectFactory(is_public=True)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.public_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
def test_returns_embedded_public_node_pointer_detail_logged_out(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_returns_public_node_pointer_detail_logged_in(self):
res = self.app.get(self.public_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_returns_private_node_pointer_detail_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 200)
target_node = res.json['data']['embeds']['target_node']
assert_in('errors', target_node)
assert_equal(target_node['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_returns_private_node_pointer_detail_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.pointer_project._id)
def test_returns_private_node_pointer_detail_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 200)
target_node = res.json['data']['embeds']['target_node']
assert_in('errors', target_node)
assert_equal(target_node['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_self_link_points_to_node_link_detail_url(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = res.json['data']['links']['self']
assert_in(self.public_url, url)
def test_node_links_bad_version(self):
url = '{}?version=2.1'.format(self.public_url)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'This feature is deprecated as of version 2.1')
class TestDeleteNodeLink(ApiTestCase):
def setUp(self):
super(TestDeleteNodeLink, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=False)
self.pointer_project = ProjectFactory(creator=self.user, is_public=True)
self.pointer = self.project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
self.private_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.project._id, self.pointer._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.public_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
def test_delete_node_link_no_permissions_for_target_node(self):
pointer_project = ProjectFactory(creator=self.user_two, is_public=False)
pointer = self.public_project.add_pointer(pointer_project, auth=Auth(self.user), save=True)
assert_in(pointer.child, self.public_project.nodes)
url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, pointer._id)
res = self.app.delete_json_api(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 204)
self.public_project.reload()
assert_not_in(pointer, self.public_project.nodes)
def test_cannot_delete_if_registration(self):
registration = RegistrationFactory(project=self.public_project)
url = '/{}registrations/{}/node_links/'.format(
API_BASE,
registration._id,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
pointer_id = res.json['data'][0]['id']
url = '/{}registrations/{}/node_links/{}/'.format(
API_BASE,
registration._id,
pointer_id,
)
res = self.app.delete(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 405)
def test_deletes_public_node_pointer_logged_out(self):
res = self.app.delete(self.public_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0].keys())
def test_deletes_public_node_pointer_fails_if_bad_auth(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete(self.public_url, auth=self.user_two.auth, expect_errors=True)
        # This could arguably be a 405, but we don't need to go crazy with status codes
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
self.public_project.reload()
assert_equal(node_count_before, len(self.public_project.nodes_pointer))
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
def test_deletes_public_node_pointer_succeeds_as_owner(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete(self.public_url, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 204)
assert_equal(node_count_before - 1, len(self.public_project.nodes_pointer))
def test_deletes_private_node_pointer_logged_out(self):
res = self.app.delete(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_deletes_private_node_pointer_logged_in_contributor(self):
res = self.app.delete(self.private_url, auth=self.user.auth)
self.project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
assert_equal(len(self.project.nodes_pointer), 0)
def test_deletes_private_node_pointer_logged_in_non_contributor(self):
res = self.app.delete(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
def test_return_deleted_public_node_pointer(self):
res = self.app.delete(self.public_url, auth=self.user.auth)
self.public_project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
#check that deleted pointer can not be returned
res = self.app.get(self.public_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_return_deleted_private_node_pointer(self):
res = self.app.delete(self.private_url, auth=self.user.auth)
self.project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
#check that deleted pointer can not be returned
res = self.app.get(self.private_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
# Regression test for https://openscience.atlassian.net/browse/OSF-4322
def test_delete_link_that_is_not_linked_to_correct_node(self):
project = ProjectFactory(creator=self.user)
# The node link belongs to a different project
res = self.app.delete(
'/{}nodes/{}/node_links/{}/'.format(API_BASE, project._id, self.public_pointer._id),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 404)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], 'Not found.')
| 48.098592
| 121
| 0.686774
|
from nose.tools import *
from urlparse import urlparse
from framework.auth.core import Auth
from website.models import NodeLog
from api.base.settings.defaults import API_BASE
from tests.base import ApiTestCase
from osf_tests.factories import (
ProjectFactory,
RegistrationFactory,
AuthUserFactory,
)
from tests.utils import assert_logs
node_url_for = lambda n_id: '/{}nodes/{}/'.format(API_BASE, n_id)
class TestNodeLinkDetail(ApiTestCase):
def setUp(self):
super(TestNodeLinkDetail, self).setUp()
self.user = AuthUserFactory()
self.private_project = ProjectFactory(creator=self.user, is_public=False)
self.pointer_project = ProjectFactory(creator=self.user, is_public=False)
self.pointer = self.private_project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
self.private_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.private_project._id, self.pointer._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(creator=self.user, is_public=True)
self.public_pointer_project = ProjectFactory(is_public=True)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.public_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
def test_returns_embedded_public_node_pointer_detail_logged_out(self):
res = self.app.get(self.public_url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
res_json = res.json['data']
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_returns_public_node_pointer_detail_logged_in(self):
res = self.app.get(self.public_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.public_pointer_project._id)
def test_returns_private_node_pointer_detail_logged_out(self):
res = self.app.get(self.private_url, expect_errors=True)
assert_equal(res.status_code, 200)
target_node = res.json['data']['embeds']['target_node']
assert_in('errors', target_node)
assert_equal(target_node['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_returns_private_node_pointer_detail_logged_in_contributor(self):
res = self.app.get(self.private_url, auth=self.user.auth)
res_json = res.json['data']
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
embedded = res_json['embeds']['target_node']['data']['id']
assert_equal(embedded, self.pointer_project._id)
def test_returns_private_node_pointer_detail_logged_in_non_contributor(self):
res = self.app.get(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 200)
target_node = res.json['data']['embeds']['target_node']
assert_in('errors', target_node)
assert_equal(target_node['errors'][0]['detail'], 'You do not have permission to perform this action.')
def test_self_link_points_to_node_link_detail_url(self):
res = self.app.get(self.public_url, auth=self.user.auth)
assert_equal(res.status_code, 200)
url = res.json['data']['links']['self']
assert_in(self.public_url, url)
def test_node_links_bad_version(self):
url = '{}?version=2.1'.format(self.public_url)
res = self.app.get(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
assert_equal(res.json['errors'][0]['detail'], 'This feature is deprecated as of version 2.1')
class TestDeleteNodeLink(ApiTestCase):
def setUp(self):
super(TestDeleteNodeLink, self).setUp()
self.user = AuthUserFactory()
self.project = ProjectFactory(creator=self.user, is_public=False)
self.pointer_project = ProjectFactory(creator=self.user, is_public=True)
self.pointer = self.project.add_pointer(self.pointer_project, auth=Auth(self.user), save=True)
self.private_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.project._id, self.pointer._id)
self.user_two = AuthUserFactory()
self.public_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer_project = ProjectFactory(is_public=True, creator=self.user)
self.public_pointer = self.public_project.add_pointer(self.public_pointer_project,
auth=Auth(self.user),
save=True)
self.public_url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, self.public_pointer._id)
def test_delete_node_link_no_permissions_for_target_node(self):
pointer_project = ProjectFactory(creator=self.user_two, is_public=False)
pointer = self.public_project.add_pointer(pointer_project, auth=Auth(self.user), save=True)
assert_in(pointer.child, self.public_project.nodes)
url = '/{}nodes/{}/node_links/{}/'.format(API_BASE, self.public_project._id, pointer._id)
res = self.app.delete_json_api(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 204)
self.public_project.reload()
assert_not_in(pointer, self.public_project.nodes)
def test_cannot_delete_if_registration(self):
registration = RegistrationFactory(project=self.public_project)
url = '/{}registrations/{}/node_links/'.format(
API_BASE,
registration._id,
)
res = self.app.get(url, auth=self.user.auth)
assert_equal(res.status_code, 200)
pointer_id = res.json['data'][0]['id']
url = '/{}registrations/{}/node_links/{}/'.format(
API_BASE,
registration._id,
pointer_id,
)
res = self.app.delete(url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 405)
def test_deletes_public_node_pointer_logged_out(self):
res = self.app.delete(self.public_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0].keys())
def test_deletes_public_node_pointer_fails_if_bad_auth(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete(self.public_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
self.public_project.reload()
assert_equal(node_count_before, len(self.public_project.nodes_pointer))
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
def test_deletes_public_node_pointer_succeeds_as_owner(self):
node_count_before = len(self.public_project.nodes_pointer)
res = self.app.delete(self.public_url, auth=self.user.auth)
self.public_project.reload()
assert_equal(res.status_code, 204)
assert_equal(node_count_before - 1, len(self.public_project.nodes_pointer))
def test_deletes_private_node_pointer_logged_out(self):
res = self.app.delete(self.private_url, expect_errors=True)
assert_equal(res.status_code, 401)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_deletes_private_node_pointer_logged_in_contributor(self):
res = self.app.delete(self.private_url, auth=self.user.auth)
self.project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
assert_equal(len(self.project.nodes_pointer), 0)
def test_deletes_private_node_pointer_logged_in_non_contributor(self):
res = self.app.delete(self.private_url, auth=self.user_two.auth, expect_errors=True)
assert_equal(res.status_code, 403)
assert_in('detail', res.json['errors'][0])
@assert_logs(NodeLog.POINTER_REMOVED, 'public_project')
def test_return_deleted_public_node_pointer(self):
res = self.app.delete(self.public_url, auth=self.user.auth)
self.public_project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
#check that deleted pointer can not be returned
res = self.app.get(self.public_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
@assert_logs(NodeLog.POINTER_REMOVED, 'project')
def test_return_deleted_private_node_pointer(self):
res = self.app.delete(self.private_url, auth=self.user.auth)
self.project.reload() # Update the model to reflect changes made by post request
assert_equal(res.status_code, 204)
#check that deleted pointer can not be returned
res = self.app.get(self.private_url, auth=self.user.auth, expect_errors=True)
assert_equal(res.status_code, 404)
# Regression test for https://openscience.atlassian.net/browse/OSF-4322
def test_delete_link_that_is_not_linked_to_correct_node(self):
project = ProjectFactory(creator=self.user)
# The node link belongs to a different project
res = self.app.delete(
'/{}nodes/{}/node_links/{}/'.format(API_BASE, project._id, self.public_pointer._id),
auth=self.user.auth,
expect_errors=True
)
assert_equal(res.status_code, 404)
errors = res.json['errors']
assert_equal(len(errors), 1)
assert_equal(errors[0]['detail'], 'Not found.')
| true
| true
|
1c3e69669f57fe674a926d9ade62e5797dbedaae
| 1,266
|
py
|
Python
|
polimorfo/datasets/utils.py
|
chrisPiemonte/polimorfo
|
79e2178dbc21fe3f98e8d84d23f33b818244ab08
|
[
"Apache-2.0"
] | null | null | null |
polimorfo/datasets/utils.py
|
chrisPiemonte/polimorfo
|
79e2178dbc21fe3f98e8d84d23f33b818244ab08
|
[
"Apache-2.0"
] | null | null | null |
polimorfo/datasets/utils.py
|
chrisPiemonte/polimorfo
|
79e2178dbc21fe3f98e8d84d23f33b818244ab08
|
[
"Apache-2.0"
] | null | null | null |
from PIL import Image
from functools import partial
import requests
from io import BytesIO
import multiprocessing
from multiprocessing.dummy import Pool
from tqdm.autonotebook import tqdm
import logging
log = logging.getLogger(__name__)
def download_image(url_path):
"""download an image and save to the destination path
Arguments:
url {str} -- the url of the image
path {Path} -- the base path where the images shold be saved
Keyword Arguments:
timeout {int} -- the timeout in seconds (default: {1})
"""
url, path = url_path
if path.exists():
return
try:
response = requests.get(url, timeout=15, allow_redirects=False)
if response.ok:
img = Image.open(BytesIO(response.content)).convert('RGB')
img.save(path, "JPEG", optimize=True)
response.close()
except Exception as ex:
        log.debug('error processing the url %s: %s', url, ex)
def process_images(urls_filepath, timeout):
parallelism = multiprocessing.cpu_count() // 2
with Pool(parallelism) as pool:
with tqdm(total=len(urls_filepath), desc='download images') as pbar:
for _ in pool.imap_unordered(download_image, urls_filepath):
pbar.update()
| 29.44186
| 76
| 0.669036
|
from PIL import Image
from functools import partial
import requests
from io import BytesIO
import multiprocessing
from multiprocessing.dummy import Pool
from tqdm.autonotebook import tqdm
import logging
log = logging.getLogger(__name__)
def download_image(url_path):
url, path = url_path
if path.exists():
return
try:
response = requests.get(url, timeout=15, allow_redirects=False)
if response.ok:
img = Image.open(BytesIO(response.content)).convert('RGB')
img.save(path, "JPEG", optimize=True)
response.close()
except Exception as ex:
        log.debug('error processing the url %s: %s', url, ex)
def process_images(urls_filepath, timeout):
parallelism = multiprocessing.cpu_count() // 2
with Pool(parallelism) as pool:
with tqdm(total=len(urls_filepath), desc='download images') as pbar:
for _ in pool.imap_unordered(download_image, urls_filepath):
pbar.update()
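# Hypothetical caller sketch for process_images() (added for illustration):
# each work item is a (url, Path) pair so download_image() can unpack it.
# The URLs below are placeholders; note the timeout argument is accepted but
# currently unused by process_images itself.
from pathlib import Path
_out = Path("downloads")
_out.mkdir(exist_ok=True)
_urls = ["https://example.com/a.jpg", "https://example.com/b.jpg"]
process_images([(u, _out / u.rsplit("/", 1)[-1]) for u in _urls], timeout=15)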
| true
| true
|
1c3e6a3d37aeaa774ede07ce507303532ab60719
| 888
|
py
|
Python
|
pixelate/__init__.py
|
useless-tools/pixelate
|
5e964c1a77780f933db20b1424807e59e899a427
|
[
"BSD-3-Clause"
] | 23
|
2017-10-18T15:31:30.000Z
|
2022-02-01T14:50:28.000Z
|
pixelate/__init__.py
|
useless-tools/pixelate
|
5e964c1a77780f933db20b1424807e59e899a427
|
[
"BSD-3-Clause"
] | 1
|
2021-04-06T05:15:29.000Z
|
2022-02-18T15:07:06.000Z
|
pixelate/__init__.py
|
useless-tools/pixelate
|
5e964c1a77780f933db20b1424807e59e899a427
|
[
"BSD-3-Clause"
] | 5
|
2018-11-30T21:05:25.000Z
|
2021-12-23T23:47:43.000Z
|
from PIL import Image
def pixelate(input_file_path: str, output_file_path: str, pixel_size: int):
"""
Create a pixel image from the input image.
Args:
input_file_path: the path to the source image file to be processed.
output_file_path: the path to the result file.
pixel_size: pixel size.
Raises:
FileNotFoundError: if `input_file_path` does not exist.
TypeError: if `pixel_size` is not int.
ValueError: if `pixel_size` is not correct int.
"""
with Image.open(input_file_path) as image:
image = image.resize(
(image.size[0] // pixel_size, image.size[1] // pixel_size),
Image.NEAREST
)
image = image.resize(
(image.size[0] * pixel_size, image.size[1] * pixel_size),
Image.NEAREST
)
image.save(output_file_path)
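# Usage sketch with assumed file names: NEAREST down- then up-sampling turns
# every 8x8 block of the source image into one flat "pixel" in the output.
pixelate("input.jpg", "output.jpg", pixel_size=8)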
| 30.62069
| 75
| 0.611486
|
from PIL import Image
def pixelate(input_file_path: str, output_file_path: str, pixel_size: int):
with Image.open(input_file_path) as image:
image = image.resize(
(image.size[0] // pixel_size, image.size[1] // pixel_size),
Image.NEAREST
)
image = image.resize(
(image.size[0] * pixel_size, image.size[1] * pixel_size),
Image.NEAREST
)
image.save(output_file_path)
| true
| true
|
1c3e6c6bab0854f4b3751c24a38dc7909dfdff70
| 5,736
|
py
|
Python
|
src/pop3sf/adapters/DirectoryAdapterBase.py
|
vitlabuda/pop3sf
|
3792b98da329fa8091308f3363808a499af58ad7
|
[
"BSD-3-Clause"
] | 1
|
2022-03-16T18:58:19.000Z
|
2022-03-16T18:58:19.000Z
|
src/pop3sf/adapters/DirectoryAdapterBase.py
|
vitlabuda/pop3sf
|
3792b98da329fa8091308f3363808a499af58ad7
|
[
"BSD-3-Clause"
] | null | null | null |
src/pop3sf/adapters/DirectoryAdapterBase.py
|
vitlabuda/pop3sf
|
3792b98da329fa8091308f3363808a499af58ad7
|
[
"BSD-3-Clause"
] | 1
|
2022-03-17T18:01:43.000Z
|
2022-03-17T18:01:43.000Z
|
# SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2021 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
from typing import List
import abc
import time
import os
import os.path
import glob
import fcntl
import hashlib
from .AdapterAuxiliaries import AdapterAuxiliaries
from .AdapterBase import AdapterBase
class DirectoryAdapterBase(AdapterBase, metaclass=abc.ABCMeta):
class _MessageIndexEntry:
def __init__(self, path: str, last_modified_epoch: float, is_plaintext: bool):
self.path: str = path
self.last_modified_epoch: float = last_modified_epoch
self.last_modified: time.struct_time = time.gmtime(last_modified_epoch)
self.is_plaintext: bool = is_plaintext
self.marked_as_deleted: bool = False
def __init__(self, directory_path: str):
self._directory_path: str = directory_path
os.makedirs(directory_path, exist_ok=True)
self._message_index: List[DirectoryAdapterBase._MessageIndexEntry] = []
def login_successful(self, username: str, read_only: bool) -> None:
self._message_index = self._generate_message_index()
@abc.abstractmethod
def _generate_message_index(self) -> List[DirectoryAdapterBase._MessageIndexEntry]:
raise NotImplementedError("The _generate_message_index() method must be overridden prior to calling it!")
def get_message_count(self) -> int:
return len(self._message_index)
def get_message_content(self, index: int, encoding: str) -> str:
message = self._message_index[index]
with open(message.path) as file:
fcntl.flock(file.fileno(), fcntl.LOCK_SH)
content = file.read()
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
if message.is_plaintext:
subject = "Plaintext file {}".format(self.get_message_unique_id(index)[0:8])
from_to = AdapterAuxiliaries.generate_from_to_email_address("nobody")
return AdapterAuxiliaries.wrap_plaintext_in_email(content, subject, from_to, from_to, message.last_modified)
return content
def is_message_marked_as_deleted(self, index: int) -> bool:
return self._message_index[index].marked_as_deleted
def mark_message_as_deleted(self, index: int) -> None:
self._message_index[index].marked_as_deleted = True
def unmark_messages_marked_as_deleted(self) -> None:
for message in self._message_index:
message.marked_as_deleted = False
def commit_deletions(self) -> None:
for message in self._message_index:
if message.marked_as_deleted:
os.remove(message.path)
def get_message_unique_id(self, index: int) -> str:
# The file's path is always unique and shouldn't change (at least this program doesn't move the file)
# The last modified timestamp is used to detect changes
message = self._message_index[index]
hashed_string = "{}{}".format(message.path, message.last_modified_epoch).encode("utf-8")
return hashlib.sha256(hashed_string).hexdigest()
def _generate_message_index_using_full_directory_path(self, full_directory_path: str) -> List[DirectoryAdapterBase._MessageIndexEntry]:
message_index = []
paths = sorted(glob.glob(os.path.join(full_directory_path, "*")))
absolute_file_paths_iter = filter(os.path.isfile, map(os.path.abspath, paths))
for filepath in absolute_file_paths_iter:
last_modified_epoch = os.stat(filepath).st_mtime
if filepath.endswith(".eml"):
message_index.append(DirectoryAdapterBase._MessageIndexEntry(filepath, last_modified_epoch, False))
elif filepath.endswith(".txt"):
message_index.append(DirectoryAdapterBase._MessageIndexEntry(filepath, last_modified_epoch, True))
# The messages are sorted by last modification dates of their files; thus, if a new message is added, it will be at the end of the message list.
# If more messages have the same modification date (this is very rare, though), those messages are sorted by their filenames.
message_index.sort(key=lambda item: item.last_modified_epoch)
return message_index
| 47.404959
| 152
| 0.731695
|
from __future__ import annotations
from typing import List
import abc
import time
import os
import os.path
import glob
import fcntl
import hashlib
from .AdapterAuxiliaries import AdapterAuxiliaries
from .AdapterBase import AdapterBase
class DirectoryAdapterBase(AdapterBase, metaclass=abc.ABCMeta):
class _MessageIndexEntry:
def __init__(self, path: str, last_modified_epoch: float, is_plaintext: bool):
self.path: str = path
self.last_modified_epoch: float = last_modified_epoch
self.last_modified: time.struct_time = time.gmtime(last_modified_epoch)
self.is_plaintext: bool = is_plaintext
self.marked_as_deleted: bool = False
def __init__(self, directory_path: str):
self._directory_path: str = directory_path
os.makedirs(directory_path, exist_ok=True)
self._message_index: List[DirectoryAdapterBase._MessageIndexEntry] = []
def login_successful(self, username: str, read_only: bool) -> None:
self._message_index = self._generate_message_index()
@abc.abstractmethod
def _generate_message_index(self) -> List[DirectoryAdapterBase._MessageIndexEntry]:
raise NotImplementedError("The _generate_message_index() method must be overridden prior to calling it!")
def get_message_count(self) -> int:
return len(self._message_index)
def get_message_content(self, index: int, encoding: str) -> str:
message = self._message_index[index]
with open(message.path) as file:
fcntl.flock(file.fileno(), fcntl.LOCK_SH)
content = file.read()
fcntl.flock(file.fileno(), fcntl.LOCK_UN)
if message.is_plaintext:
subject = "Plaintext file {}".format(self.get_message_unique_id(index)[0:8])
from_to = AdapterAuxiliaries.generate_from_to_email_address("nobody")
return AdapterAuxiliaries.wrap_plaintext_in_email(content, subject, from_to, from_to, message.last_modified)
return content
def is_message_marked_as_deleted(self, index: int) -> bool:
return self._message_index[index].marked_as_deleted
def mark_message_as_deleted(self, index: int) -> None:
self._message_index[index].marked_as_deleted = True
def unmark_messages_marked_as_deleted(self) -> None:
for message in self._message_index:
message.marked_as_deleted = False
def commit_deletions(self) -> None:
for message in self._message_index:
if message.marked_as_deleted:
os.remove(message.path)
def get_message_unique_id(self, index: int) -> str:
# The last modified timestamp is used to detect changes
message = self._message_index[index]
hashed_string = "{}{}".format(message.path, message.last_modified_epoch).encode("utf-8")
return hashlib.sha256(hashed_string).hexdigest()
def _generate_message_index_using_full_directory_path(self, full_directory_path: str) -> List[DirectoryAdapterBase._MessageIndexEntry]:
message_index = []
paths = sorted(glob.glob(os.path.join(full_directory_path, "*")))
absolute_file_paths_iter = filter(os.path.isfile, map(os.path.abspath, paths))
for filepath in absolute_file_paths_iter:
last_modified_epoch = os.stat(filepath).st_mtime
if filepath.endswith(".eml"):
message_index.append(DirectoryAdapterBase._MessageIndexEntry(filepath, last_modified_epoch, False))
elif filepath.endswith(".txt"):
message_index.append(DirectoryAdapterBase._MessageIndexEntry(filepath, last_modified_epoch, True))
# The messages are sorted by last modification dates of their files; thus, if a new message is added, it will be at the end of the message list.
# If more messages have the same modification date (this is very rare, though), those messages are sorted by their filenames.
message_index.sort(key=lambda item: item.last_modified_epoch)
return message_index
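# A minimal concrete-subclass sketch (added for illustration; any further
# AdapterBase requirements such as authentication hooks are omitted): index
# every *.eml / *.txt file directly in the configured directory using the
# helper the base class provides.
class FlatDirectoryAdapter(DirectoryAdapterBase):
    def _generate_message_index(self):
        return self._generate_message_index_using_full_directory_path(self._directory_path)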
| true
| true
|
1c3e6d3b562e4192a449eec58aedf7c4a9e1563e
| 2,568
|
py
|
Python
|
environment/script/edit_dot_graph.py
|
computationalgeography/lue
|
71993169bae67a9863d7bd7646d207405dc6f767
|
[
"MIT"
] | 2
|
2021-02-26T22:45:56.000Z
|
2021-05-02T10:28:48.000Z
|
environment/script/edit_dot_graph.py
|
pcraster/lue
|
e64c18f78a8b6d8a602b7578a2572e9740969202
|
[
"MIT"
] | 262
|
2016-08-11T10:12:02.000Z
|
2020-10-13T18:09:16.000Z
|
environment/script/edit_dot_graph.py
|
computationalgeography/lue
|
71993169bae67a9863d7bd7646d207405dc6f767
|
[
"MIT"
] | 1
|
2020-03-11T09:49:41.000Z
|
2020-03-11T09:49:41.000Z
|
#!/usr/bin/env python
import functools
import os.path
import re
import sys
import traceback
import docopt
def checked_call(
function):
@functools.wraps(function)
def wrapper(
*args,
**kwargs):
result = 0
try:
result = function(*args, **kwargs)
except:
traceback.print_exc(file=sys.stderr)
result = 1
return 0 if result is None else result
return wrapper
doc_string = """\
Edit a dot formatted graph
Usage:
{command} [--output=<file>] node add_attribute
<graph_name> <node_name> <attribute_name> <attribute_value>
Options:
-h --help Show this screen
--version Show version
--output=<file> Name of file to store result in
graph_name Pathname of file containing graph
node_name Name of node to update
attribute_name Name of attribute
attribute_value Value of attribute
""".format(
command=os.path.basename(sys.argv[0]))
@checked_call
def add_attribute(
graph_name,
output_name,
node_name,
attribute_name,
attribute_value):
graph = open(graph_name, "r").read()
snippet = "{attribute}={value}".format(
attribute=attribute_name,
value=attribute_value)
pattern = r"^\s*{node}\s*\[".format(node=node_name)
def update_node(
match_object):
return "{match}\n{indent}{attribute}".format(
match=match_object.group(0),
indent=8 * " ",
attribute=snippet)
graph, nr_subs = re.subn(pattern, update_node, graph, flags=re.MULTILINE)
if nr_subs == 0:
raise RuntimeError("node '{}' was not found".format(node_name))
elif nr_subs > 1:
raise RuntimeError("node '{}' was found multiple times".format(
node_name))
stream = sys.stdout if output_name is None else open(output_name, "w")
stream.write(graph)
if __name__ == "__main__":
arguments = docopt.docopt(doc_string)
output_name = arguments["--output"]
if arguments["node"]:
if arguments["add_attribute"]:
graph_name = arguments["<graph_name>"]
node_name = arguments["<node_name>"]
attribute_name = arguments["<attribute_name>"]
attribute_value = arguments["<attribute_value>"]
function = add_attribute
arguments = (
graph_name, output_name, node_name, attribute_name,
attribute_value
)
sys.exit(function(*arguments))
| 26.474227
| 77
| 0.608645
|
import functools
import os.path
import re
import sys
import traceback
import docopt
def checked_call(
function):
@functools.wraps(function)
def wrapper(
*args,
**kwargs):
result = 0
try:
result = function(*args, **kwargs)
except:
traceback.print_exc(file=sys.stderr)
result = 1
return 0 if result is None else result
return wrapper
doc_string = """\
Edit a dot formatted graph
Usage:
{command} [--output=<file>] node add_attribute
<graph_name> <node_name> <attribute_name> <attribute_value>
Options:
-h --help Show this screen
--version Show version
--output=<file> Name of file to store result in
graph_name Pathname of file containing graph
node_name Name of node to update
attribute_name Name of attribute
attribute_value Value of attribute
""".format(
command=os.path.basename(sys.argv[0]))
@checked_call
def add_attribute(
graph_name,
output_name,
node_name,
attribute_name,
attribute_value):
graph = open(graph_name, "r").read()
snippet = "{attribute}={value}".format(
attribute=attribute_name,
value=attribute_value)
pattern = r"^\s*{node}\s*\[".format(node=node_name)
def update_node(
match_object):
return "{match}\n{indent}{attribute}".format(
match=match_object.group(0),
indent=8 * " ",
attribute=snippet)
graph, nr_subs = re.subn(pattern, update_node, graph, flags=re.MULTILINE)
if nr_subs == 0:
raise RuntimeError("node '{}' was not found".format(node_name))
elif nr_subs > 1:
raise RuntimeError("node '{}' was found multiple times".format(
node_name))
stream = sys.stdout if output_name is None else open(output_name, "w")
stream.write(graph)
if __name__ == "__main__":
arguments = docopt.docopt(doc_string)
output_name = arguments["--output"]
if arguments["node"]:
if arguments["add_attribute"]:
graph_name = arguments["<graph_name>"]
node_name = arguments["<node_name>"]
attribute_name = arguments["<attribute_name>"]
attribute_value = arguments["<attribute_value>"]
function = add_attribute
arguments = (
graph_name, output_name, node_name, attribute_name,
attribute_value
)
sys.exit(function(*arguments))
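# Invocation sketch with hypothetical file and node names. From a shell:
#   ./edit_dot_graph.py --output=out.dot node add_attribute graph.dot my_node color red
# or as a direct call, which rewrites 'my_node [' to add an indented
# 'color=red' attribute line beneath it:
exit_code = add_attribute("graph.dot", "out.dot", "my_node", "color", "red")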
| true
| true
|
1c3e6de79b1a7d032b4af2b2d0cbd9ee93c15924
| 300
|
py
|
Python
|
chapter 8/sampleCode5.py
|
DTAIEB/Thoughtful-Data-Science
|
8b80e8f3e33b6fdc6672ecee1f27e0b983b28241
|
[
"Apache-2.0"
] | 15
|
2018-06-01T19:18:32.000Z
|
2021-11-28T03:31:35.000Z
|
chapter 8/sampleCode5.py
|
chshychen/Thoughtful-Data-Science
|
8b80e8f3e33b6fdc6672ecee1f27e0b983b28241
|
[
"Apache-2.0"
] | 1
|
2018-12-17T02:01:42.000Z
|
2018-12-17T02:01:42.000Z
|
chapter 8/sampleCode5.py
|
chshychen/Thoughtful-Data-Science
|
8b80e8f3e33b6fdc6672ecee1f27e0b983b28241
|
[
"Apache-2.0"
] | 10
|
2018-09-23T02:45:45.000Z
|
2022-03-12T15:32:05.000Z
|
import requests
import quandl
databases = []
page = 1
while(page is not None):
payload = requests.get("https://www.quandl.com/api/v3/databases?api_key={}&page={}"\
.format(quandl.ApiConfig.api_key, page)).json()
databases += payload['databases']
page = payload['meta']['next_page']
| 33.333333
| 88
| 0.636667
|
import requests
import quandl
databases = []
page = 1
while(page is not None):
payload = requests.get("https://www.quandl.com/api/v3/databases?api_key={}&page={}"\
.format(quandl.ApiConfig.api_key, page)).json()
databases += payload['databases']
page = payload['meta']['next_page']
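# The same cursor-style pagination factored into a reusable sketch (added for
# illustration; the endpoint and payload keys mirror the snippet above).
def fetch_all_databases(api_key):
    items, page = [], 1
    while page is not None:
        payload = requests.get(
            "https://www.quandl.com/api/v3/databases?api_key={}&page={}"
            .format(api_key, page)).json()
        items += payload['databases']
        page = payload['meta']['next_page']  # None once the last page is reached
    return items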
| true
| true
|
1c3e6f3666a6ca5f3a023941fb3c4c1b80494304
| 1,399
|
py
|
Python
|
nuke_stubs/nuke/nuke_internal/scripts.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | 1
|
2022-01-12T01:29:16.000Z
|
2022-01-12T01:29:16.000Z
|
nuke_stubs/nuke/nuke_internal/scripts.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | null | null | null |
nuke_stubs/nuke/nuke_internal/scripts.py
|
sisoe24/Nuke-Python-Stubs
|
79c53cf5cb7b38e15a34fd04f672b143d9d7dc85
|
[
"MIT"
] | null | null | null |
"""This module define the scriptSaveAndClear method for Nuke API.
nuke.scriptSaveAndClear will call nuke.scriptSave() if any changes were made and then calls nuke.scriptClear()
"""
import nuke_internal as nuke
def scriptSaveAndClear(filename=None, ignoreUnsavedChanges=False):
""" scriptSaveAndClear(filename=None, ignoreUnsavedChanges=False) -> None
Calls nuke.scriptSave and nuke.scriptClear
@param filename: Save to this file name without changing the script name in the
project.
    @param ignoreUnsavedChanges: Optional. If set to True scriptSave will be called,
    ignoring any unsaved changes
    @return: True when successful. False if the user cancels the operation. In this
    case nuke.scriptClear will not be called
"""
root = nuke.Root()
if not ignoreUnsavedChanges and root is not None and root.modified() and len(root.nodes()) > 0:
runScriptSave = False
if filename is None:
scriptName = ''
try:
scriptName = nuke.scriptName()
except RuntimeError:
scriptName = 'untitled'
try:
runScriptSave = nuke.askWithCancel( "Save changes to " + scriptName + " before closing?" )
except nuke.CancelledError:
return False
else:
runScriptSave = True
if runScriptSave:
try:
nuke.scriptSave( filename )
except RuntimeError:
return False
nuke.scriptClear()
return True
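# Usage sketch (only meaningful inside a Nuke session where nuke_internal is
# importable): save without prompting, then clear the node graph.
if scriptSaveAndClear(ignoreUnsavedChanges=True):
    print('script saved and cleared')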
| 31.795455
| 110
| 0.706934
|
import nuke_internal as nuke
def scriptSaveAndClear(filename=None, ignoreUnsavedChanges=False):
root = nuke.Root()
if not ignoreUnsavedChanges and root is not None and root.modified() and len(root.nodes()) > 0:
runScriptSave = False
if filename is None:
scriptName = ''
try:
scriptName = nuke.scriptName()
except RuntimeError:
scriptName = 'untitled'
try:
runScriptSave = nuke.askWithCancel( "Save changes to " + scriptName + " before closing?" )
except nuke.CancelledError:
return False
else:
runScriptSave = True
if runScriptSave:
try:
nuke.scriptSave( filename )
except RuntimeError:
return False
nuke.scriptClear()
return True
| true
| true
|
1c3e6f796d22ad4a6d1d7dcf66a8c3466bf58805
| 427
|
py
|
Python
|
data/__init__.py
|
pishchalnikov/hacker-news-api-tests
|
773b3dfbaaa4675fcebb1421ddb9d35ad0bfa65f
|
[
"MIT"
] | null | null | null |
data/__init__.py
|
pishchalnikov/hacker-news-api-tests
|
773b3dfbaaa4675fcebb1421ddb9d35ad0bfa65f
|
[
"MIT"
] | null | null | null |
data/__init__.py
|
pishchalnikov/hacker-news-api-tests
|
773b3dfbaaa4675fcebb1421ddb9d35ad0bfa65f
|
[
"MIT"
] | null | null | null |
import os
import jsonref
class DataJsonReader(dict):
def __init__(self, file_name):
base_path = os.path.dirname(os.path.abspath(__file__))
json_path = os.path.join(base_path, file_name)
base_uri = f"file://{base_path}/"
with open(json_path) as input_file:
self.update(jsonref.loads(input_file.read(),
base_uri=base_uri, jsonschema=True))
| 30.5
| 74
| 0.620609
|
import os
import jsonref
class DataJsonReader(dict):
def __init__(self, file_name):
base_path = os.path.dirname(os.path.abspath(__file__))
json_path = os.path.join(base_path, file_name)
base_uri = f"file://{base_path}/"
with open(json_path) as input_file:
self.update(jsonref.loads(input_file.read(),
base_uri=base_uri, jsonschema=True))
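# Hypothetical usage: "users.json" is assumed to sit next to this package;
# jsonref then resolves any "$ref" entries against the package directory.
data = DataJsonReader("users.json")
print(sorted(data.keys()))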
| true
| true
|
1c3e71024ceb589d01c4523ee29040d20dbbcdaf
| 1,567
|
py
|
Python
|
Trinket_Question_Block_Sound_Jewelry/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 665
|
2017-09-27T21:20:14.000Z
|
2022-03-31T09:09:25.000Z
|
Trinket_Question_Block_Sound_Jewelry/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 641
|
2017-10-03T19:46:37.000Z
|
2022-03-30T18:28:46.000Z
|
Trinket_Question_Block_Sound_Jewelry/code.py
|
gamblor21/Adafruit_Learning_System_Guides
|
f5dab4a758bc82d0bfc3c299683fe89dc093912a
|
[
"MIT"
] | 734
|
2017-10-02T22:47:38.000Z
|
2022-03-30T14:03:51.000Z
|
# SPDX-FileCopyrightText: 2017 Limor Fried/ladyada for Adafruit Industries
# SPDX-FileCopyrightText: 2018 Mikey Sklar for Adafruit Industries
#
# SPDX-License-Identifier: MIT
import time
import board
import simpleio
import pwmio
import digitalio
# PWM is not available on Trinket D1
vibration_pin = board.D1 # vibration switch is connected
speaker_pin = board.D2 # PWM speaker
pwm_leds = board.D4 # PWM "fading" LEDs
# initialize PWM for LEDs
pwm = pwmio.PWMOut(pwm_leds, frequency=256, duty_cycle=50)
led_fade_delay = .001 # delay in seconds makes color fade visible
led_fade_step = 1024 # fade amount
# initialize vibration sensor
vpin = digitalio.DigitalInOut(vibration_pin)
vpin.direction = digitalio.Direction.INPUT
vpin.pull = digitalio.Pull.UP
def led_fade(brightness):
pwm.duty_cycle = brightness
brightness_start = brightness
while brightness >= (brightness_start / 2):
brightness -= led_fade_step
pwm.duty_cycle = brightness
time.sleep(led_fade_delay)
while True:
# wait for vibration sensor detect (reverse logic)
# play Super Mario Bros. coin sound
# fade LEDs
if not vpin.value:
led_fade((2 ** 16) - 1) # full brightness
simpleio.tone(speaker_pin, 988, 0.083) # tone1 - B5
led_fade(2 ** 15) # half brightness
simpleio.tone(speaker_pin, 1319, 0.83) # tone2 - E6
led_fade(2 ** 14) # quarter brightness
pwm.duty_cycle = 0 # turn off LEDs
| 33.340426
| 74
| 0.668794
|
import time
import board
import simpleio
import pwmio
import digitalio
vibration_pin = board.D1
speaker_pin = board.D2
pwm_leds = board.D4
pwm = pwmio.PWMOut(pwm_leds, frequency=256, duty_cycle=50)
led_fade_delay = .001
led_fade_step = 1024
vpin = digitalio.DigitalInOut(vibration_pin)
vpin.direction = digitalio.Direction.INPUT
vpin.pull = digitalio.Pull.UP
def led_fade(brightness):
pwm.duty_cycle = brightness
brightness_start = brightness
while brightness >= (brightness_start / 2):
brightness -= led_fade_step
pwm.duty_cycle = brightness
time.sleep(led_fade_delay)
while True:
if not vpin.value:
led_fade((2 ** 16) - 1)
simpleio.tone(speaker_pin, 988, 0.083)
led_fade(2 ** 15)
simpleio.tone(speaker_pin, 1319, 0.83)
led_fade(2 ** 14)
pwm.duty_cycle = 0
| true
| true
|
1c3e71085a82468f1cdb29c08537996c0482b531
| 8,168
|
py
|
Python
|
vericite_lms_client/models/report_meta_data.py
|
vericite/vericite_api_python
|
1ffef5c7900c534c74b681254afe204260e0326c
|
[
"Apache-2.0"
] | null | null | null |
vericite_lms_client/models/report_meta_data.py
|
vericite/vericite_api_python
|
1ffef5c7900c534c74b681254afe204260e0326c
|
[
"Apache-2.0"
] | null | null | null |
vericite_lms_client/models/report_meta_data.py
|
vericite/vericite_api_python
|
1ffef5c7900c534c74b681254afe204260e0326c
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
VeriCiteLmsApiV1
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class ReportMetaData(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, assignment_title=None, context_title=None, external_content_data=None, user_email=None, user_first_name=None, user_last_name=None, user_role=None):
"""
ReportMetaData - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'assignment_title': 'str',
'context_title': 'str',
'external_content_data': 'list[ExternalContentData]',
'user_email': 'str',
'user_first_name': 'str',
'user_last_name': 'str',
'user_role': 'str'
}
self.attribute_map = {
'assignment_title': 'assignmentTitle',
'context_title': 'contextTitle',
'external_content_data': 'externalContentData',
'user_email': 'userEmail',
'user_first_name': 'userFirstName',
'user_last_name': 'userLastName',
'user_role': 'userRole'
}
self._assignment_title = assignment_title
self._context_title = context_title
self._external_content_data = external_content_data
self._user_email = user_email
self._user_first_name = user_first_name
self._user_last_name = user_last_name
self._user_role = user_role
@property
def assignment_title(self):
"""
Gets the assignment_title of this ReportMetaData.
Title of Assignment
:return: The assignment_title of this ReportMetaData.
:rtype: str
"""
return self._assignment_title
@assignment_title.setter
def assignment_title(self, assignment_title):
"""
Sets the assignment_title of this ReportMetaData.
Title of Assignment
:param assignment_title: The assignment_title of this ReportMetaData.
:type: str
"""
self._assignment_title = assignment_title
@property
def context_title(self):
"""
Gets the context_title of this ReportMetaData.
Title of Context
:return: The context_title of this ReportMetaData.
:rtype: str
"""
return self._context_title
@context_title.setter
def context_title(self, context_title):
"""
Sets the context_title of this ReportMetaData.
Title of Context
:param context_title: The context_title of this ReportMetaData.
:type: str
"""
self._context_title = context_title
@property
def external_content_data(self):
"""
Gets the external_content_data of this ReportMetaData.
:return: The external_content_data of this ReportMetaData.
:rtype: list[ExternalContentData]
"""
return self._external_content_data
@external_content_data.setter
def external_content_data(self, external_content_data):
"""
Sets the external_content_data of this ReportMetaData.
:param external_content_data: The external_content_data of this ReportMetaData.
:type: list[ExternalContentData]
"""
if external_content_data is None:
raise ValueError("Invalid value for `external_content_data`, must not be `None`")
self._external_content_data = external_content_data
@property
def user_email(self):
"""
Gets the user_email of this ReportMetaData.
Users Email
:return: The user_email of this ReportMetaData.
:rtype: str
"""
return self._user_email
@user_email.setter
def user_email(self, user_email):
"""
Sets the user_email of this ReportMetaData.
Users Email
:param user_email: The user_email of this ReportMetaData.
:type: str
"""
self._user_email = user_email
@property
def user_first_name(self):
"""
Gets the user_first_name of this ReportMetaData.
Users First Name
:return: The user_first_name of this ReportMetaData.
:rtype: str
"""
return self._user_first_name
@user_first_name.setter
def user_first_name(self, user_first_name):
"""
Sets the user_first_name of this ReportMetaData.
Users First Name
:param user_first_name: The user_first_name of this ReportMetaData.
:type: str
"""
self._user_first_name = user_first_name
@property
def user_last_name(self):
"""
Gets the user_last_name of this ReportMetaData.
Users Last Name
:return: The user_last_name of this ReportMetaData.
:rtype: str
"""
return self._user_last_name
@user_last_name.setter
def user_last_name(self, user_last_name):
"""
Sets the user_last_name of this ReportMetaData.
Users Last Name
:param user_last_name: The user_last_name of this ReportMetaData.
:type: str
"""
self._user_last_name = user_last_name
@property
def user_role(self):
"""
Gets the user_role of this ReportMetaData.
User Role
:return: The user_role of this ReportMetaData.
:rtype: str
"""
return self._user_role
@user_role.setter
def user_role(self, user_role):
"""
Sets the user_role of this ReportMetaData.
User Role
:param user_role: The user_role of this ReportMetaData.
:type: str
"""
self._user_role = user_role
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
| 28.659649
| 170
| 0.60835
|
from pprint import pformat
from six import iteritems
import re
class ReportMetaData(object):
def __init__(self, assignment_title=None, context_title=None, external_content_data=None, user_email=None, user_first_name=None, user_last_name=None, user_role=None):
self.swagger_types = {
'assignment_title': 'str',
'context_title': 'str',
'external_content_data': 'list[ExternalContentData]',
'user_email': 'str',
'user_first_name': 'str',
'user_last_name': 'str',
'user_role': 'str'
}
self.attribute_map = {
'assignment_title': 'assignmentTitle',
'context_title': 'contextTitle',
'external_content_data': 'externalContentData',
'user_email': 'userEmail',
'user_first_name': 'userFirstName',
'user_last_name': 'userLastName',
'user_role': 'userRole'
}
self._assignment_title = assignment_title
self._context_title = context_title
self._external_content_data = external_content_data
self._user_email = user_email
self._user_first_name = user_first_name
self._user_last_name = user_last_name
self._user_role = user_role
@property
def assignment_title(self):
return self._assignment_title
@assignment_title.setter
def assignment_title(self, assignment_title):
self._assignment_title = assignment_title
@property
def context_title(self):
return self._context_title
@context_title.setter
def context_title(self, context_title):
self._context_title = context_title
@property
def external_content_data(self):
return self._external_content_data
@external_content_data.setter
def external_content_data(self, external_content_data):
if external_content_data is None:
raise ValueError("Invalid value for `external_content_data`, must not be `None`")
self._external_content_data = external_content_data
@property
def user_email(self):
return self._user_email
@user_email.setter
def user_email(self, user_email):
self._user_email = user_email
@property
def user_first_name(self):
return self._user_first_name
@user_first_name.setter
def user_first_name(self, user_first_name):
self._user_first_name = user_first_name
@property
def user_last_name(self):
return self._user_last_name
@user_last_name.setter
def user_last_name(self, user_last_name):
self._user_last_name = user_last_name
@property
def user_role(self):
return self._user_role
@user_role.setter
def user_role(self, user_role):
self._user_role = user_role
def to_dict(self):
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c3e71189d4930286e5f0e16f6b38e6ad810cd26
| 7,674
|
py
|
Python
|
tests/pytests/functional/states/test_win_certutil.py
|
tomdoherty/salt
|
f87d5d7abbf9777773c4d91fdafecb8b1a728e76
|
[
"Apache-2.0"
] | 1
|
2022-03-12T00:03:19.000Z
|
2022-03-12T00:03:19.000Z
|
tests/pytests/functional/states/test_win_certutil.py
|
tomdoherty/salt
|
f87d5d7abbf9777773c4d91fdafecb8b1a728e76
|
[
"Apache-2.0"
] | 2
|
2022-03-02T16:11:35.000Z
|
2022-03-03T08:04:30.000Z
|
tests/pytests/functional/states/test_win_certutil.py
|
tomdoherty/salt
|
f87d5d7abbf9777773c4d91fdafecb8b1a728e76
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests for win_certutil state module
"""
import pytest
import salt.utils.files
pytestmark = [
pytest.mark.windows_whitelisted,
pytest.mark.skip_unless_on_windows,
]
@pytest.fixture(scope="module")
def certutil(states):
return states.certutil
@pytest.fixture(scope="module")
def certutil_mod(modules):
return modules.certutil
@pytest.fixture(scope="module")
def cert_file(state_tree):
    # Binary contents of a self-signed certificate, used only for testing
binary_data = (
b"0\x82\x03\x0e0\x82\x01\xf6\xa0\x03\x02\x01\x02\x02\x10[\xe1\xcc]Q\xb7"
b"\x8d\xbdI\xa0\xb7\xc0\rD\x80m0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x0b"
b"\x05\x000\x1a1\x180\x16\x06\x03U\x04\x03\x0c\x0fTestCertificate0\x1e"
b"\x17\r220120174254Z\x17\r230120180254Z0\x1a1\x180\x16\x06\x03U\x04"
b'\x03\x0c\x0fTestCertificate0\x82\x01"0\r\x06\t*\x86H\x86\xf7\r\x01'
b"\x01\x01\x05\x00\x03\x82\x01\x0f\x000\x82\x01\n\x02\x82\x01\x01\x00"
b"\xb8x@YBP\x9f\x9c\x0e\n\xad\xd0l6\xc4\x9c\x7f#\x97\xbck@b\\\xa1\x94"
b"\xecR\x85Xq\xe4H\x0c\xfa\x1b]\xb8\x14\x14x\x05\xb7\xe6\xb6t\x07j\xda0"
b"\xd0\xb5\xc8\xdf\xe8\xad\xeb4qa\x86\xefw\x19\xf0\x9a%\xb8!\x81\xc2"
b"\xcbd\x81,\xbd\xe1a\x91\x822\nh\x88\x9d\xb7\x82 \xe8\x0f\x91\x13\xc8"
b"\xc0xir\xf8\x90Yc\x8f3\xe9\xdc\xa3\xbc+\xea/\x02\n\x94\xde\xba\xbb"
b"\xcb0\x98Z\xbc\xeeK\xab\xc5\xba,\x0f\x7f}6\xb9$|\xdd=\xdaN\xff]N\xe3"
b"\xbd\x00\xee?H\xdav\xa9\x95\xb8Vd\xf9=\x01\x16K\xb8\xa0C%\x1e[\x18'"
b"\xb4\x17Vi\xee\x97[\xf9\xa8MM\xfb\x88\x9fc\xbb\x08\xa7!\xc0U\xa8\xfc"
b"\nx:\xbc\x8f\x14\x0eF\x1f\x85Ba\x8b\xa3\xd7\xc4<\xcaN\xd1;y\xd0\x1a"
b"\xeb\xd2\x91c\x94\xee%\xc8\x82\x85\x92\x88\xec\x1d\nh\xa9q|E\x1a\xaf"
b"\x16\x89!i\x19'\xb7t{\x11\xe8\xb8\xee\xa9\x97\xf4\x1c\xfa\x92-\x02"
b"\x03\x01\x00\x01\xa3P0N0\x0e\x06\x03U\x1d\x0f\x01\x01\xff\x04\x04\x03"
b"\x02\x05\xa00\x1d\x06\x03U\x1d%\x04\x160\x14\x06\x08+\x06\x01\x05\x05"
b"\x07\x03\x02\x06\x08+\x06\x01\x05\x05\x07\x03\x010\x1d\x06\x03U\x1d"
b"\x0e\x04\x16\x04\x14\xefy\x97r\x16\xadg\r\x85\xea\xfe\xa8y[29\x0b%"
b"\xdfB0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x0b\x05\x00\x03\x82\x01\x01"
b"\x00\x93)\x0c$\xeb\xf7\x02\x9fSf^[\t2\xd3\xdf\xcc~b\xdd\xd3\x1e<\x91"
b"\xbc\x93\x87Z\x8ciC/\x87\x85\xf4\x18\xe0j\xae\xf3\x1c\xa7\xab\xf7\xfd"
b"\xd9\xeb\x11:}Ys\x8f\xc9\\\xea\x17\xbb\x957\x9b\xef\x17E]RwY\x10\x8b"
b'\x08\xc5\xa6\xc9\x05[\xe7\x11\xf3"2\xd3\xca\xf6\x05\x8a2\xc1S\x1e\xf0'
b"\xdb\xfa,\xfc\x80\xb88-!\x07\xe5\x81mc'\xca\x16@\x16\xf7\x9b\xc5"
b"\x95V;$\x95\xeab\xea\x1eX\x1dU\x97\x87\xc0\x17\xd0n\x01c@\x88z\xec"
b"\x9ep\x19\x02I\xf6\xe4\xddr\xc3(\xb9\x98\x97$\xb8\xf3g\x16\x05\xa7"
b"\x04\xf7\x15\x9a\xed!\x02\xd76\xb2nC\x04}sV=,\xd5\x8e\xb8hG\x99\xcb-x"
b"\x0e\x05h\xee;\xcdp\x13\xfc)\xdb\xa9o\xb0\x1c\x0e\x86\xb2\r\xc5.\xb1"
b"\x036\t\xd3l&\xd1\x13\xc1\xc1\x12\xfb\xc0\xab<\xaf\x04\x0eIW\xb8<OD"
b'\xfe"(U\xc2&\xa8\xd8\x9bkY\xdb~\xf8\xad\xb7\xa8Mu\xb6\xef\x89\xf2'
b"\xbeM"
)
with pytest.helpers.temp_file(
"TestCertificate.cer", directory=state_tree
) as cert_file:
with salt.utils.files.fopen(str(cert_file), "wb") as fh:
fh.write(binary_data)
yield cert_file
@pytest.fixture(scope="module")
def invalid_cert_file(state_tree):
with pytest.helpers.temp_file("Invalid.cer", directory=state_tree) as cert_file:
with salt.utils.files.fopen(str(cert_file), "wb") as fh:
fh.write(b"Invalid cert data")
yield cert_file
@pytest.fixture(scope="function")
def clean_store(certutil_mod, cert_file):
certutil_mod.del_store(source=str(cert_file), store="TrustedPublisher")
serials = certutil_mod.get_stored_cert_serials(store="TrustedPublisher")
assert "5be1cc5d51b78dbd49a0b7c00d44806d" not in serials
yield
certutil_mod.del_store(source=str(cert_file), store="TrustedPublisher")
@pytest.fixture(scope="function")
def populate_store(certutil_mod, cert_file):
certutil_mod.add_store(source=str(cert_file), store="TrustedPublisher")
serials = certutil_mod.get_stored_cert_serials(store="TrustedPublisher")
assert "5be1cc5d51b78dbd49a0b7c00d44806d" in serials
yield
certutil_mod.del_store(source=str(cert_file), store="TrustedPublisher")
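# Note: "5be1cc5d51b78dbd49a0b7c00d44806d" matches the serial number embedded
# in the test certificate bytes above, which is why the two fixtures above can
# assert on that exact serial.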
def test_add_store_non_existing_cert(certutil):
"""
Test add_store when the certificate does not exist
"""
ret = certutil.add_store(
name="salt://non-existing.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Certificate file not found")
assert ret.result is False
def test_add_store_invalid_cert(certutil, invalid_cert_file):
"""
Test add_store with an invalid certificate
"""
ret = certutil.add_store(name="salt://Invalid.cer", store="TrustedPublisher")
assert ret.comment.startswith("Invalid certificate file")
assert ret.result is False
def test_add_store_cert_already_present(certutil, cert_file, populate_store):
"""
Test add_store when the certificate is already present
"""
ret = certutil.add_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Certificate already present")
assert ret.result is True
def test_add_store_cert_test_is_true(certutil, cert_file, clean_store):
"""
Test add_store when test is True
"""
ret = certutil.add_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
test=True,
)
assert ret.comment.startswith("Certificate will be added")
assert ret.result is None
def test_add_store(certutil, cert_file, clean_store):
"""
Test add_store
"""
ret = certutil.add_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Added certificate")
assert ret.result is True
def test_del_store_non_existing_cert(certutil):
"""
Test del_store when the certificate does not exist
"""
ret = certutil.del_store(
name="salt://non-existing.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Certificate file not found")
assert ret.result is False
def test_del_store_invalid_cert(certutil, invalid_cert_file):
"""
Test del_store with an invalid certificate
"""
ret = certutil.del_store(name="salt://Invalid.cer", store="TrustedPublisher")
assert ret.comment.startswith("Invalid certificate file")
assert ret.result is False
def test_del_store_cert_already_absent(certutil, cert_file, clean_store):
"""
Test del_store when the certificate is already absent
"""
ret = certutil.del_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Certificate already absent")
assert ret.result is True
def test_del_store_cert_test_is_true(certutil, cert_file, populate_store):
"""
Test del_store when test is True
"""
ret = certutil.del_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
test=True,
)
assert ret.comment.startswith("Certificate will be removed")
assert ret.result is None
def test_del_store(certutil, cert_file, populate_store):
"""
Test del_store
"""
ret = certutil.del_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Removed certificate")
assert ret.result is True
| 36.542857
| 84
| 0.681783
|
import pytest
import salt.utils.files
pytestmark = [
pytest.mark.windows_whitelisted,
pytest.mark.skip_unless_on_windows,
]
@pytest.fixture(scope="module")
def certutil(states):
return states.certutil
@pytest.fixture(scope="module")
def certutil_mod(modules):
return modules.certutil
@pytest.fixture(scope="module")
def cert_file(state_tree):
binary_data = (
b"0\x82\x03\x0e0\x82\x01\xf6\xa0\x03\x02\x01\x02\x02\x10[\xe1\xcc]Q\xb7"
b"\x8d\xbdI\xa0\xb7\xc0\rD\x80m0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x0b"
b"\x05\x000\x1a1\x180\x16\x06\x03U\x04\x03\x0c\x0fTestCertificate0\x1e"
b"\x17\r220120174254Z\x17\r230120180254Z0\x1a1\x180\x16\x06\x03U\x04"
b'\x03\x0c\x0fTestCertificate0\x82\x01"0\r\x06\t*\x86H\x86\xf7\r\x01'
b"\x01\x01\x05\x00\x03\x82\x01\x0f\x000\x82\x01\n\x02\x82\x01\x01\x00"
b"\xb8x@YBP\x9f\x9c\x0e\n\xad\xd0l6\xc4\x9c\x7f
b"\xecR\x85Xq\xe4H\x0c\xfa\x1b]\xb8\x14\x14x\x05\xb7\xe6\xb6t\x07j\xda0"
b"\xd0\xb5\xc8\xdf\xe8\xad\xeb4qa\x86\xefw\x19\xf0\x9a%\xb8!\x81\xc2"
b"\xcbd\x81,\xbd\xe1a\x91\x822\nh\x88\x9d\xb7\x82 \xe8\x0f\x91\x13\xc8"
b"\xc0xir\xf8\x90Yc\x8f3\xe9\xdc\xa3\xbc+\xea/\x02\n\x94\xde\xba\xbb"
b"\xcb0\x98Z\xbc\xeeK\xab\xc5\xba,\x0f\x7f}6\xb9$|\xdd=\xdaN\xff]N\xe3"
b"\xbd\x00\xee?H\xdav\xa9\x95\xb8Vd\xf9=\x01\x16K\xb8\xa0C%\x1e[\x18'"
b"\xb4\x17Vi\xee\x97[\xf9\xa8MM\xfb\x88\x9fc\xbb\x08\xa7!\xc0U\xa8\xfc"
b"\nx:\xbc\x8f\x14\x0eF\x1f\x85Ba\x8b\xa3\xd7\xc4<\xcaN\xd1;y\xd0\x1a"
b"\xeb\xd2\x91c\x94\xee%\xc8\x82\x85\x92\x88\xec\x1d\nh\xa9q|E\x1a\xaf"
b"\x16\x89!i\x19'\xb7t{\x11\xe8\xb8\xee\xa9\x97\xf4\x1c\xfa\x92-\x02"
b"\x03\x01\x00\x01\xa3P0N0\x0e\x06\x03U\x1d\x0f\x01\x01\xff\x04\x04\x03"
b"\x02\x05\xa00\x1d\x06\x03U\x1d%\x04\x160\x14\x06\x08+\x06\x01\x05\x05"
b"\x07\x03\x02\x06\x08+\x06\x01\x05\x05\x07\x03\x010\x1d\x06\x03U\x1d"
b"\x0e\x04\x16\x04\x14\xefy\x97r\x16\xadg\r\x85\xea\xfe\xa8y[29\x0b%"
b"\xdfB0\r\x06\t*\x86H\x86\xf7\r\x01\x01\x0b\x05\x00\x03\x82\x01\x01"
b"\x00\x93)\x0c$\xeb\xf7\x02\x9fSf^[\t2\xd3\xdf\xcc~b\xdd\xd3\x1e<\x91"
b"\xbc\x93\x87Z\x8ciC/\x87\x85\xf4\x18\xe0j\xae\xf3\x1c\xa7\xab\xf7\xfd"
b"\xd9\xeb\x11:}Ys\x8f\xc9\\\xea\x17\xbb\x957\x9b\xef\x17E]RwY\x10\x8b"
b'\x08\xc5\xa6\xc9\x05[\xe7\x11\xf3"2\xd3\xca\xf6\x05\x8a2\xc1S\x1e\xf0'
b"\xdb\xfa,\xfc\x80\xb88-!\x07\xe5\x81mc'\xca\x16@\x16\xf7\x9b\xc5"
b"\x95V;$\x95\xeab\xea\x1eX\x1dU\x97\x87\xc0\x17\xd0n\x01c@\x88z\xec"
b"\x9ep\x19\x02I\xf6\xe4\xddr\xc3(\xb9\x98\x97$\xb8\xf3g\x16\x05\xa7"
b"\x04\xf7\x15\x9a\xed!\x02\xd76\xb2nC\x04}sV=,\xd5\x8e\xb8hG\x99\xcb-x"
b"\x0e\x05h\xee;\xcdp\x13\xfc)\xdb\xa9o\xb0\x1c\x0e\x86\xb2\r\xc5.\xb1"
b"\x036\t\xd3l&\xd1\x13\xc1\xc1\x12\xfb\xc0\xab<\xaf\x04\x0eIW\xb8<OD"
b'\xfe"(U\xc2&\xa8\xd8\x9bkY\xdb~\xf8\xad\xb7\xa8Mu\xb6\xef\x89\xf2'
b"\xbeM"
)
with pytest.helpers.temp_file(
"TestCertificate.cer", directory=state_tree
) as cert_file:
with salt.utils.files.fopen(str(cert_file), "wb") as fh:
fh.write(binary_data)
yield cert_file
@pytest.fixture(scope="module")
def invalid_cert_file(state_tree):
with pytest.helpers.temp_file("Invalid.cer", directory=state_tree) as cert_file:
with salt.utils.files.fopen(str(cert_file), "wb") as fh:
fh.write(b"Invalid cert data")
yield cert_file
@pytest.fixture(scope="function")
def clean_store(certutil_mod, cert_file):
certutil_mod.del_store(source=str(cert_file), store="TrustedPublisher")
serials = certutil_mod.get_stored_cert_serials(store="TrustedPublisher")
assert "5be1cc5d51b78dbd49a0b7c00d44806d" not in serials
yield
certutil_mod.del_store(source=str(cert_file), store="TrustedPublisher")
@pytest.fixture(scope="function")
def populate_store(certutil_mod, cert_file):
certutil_mod.add_store(source=str(cert_file), store="TrustedPublisher")
serials = certutil_mod.get_stored_cert_serials(store="TrustedPublisher")
assert "5be1cc5d51b78dbd49a0b7c00d44806d" in serials
yield
certutil_mod.del_store(source=str(cert_file), store="TrustedPublisher")
def test_add_store_non_existing_cert(certutil):
ret = certutil.add_store(
name="salt://non-existing.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Certificate file not found")
assert ret.result is False
def test_add_store_invalid_cert(certutil, invalid_cert_file):
ret = certutil.add_store(name="salt://Invalid.cer", store="TrustedPublisher")
assert ret.comment.startswith("Invalid certificate file")
assert ret.result is False
def test_add_store_cert_already_present(certutil, cert_file, populate_store):
ret = certutil.add_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Certificate already present")
assert ret.result is True
def test_add_store_cert_test_is_true(certutil, cert_file, clean_store):
ret = certutil.add_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
test=True,
)
assert ret.comment.startswith("Certificate will be added")
assert ret.result is None
def test_add_store(certutil, cert_file, clean_store):
ret = certutil.add_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Added certificate")
assert ret.result is True
def test_del_store_non_existing_cert(certutil):
ret = certutil.del_store(
name="salt://non-existing.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Certificate file not found")
assert ret.result is False
def test_del_store_invalid_cert(certutil, invalid_cert_file):
ret = certutil.del_store(name="salt://Invalid.cer", store="TrustedPublisher")
assert ret.comment.startswith("Invalid certificate file")
assert ret.result is False
def test_del_store_cert_already_absent(certutil, cert_file, clean_store):
ret = certutil.del_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Certificate already absent")
assert ret.result is True
def test_del_store_cert_test_is_true(certutil, cert_file, populate_store):
ret = certutil.del_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
test=True,
)
assert ret.comment.startswith("Certificate will be removed")
assert ret.result is None
def test_del_store(certutil, cert_file, populate_store):
ret = certutil.del_store(
name="salt://TestCertificate.cer",
store="TrustedPublisher",
)
assert ret.comment.startswith("Removed certificate")
assert ret.result is True
| true
| true
|
1c3e713af7c0116ccd04cf76d73556e884f2c1ce
| 9,120
|
py
|
Python
|
tests/test_data/test_personal.py
|
el/elizabeth
|
dc82cd9d2bb230acdb2f1a49bc16b1c3d12077ff
|
[
"MIT"
] | null | null | null |
tests/test_data/test_personal.py
|
el/elizabeth
|
dc82cd9d2bb230acdb2f1a49bc16b1c3d12077ff
|
[
"MIT"
] | null | null | null |
tests/test_data/test_personal.py
|
el/elizabeth
|
dc82cd9d2bb230acdb2f1a49bc16b1c3d12077ff
|
[
"MIT"
] | 1
|
2019-12-27T19:34:17.000Z
|
2019-12-27T19:34:17.000Z
|
# -*- coding: utf-8 -*-
import re
from unittest import TestCase
from elizabeth import Personal
import elizabeth.core.interdata as common
from tests.test_data import DummyCase
from ._patterns import *
class PersonalBaseTest(TestCase):
def setUp(self):
self.personal = Personal()
def tearDown(self):
del self.personal
def test_str(self):
self.assertTrue(re.match(STR_REGEX, self.personal.__str__()))
def test_age(self):
result = self.personal.age(maximum=55)
self.assertTrue(result <= 55)
def test_age_store(self):
result = self.personal._store['age']
self.assertEqual(result, 0)
def test_age_update(self):
result = self.personal.age() - self.personal._store['age'] # calling age() should go first
self.assertEqual(result, 0)
def test_child_count(self):
result = self.personal.child_count(max_childs=10)
self.assertTrue(result <= 10)
def test_work_experience(self):
result = self.personal.work_experience(working_start_age=0) - self.personal._store['age']
self.assertEqual(result, 0)
def test_work_experience_store(self):
result = self.personal.work_experience() - self.personal.work_experience()
self.assertEqual(result, 0)
def test_work_experience_extreme(self):
result = self.personal.work_experience(working_start_age=100000)
self.assertEqual(result, 0)
def test_paypal(self):
result = self.personal.paypal()
self.assertIsNotNone(result)
def test_password(self):
plain = self.personal.password(length=15)
self.assertEqual(len(plain), 15)
md5 = self.personal.password(algorithm='md5')
self.assertEqual(len(md5), 32)
sha1 = self.personal.password(algorithm='sha1')
self.assertEqual(len(sha1), 40)
sha256 = self.personal.password(algorithm='sha256')
self.assertEqual(len(sha256), 64)
sha512 = self.personal.password(algorithm='sha512')
self.assertEqual(len(sha512), 128)
with self.assertRaises(NotImplementedError):
self.personal.password(algorithm='sha42')
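        # (hex digest lengths asserted above: md5 -> 32, sha1 -> 40,
        #  sha256 -> 64, sha512 -> 128 characters)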
def test_username(self):
result = self.personal.username()
self.assertTrue(re.match(USERNAME_REGEX, result))
def test_email(self):
result = self.personal.email()
self.assertTrue(re.match(EMAIL_REGEX, result))
def test_bitcoin(self):
result = self.personal.bitcoin()
self.assertEqual(len(result), 34)
def test_cvv(self):
result = self.personal.cvv()
self.assertTrue((100 <= result) and (result <= 999))
def test_credit_card_number(self):
result = self.personal.credit_card_number()
self.assertTrue(re.match(CREDIT_CARD_REGEX, result))
result_mc = self.personal.credit_card_number(card_type='master_card')
self.assertTrue(re.match(CREDIT_CARD_REGEX, result_mc))
result_ax = self.personal.credit_card_number(card_type='amex')
self.assertTrue(re.match(CREDIT_CARD_REGEX, result_ax))
with self.assertRaises(NotImplementedError):
self.personal.credit_card_number(card_type="discover")
def test_expiration_date(self):
result = self.personal.credit_card_expiration_date(
minimum=16, maximum=25)
year = result.split('/')[1]
self.assertTrue((int(year) >= 16) and (int(year) <= 25))
def test_cid(self):
result = self.personal.cid()
self.assertTrue((1000 <= result) and (result <= 9999))
def test_height(self):
result = self.personal.height(minimum=1.60, maximum=1.90)
self.assertTrue(result.startswith('1'))
self.assertIsInstance(result, str)
def test_weight(self):
result = self.personal.weight(minimum=40, maximum=60)
self.assertTrue((result >= 40) and (result <= 60))
def test_blood_type(self):
result = self.personal.blood_type()
self.assertIn(result, common.BLOOD_GROUPS)
def test_favorite_movie(self):
result = self.personal.favorite_movie()
self.assertIn(result, self.personal.data['favorite_movie'])
def test_favorite_music_genre(self):
result = self.personal.favorite_music_genre()
self.assertIn(result, common.FAVORITE_MUSIC_GENRE)
def test_avatar(self):
result = self.personal.avatar(size=512)
img, size, *__ = result.split('/')[::-1]
self.assertEqual(int(size), 512)
self.assertEqual(32, len(img.split('.')[0]))
def test_identifier(self):
result = self.personal.identifier()
mask = '##-##/##'
self.assertEqual(len(mask), len(result))
result = self.personal.identifier(mask='##', suffix=True)
lst = result.split()
_id, sfx = lst[0], lst[1]
self.assertEqual(len(_id), 2)
self.assertEqual(len(sfx), 2)
result = self.personal.identifier(suffix=True)
suffix = result.split(' ')[1]
self.assertTrue(suffix.isalpha())
def test_level_of_english(self):
result = self.personal.level_of_english()
lvl_s = ['Beginner',
'Elementary',
'Pre - Intermediate',
'Intermediate',
'Upper Intermediate',
'Advanced',
'Proficiency'
]
self.assertIn(result, lvl_s)
class PersonalTestCase(DummyCase):
def test_name(self):
result = self.generic.personal.name(gender='female')
self.assertIn(result, self.generic.personal.data['names']['female'])
result = self.generic.personal.name(gender='male')
self.assertIn(result, self.generic.personal.data['names']['male'])
def test_telephone(self):
result = self.generic.personal.telephone()
self.assertTrue(len(result) >= 11)
mask = '+5 (###)-###-##-##'
result2 = self.generic.personal.telephone(mask=mask)
head = result2.split(' ')[0]
self.assertEqual(head, '+5')
def test_surname(self):
diff_surnames = ('ru', 'is')
if self.generic.personal.locale in diff_surnames:
result = self.generic.personal.surname(gender='female')
self.assertIn(
result, self.generic.personal.data['surnames']['female'])
result = self.generic.personal.surname(gender='male')
self.assertIn(
result, self.generic.personal.data['surnames']['male'])
else:
result = self.generic.personal.surname()
self.assertIn(result, self.generic.personal.data['surnames'])
def test_full_name(self):
result = self.generic.personal.full_name(gender='female')
_result = result.split(' ')
self.assertIsInstance(_result, list)
self.assertIsNotNone(_result)
result = self.generic.personal.full_name(reverse=True)
self.assertIsNotNone(result)
def test_gender(self):
result = self.generic.personal.gender()
self.assertIn(result, self.generic.personal.data['gender'])
symbol = self.generic.personal.gender(symbol=True)
self.assertIn(symbol, common.GENDER_SYMBOLS)
def test_sexual_orientation(self):
result = self.generic.personal.sexual_orientation()
self.assertIn(result, self.generic.personal.data['sexuality'])
symbol = self.generic.personal.sexual_orientation(symbol=True)
self.assertIn(symbol, common.SEXUALITY_SYMBOLS)
def test_profession(self):
result = self.generic.personal.occupation()
self.assertIn(result, self.generic.personal.data['occupation'])
def test_university(self):
result = self.generic.personal.university()
self.assertIn(result, self.generic.personal.data['university'])
def test_academic_degree(self):
result = self.generic.personal.academic_degree()
self.assertIn(result, self.generic.personal.data['academic_degree'])
def test_language(self):
result = self.generic.personal.language()
self.assertIn(result, self.generic.personal.data['language'])
def test_worldview(self):
result = self.generic.personal.worldview()
self.assertIn(result, self.generic.personal.data['worldview'])
def test_views_on(self):
result = self.generic.personal.views_on()
self.assertIn(result, self.generic.personal.data['views_on'])
def test_political_views(self):
result = self.generic.personal.political_views()
self.assertIn(result, self.generic.personal.data['political_views'])
def test_title(self):
result = self.generic.personal.title(type_='typical')
self.assertIsInstance(result, str)
result2 = self.generic.personal.title(type_='academic')
self.assertIsInstance(result2, str)
def test_nationality(self):
result = self.generic.personal.nationality()
self.assertIsNotNone(result)
| 34.285714
| 99
| 0.649561
|
import re
from unittest import TestCase
from elizabeth import Personal
import elizabeth.core.interdata as common
from tests.test_data import DummyCase
from ._patterns import *
class PersonalBaseTest(TestCase):
def setUp(self):
self.personal = Personal()
def tearDown(self):
del self.personal
def test_str(self):
self.assertTrue(re.match(STR_REGEX, self.personal.__str__()))
def test_age(self):
result = self.personal.age(maximum=55)
self.assertTrue(result <= 55)
def test_age_store(self):
result = self.personal._store['age']
self.assertEqual(result, 0)
def test_age_update(self):
result = self.personal.age() - self.personal._store['age']
self.assertEqual(result, 0)
def test_child_count(self):
result = self.personal.child_count(max_childs=10)
self.assertTrue(result <= 10)
def test_work_experience(self):
result = self.personal.work_experience(working_start_age=0) - self.personal._store['age']
self.assertEqual(result, 0)
def test_work_experience_store(self):
result = self.personal.work_experience() - self.personal.work_experience()
self.assertEqual(result, 0)
def test_work_experience_extreme(self):
result = self.personal.work_experience(working_start_age=100000)
self.assertEqual(result, 0)
def test_paypal(self):
result = self.personal.paypal()
self.assertIsNotNone(result)
def test_password(self):
plain = self.personal.password(length=15)
self.assertEqual(len(plain), 15)
md5 = self.personal.password(algorithm='md5')
self.assertEqual(len(md5), 32)
sha1 = self.personal.password(algorithm='sha1')
self.assertEqual(len(sha1), 40)
sha256 = self.personal.password(algorithm='sha256')
self.assertEqual(len(sha256), 64)
sha512 = self.personal.password(algorithm='sha512')
self.assertEqual(len(sha512), 128)
with self.assertRaises(NotImplementedError):
self.personal.password(algorithm='sha42')
def test_username(self):
result = self.personal.username()
self.assertTrue(re.match(USERNAME_REGEX, result))
def test_email(self):
result = self.personal.email()
self.assertTrue(re.match(EMAIL_REGEX, result))
def test_bitcoin(self):
result = self.personal.bitcoin()
self.assertEqual(len(result), 34)
def test_cvv(self):
result = self.personal.cvv()
self.assertTrue((100 <= result) and (result <= 999))
def test_credit_card_number(self):
result = self.personal.credit_card_number()
self.assertTrue(re.match(CREDIT_CARD_REGEX, result))
result_mc = self.personal.credit_card_number(card_type='master_card')
self.assertTrue(re.match(CREDIT_CARD_REGEX, result_mc))
result_ax = self.personal.credit_card_number(card_type='amex')
self.assertTrue(re.match(CREDIT_CARD_REGEX, result_ax))
with self.assertRaises(NotImplementedError):
self.personal.credit_card_number(card_type="discover")
def test_expiration_date(self):
result = self.personal.credit_card_expiration_date(
minimum=16, maximum=25)
year = result.split('/')[1]
self.assertTrue((int(year) >= 16) and (int(year) <= 25))
def test_cid(self):
result = self.personal.cid()
self.assertTrue((1000 <= result) and (result <= 9999))
def test_height(self):
result = self.personal.height(minimum=1.60, maximum=1.90)
self.assertTrue(result.startswith('1'))
self.assertIsInstance(result, str)
def test_weight(self):
result = self.personal.weight(minimum=40, maximum=60)
self.assertTrue((result >= 40) and (result <= 60))
def test_blood_type(self):
result = self.personal.blood_type()
self.assertIn(result, common.BLOOD_GROUPS)
def test_favorite_movie(self):
result = self.personal.favorite_movie()
self.assertIn(result, self.personal.data['favorite_movie'])
def test_favorite_music_genre(self):
result = self.personal.favorite_music_genre()
self.assertIn(result, common.FAVORITE_MUSIC_GENRE)
def test_avatar(self):
result = self.personal.avatar(size=512)
img, size, *__ = result.split('/')[::-1]
self.assertEqual(int(size), 512)
self.assertEqual(32, len(img.split('.')[0]))
def test_identifier(self):
result = self.personal.identifier()
mask = '##-##/##'
self.assertEqual(len(mask), len(result))
result = self.personal.identifier(mask='##', suffix=True)
lst = result.split()
_id, sfx = lst[0], lst[1]
self.assertEqual(len(_id), 2)
self.assertEqual(len(sfx), 2)
result = self.personal.identifier(suffix=True)
suffix = result.split(' ')[1]
self.assertTrue(suffix.isalpha())
def test_level_of_english(self):
result = self.personal.level_of_english()
lvl_s = ['Beginner',
'Elementary',
'Pre - Intermediate',
'Intermediate',
'Upper Intermediate',
'Advanced',
'Proficiency'
]
self.assertIn(result, lvl_s)
class PersonalTestCase(DummyCase):
def test_name(self):
result = self.generic.personal.name(gender='female')
self.assertIn(result, self.generic.personal.data['names']['female'])
result = self.generic.personal.name(gender='male')
self.assertIn(result, self.generic.personal.data['names']['male'])
def test_telephone(self):
result = self.generic.personal.telephone()
self.assertTrue(len(result) >= 11)
mask = '+5 (###)-###-##-##'
result2 = self.generic.personal.telephone(mask=mask)
head = result2.split(' ')[0]
self.assertEqual(head, '+5')
def test_surname(self):
diff_surnames = ('ru', 'is')
if self.generic.personal.locale in diff_surnames:
result = self.generic.personal.surname(gender='female')
self.assertIn(
result, self.generic.personal.data['surnames']['female'])
result = self.generic.personal.surname(gender='male')
self.assertIn(
result, self.generic.personal.data['surnames']['male'])
else:
result = self.generic.personal.surname()
self.assertIn(result, self.generic.personal.data['surnames'])
def test_full_name(self):
result = self.generic.personal.full_name(gender='female')
_result = result.split(' ')
self.assertIsInstance(_result, list)
self.assertIsNotNone(_result)
result = self.generic.personal.full_name(reverse=True)
self.assertIsNotNone(result)
def test_gender(self):
result = self.generic.personal.gender()
self.assertIn(result, self.generic.personal.data['gender'])
symbol = self.generic.personal.gender(symbol=True)
self.assertIn(symbol, common.GENDER_SYMBOLS)
def test_sexual_orientation(self):
result = self.generic.personal.sexual_orientation()
self.assertIn(result, self.generic.personal.data['sexuality'])
symbol = self.generic.personal.sexual_orientation(symbol=True)
self.assertIn(symbol, common.SEXUALITY_SYMBOLS)
def test_profession(self):
result = self.generic.personal.occupation()
self.assertIn(result, self.generic.personal.data['occupation'])
def test_university(self):
result = self.generic.personal.university()
self.assertIn(result, self.generic.personal.data['university'])
def test_academic_degree(self):
result = self.generic.personal.academic_degree()
self.assertIn(result, self.generic.personal.data['academic_degree'])
def test_language(self):
result = self.generic.personal.language()
self.assertIn(result, self.generic.personal.data['language'])
def test_worldview(self):
result = self.generic.personal.worldview()
self.assertIn(result, self.generic.personal.data['worldview'])
def test_views_on(self):
result = self.generic.personal.views_on()
self.assertIn(result, self.generic.personal.data['views_on'])
def test_political_views(self):
result = self.generic.personal.political_views()
self.assertIn(result, self.generic.personal.data['political_views'])
def test_title(self):
result = self.generic.personal.title(type_='typical')
self.assertIsInstance(result, str)
result2 = self.generic.personal.title(type_='academic')
self.assertIsInstance(result2, str)
def test_nationality(self):
result = self.generic.personal.nationality()
self.assertIsNotNone(result)
| true
| true
|
1c3e7190f63670b6ac954e3ba27387d7957fe2ed
| 7,366
|
py
|
Python
|
pybind/nos/v7_1_0/hide_routemap_holder/route_map/content/set_/ipv6/next_vrf/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/hide_routemap_holder/route_map/content/set_/ipv6/next_vrf/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | null | null | null |
pybind/nos/v7_1_0/hide_routemap_holder/route_map/content/set_/ipv6/next_vrf/__init__.py
|
shivharis/pybind
|
4e1c6d54b9fd722ccec25546ba2413d79ce337e6
|
[
"Apache-2.0"
] | 1
|
2021-11-05T22:15:42.000Z
|
2021-11-05T22:15:42.000Z
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import next_vrf_list
class next_vrf(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-ip-policy - based on the path /hide-routemap-holder/route-map/content/set/ipv6/next-vrf. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__next_vrf_list',)
_yang_name = 'next-vrf'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__next_vrf_list = YANGDynClass(base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'hide-routemap-holder', u'route-map', u'content', u'set', u'ipv6', u'next-vrf']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'route-map', u'set', u'ipv6']
def _get_next_vrf_list(self):
"""
Getter method for next_vrf_list, mapped from YANG variable /hide_routemap_holder/route_map/content/set/ipv6/next_vrf/next_vrf_list (list)
"""
return self.__next_vrf_list
def _set_next_vrf_list(self, v, load=False):
"""
Setter method for next_vrf_list, mapped from YANG variable /hide_routemap_holder/route_map/content/set/ipv6/next_vrf/next_vrf_list (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_next_vrf_list is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_next_vrf_list() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_vrf_list must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""",
})
self.__next_vrf_list = t
if hasattr(self, '_set'):
self._set()
def _unset_next_vrf_list(self):
self.__next_vrf_list = YANGDynClass(base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
next_vrf_list = __builtin__.property(_get_next_vrf_list, _set_next_vrf_list)
_pyangbind_elements = {'next_vrf_list': next_vrf_list, }
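# Usage sketch (hypothetical; not part of the generated module). The list keys
# are 'vrf next-hop', so pyangbind expects a space-separated composite key:
#   nv = next_vrf()
#   nv.next_vrf_list.add("red 2001:db8::1")
#   nv.next_vrf_list.keys()  # -> the composite keys added so far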
| 59.403226
| 811
| 0.721423
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import next_vrf_list
class next_vrf(PybindBase):
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__next_vrf_list',)
_yang_name = 'next-vrf'
_rest_name = ''
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__next_vrf_list = YANGDynClass(base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'hide-routemap-holder', u'route-map', u'content', u'set', u'ipv6', u'next-vrf']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'route-map', u'set', u'ipv6']
def _get_next_vrf_list(self):
return self.__next_vrf_list
def _set_next_vrf_list(self, v, load=False):
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """next_vrf_list must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)""",
})
self.__next_vrf_list = t
if hasattr(self, '_set'):
self._set()
def _unset_next_vrf_list(self):
self.__next_vrf_list = YANGDynClass(base=YANGListType("vrf next_hop",next_vrf_list.next_vrf_list, yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='vrf next-hop', extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}), is_container='list', yang_name="next-vrf-list", rest_name="next-vrf-list", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'pbripv6vrf-cp', u'cli-drop-node-name': None, u'cli-suppress-mode': None}}, namespace='urn:brocade.com:mgmt:brocade-ip-policy', defining_module='brocade-ip-policy', yang_type='list', is_config=True)
next_vrf_list = __builtin__.property(_get_next_vrf_list, _set_next_vrf_list)
_pyangbind_elements = {'next_vrf_list': next_vrf_list, }
| true
| true
|
1c3e71a10ea7e6e5d942f2a0f2035b00368692e9
| 925
|
py
|
Python
|
setup.py
|
RollingStar/getnative
|
7c30c882a37b07e54daa7bbddd9de63794d436ef
|
[
"MIT"
] | null | null | null |
setup.py
|
RollingStar/getnative
|
7c30c882a37b07e54daa7bbddd9de63794d436ef
|
[
"MIT"
] | null | null | null |
setup.py
|
RollingStar/getnative
|
7c30c882a37b07e54daa7bbddd9de63794d436ef
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from setuptools import setup, find_packages
with open("README.md") as fh:
long_description = fh.read()
with open("requirements.txt") as fh:
install_requires = fh.read()
setup(
name="getnative",
version='2.2.0',
description='Find the native resolution(s) of upscaled material (mostly anime)',
long_description=long_description,
long_description_content_type="text/markdown",
author='Infi, Kageru',
author_email='infiziert@protonmail.ch, kageru@encode.moe',
url='https://github.com/Infiziert90/getnative',
install_requires=install_requires,
python_requires='>=3.6',
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
'console_scripts': ['getnative=getnative.app:main'],
}
)
| 28.90625
| 84
| 0.676757
|
from setuptools import setup, find_packages
with open("README.md") as fh:
long_description = fh.read()
with open("requirements.txt") as fh:
install_requires = fh.read()
setup(
name="getnative",
version='2.2.0',
description='Find the native resolution(s) of upscaled material (mostly anime)',
long_description=long_description,
long_description_content_type="text/markdown",
author='Infi, Kageru',
author_email='infiziert@protonmail.ch, kageru@encode.moe',
url='https://github.com/Infiziert90/getnative',
install_requires=install_requires,
python_requires='>=3.6',
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={
'console_scripts': ['getnative=getnative.app:main'],
}
)
| true
| true
|
1c3e74c023dfd36517727b4164044dbac99e87a4
| 983
|
py
|
Python
|
MUNDO 3/ex113.py
|
athavus/Curso-em-video-Python-3
|
a32be95adbccfcbe512a1ed30d3859141a230b5e
|
[
"MIT"
] | 1
|
2020-11-12T14:03:32.000Z
|
2020-11-12T14:03:32.000Z
|
MUNDO 3/ex113.py
|
athavus/Curso-em-video-Python-3
|
a32be95adbccfcbe512a1ed30d3859141a230b5e
|
[
"MIT"
] | null | null | null |
MUNDO 3/ex113.py
|
athavus/Curso-em-video-Python-3
|
a32be95adbccfcbe512a1ed30d3859141a230b5e
|
[
"MIT"
] | 1
|
2021-01-05T22:18:46.000Z
|
2021-01-05T22:18:46.000Z
|
def leiaInt(msg):
while True:
try:
num = int(input(msg))
except (ValueError, TypeError):
            print('\033[1;31mERRO! Por favor digite um número inteiro válido.\033[m')
continue
except (KeyboardInterrupt):
print('\033[1;31mO Usuário resolveu não digitar esse número\033[m')
return 0
else:
return num
def leiaFloat(msg):
while True:
try:
num = float(input(msg))
except (ValueError, TypeError):
            print('\033[1;31mERRO! Por favor digite um número real válido.\033[m')
continue
except (KeyboardInterrupt):
print('\033[1;31mO Usuário resolveu não digitar esse número\033[m')
return 0
else:
return num
a = leiaInt('Digite um número inteiro: ')
b = leiaFloat('Digite um número real: ')
print(f'O valor inteiro digitado foi {a} e o real foi {b}')
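# Hypothetical session sketch:
#   Digite um número inteiro: abc   -> prints the error message and asks again
#   Digite um número inteiro: 7
#   Digite um número real: 2.5
#   O valor inteiro digitado foi 7 e o real foi 2.5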
| 30.71875
| 86
| 0.552391
|
def leiaInt(msg):
while True:
try:
num = int(input(msg))
except (ValueError, TypeError):
            print('\033[1;31mERRO! Por favor digite um número inteiro válido.\033[m')
continue
except (KeyboardInterrupt):
print('\033[1;31mO Usuário resolveu não digitar esse número\033[m')
return 0
else:
return num
def leiaFloat(msg):
while True:
try:
num = float(input(msg))
except (ValueError, TypeError):
            print('\033[1;31mERRO! Por favor digite um número real válido.\033[m')
continue
except (KeyboardInterrupt):
print('\033[1;31mO Usuário resolveu não digitar esse número\033[m')
return 0
else:
return num
a = leiaInt('Digite um número inteiro: ')
b = leiaFloat('Digite um número real: ')
print(f'O valor inteiro digitado foi {a} e o real foi {b}')
| true
| true
|
1c3e74f1e2e6e4618e63c5d6c908d723ae6b7034
| 2,572
|
py
|
Python
|
game/node.py
|
HexDecimal/7drl-2022
|
755949875cc11e288908eccaee102c7ca0e43777
|
[
"CC0-1.0"
] | null | null | null |
game/node.py
|
HexDecimal/7drl-2022
|
755949875cc11e288908eccaee102c7ca0e43777
|
[
"CC0-1.0"
] | null | null | null |
game/node.py
|
HexDecimal/7drl-2022
|
755949875cc11e288908eccaee102c7ca0e43777
|
[
"CC0-1.0"
] | null | null | null |
from __future__ import annotations
import logging
from typing import Any, Iterator, Optional, Set, Type, TypeVar
TNode = TypeVar("TNode", bound="Node")
logger = logging.getLogger(__name__)
class Node:
"""A mixin that allows instances to be organzied into a scene graph."""
def __init__(self, *, parent: Optional[Node] = None) -> None:
super().__init__()
self._parent: Optional[Node] = None
self._children: Set[Any] = set()
if parent is not None:
self.parent = parent
@property
def parent(self) -> Optional[Node]:
return self._parent
@parent.setter
def parent(self, new_parent: Optional[Node]) -> None:
assert hasattr(self, "_parent"), f"Make sure that subclasses of Node call super().__init__()\n{self!r}"
if self._parent is new_parent:
logger.debug("%r is already assigned to %r", self, new_parent)
return
if self._parent is not None:
if new_parent is None:
logger.debug("Removing %r from %r", self, self._parent)
else:
logger.debug("Moving %r from %r to %r", self, self._parent, new_parent)
# Remove self from the current parent.
self._parent._children.remove(self)
self._parent = None
else:
logger.debug("Added %r to %r", self, new_parent)
if new_parent is not None:
# Add self to new_parent.
self._parent = new_parent
new_parent._children.add(self)
def get_parent(self, kind: Type[TNode]) -> TNode:
while True:
assert self._parent is not None
self = self._parent
if isinstance(self, kind):
return self
def try_get(self, kind: Type[TNode]) -> Optional[TNode]:
for n in self._children:
if isinstance(n, kind):
return n
return None
def __getitem__(self, kind: Type[TNode]) -> TNode:
for n in self._children:
if isinstance(n, kind):
return n
raise TypeError(f"This node has no {kind!r} instances.")
def __setitem__(self, kind: Type[TNode], node: Optional[TNode]) -> None:
self._children = {n for n in self._children if not isinstance(n, kind)}
if node is not None:
node.parent = self
def get_children(self, kind: Type[TNode]) -> Iterator[TNode]:
for n in self._children:
if isinstance(n, kind):
yield n
if __name__ == "__main__":
n = Node()
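    # Minimal sketch of the parent/child API, using only what Node itself defines:
    child = Node(parent=n)
    assert child.parent is n
    assert child in list(n.get_children(Node))
    child.parent = None  # detaches child; n stops tracking it
    assert n.try_get(Node) is None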
| 32.974359
| 111
| 0.586703
|
from __future__ import annotations
import logging
from typing import Any, Iterator, Optional, Set, Type, TypeVar
TNode = TypeVar("TNode", bound="Node")
logger = logging.getLogger(__name__)
class Node:
def __init__(self, *, parent: Optional[Node] = None) -> None:
super().__init__()
self._parent: Optional[Node] = None
self._children: Set[Any] = set()
if parent is not None:
self.parent = parent
@property
def parent(self) -> Optional[Node]:
return self._parent
@parent.setter
def parent(self, new_parent: Optional[Node]) -> None:
assert hasattr(self, "_parent"), f"Make sure that subclasses of Node call super().__init__()\n{self!r}"
if self._parent is new_parent:
logger.debug("%r is already assigned to %r", self, new_parent)
return
if self._parent is not None:
if new_parent is None:
logger.debug("Removing %r from %r", self, self._parent)
else:
logger.debug("Moving %r from %r to %r", self, self._parent, new_parent)
self._parent._children.remove(self)
self._parent = None
else:
logger.debug("Added %r to %r", self, new_parent)
if new_parent is not None:
self._parent = new_parent
new_parent._children.add(self)
def get_parent(self, kind: Type[TNode]) -> TNode:
while True:
assert self._parent is not None
self = self._parent
if isinstance(self, kind):
return self
def try_get(self, kind: Type[TNode]) -> Optional[TNode]:
for n in self._children:
if isinstance(n, kind):
return n
return None
def __getitem__(self, kind: Type[TNode]) -> TNode:
for n in self._children:
if isinstance(n, kind):
return n
raise TypeError(f"This node has no {kind!r} instances.")
def __setitem__(self, kind: Type[TNode], node: Optional[TNode]) -> None:
self._children = {n for n in self._children if not isinstance(n, kind)}
if node is not None:
node.parent = self
def get_children(self, kind: Type[TNode]) -> Iterator[TNode]:
for n in self._children:
if isinstance(n, kind):
yield n
if __name__ == "__main__":
n = Node()
| true
| true
|
1c3e750f020ac4a5d73c3160bb8c244aec04969a
| 543
|
py
|
Python
|
08_run_wordcount.py
|
azmikamis/apache-beam-wordcount
|
be21156a8f5c1ca9b50f28cffe608589e8ce5383
|
[
"MIT"
] | null | null | null |
08_run_wordcount.py
|
azmikamis/apache-beam-wordcount
|
be21156a8f5c1ca9b50f28cffe608589e8ce5383
|
[
"MIT"
] | null | null | null |
08_run_wordcount.py
|
azmikamis/apache-beam-wordcount
|
be21156a8f5c1ca9b50f28cffe608589e8ce5383
|
[
"MIT"
] | null | null | null |
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
from datetime import datetime
credentials = GoogleCredentials.get_application_default()
service = build('dataflow', 'v1b3', credentials=credentials)
request = service.projects().templates().launch(
projectId='PROJECT-ID',
gcsPath='gs://BUCKET-NAME/wordcount_template',
body={"jobName": "JOBNAME-USERNAME-" + datetime.strftime(datetime.now(),'%Y%m%d-%H%M%S%z')})
response = request.execute()
print(response)
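# Note: 'PROJECT-ID' and 'BUCKET-NAME' are placeholders for a real GCP project
# and bucket; the template object gs://BUCKET-NAME/wordcount_template must
# already be staged for the launch request to succeed.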
| 45.25
| 106
| 0.725599
|
from googleapiclient.discovery import build
from oauth2client.client import GoogleCredentials
from datetime import datetime
credentials = GoogleCredentials.get_application_default()
service = build('dataflow', 'v1b3', credentials=credentials)
request = service.projects().templates().launch(
projectId='PROJECT-ID',
gcsPath='gs://BUCKET-NAME/wordcount_template',
body={"jobName": "JOBNAME-USERNAME-" + datetime.strftime(datetime.now(),'%Y%m%d-%H%M%S%z')})
response = request.execute()
print(response)
| true
| true
|
1c3e760e5296e1d4b6cedd5acd92fda6198682cd
| 3,187
|
py
|
Python
|
Assignment02/Part01/Graph_udemy.py
|
saurabhkakade21/AIS_spring2021
|
784d20670794c405505b09c1feea36e0a504ae5d
|
[
"MIT"
] | null | null | null |
Assignment02/Part01/Graph_udemy.py
|
saurabhkakade21/AIS_spring2021
|
784d20670794c405505b09c1feea36e0a504ae5d
|
[
"MIT"
] | null | null | null |
Assignment02/Part01/Graph_udemy.py
|
saurabhkakade21/AIS_spring2021
|
784d20670794c405505b09c1feea36e0a504ae5d
|
[
"MIT"
] | null | null | null |
# Created by Elshad Karimov
# Copyright © 2021 AppMillers. All rights reserved.
class Graph:
def __init__(self, gdict=None):
if gdict is None:
gdict = {}
self.gdict = gdict
def addEdge(self, vertex, edge):
self.gdict[vertex].append(edge)
def bfs(self, vertex):
visited = [vertex]
queue = [vertex]
while queue:
            deVertex = queue.pop(0)
            print(deVertex)
            for adjacentVertex in self.gdict[deVertex]:
                if adjacentVertex not in visited:
                    visited.append(adjacentVertex)
                    queue.append(adjacentVertex)
def dfs(self, vertex):
visited = [vertex]
stack = [vertex]
while stack:
popVertex = stack.pop()
print(popVertex)
for adjacentVertex in self.gdict[popVertex]:
if adjacentVertex not in visited:
visited.append(adjacentVertex)
stack.append(adjacentVertex)
def loadData():
myData = open("30node.txt", "r").read().split("\n")
mySecondData = list()
myThirdData = list()
for i in range(len(myData)):
mySecondData.append(myData[i].split(","))
for j in range(len(mySecondData)):
myThirdData.append(myData[j].rsplit("'"))
# print(myThirdData[j][1]+" "+myThirdData[j][3])
myCustomDict = {}
secondList = list()
currSel = ''
for i in range(len(myThirdData)):
currSel = myThirdData[i][1]
for j in range(len(myThirdData)):
if(myThirdData[j][1] == currSel):
secondList.append(myThirdData[j][3])
myCustomDict[currSel] = secondList
# secondList = []
# print(myCustomDict)
# thirdList = []
# for i in range(len(myThirdData)):
# currSel = myThirdData[i][1]
# # l = list()
# # # print(myThirdData[i][4].split(", ["))
# # print(''.join(map(str,myThirdData[i][4])))
# # for i in range(len(myThirdData)):
# # l.append(myThirdData[i][4].split(","))
# # for j in range(len(mySecondData)):
# # myThirdData.append(myData[j].rsplit("'"))
# for j in range(len(myThirdData)):
# if(myThirdData[j][3] == currSel):
# thirdList.append(myThirdData[j][1])
# # myCustomDict[currSel].append(thirdList)
# for x in range(len(thirdList)):
# myCustomDict[currSel].append(thirdList[x])
# thirdList = []
# # print(myCustomDict)
# for key in myCustomDict.items():
# print(list(set(myCustomDict[key])))
# # myCustomDict[key] = sorted(list(set(myCustomDict[key])))
graph = myCustomDict
return graph
abc = loadData()
# customDict = { "a" : ["b","c"],
# "b" : ["a", "d", "e"],
# "c" : ["a", "e"],
# "d" : ["b", "e", "f"],
# "e" : ["d", "f", "c"],
# "f" : ["d", "e"]
# }
g = Graph(abc)
g.dfs("N")
| 23.962406
| 72
| 0.503608
|
class Graph:
def __init__(self, gdict=None):
if gdict is None:
gdict = {}
self.gdict = gdict
def addEdge(self, vertex, edge):
self.gdict[vertex].append(edge)
def bfs(self, vertex):
visited = [vertex]
queue = [vertex]
while queue:
            deVertex = queue.pop(0)
            print(deVertex)
            for adjacentVertex in self.gdict[deVertex]:
                if adjacentVertex not in visited:
                    visited.append(adjacentVertex)
                    queue.append(adjacentVertex)
def dfs(self, vertex):
visited = [vertex]
stack = [vertex]
while stack:
popVertex = stack.pop()
print(popVertex)
for adjacentVertex in self.gdict[popVertex]:
if adjacentVertex not in visited:
visited.append(adjacentVertex)
stack.append(adjacentVertex)
def loadData():
myData = open("30node.txt", "r").read().split("\n")
mySecondData = list()
myThirdData = list()
for i in range(len(myData)):
mySecondData.append(myData[i].split(","))
for j in range(len(mySecondData)):
myThirdData.append(myData[j].rsplit("'"))
# print(myThirdData[j][1]+" "+myThirdData[j][3])
myCustomDict = {}
secondList = list()
currSel = ''
for i in range(len(myThirdData)):
currSel = myThirdData[i][1]
for j in range(len(myThirdData)):
if(myThirdData[j][1] == currSel):
secondList.append(myThirdData[j][3])
myCustomDict[currSel] = secondList
# secondList = []
# print(myCustomDict)
# thirdList = []
# for i in range(len(myThirdData)):
# currSel = myThirdData[i][1]
# # l = list()
# # # print(myThirdData[i][4].split(", ["))
# # print(''.join(map(str,myThirdData[i][4])))
# # for i in range(len(myThirdData)):
# # l.append(myThirdData[i][4].split(","))
# # for j in range(len(mySecondData)):
# # myThirdData.append(myData[j].rsplit("'"))
g = Graph(abc)
g.dfs("N")
| true
| true
|
1c3e767bdefa9d1ae404e50bbf6bc102d64b8573
| 4,263
|
py
|
Python
|
adb-connect.py
|
remylavergne/ADB-Wi-Fi-Connect-GUI
|
de0a167534485a9ad1c172fe7e275f831e707e5f
|
[
"MIT"
] | 5
|
2020-03-21T00:15:13.000Z
|
2021-12-10T07:59:20.000Z
|
adb-connect.py
|
remylavergne/ADB-Wi-Fi-Connect-GUI
|
de0a167534485a9ad1c172fe7e275f831e707e5f
|
[
"MIT"
] | null | null | null |
adb-connect.py
|
remylavergne/ADB-Wi-Fi-Connect-GUI
|
de0a167534485a9ad1c172fe7e275f831e707e5f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import subprocess
import sys
import time
from PySide2.QtWidgets import (QLineEdit, QPushButton, QApplication, QDialog, QLabel, QGridLayout)
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle("ADB Wi-Fi Connect 0.2")
# Create widgets
self.edit = QLineEdit("192.168.236.197")
self.edit2 = QLineEdit("5555")
self.button = QPushButton("Connect device")
self.button2 = QPushButton("Disconnect device")
self.label = QLabel("Output:")
self.label2 = QLabel("")
# Create layout and add widgets
grid_layout = QGridLayout()
grid_layout.addWidget(QLabel('Device IP'), 0, 0)
grid_layout.addWidget(self.edit, 1, 0, 1, 1)
grid_layout.addWidget(QLabel('Port'), 0, 1, 1, 1)
grid_layout.addWidget(self.edit2, 1, 1)
# Buttons
grid_layout.addWidget(self.button, 2, 0)
grid_layout.addWidget(self.button2, 2, 1)
# Output // addWidget(*Widget, row, column, rowspan, colspan)
grid_layout.addWidget(self.label, 3, 0)
grid_layout.addWidget(self.label2, 4, 0, 1, 2)
# Set dialog layout
self.setLayout(grid_layout)
# Add button signal to greetings slot
self.button.clicked.connect(self.adb_connect)
self.button2.clicked.connect(self.disconnect)
self.attempts = 0
self.usb_plug_asked = False
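        # attempts counts consecutive tcpip retries before the user is asked
        # to replug via USB; usb_plug_asked remembers that prompt was shown.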
def adb_connect(self):
self.label2.setText('')
time.sleep(1)
try:
my_out = subprocess.Popen(f"adb connect {self.edit.text()}:{self.edit2.text()}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = my_out.communicate()
            # stderr was merged into stdout above, so one capture suffices
            output = str(stdout)
# Process outputs
self.process_outputs_messages(output)
        except Exception:
            # Popen with shell=True reports failures through its output rather
            # than raising CalledProcessError, so catch broadly and surface the
            # error in the UI.
            self.label2.setText('General fatal error. Please restart program.')
def process_outputs_messages(self, output):
if 'already' in output:
self.label2.setText('Already connected...')
return
if 'connected' in output:
if self.usb_plug_asked:
                self.label2.setText('Connected! You can unplug the USB cable.')
self.usb_plug_asked = False
else:
                self.label2.setText('Connected!')
return
if 'protocol fault' in output:
self.label2.setText('Check if device is turned on, please. And retry.')
if 'failed to connect' in output:
            print(f'\tFailed to connect to {self.edit.text()}')
self.kill_adb()
self.set_tcpip()
@staticmethod
def kill_adb():
# Kill ADB server
subprocess.Popen(f"adb kill-server",
shell=True)
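        # Give the adb daemon a moment to shut down; the next adb command
        # restarts it implicitly.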
time.sleep(1)
def set_tcpip(self):
self.attempts += 1
my_out = subprocess.Popen(f"adb tcpip {self.edit2.text()}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = my_out.communicate()
if self.attempts > 2:
self.label2.setText('Plug your phone to your computer via USB, please.\nAnd retry.')
self.attempts = 0
self.usb_plug_asked = True
return
if 'error: no devices/emulators found' in str(stdout):
            print('Attempting to reconnect device to adb')
self.adb_connect()
else:
print('Force tcpip reset')
self.set_tcpip()
def disconnect(self):
subprocess.Popen(f"adb disconnect {self.edit.text()}",
shell=True)
self.kill_adb()
self.label2.setText(f'Device {self.edit.text()}:{self.edit2.text()} has been disconnected.')
if __name__ == '__main__':
app = QApplication(sys.argv)
form = Form()
form.show()
sys.exit(app.exec_())
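# Assumed runtime environment (not checked by the script): the Android
# platform-tools "adb" binary available on PATH, and PySide2 installed
# (e.g. pip install PySide2).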
| 34.942623
| 100
| 0.571194
|
import subprocess
import sys
import time
from PySide2.QtWidgets import (QLineEdit, QPushButton, QApplication, QDialog, QLabel, QGridLayout)
class Form(QDialog):
def __init__(self, parent=None):
super(Form, self).__init__(parent)
self.setWindowTitle("ADB Wi-Fi Connect 0.2")
self.edit = QLineEdit("192.168.236.197")
self.edit2 = QLineEdit("5555")
self.button = QPushButton("Connect device")
self.button2 = QPushButton("Disconnect device")
self.label = QLabel("Output:")
self.label2 = QLabel("")
grid_layout = QGridLayout()
grid_layout.addWidget(QLabel('Device IP'), 0, 0)
grid_layout.addWidget(self.edit, 1, 0, 1, 1)
grid_layout.addWidget(QLabel('Port'), 0, 1, 1, 1)
grid_layout.addWidget(self.edit2, 1, 1)
grid_layout.addWidget(self.button, 2, 0)
grid_layout.addWidget(self.button2, 2, 1)
grid_layout.addWidget(self.label, 3, 0)
grid_layout.addWidget(self.label2, 4, 0, 1, 2)
self.setLayout(grid_layout)
self.button.clicked.connect(self.adb_connect)
self.button2.clicked.connect(self.disconnect)
self.attempts = 0
self.usb_plug_asked = False
def adb_connect(self):
self.label2.setText('')
time.sleep(1)
try:
my_out = subprocess.Popen(f"adb connect {self.edit.text()}:{self.edit2.text()}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = my_out.communicate()
output = str(stdout)
self.process_outputs_messages(output)
        except Exception:
            self.label2.setText('General fatal error. Please restart program.')
def process_outputs_messages(self, output):
if 'already' in output:
self.label2.setText('Already connected...')
return
if 'connected' in output:
if self.usb_plug_asked:
                self.label2.setText('Connected! You can unplug the USB cable.')
self.usb_plug_asked = False
else:
                self.label2.setText('Connected!')
return
if 'protocol fault' in output:
self.label2.setText('Check if device is turned on, please. And retry.')
if 'failed to connect' in output:
            print(f'\tFailed to connect to {self.edit.text()}')
self.kill_adb()
self.set_tcpip()
@staticmethod
def kill_adb():
subprocess.Popen(f"adb kill-server",
shell=True)
time.sleep(1)
def set_tcpip(self):
self.attempts += 1
my_out = subprocess.Popen(f"adb tcpip {self.edit2.text()}",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = my_out.communicate()
if self.attempts > 2:
self.label2.setText('Plug your phone to your computer via USB, please.\nAnd retry.')
self.attempts = 0
self.usb_plug_asked = True
return
if 'error: no devices/emulators found' in str(stdout):
            print('Attempting to reconnect device to adb')
self.adb_connect()
else:
print('Force tcpip reset')
self.set_tcpip()
def disconnect(self):
subprocess.Popen(f"adb disconnect {self.edit.text()}",
shell=True)
self.kill_adb()
self.label2.setText(f'Device {self.edit.text()}:{self.edit2.text()} has been disconnected.')
if __name__ == '__main__':
app = QApplication(sys.argv)
form = Form()
form.show()
sys.exit(app.exec_())
| true
| true
|