hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
64cf50d899f6068054dc86195c2c83bd7b97cd7a | 437 | py | Python | bball_intel/bball_intel/wsgi.py | ltiao/basketball-intelligence | c39a2794ca013a1bacd35ec2cbe778977c69834e | [
"MIT"
] | 2 | 2015-11-03T22:58:48.000Z | 2020-11-10T22:29:11.000Z | bball_intel/bball_intel/wsgi.py | ltiao/basketball-intelligence | c39a2794ca013a1bacd35ec2cbe778977c69834e | [
"MIT"
] | null | null | null | bball_intel/bball_intel/wsgi.py | ltiao/basketball-intelligence | c39a2794ca013a1bacd35ec2cbe778977c69834e | [
"MIT"
] | null | null | null | """
WSGI config for bball_intel project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
import os

# DJANGO_SETTINGS_MODULE must be configured *before* Django is imported,
# because get_wsgi_application() reads it at import time.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bball_intel.settings.base")

from django.core.wsgi import get_wsgi_application
from dj_static import Cling

# Wrap the WSGI callable with Cling so static files are served by the app
# process itself (typical Heroku-style deployment, no separate file server).
application = Cling(get_wsgi_application())
| 27.3125 | 78 | 0.796339 |
328b4849bc82c442f507edd85da54db85153e25c | 5,095 | py | Python | web/app/lib/ee/serializer.py | geary/claslite | 83c49cf98eceae633034b82d35ed7991d943b857 | [
"Unlicense"
] | null | null | null | web/app/lib/ee/serializer.py | geary/claslite | 83c49cf98eceae633034b82d35ed7991d943b857 | [
"Unlicense"
] | null | null | null | web/app/lib/ee/serializer.py | geary/claslite | 83c49cf98eceae633034b82d35ed7991d943b857 | [
"Unlicense"
] | null | null | null | """A serializer that encodes EE object trees as JSON DAGs."""
# Using lowercase function naming to match the JavaScript names.
# pylint: disable=g-bad-name
import datetime
import json
import math
import md5
import numbers
import ee_exception
import encodable
# The datetime for the beginning of the Unix epoch.
_EPOCH_DATETIME = datetime.datetime.utcfromtimestamp(0)
class Serializer(object):
  """A serializer for EE object trees.

  Encodes a tree of EE objects into the EE API v2 JSON (DAG) format. When
  ``is_compound`` is true, shared subtrees are factored out into a scope list
  so each distinct value is encoded once and referenced thereafter.

  NOTE(review): this is Python-2-only code — it relies on ``basestring``,
  ``dict.iteritems()`` and the removed ``md5`` module.
  """

  def __init__(self, is_compound=True):
    """Constructs a serializer.

    Args:
      is_compound: Whether the encoding should factor out shared subtrees.
    """
    # Whether the encoding should factor out shared subtrees.
    self._is_compound = bool(is_compound)

    # A list of shared subtrees as [name, value] pairs.
    self._scope = []

    # A lookup table from object hash to subtree names as stored in self._scope
    self._encoded = {}

    # A lookup table from object ID as retrieved by id() to md5 hash values.
    # Keyed by id() so unhashable objects (lists, dicts) can still be cached.
    self._hashcache = {}

  def _encode(self, obj):
    """Encodes a top level object in the EE API v2 (DAG) format.

    Args:
      obj: The object to encode.

    Returns:
      An encoded object ready for JSON serialization.
    """
    value = self._encodeValue(obj)
    if self._is_compound:
      if (isinstance(value, dict) and
          value['type'] == 'ValueRef' and
          len(self._scope) == 1):
        # Just one value. No need for complex structure.
        value = self._scope[0][1]
      else:
        # Wrap the scopes and final value with a CompoundValue.
        value = {
            'type': 'CompoundValue',
            'scope': self._scope,
            'value': value
        }
      # Clear state in case of future encoding.
      self._scope = []
      self._encoded = {}
      self._hashcache = {}
    return value

  def _encodeValue(self, obj):
    """Encodes a subtree as a Value in the EE API v2 (DAG) format.

    If _is_compound is True, this will fill the _scope and _encoded properties.

    Args:
      obj: The object to encode.

    Returns:
      An encoded object.
    """
    obj_id = id(obj)
    hashval = self._hashcache.get(obj_id)
    encoded = self._encoded.get(hashval, None)
    if self._is_compound and encoded:
      # Already encoded objects are encoded as ValueRefs and returned directly.
      return {
          'type': 'ValueRef',
          'value': encoded
      }
    elif obj is None or isinstance(obj, (bool, numbers.Number, basestring)):
      # Primitives are encoded as is and not saved in the scope.
      return obj
    elif isinstance(obj, datetime.datetime):
      # Dates are encoded as typed UTC microseconds since the Unix epoch.
      # They are returned directly and not saved in the scope either.
      td = (obj - _EPOCH_DATETIME)
      microseconds = td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6
      return {
          'type': 'Date',
          'value': math.floor(microseconds)
      }
    elif isinstance(obj, encodable.Encodable):
      # Some objects know how to encode themselves.
      result = obj.encode(self._encodeValue)
      if not isinstance(result, dict) or result['type'] == 'ArgumentRef':
        # Optimization: simple enough that adding it to the scope is probably
        # not worth it.
        return result
      # Otherwise fall through so the encoded dict is saved in the scope.
    elif isinstance(obj, (list, tuple)):
      # Lists are encoded recursively.
      result = [self._encodeValue(i) for i in obj]
    elif isinstance(obj, dict):
      # Dictionary are encoded recursively and wrapped in a type specifier.
      result = {
          'type': 'Dictionary',
          'value': dict([(key, self._encodeValue(value))
                         for key, value in obj.iteritems()])
      }
    else:
      raise ee_exception.EEException('Can\'t encode object: %s' % obj)

    if self._is_compound:
      # Save the new object and return a ValueRef. Hashing the serialized
      # JSON deduplicates structurally-identical subtrees.
      hashval = md5.new(json.dumps(result)).digest()
      self._hashcache[obj_id] = hashval
      name = self._encoded.get(hashval, None)
      if not name:
        name = str(len(self._scope))
        self._scope.append((name, result))
        self._encoded[hashval] = name
      return {
          'type': 'ValueRef',
          'value': name
      }
    else:
      return result
def encode(obj):
  """Serialize an object to a JSON-compatible structure for API calls.

  Args:
    obj: The object to serialize.

  Returns:
    A JSON-compatible structure representing the input.
  """
  # Compound encoding factors shared subtrees out into a scope.
  return Serializer(is_compound=True)._encode(obj)  # pylint: disable=protected-access
def toJSON(obj, opt_pretty=False):
  """Serialize an object to a JSON string appropriate for API calls.

  Args:
    obj: The object to serialize.
    opt_pretty: True to pretty-print the object.

  Returns:
    A JSON string representing the input.
  """
  # Pretty output disables compound (DAG) encoding so the result stays
  # human-readable.
  dag = Serializer(is_compound=not opt_pretty)._encode(obj)  # pylint: disable=protected-access
  indent = 2 if opt_pretty else None
  return json.dumps(dag, indent=indent)
def toReadableJSON(obj):
  """Convert an object to readable (pretty-printed) JSON."""
  return toJSON(obj, opt_pretty=True)
| 29.450867 | 79 | 0.646712 |
4957600b4049ff59dc9833fce02beaa0c28a287b | 687 | py | Python | pyomni/webdav/__init__.py | taxpon/pyomni | 74e256b6fc9cf0f4bcfc575bb8ed702f573a967f | [
"MIT"
] | 12 | 2015-12-20T13:13:22.000Z | 2022-01-29T05:32:35.000Z | pyomni/webdav/__init__.py | taxpon/pyomni | 74e256b6fc9cf0f4bcfc575bb8ed702f573a967f | [
"MIT"
] | 1 | 2020-09-15T15:59:07.000Z | 2020-09-15T15:59:07.000Z | pyomni/webdav/__init__.py | taxpon/pyomni | 74e256b6fc9cf0f4bcfc575bb8ed702f573a967f | [
"MIT"
] | 2 | 2016-04-20T15:59:57.000Z | 2019-06-16T12:42:10.000Z | # Copyright 2008 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import

# Presumably a Subversion keyword ($LastChangedRevision$) expanded at commit
# time rather than a semantic version — TODO confirm against the VCS setup.
__version__ = "$LastChangedRevision$"
| 40.411765 | 75 | 0.754003 |
db17620aec64a26fc807d96571480b50551fa2f8 | 5,212 | py | Python | itr2-112a.py | gakudva/itr2 | 3352a36a1ca53fb15b1589423313a3e829e38c73 | [
"MIT"
] | null | null | null | itr2-112a.py | gakudva/itr2 | 3352a36a1ca53fb15b1589423313a3e829e38c73 | [
"MIT"
] | null | null | null | itr2-112a.py | gakudva/itr2 | 3352a36a1ca53fb15b1589423313a3e829e38c73 | [
"MIT"
] | null | null | null | import pytest
from selenium import webdriver
from selenium.webdriver.support.select import Select
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.select import Select
from time import sleep
import sys
import csv
def test_automate_112a():
    """Automate entry of Schedule 112A transactions on the Indian income-tax
    e-filing portal.

    Logs in (password + Aadhaar OTP, with pauses for manual input), navigates
    to the 112A schedule of the current assessment year's online ITR, and adds
    one transaction per row of the local ``112a.csv`` file.

    NOTE(review): "USERNAME" / "PASSWORD" are placeholder literals and must be
    replaced (ideally read from the environment, not hard-coded). The fixed
    ``sleep()`` delays assume a responsive portal; explicit WebDriverWaits
    would be more robust. Every CSV row is processed — if the file has a
    header row it will be entered as data.
    """
    chrome_driver = webdriver.Chrome()
    chrome_driver.get('https://eportal.incometax.gov.in/iec/foservices/#/login')
    sleep(15)  # wait for the single-page app to finish loading
    #Enter User ID
    chrome_driver.find_element_by_id("panAdhaarUserId").click()
    chrome_driver.find_element_by_id("panAdhaarUserId").send_keys("USERNAME")
    chrome_driver.find_element_by_id("panAdhaarUserId").send_keys(Keys.ENTER)
    sleep(5)
    #select the Checkbox "Please confirm your secure access message"
    chrome_driver.find_element_by_css_selector(".mat-checkbox-inner-container").click()
    #select Password field, and wait for user input to be supplied by 15 seconds and then press enter
    chrome_driver.find_element_by_id("loginPasswordField").click()
    sleep(15)
    chrome_driver.find_element_by_id("loginPasswordField").send_keys("PASSWORD")
    chrome_driver.find_element_by_id("loginPasswordField").send_keys(Keys.ENTER)
    sleep(5)
    #Click on generate OTP, press Continue
    chrome_driver.find_element_by_css_selector("#mat-radio-3 .mat-radio-label-content").click()
    chrome_driver.find_element_by_css_selector(".large-button-primary > .marRight4").click()
    #agree to use Aadhar, and click on requesting OTP
    chrome_driver.find_element_by_css_selector(".mat-checkbox-inner-container").click()
    chrome_driver.find_element_by_css_selector(".large-button-primary").click()
    #insert OTP -> manual input (operator types the OTP during this pause)
    sleep(25)
    chrome_driver.find_element_by_css_selector(".large-button-primary").click()
    sleep(10)
    #click on "File Now"
    chrome_driver.find_element_by_css_selector(".defaultButton").click()
    sleep(5)
    #Select Assessment year as 2021-2022
    chrome_driver.find_element_by_css_selector("#filterStyleForChip\\ myPanelClassItr .mat-select-value").click()
    chrome_driver.find_element_by_xpath("//span[contains(text(),'(Current A.Y.)')]").click()
    #click on "Select mode of filing as online"
    chrome_driver.find_element_by_css_selector("#mat-radio-5 .A-Gross-Total-Income").click()
    #click on Continue
    chrome_driver.find_element_by_css_selector(".large-button-primary > span:nth-child(1)").click()
    sleep(3)
    #click on Resume Filing
    chrome_driver.find_element_by_css_selector(".col-md-3:nth-child(4) .primaryButton").click()
    sleep(3)
    #click on Continue
    chrome_driver.find_element_by_css_selector("#uniservenxtcmp_button_152 > span").click()
    sleep(3)
    #click on Continue on Note pop-up
    chrome_driver.find_element_by_id("uniservenxtcmp_button_373").click()
    #click "Skip the questions" on proceed to scheduled questions
    chrome_driver.find_element_by_id("uniservenxtcmp_hyper_12").click()
    sleep(3)
    #click OK on pop-up box
    chrome_driver.find_element_by_id("uniservenxtcmp_button_246").click()
    #click on 112A
    chrome_driver.find_element_by_css_selector("#uniservenxtcmp_list_111 .bluebrbg").click()
    sleep(3)
    #open CSV vile -- one transaction per row; column 0 is skipped
    # (presumably a serial number — TODO confirm against the CSV layout)
    with open('112a.csv', mode='r')as file:
        csvFile = csv.reader(file)
        for lines in csvFile:
            #click +Add Another button
            chrome_driver.find_element_by_id("uniservenxtcmp_button_23").click()
            sleep(3)
            #Add a transaction: pick the second dropdown option, then fill the
            #seven numeric fields from CSV columns 1-7.
            chrome_driver.find_element_by_id("select2-uniservenxtcmp_dropdown_41-container").click()
            chrome_driver.find_element_by_xpath("//body[1]/span[1]/span[1]/span[2]/ul[1]/li[2]").click()
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_10").clear()
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_10").send_keys(lines[1])
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_11").clear()
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_11").send_keys(lines[2])
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_12").clear()
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_12").send_keys(lines[3])
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_13").clear()
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_13").send_keys(lines[4])
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_14").clear()
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_14").send_keys(lines[5])
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_15").clear()
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_15").send_keys(lines[6])
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_17").clear()
            chrome_driver.find_element_by_id("uniservenxtcmp_textbox_17").send_keys(lines[7])
            chrome_driver.find_element_by_id("uniservenxtcmp_button_19").click()
            sleep(3)
        #end loop
    sys.stderr.write("Success")
    #wait for manual save, confirm and logoff
    sleep(250)
    chrome_driver.close()
| 44.931034 | 113 | 0.739447 |
7eaca17c6ffbffcabf59b7ac489438a11b7d5c70 | 8,788 | py | Python | matflow_neper/utils.py | LightForm-group/matflow-neper | 58bf27ae956d9d95ad6c475c12be4ada7ea64177 | [
"MIT"
] | null | null | null | matflow_neper/utils.py | LightForm-group/matflow-neper | 58bf27ae956d9d95ad6c475c12be4ada7ea64177 | [
"MIT"
] | null | null | null | matflow_neper/utils.py | LightForm-group/matflow-neper | 58bf27ae956d9d95ad6c475c12be4ada7ea64177 | [
"MIT"
] | 1 | 2022-02-07T08:44:26.000Z | 2022-02-07T08:44:26.000Z | import re
import copy
from pathlib import Path
import numpy as np
from matflow_neper.from_damask_parse import validate_volume_element
def read_neper_tess_block(path, block_name):
    """Read a block from a Neper-compatible .tess or .tesr tessellation file.

    Parameters
    ----------
    path : Path or str
        Path to the .tesr or .tess file.
    block_name : str
        Name of the data block to read, without its leading asterisks (e.g.
        'format', 'general', 'data').

    Returns
    -------
    block_str : str or list of str
        The stripped lines of the requested block, or an empty string if the
        block is absent (or not terminated by a following '*' marker).

    References
    ----------
    [1] Neper Reference Manual, Romain Quey, 24 November 2020

    """
    with Path(path).open() as handle:
        file_contents = handle.read()

    # Return empty string if block does not exist:
    if f'*{block_name}' not in file_contents:
        return ''

    # Capture everything between the block's '*<name>' marker and the start of
    # the next block ('*'). `re.escape` guards against block names containing
    # regex metacharacters.
    pattern = r'(?:\*{}\s)([\s\S]+?)(?:\*)'.format(re.escape(block_name))
    match = re.search(pattern, file_contents)
    if match is None:
        # The marker text exists, but no terminating '*' follows (e.g. a
        # truncated file). Previously this raised AttributeError on None.
        return ''

    block_lines = match.groups()[0].strip().split('\n')
    return block_lines
def read_neper_tesr(tesr_path):
    """Read a Neper-compatible raster tessellation file (.tesr).

    Parameters
    ----------
    tesr_path : Path or str
        Path to the .tesr file.

    Returns
    -------
    tesr_data : dict
        Dict representing the raster tessellation. Always contains 'format'
        and 'general'; optional block names found in the file are added as
        further keys ('data' as an [x, y, z]-indexed ndarray, 'ori' as a dict,
        'cell' renamed to 'number_of_cells', others as raw line lists).

    References
    ----------
    [1] Neper Reference Manual, Romain Quey, 24 November 2020

    """
    # Parse mandatory parameters:
    tesr_format_raw = read_neper_tess_block(tesr_path, 'format')[0].strip().split()
    tesr_general_raw = read_neper_tess_block(tesr_path, 'general')
    tesr_format = {
        'format': float(tesr_format_raw[0]),
        'data_format': tesr_format_raw[1],
    }
    tesr_general = {
        'dimension': int(tesr_general_raw[0]),
        'size': [int(i) for i in tesr_general_raw[1].strip().split()],
        'voxel_size': [float(i) for i in tesr_general_raw[2].strip().split()],
    }
    tesr_data = {
        'format': tesr_format,
        'general': tesr_general,
    }
    # Parse optional parameters:
    OPTIONAL_BLOCK_NAMES = [
        'origin',
        'cell',
        'id',
        'seed',
        'ori',
        'oridist',
        'coo',
        'vol',
        'convexity',
        'crysym',
        'data',
        'oridata',
    ]
    for opt_name in OPTIONAL_BLOCK_NAMES:
        block_lines = read_neper_tess_block(tesr_path, opt_name)
        if not block_lines:
            continue
        if opt_name == 'data':
            # New lines have no significance:
            block = ' '.join(block_lines)
            block = np.array([int(i) for i in block.split()])
            # Reshape to the reversed grid size and swap axes 0/2 so the
            # returned array is indexed [x, y, z].
            block = np.swapaxes(
                block.reshape(tesr_data['general']['size'][::-1]),
                0,
                2,
            )
        elif opt_name == 'ori':
            # First line is descriptor:
            ori_descriptor = block_lines[0]
            oris = np.array([[float(j) for j in i.split()] for i in block_lines[1:]])
            block = {
                'orientations': oris,
                'descriptor': ori_descriptor,
            }
        elif opt_name == 'cell':
            # Note if we `-transform addbuffer`, number of cells does not increment
            opt_name = 'number_of_cells'
            block = int(block_lines[0])
        else:
            # TODO: add custom parsing for other block names here
            block = block_lines
        tesr_data.update({opt_name: block})
    return tesr_data
def tesr_to_volume_element(tesr_path, phase_labels, homog_label, orientations=None):
    """Read a Neper-compatible raster tessellation file (.tesr) and parse it to a
    volume element.

    Parameters
    ----------
    tesr_path : Path or str
        Path to the .tesr file.
    phase_labels : list or ndarray of str
        List of phase labels to associate with the constituents. The first list element is
        the phase label that will be associated with all of the geometrical elements
        for which an orientation is also specified. Additional list elements are
        phase labels for geometrical elements for which no orientations are
        specified. If the `-transform addbuffer()` option has been used in the creation
        of the .tesr file, additional voxels will be added with index 0 (i.e. void voxels).
        These voxels are those with which an additional phase in `phase_labels` will be
        associated.
    homog_label : str
        The homogenization scheme label to use for all materials in the volume element.
    orientations : list or ndarray of shape (R, 3) of float, optional
        Euler angles to optionally use instead of those from the .tesr file.

    Returns
    -------
    volume_element : dict
        Validated volume element (see ``validate_volume_element``).

    """
    tesr_dat = read_neper_tesr(tesr_path)
    if orientations is not None:
        euler_angles = orientations
    else:
        if 'euler-bunge' not in tesr_dat['ori']['descriptor']:
            raise NotImplementedError('Requires euler-bunge Euler angles.')
        euler_angles = tesr_dat['ori']['orientations']
    elem_mat_idx = tesr_dat['data']
    # Set void voxels (0) to end:
    # (so after the shift below they become the highest material index,
    # matching the trailing entries of `phase_labels`)
    elem_mat_idx[elem_mat_idx == 0] = np.max(elem_mat_idx) + 1
    # Zero-index instead of one-index:
    elem_mat_idx -= 1
    volume_element = {
        'orientations': {
            'type': 'euler',
            'euler_angles': euler_angles,
            'unit_cell_alignment': {'y': 'b'},
        },
        'element_material_idx': elem_mat_idx,
        'grid_size': tesr_dat['general']['size'],
        'size': [1, 1, 1],
        'phase_labels': phase_labels,
        'homog_label': homog_label,
    }
    return validate_volume_element(volume_element)
def write_tesr(volume_element, tesr_path):
    """Write a Neper-compatbile .tesr file from a volume element representation.

    Parameters
    ----------
    volume_element : dict
        Dict that represents the specification of a volume element, with keys:
            element_material_idx : ndarray of shape equal to `grid_size` of int, optional
                Determines the material to which each geometric model element belongs.
            grid_size : ndarray of shape (3,) of int, optional
                Geometric model grid dimensions.
            origin : list of length three, optional
                Volume element origin. By default: [0, 0, 0].
    tesr_path : str or Path
        The path to the file that will be generated.

    Returns
    -------
    tesr_path : Path
        The path to the generated file.

    NOTE(review): the voxel size is computed as 1/grid_size, i.e. a unit-sized
    volume element is assumed; a `size` key in `volume_element` is NOT
    consulted — TODO confirm whether non-unit sizes should be supported.
    NOTE(review): voxel data values are written zero-indexed while
    `read_neper_tesr` shifts values down by one on read — verify round-trip
    behaviour (write then read) is as intended.
    """
    volume_element = validate_volume_element(volume_element)
    element_material_idx = volume_element['element_material_idx']
    dimension = 3
    ve_origin = volume_element.get('origin') or [0.0, 0.0, 0.0]
    grid_size = volume_element['grid_size']
    # Voxel size assuming a unit volume element (see NOTE in docstring):
    vox_size = [1 / i for i in grid_size]
    num_micros = np.max(element_material_idx) + 1  # element_material_idx is zero-indexed
    cell_idx_one_indexed = ' '.join([f'{i}' for i in range(1, num_micros + 1)])
    ori_descriptor = 'quaternion'
    oris_lines = [''.join([f'{j:>17.12f}' for j in i])
                  for i in volume_element['orientations']['quaternions']]
    # Swap axes 0/2 so voxels are emitted x-fastest, as Neper expects:
    data_flat = np.swapaxes(element_material_idx, 0, 2).reshape(-1)
    # Wrap the voxel data at 20 IDs per line:
    vox_id_per_line = 20
    num_lines = int(np.ceil(data_flat.size / vox_id_per_line))
    data_lines = []
    for line_index in range(num_lines):
        start_index = line_index * vox_id_per_line
        end_index = (line_index + 1) * vox_id_per_line
        sub_data = data_flat[start_index:end_index]
        data_lines.append(' '.join([f'{i}' for i in sub_data]))
    lines = [
        f'***tesr',
        f' **format',
        f'   2.0 ascii',
        f' **general',
        f'   {dimension}',
        f'   {grid_size[0]} {grid_size[1]} {grid_size[2]}',
        f'   {vox_size[0]:.12f} {vox_size[1]:.12f} {vox_size[2]:.12f}',
        f'  *origin',
        f'   {ve_origin[0]:.12f} {ve_origin[1]:.12f} {ve_origin[2]:.12f}',
        f'  *hasvoid 0',
        f' **cell',  # number of cells (i.e. number of grains)
        f'   {num_micros}',
        f'  *id',
        f'   {cell_idx_one_indexed}',
        f'  *ori',
        f'   {ori_descriptor}'
    ] + oris_lines + [
        f' **data',
    ] + data_lines + [
        f'***end',
    ]
    tesr_path = Path(tesr_path)
    with tesr_path.open('w') as handle:
        handle.write('\n'.join(lines))
    return tesr_path
| 32.072993 | 110 | 0.601274 |
5d432dcc7d51aedadee2498a4965081c2e543c3e | 2,343 | py | Python | tests/test_character.py | HoldenB/grunt-db | 4b8d43ee431e4788f8284449e4ea427b3c31f380 | [
"MIT"
] | null | null | null | tests/test_character.py | HoldenB/grunt-db | 4b8d43ee431e4788f8284449e4ea427b3c31f380 | [
"MIT"
] | null | null | null | tests/test_character.py | HoldenB/grunt-db | 4b8d43ee431e4788f8284449e4ea427b3c31f380 | [
"MIT"
] | null | null | null | import pytest
from grunt.db import get_db
def test_character_index(client, auth):
    """Index shows auth links when logged out and character data when logged in."""
    anon_page = client.get('/')
    for expected in (b"Log In", b"Register"):
        assert expected in anon_page.data

    auth.login()
    user_page = client.get('/')
    for expected in (b'Log Out', b'test title', b'by test on 2018-01-01',
                     b'test\nbody', b'href="/1/update"'):
        assert expected in user_page.data
@pytest.mark.parametrize('path', (
    '/create',
    '/1/update',
    '/1/delete',
))
def test_login_required(client, path):
    """Anonymous POSTs to protected views redirect to the login page."""
    redirect = client.post(path)
    assert redirect.headers['Location'] == 'http://localhost/auth/login'
def test_author_required(app, client, auth):
    """A user may only modify characters they own."""
    # Reassign the character to a different user.
    with app.app_context():
        database = get_db()
        database.execute('UPDATE character SET user_id = 2 WHERE id = 1')
        database.commit()

    auth.login()
    # Modification endpoints are forbidden for non-owners...
    for path in ('/1/update', '/1/delete'):
        assert client.post(path).status_code == 403
    # ...and the edit link is hidden on the index page.
    assert b'href="/1/update"' not in client.get('/').data
@pytest.mark.parametrize('path', (
    '/2/update',
    '/2/delete',
))
def test_exists_required(client, auth, path):
    """Requests for a character id that does not exist return 404."""
    auth.login()
    response = client.post(path)
    assert response.status_code == 404
def test_create(client, auth, app):
    """A logged-in user can create a character; the row count increases."""
    auth.login()
    assert client.get('/create').status_code == 200
    client.post('/create', data={'character_name': 'created'})

    with app.app_context():
        row_count = get_db().execute(
            'SELECT COUNT(id) FROM character').fetchone()[0]
        assert row_count == 2
def test_update(client, auth, app):
    """A logged-in owner can rename their character."""
    auth.login()
    assert client.get('/1/update').status_code == 200
    client.post('/1/update', data={'character_name': 'updated'})

    with app.app_context():
        row = get_db().execute(
            'SELECT * FROM character WHERE id = 1').fetchone()
        assert row['character_name'] == 'updated'
@pytest.mark.parametrize('path', (
    '/create',
    '/1/update',
))
def test_create_update_validate(client, auth, path):
    """Submitting an empty name re-renders the form with an error message."""
    auth.login()
    result = client.post(path, data={'character_name': ''})
    assert b'Character name is required.' in result.data
| 28.228916 | 81 | 0.650021 |
3313416662782580871e59e12e907eaa2d027134 | 63 | py | Python | semantic_release/__main__.py | Agilicus/python-semantic-release | 74ac2b041aeae98310c4e5f60c1783bd84ed12b4 | [
"MIT"
] | 445 | 2015-07-27T17:48:25.000Z | 2022-03-31T15:48:10.000Z | semantic_release/__main__.py | Agilicus/python-semantic-release | 74ac2b041aeae98310c4e5f60c1783bd84ed12b4 | [
"MIT"
] | 338 | 2015-07-27T18:44:52.000Z | 2022-03-31T11:38:53.000Z | semantic_release/__main__.py | Agilicus/python-semantic-release | 74ac2b041aeae98310c4e5f60c1783bd84ed12b4 | [
"MIT"
] | 168 | 2015-07-28T20:32:52.000Z | 2022-03-31T10:45:06.000Z | from .cli import entry
if __name__ == "__main__":
entry()
| 12.6 | 26 | 0.650794 |
92320ddd007a00f52d51f35a1eef58b0d51b2268 | 25,247 | py | Python | main_gui.py | muhammadalics/Image-Manager-and-Editor | 41eee91a44926cf35aec350fae0a0b41f674d10b | [
"MIT"
] | 2 | 2020-12-21T09:46:45.000Z | 2021-04-26T03:50:51.000Z | main_gui.py | muhammadalics/Image-Manager-and-Editor | 41eee91a44926cf35aec350fae0a0b41f674d10b | [
"MIT"
] | null | null | null | main_gui.py | muhammadalics/Image-Manager-and-Editor | 41eee91a44926cf35aec350fae0a0b41f674d10b | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_image_manager.ui'
#
# Created by: PyQt5 UI code generator 5.15.1
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    """UI definition for the image editor's main window.

    Generated by pyuic5 from ui_image_manager.ui (see the header warning):
    do not hand-edit the widget wiring -- regenerate from the .ui file
    instead. Icons come from the compiled Qt resource module ``icons_rc``
    imported at the bottom of this file.
    """

    def setupUi(self, MainWindow):
        """Build all widgets, menus, toolbar entries and actions on MainWindow."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1317, 764)
        icon = QtGui.QIcon()
        icon.addPixmap(QtGui.QPixmap(":/icon_/cam.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        MainWindow.setWindowIcon(icon)
        # Central widget: a single centered QLabel that displays the image.
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.label_imagebox = QtWidgets.QLabel(self.centralwidget)
        self.label_imagebox.setText("")
        self.label_imagebox.setAlignment(QtCore.Qt.AlignCenter)
        self.label_imagebox.setObjectName("label_imagebox")
        self.horizontalLayout.addWidget(self.label_imagebox)
        MainWindow.setCentralWidget(self.centralwidget)
        # Menu bar and its top-level menus (titles are set in retranslateUi).
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1317, 26))
        self.menubar.setObjectName("menubar")
        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName("menuFile")
        self.menuSize_and_Orientation = QtWidgets.QMenu(self.menubar)
        self.menuSize_and_Orientation.setObjectName("menuSize_and_Orientation")
        self.menuAdd_Noise = QtWidgets.QMenu(self.menubar)
        self.menuAdd_Noise.setObjectName("menuAdd_Noise")
        self.menuBlur = QtWidgets.QMenu(self.menubar)
        self.menuBlur.setObjectName("menuBlur")
        self.menuSpecial = QtWidgets.QMenu(self.menubar)
        self.menuSpecial.setObjectName("menuSpecial")
        self.menuColor_2 = QtWidgets.QMenu(self.menubar)
        self.menuColor_2.setObjectName("menuColor_2")
        # Nested submenu under Color for the channel-swap actions.
        self.menuSwap_color_channels = QtWidgets.QMenu(self.menuColor_2)
        self.menuSwap_color_channels.setObjectName("menuSwap_color_channels")
        self.menuBlending = QtWidgets.QMenu(self.menubar)
        self.menuBlending.setObjectName("menuBlending")
        self.menuEdit = QtWidgets.QMenu(self.menubar)
        self.menuEdit.setObjectName("menuEdit")
        MainWindow.setMenuBar(self.menubar)
        # Status bar and main toolbar.
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.toolBar = QtWidgets.QToolBar(MainWindow)
        self.toolBar.setIconSize(QtCore.QSize(25, 25))
        self.toolBar.setObjectName("toolBar")
        MainWindow.addToolBar(QtCore.Qt.TopToolBarArea, self.toolBar)
        # Actions: one QAction per editing operation, each with an icon from
        # the icons_rc resource file.
        self.actionNew = QtWidgets.QAction(MainWindow)
        self.actionNew.setObjectName("actionNew")
        self.actionOpen = QtWidgets.QAction(MainWindow)
        icon1 = QtGui.QIcon()
        icon1.addPixmap(QtGui.QPixmap(":/icon_/add.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionOpen.setIcon(icon1)
        self.actionOpen.setObjectName("actionOpen")
        self.actionSave = QtWidgets.QAction(MainWindow)
        icon2 = QtGui.QIcon()
        icon2.addPixmap(QtGui.QPixmap(":/icon_/save.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionSave.setIcon(icon2)
        self.actionSave.setObjectName("actionSave")
        self.actionExit = QtWidgets.QAction(MainWindow)
        self.actionExit.setObjectName("actionExit")
        self.actionResize = QtWidgets.QAction(MainWindow)
        icon3 = QtGui.QIcon()
        icon3.addPixmap(QtGui.QPixmap(":/icon_/resize.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionResize.setIcon(icon3)
        self.actionResize.setObjectName("actionResize")
        self.actionMirror = QtWidgets.QAction(MainWindow)
        icon4 = QtGui.QIcon()
        icon4.addPixmap(QtGui.QPixmap(":/icon_/mirror.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionMirror.setIcon(icon4)
        self.actionMirror.setObjectName("actionMirror")
        self.actionFlip_Upside_Down = QtWidgets.QAction(MainWindow)
        icon5 = QtGui.QIcon()
        icon5.addPixmap(QtGui.QPixmap(":/icon_/flip.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionFlip_Upside_Down.setIcon(icon5)
        self.actionFlip_Upside_Down.setObjectName("actionFlip_Upside_Down")
        self.actionRotate = QtWidgets.QAction(MainWindow)
        icon6 = QtGui.QIcon()
        icon6.addPixmap(QtGui.QPixmap(":/icon_/rotate.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionRotate.setIcon(icon6)
        self.actionRotate.setObjectName("actionRotate")
        self.actionGaussian = QtWidgets.QAction(MainWindow)
        icon7 = QtGui.QIcon()
        icon7.addPixmap(QtGui.QPixmap(":/icon_/gaussian noise.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionGaussian.setIcon(icon7)
        self.actionGaussian.setObjectName("actionGaussian")
        self.actionHorizontal_Bands = QtWidgets.QAction(MainWindow)
        icon8 = QtGui.QIcon()
        icon8.addPixmap(QtGui.QPixmap(":/icon_/horizontalnoise.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionHorizontal_Bands.setIcon(icon8)
        self.actionHorizontal_Bands.setObjectName("actionHorizontal_Bands")
        self.actionVertical_Bands = QtWidgets.QAction(MainWindow)
        icon9 = QtGui.QIcon()
        icon9.addPixmap(QtGui.QPixmap(":/icon_/verticalnoise.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionVertical_Bands.setIcon(icon9)
        self.actionVertical_Bands.setObjectName("actionVertical_Bands")
        self.actionSalt_and_Pepper = QtWidgets.QAction(MainWindow)
        icon10 = QtGui.QIcon()
        icon10.addPixmap(QtGui.QPixmap(":/icon_/salt-n-pepper.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionSalt_and_Pepper.setIcon(icon10)
        self.actionSalt_and_Pepper.setObjectName("actionSalt_and_Pepper")
        self.actionGaussian_2 = QtWidgets.QAction(MainWindow)
        icon11 = QtGui.QIcon()
        icon11.addPixmap(QtGui.QPixmap(":/icon_/blur-gaussian.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionGaussian_2.setIcon(icon11)
        self.actionGaussian_2.setObjectName("actionGaussian_2")
        self.actionMedian = QtWidgets.QAction(MainWindow)
        icon12 = QtGui.QIcon()
        icon12.addPixmap(QtGui.QPixmap(":/icon_/blur-median.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionMedian.setIcon(icon12)
        self.actionMedian.setObjectName("actionMedian")
        self.actionAverage = QtWidgets.QAction(MainWindow)
        icon13 = QtGui.QIcon()
        icon13.addPixmap(QtGui.QPixmap(":/icon_/blur-average.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionAverage.setIcon(icon13)
        self.actionAverage.setObjectName("actionAverage")
        self.actionPixelate = QtWidgets.QAction(MainWindow)
        icon14 = QtGui.QIcon()
        icon14.addPixmap(QtGui.QPixmap(":/icon_/pixelate.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionPixelate.setIcon(icon14)
        self.actionPixelate.setObjectName("actionPixelate")
        self.actionAdd_Border = QtWidgets.QAction(MainWindow)
        icon15 = QtGui.QIcon()
        icon15.addPixmap(QtGui.QPixmap(":/icon_/border.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionAdd_Border.setIcon(icon15)
        self.actionAdd_Border.setObjectName("actionAdd_Border")
        self.actionBlack_and_White = QtWidgets.QAction(MainWindow)
        icon16 = QtGui.QIcon()
        icon16.addPixmap(QtGui.QPixmap(":/icon_/bw.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionBlack_and_White.setIcon(icon16)
        self.actionBlack_and_White.setObjectName("actionBlack_and_White")
        self.actionExtract_Color = QtWidgets.QAction(MainWindow)
        icon17 = QtGui.QIcon()
        icon17.addPixmap(QtGui.QPixmap(":/icon_/extract_color.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionExtract_Color.setIcon(icon17)
        self.actionExtract_Color.setObjectName("actionExtract_Color")
        self.actionBrightness_Increase = QtWidgets.QAction(MainWindow)
        icon18 = QtGui.QIcon()
        icon18.addPixmap(QtGui.QPixmap(":/icon_/brightness_up.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionBrightness_Increase.setIcon(icon18)
        self.actionBrightness_Increase.setObjectName("actionBrightness_Increase")
        self.actionBrightness_Decrease = QtWidgets.QAction(MainWindow)
        icon19 = QtGui.QIcon()
        icon19.addPixmap(QtGui.QPixmap(":/icon_/brightness_down.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionBrightness_Decrease.setIcon(icon19)
        self.actionBrightness_Decrease.setObjectName("actionBrightness_Decrease")
        self.actionContrast_Increase = QtWidgets.QAction(MainWindow)
        icon20 = QtGui.QIcon()
        icon20.addPixmap(QtGui.QPixmap(":/icon_/contrast-up.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionContrast_Increase.setIcon(icon20)
        self.actionContrast_Increase.setObjectName("actionContrast_Increase")
        self.actionContrast_Decrease = QtWidgets.QAction(MainWindow)
        icon21 = QtGui.QIcon()
        icon21.addPixmap(QtGui.QPixmap(":/icon_/contrast-down.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionContrast_Decrease.setIcon(icon21)
        self.actionContrast_Decrease.setObjectName("actionContrast_Decrease")
        # actionSwap_Color has no icon and is never added to a menu or the
        # toolbar below; it only receives text in retranslateUi.
        self.actionSwap_Color = QtWidgets.QAction(MainWindow)
        self.actionSwap_Color.setObjectName("actionSwap_Color")
        self.actionReplace_Color = QtWidgets.QAction(MainWindow)
        icon22 = QtGui.QIcon()
        icon22.addPixmap(QtGui.QPixmap(":/icon_/replace_color.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionReplace_Color.setIcon(icon22)
        self.actionReplace_Color.setObjectName("actionReplace_Color")
        self.actionRed_Green = QtWidgets.QAction(MainWindow)
        icon23 = QtGui.QIcon()
        icon23.addPixmap(QtGui.QPixmap(":/icon_/swap-red-green.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionRed_Green.setIcon(icon23)
        self.actionRed_Green.setObjectName("actionRed_Green")
        self.actionGreen_Blue = QtWidgets.QAction(MainWindow)
        icon24 = QtGui.QIcon()
        icon24.addPixmap(QtGui.QPixmap(":/icon_/swap-green-blue.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionGreen_Blue.setIcon(icon24)
        self.actionGreen_Blue.setObjectName("actionGreen_Blue")
        self.actionBlue_Red = QtWidgets.QAction(MainWindow)
        icon25 = QtGui.QIcon()
        icon25.addPixmap(QtGui.QPixmap(":/icon_/swap-blue-red.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionBlue_Red.setIcon(icon25)
        self.actionBlue_Red.setObjectName("actionBlue_Red")
        self.actionImage_Histogram = QtWidgets.QAction(MainWindow)
        icon26 = QtGui.QIcon()
        icon26.addPixmap(QtGui.QPixmap(":/icon_/hist.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionImage_Histogram.setIcon(icon26)
        self.actionImage_Histogram.setObjectName("actionImage_Histogram")
        self.actionIntensity_Map = QtWidgets.QAction(MainWindow)
        icon27 = QtGui.QIcon()
        icon27.addPixmap(QtGui.QPixmap(":/icon_/heatmap.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionIntensity_Map.setIcon(icon27)
        self.actionIntensity_Map.setObjectName("actionIntensity_Map")
        self.actionHistogram_Equalization = QtWidgets.QAction(MainWindow)
        icon28 = QtGui.QIcon()
        icon28.addPixmap(QtGui.QPixmap(":/icon_/histequal.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionHistogram_Equalization.setIcon(icon28)
        self.actionHistogram_Equalization.setObjectName("actionHistogram_Equalization")
        self.actionCartoonify = QtWidgets.QAction(MainWindow)
        icon29 = QtGui.QIcon()
        icon29.addPixmap(QtGui.QPixmap(":/icon_/cartoonify.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionCartoonify.setIcon(icon29)
        self.actionCartoonify.setObjectName("actionCartoonify")
        self.actionAlpha = QtWidgets.QAction(MainWindow)
        icon30 = QtGui.QIcon()
        icon30.addPixmap(QtGui.QPixmap(":/icon_/alpha_blending.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionAlpha.setIcon(icon30)
        self.actionAlpha.setObjectName("actionAlpha")
        self.actionPyramid = QtWidgets.QAction(MainWindow)
        icon31 = QtGui.QIcon()
        icon31.addPixmap(QtGui.QPixmap(":/icon_/pyramid blending.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionPyramid.setIcon(icon31)
        self.actionPyramid.setObjectName("actionPyramid")
        self.actionDithering = QtWidgets.QAction(MainWindow)
        icon32 = QtGui.QIcon()
        icon32.addPixmap(QtGui.QPixmap(":/icon_/dithered.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionDithering.setIcon(icon32)
        self.actionDithering.setObjectName("actionDithering")
        self.actionContrast_Stretching = QtWidgets.QAction(MainWindow)
        icon33 = QtGui.QIcon()
        icon33.addPixmap(QtGui.QPixmap(":/icon_/stretch.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionContrast_Stretching.setIcon(icon33)
        self.actionContrast_Stretching.setObjectName("actionContrast_Stretching")
        self.actionGamma_Correction = QtWidgets.QAction(MainWindow)
        icon34 = QtGui.QIcon()
        icon34.addPixmap(QtGui.QPixmap(":/icon_/gamma_correction.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionGamma_Correction.setIcon(icon34)
        self.actionGamma_Correction.setObjectName("actionGamma_Correction")
        self.actionNegative = QtWidgets.QAction(MainWindow)
        icon35 = QtGui.QIcon()
        icon35.addPixmap(QtGui.QPixmap(":/icon_/neg.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionNegative.setIcon(icon35)
        self.actionNegative.setObjectName("actionNegative")
        self.actionEdge_Detection = QtWidgets.QAction(MainWindow)
        icon36 = QtGui.QIcon()
        icon36.addPixmap(QtGui.QPixmap(":/icon_/canny.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionEdge_Detection.setIcon(icon36)
        self.actionEdge_Detection.setObjectName("actionEdge_Detection")
        self.actionApply_Mask = QtWidgets.QAction(MainWindow)
        icon37 = QtGui.QIcon()
        icon37.addPixmap(QtGui.QPixmap(":/icon_/mask_button.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionApply_Mask.setIcon(icon37)
        self.actionApply_Mask.setObjectName("actionApply_Mask")
        self.actionUndo = QtWidgets.QAction(MainWindow)
        icon38 = QtGui.QIcon()
        icon38.addPixmap(QtGui.QPixmap(":/icon_/undo.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionUndo.setIcon(icon38)
        self.actionUndo.setObjectName("actionUndo")
        self.actionRedo = QtWidgets.QAction(MainWindow)
        icon39 = QtGui.QIcon()
        icon39.addPixmap(QtGui.QPixmap(":/icon_/redo.png"), QtGui.QIcon.Normal, QtGui.QIcon.On)
        self.actionRedo.setIcon(icon39)
        self.actionRedo.setObjectName("actionRedo")
        # Attach actions to their menus.
        self.menuFile.addAction(self.actionOpen)
        self.menuFile.addAction(self.actionSave)
        self.menuFile.addAction(self.actionExit)
        self.menuSize_and_Orientation.addAction(self.actionResize)
        self.menuSize_and_Orientation.addAction(self.actionMirror)
        self.menuSize_and_Orientation.addAction(self.actionFlip_Upside_Down)
        self.menuSize_and_Orientation.addAction(self.actionRotate)
        self.menuSize_and_Orientation.addAction(self.actionAdd_Border)
        self.menuAdd_Noise.addAction(self.actionGaussian)
        self.menuAdd_Noise.addAction(self.actionHorizontal_Bands)
        self.menuAdd_Noise.addAction(self.actionVertical_Bands)
        self.menuAdd_Noise.addAction(self.actionSalt_and_Pepper)
        self.menuBlur.addAction(self.actionGaussian_2)
        self.menuBlur.addAction(self.actionMedian)
        self.menuBlur.addAction(self.actionAverage)
        self.menuSpecial.addAction(self.actionPixelate)
        self.menuSpecial.addAction(self.actionCartoonify)
        self.menuSpecial.addAction(self.actionDithering)
        self.menuSpecial.addAction(self.actionContrast_Stretching)
        self.menuSpecial.addAction(self.actionEdge_Detection)
        self.menuSwap_color_channels.addAction(self.actionRed_Green)
        self.menuSwap_color_channels.addAction(self.actionGreen_Blue)
        self.menuSwap_color_channels.addAction(self.actionBlue_Red)
        self.menuColor_2.addAction(self.actionBrightness_Increase)
        self.menuColor_2.addAction(self.actionBrightness_Decrease)
        self.menuColor_2.addAction(self.actionContrast_Increase)
        self.menuColor_2.addAction(self.actionContrast_Decrease)
        self.menuColor_2.addAction(self.actionBlack_and_White)
        self.menuColor_2.addAction(self.actionExtract_Color)
        self.menuColor_2.addAction(self.actionReplace_Color)
        self.menuColor_2.addAction(self.menuSwap_color_channels.menuAction())
        self.menuColor_2.addAction(self.actionImage_Histogram)
        self.menuColor_2.addAction(self.actionIntensity_Map)
        self.menuColor_2.addAction(self.actionHistogram_Equalization)
        self.menuColor_2.addAction(self.actionGamma_Correction)
        self.menuColor_2.addAction(self.actionNegative)
        self.menuColor_2.addAction(self.actionApply_Mask)
        self.menuBlending.addAction(self.actionAlpha)
        self.menuBlending.addAction(self.actionPyramid)
        self.menuEdit.addAction(self.actionUndo)
        self.menuEdit.addAction(self.actionRedo)
        # Populate the top-level menu bar, left to right.
        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addAction(self.menuEdit.menuAction())
        self.menubar.addAction(self.menuSize_and_Orientation.menuAction())
        self.menubar.addAction(self.menuColor_2.menuAction())
        self.menubar.addAction(self.menuBlending.menuAction())
        self.menubar.addAction(self.menuBlur.menuAction())
        self.menubar.addAction(self.menuAdd_Noise.menuAction())
        self.menubar.addAction(self.menuSpecial.menuAction())
        # Populate the toolbar; separators mark the same groups as the menus.
        self.toolBar.addAction(self.actionOpen)
        self.toolBar.addAction(self.actionSave)
        self.toolBar.addAction(self.actionUndo)
        self.toolBar.addAction(self.actionRedo)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionResize)
        self.toolBar.addAction(self.actionMirror)
        self.toolBar.addAction(self.actionFlip_Upside_Down)
        self.toolBar.addAction(self.actionRotate)
        self.toolBar.addAction(self.actionAdd_Border)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionBrightness_Increase)
        self.toolBar.addAction(self.actionBrightness_Decrease)
        self.toolBar.addAction(self.actionContrast_Increase)
        self.toolBar.addAction(self.actionContrast_Decrease)
        self.toolBar.addAction(self.actionBlack_and_White)
        self.toolBar.addAction(self.actionExtract_Color)
        self.toolBar.addAction(self.actionReplace_Color)
        self.toolBar.addAction(self.actionRed_Green)
        self.toolBar.addAction(self.actionGreen_Blue)
        self.toolBar.addAction(self.actionBlue_Red)
        self.toolBar.addAction(self.actionImage_Histogram)
        self.toolBar.addAction(self.actionIntensity_Map)
        self.toolBar.addAction(self.actionHistogram_Equalization)
        self.toolBar.addAction(self.actionGamma_Correction)
        self.toolBar.addAction(self.actionNegative)
        self.toolBar.addAction(self.actionApply_Mask)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionAlpha)
        self.toolBar.addAction(self.actionPyramid)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionAverage)
        self.toolBar.addAction(self.actionGaussian_2)
        self.toolBar.addAction(self.actionMedian)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionGaussian)
        self.toolBar.addAction(self.actionHorizontal_Bands)
        self.toolBar.addAction(self.actionVertical_Bands)
        self.toolBar.addAction(self.actionSalt_and_Pepper)
        self.toolBar.addSeparator()
        self.toolBar.addAction(self.actionPixelate)
        self.toolBar.addAction(self.actionCartoonify)
        self.toolBar.addAction(self.actionDithering)
        self.toolBar.addAction(self.actionContrast_Stretching)
        self.toolBar.addAction(self.actionEdge_Detection)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (via Qt's translate for i18n support)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "Image Editor and Manager"))
        self.menuFile.setTitle(_translate("MainWindow", "File"))
        self.menuSize_and_Orientation.setTitle(_translate("MainWindow", "Shape"))
        self.menuAdd_Noise.setTitle(_translate("MainWindow", "Add Noise"))
        self.menuBlur.setTitle(_translate("MainWindow", "Blur"))
        self.menuSpecial.setTitle(_translate("MainWindow", "Other"))
        self.menuColor_2.setTitle(_translate("MainWindow", "Color"))
        self.menuSwap_color_channels.setTitle(_translate("MainWindow", "Swap color channels"))
        self.menuBlending.setTitle(_translate("MainWindow", "Blending"))
        self.menuEdit.setTitle(_translate("MainWindow", "Edit"))
        self.toolBar.setWindowTitle(_translate("MainWindow", "toolBar"))
        self.actionNew.setText(_translate("MainWindow", "New"))
        self.actionOpen.setText(_translate("MainWindow", "Open"))
        self.actionSave.setText(_translate("MainWindow", "Save"))
        self.actionExit.setText(_translate("MainWindow", "Exit"))
        self.actionResize.setText(_translate("MainWindow", "Resize"))
        self.actionMirror.setText(_translate("MainWindow", "Mirror"))
        self.actionFlip_Upside_Down.setText(_translate("MainWindow", "Flip Upside Down"))
        self.actionRotate.setText(_translate("MainWindow", "Rotate"))
        self.actionGaussian.setText(_translate("MainWindow", "Gaussian"))
        self.actionHorizontal_Bands.setText(_translate("MainWindow", "Horizontal Bands"))
        self.actionVertical_Bands.setText(_translate("MainWindow", "Vertical Bands"))
        self.actionSalt_and_Pepper.setText(_translate("MainWindow", "Salt and Pepper"))
        self.actionGaussian_2.setText(_translate("MainWindow", "Gaussian"))
        self.actionMedian.setText(_translate("MainWindow", "Median"))
        self.actionAverage.setText(_translate("MainWindow", "Average"))
        self.actionPixelate.setText(_translate("MainWindow", "Pixelate"))
        self.actionAdd_Border.setText(_translate("MainWindow", "Add Border"))
        self.actionBlack_and_White.setText(_translate("MainWindow", "Black and White"))
        self.actionExtract_Color.setText(_translate("MainWindow", "Extract Color"))
        self.actionBrightness_Increase.setText(_translate("MainWindow", "Brightness - Increase"))
        self.actionBrightness_Decrease.setText(_translate("MainWindow", "Brightness - Decrease"))
        self.actionContrast_Increase.setText(_translate("MainWindow", "Contrast - Increase"))
        self.actionContrast_Decrease.setText(_translate("MainWindow", "Contrast Decrease"))
        self.actionSwap_Color.setText(_translate("MainWindow", "Swap Color"))
        self.actionReplace_Color.setText(_translate("MainWindow", "Replace Color"))
        self.actionRed_Green.setText(_translate("MainWindow", "Red-Green"))
        self.actionGreen_Blue.setText(_translate("MainWindow", "Green-Blue"))
        self.actionBlue_Red.setText(_translate("MainWindow", "Blue-Red"))
        self.actionImage_Histogram.setText(_translate("MainWindow", "Image Histogram"))
        self.actionIntensity_Map.setText(_translate("MainWindow", "Intensity Map"))
        self.actionHistogram_Equalization.setText(_translate("MainWindow", "Histogram Equalization"))
        self.actionCartoonify.setText(_translate("MainWindow", "Cartoonify"))
        self.actionAlpha.setText(_translate("MainWindow", "Alpha"))
        self.actionPyramid.setText(_translate("MainWindow", "Pyramid"))
        self.actionDithering.setText(_translate("MainWindow", "Dithering"))
        self.actionContrast_Stretching.setText(_translate("MainWindow", "Contrast Stretching"))
        self.actionGamma_Correction.setText(_translate("MainWindow", "Gamma Correction"))
        self.actionNegative.setText(_translate("MainWindow", "Negative"))
        self.actionEdge_Detection.setText(_translate("MainWindow", "Edge Detection"))
        self.actionApply_Mask.setText(_translate("MainWindow", "Apply Mask"))
        self.actionUndo.setText(_translate("MainWindow", "Undo"))
        self.actionRedo.setText(_translate("MainWindow", "Redo"))
import icons_rc
# Standalone preview: build the generated UI inside a bare QMainWindow.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| 59.404706 | 107 | 0.724086 |
df5d38a4492da5899ef2cf2b3f7b2d13159090e3 | 2,042 | py | Python | resotolib/test/test_args.py | someengineering/resoto | ee17313f5376e9797ed305e7fdb62d40139a6608 | [
"Apache-2.0"
] | 126 | 2022-01-13T18:22:03.000Z | 2022-03-31T11:03:14.000Z | resotolib/test/test_args.py | someengineering/resoto | ee17313f5376e9797ed305e7fdb62d40139a6608 | [
"Apache-2.0"
] | 110 | 2022-01-13T22:27:55.000Z | 2022-03-30T22:26:50.000Z | resotolib/test/test_args.py | someengineering/resoto | ee17313f5376e9797ed305e7fdb62d40139a6608 | [
"Apache-2.0"
] | 8 | 2022-01-15T10:28:16.000Z | 2022-03-30T16:38:21.000Z | import os
from typing import List
from resotolib.args import get_arg_parser, ArgumentParser, convert, NoneType
from resotolib.logger import add_args as logging_add_args
from resotolib.jwt import add_args as jwt_add_args
def test_args():
    """Verify RESOTO_* environment variables feed the argument parser.

    Covers: unknown attribute lookup returning None, env-provided psk,
    empty-string env var treated as unset, and env-backed custom int/list
    arguments.

    Fix over the original: the RESOTO_* variables this test sets are now
    restored afterwards, so they no longer leak into other tests run in
    the same process.
    """
    _env_keys = ("RESOTO_PSK", "RESOTO_VERBOSE",
                 "RESOTO_TEST_INT", "RESOTO_TEST_LIST0")
    saved = {key: os.environ.get(key) for key in _env_keys}
    try:
        assert ArgumentParser.args.does_not_exist is None

        os.environ["RESOTO_PSK"] = "changeme"
        arg_parser = get_arg_parser()
        logging_add_args(arg_parser)
        jwt_add_args(arg_parser)
        arg_parser.parse_args()
        assert ArgumentParser.args.verbose is False
        assert ArgumentParser.args.psk == "changeme"

        # An empty-string env var must behave like an unset psk.
        os.environ["RESOTO_PSK"] = ""
        os.environ["RESOTO_VERBOSE"] = "true"
        os.environ["RESOTO_TEST_INT"] = "123"
        os.environ["RESOTO_TEST_LIST0"] = "foobar"
        arg_parser = get_arg_parser()
        logging_add_args(arg_parser)
        jwt_add_args(arg_parser)
        arg_parser.add_argument(
            "--test-int",
            dest="test_int",
            type=int,
            default=0,
        )
        arg_parser.add_argument(
            "--test-list",
            dest="test_list",
            type=str,
            default=[],
            nargs="+",
        )
        arg_parser.parse_args()
        assert ArgumentParser.args.verbose is True
        assert ArgumentParser.args.psk is None
        assert ArgumentParser.args.test_int == 123
        assert ArgumentParser.args.test_list[0] == "foobar"
    finally:
        # Restore the pre-test environment so later tests see a clean slate.
        for key, value in saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value
def test_convert() -> None:
    """convert() coerces strings when it can and passes them through otherwise."""

    def split_on_commas(value: str) -> List[str]:
        return value.split(",")

    # successful coercions into builtin types
    assert convert(None, NoneType) is None
    assert convert("3", int) == 3
    assert convert("3.4", float) == 3.4
    assert convert("true", bool) is True
    assert convert("false", bool) is False
    assert convert("123", complex) == complex(123)
    # coercion fails: the raw string comes back unchanged
    assert convert("no_int", int) == "no_int"
    assert convert("no_float", float) == "no_float"
    assert convert("no_complex", complex) == "no_complex"
    # unsupported target type: the raw string comes back unchanged
    assert convert("args", ArgumentParser) == "args"
    # a plain callable target is applied to the string
    assert convert("1,2,3,4", split_on_commas) == ["1", "2", "3", "4"]
| 29.171429 | 76 | 0.661606 |
87fd51dcae2a11e33ad2280e4c37ef06f86ef5b6 | 405 | py | Python | input_data/migrations/0004_auto_20190615_0536.py | yudhapatria96/sidang | 67252d8ec11791444cfd2ed5330391775372afc6 | [
"bzip2-1.0.6"
] | null | null | null | input_data/migrations/0004_auto_20190615_0536.py | yudhapatria96/sidang | 67252d8ec11791444cfd2ed5330391775372afc6 | [
"bzip2-1.0.6"
] | 6 | 2019-12-05T00:12:52.000Z | 2022-02-10T09:47:41.000Z | input_data/migrations/0004_auto_20190615_0536.py | yudhapatria96/sidang | 67252d8ec11791444cfd2ed5330391775372afc6 | [
"bzip2-1.0.6"
] | null | null | null | # Generated by Django 2.2 on 2019-06-14 22:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.2) migration redefining bulan_transaksi.

    Do not edit by hand; create a follow-up migration for further changes.
    """

    dependencies = [
        ('input_data', '0003_auto_20190612_1313'),
    ]

    operations = [
        migrations.AlterField(
            model_name='penjualanmodel',
            name='bulan_transaksi',
            # New definition is CharField(max_length=200); the previous field
            # type is not visible here -- see migration 0003 for the old state.
            field=models.CharField(max_length=200),
        ),
    ]
355b147890285d7d30ed88e3d7ffdcfd7bac5c60 | 2,409 | py | Python | Data clean-up and code/DOAJwrite2.py | ryregier/APCPrices | fa39a878cea8a1d89551f29382e86a16f9ebcb4d | [
"MIT"
] | 2 | 2018-02-13T21:41:08.000Z | 2018-02-14T21:38:20.000Z | Data clean-up and code/DOAJwrite2.py | ryregier/APCPrices | fa39a878cea8a1d89551f29382e86a16f9ebcb4d | [
"MIT"
] | 2 | 2018-03-06T00:01:53.000Z | 2018-03-06T14:20:00.000Z | Data clean-up and code/DOAJwrite2.py | ryregier/APCPrices | fa39a878cea8a1d89551f29382e86a16f9ebcb4d | [
"MIT"
] | 1 | 2018-11-07T16:34:12.000Z | 2018-11-07T16:34:12.000Z | import json
import re
import csv
def grab(x):
    """Return *x* unchanged, or "unknown" for DOAJ placeholder values.

    False, "", " " and "No Information" are the values the DOAJ CSV uses
    for missing data; everything else passes through untouched.
    """
    # The original wrapped `return x` in try/except, but returning a value
    # can never raise -- that dead code is removed here.
    if x is False or x == " " or x == "" or x == "No Information":
        return "unknown"
    return x
def apc_existence(x, y):
    """Return the APC (article processing charge) as an int.

    x -- the "has APC" flag from the CSV ("No" means there is no charge).
    y -- the raw price field.

    Returns 0 when there is no APC, the integer price when *y* parses,
    and "" when the price field cannot be parsed.
    """
    if x == "No":
        return 0
    try:
        # Parse once (the original called int(y) twice).
        return int(y)
    except (ValueError, TypeError):  # non-numeric string, None, etc.
        return ""
def curr_pull(x, y):
    """Return the 3-letter currency code for a journal's APC.

    Returns the string "None" when there is no APC (x == "No"), the first
    three characters of *y* otherwise, or "" when *y* cannot be sliced.
    """
    if x == "No":
        return "None"
    try:
        return y[0:3]
    except TypeError:  # y is None or another non-sliceable value
        return ""
def ISSN_list(x, y):
    """Collect the two ISSN fields into a de-duplicated list.

    Returns "" when either field is too long to be an ISSN, otherwise a
    list containing the non-empty, distinct ISSN value(s).
    """
    # BUG FIX: the original condition was `len(x) > 10 or len(x) > 10`,
    # testing x twice, so an over-long *y* was never rejected. Check both
    # fields, as the "too long to be an ISSN" comment always intended.
    if len(x) > 10 or len(y) > 10:
        return ""
    if x == y:
        return [x]
    if x == "" or x == " ":
        return [y]
    if y == "" or y == " ":
        return [x]
    return [x, y]
# Script body: read the DOAJ 2018 CSV export, map each journal row onto the
# project's common record schema, and dump the result as one JSON array.
data = list()
count = 0 #Count to get an idea how many journals have been processed

#importing DOAJ CSV file
with open('DOAJ2018.csv', newline='',encoding="utf8") as myFile:
    reader = csv.reader(myFile)
    for row in reader:
        count = count + 1
        if count == 1: continue #Skipping the first line with headings
        #if count > 20: break
        if len(row) < 1:continue

        # Column indices map DOAJ's CSV layout onto our record fields;
        # grab() normalizes missing/placeholder values to "unknown".
        journal_dict ={
            "journal_title": grab(row[0]),
            "oa_type": "Gold",
            "issn": grab(ISSN_list(row[3],row[4])),
            "apc": grab(row[9]),
            "apc_price": grab(apc_existence(row[9],row[11])),
            "currency": grab(curr_pull(row[9],row[12])),
            "source_of_apc": "DOAJ",
            "publisher": grab(row[5]),
            "last_update": "10/02/2018"
        }
        data.append(journal_dict)
        #print (journal_dict)
        #break

#print (data)
with open('JSONtest4', 'w') as outfile: #writing file as a JSON file
    json.dump(data, outfile)

print ("DOAJ analysis finished",count,"journals added to JSON file.")
| 29.378049 | 123 | 0.501453 |
f00b3c78ac21703e5cd79fa830feeb74373cdd3d | 15,571 | py | Python | watsononlinestore/tests/unit/test_watson_online_store.py | LiamFrailingWork/Gabe | 654ee86cdb7a68a8b132443dbda27ade74af99d8 | [
"Apache-2.0"
] | null | null | null | watsononlinestore/tests/unit/test_watson_online_store.py | LiamFrailingWork/Gabe | 654ee86cdb7a68a8b132443dbda27ade74af99d8 | [
"Apache-2.0"
] | null | null | null | watsononlinestore/tests/unit/test_watson_online_store.py | LiamFrailingWork/Gabe | 654ee86cdb7a68a8b132443dbda27ade74af99d8 | [
"Apache-2.0"
] | null | null | null | import unittest
import ddt
import mock
from watsononlinestore import watson_online_store
@ddt.ddt
class WOSTestCase(unittest.TestCase):
    def setUp(self):
        """Build a WatsonOnlineStore wired entirely to mocked services."""
        # NOTE(review): this creates an unused Mock -- it does NOT patch
        # watson_online_store.os.environ; mock.patch was likely intended.
        mock.Mock(watson_online_store.os.environ, return_value={})
        self.slack_client = mock.Mock()
        self.conv_client = mock.Mock()
        self.fake_workspace_id = 'fake workspace id'
        # Conversation service reports one workspace matching the default
        # workspace name looked up during construction.
        self.conv_client.list_workspaces.return_value = {
            'workspaces': [{'workspace_id': self.fake_workspace_id,
                            'name': 'watson-online-store'}]}
        self.cloudant_store = mock.Mock()
        self.discovery_client = mock.Mock()
        self.fake_data_source = 'IBM_STORE'
        self.fake_environment_id = 'fake env id'
        self.fake_collection_id = "fake collection id"
        # Discovery service reports one environment and one collection,
        # both named 'ibm-logo-store'.
        self.discovery_client.get_environment.return_value = {
            'environment_id': self.fake_environment_id}
        self.discovery_client.get_environments.return_value = {
            'environments': [{'environment_id': self.fake_environment_id,
                              'name': 'ibm-logo-store'}]}
        self.discovery_client.get_collection.return_value = {
            'collection_id': self.fake_collection_id}
        self.discovery_client.list_collections.return_value = {
            'collections': [{'collection_id': self.fake_collection_id,
                             'name': 'ibm-logo-store'}]}
        # System under test, built with the mocks above and bot id 'UBOTID'.
        self.wos = watson_online_store.WatsonOnlineStore(
            'UBOTID',
            self.slack_client,
            self.conv_client,
            self.discovery_client,
            self.cloudant_store)
    def test_0(self):
        """handle_message forwards text to Conversation and posts the reply."""
        fake_channel = "fake channel"
        sender = watson_online_store.SlackSender(
            self.slack_client, fake_channel)
        fake_response = "this is a fake response"
        # Conversation returns a single text response and a context that
        # does not request further input.
        self.conv_client.message.return_value = {
            'context': {'send_no_input': 'no'},
            'output': {'text': [fake_response]},
        }
        self.wos.handle_message("this is a test", sender)
        # The user's text must reach the Conversation service verbatim,
        # starting from an empty context.
        self.conv_client.assert_has_calls([
            mock.call.message(context={},
                              message_input={'text': 'this is a test'},
                              workspace_id=mock.ANY)
        ])
        # The Conversation reply (plus newline) must be posted back to Slack.
        self.slack_client.api_call.assert_has_calls([
            mock.call(
                'chat.postMessage',
                as_user=True,
                channel=fake_channel,
                text=fake_response + '\n')
        ])
@ddt.data(None, "", False)
def test_init_customer_no_user_id(self, no_user_id):
self.assertRaises(
AssertionError, self.wos.init_customer, no_user_id)
    def test_init_customer_slack_fail(self):
        """A Slack API exception is survived; the lookup is still attempted."""
        self.slack_client.api_call = mock.Mock(side_effect=Exception("Boom"))
        user = "testuser"
        # Must not raise despite the Slack failure.
        self.wos.init_customer(user)
        self.slack_client.api_call.assert_called_once_with(
            'users.info', user=user)
    # Each datum is a Slack users.info response that cannot yield an email:
    # falsy responses, an error response, and profiles with a missing,
    # None, or empty 'email' field.
    @ddt.data(None, "", False, {},
              {'ok': False, 'error': 'yes'},
              {'user': {'profile': {'no-email': 'e@mail'}}},
              {'user': {'profile': {'email': None}}},
              {'user': {'profile': {'email': ''}}}
              )
    def test_init_customer_slack_unusable(self, ret):
        """Unusable Slack responses are tolerated; the lookup still happens."""
        self.slack_client.api_call = mock.Mock(return_value=ret)
        user = "testuser"
        self.wos.init_customer(user)
        self.slack_client.api_call.assert_called_once_with(
            'users.info', user=user)
    def test_init_customer_slack_user_old(self):
        """An email already known to Cloudant resolves to the existing customer."""
        test_email_addr = 'e@mail'
        self.slack_client.api_call = mock.Mock(return_value={
            'user': {'profile': {'email': test_email_addr}}})
        # Cloudant already has a record for this email address.
        self.cloudant_store.find_customer = mock.Mock(return_value={
            'email': 'test-email',
            'first_name': 'test-first-name',
            'last_name': 'test-last-name',
        })
        user = "testuser"
        self.wos.init_customer(user)
        self.slack_client.api_call.assert_called_once_with(
            'users.info', user=user)
        # The lookup must use the email taken from the Slack profile.
        self.cloudant_store.find_customer.assert_called_once_with(
            test_email_addr)
    def test_init_customer_slack_new(self):
        """An email unknown to Cloudant still goes through the customer lookup."""
        test_email_addr = 'e@mail'
        self.slack_client.api_call = mock.Mock(
            return_value={'user': {'profile': {'email': test_email_addr,
                                               'first_name': 'first-name',
                                               'last_name': 'last-name',
                                               }}})
        # Cloudant has no record for this email address.
        self.cloudant_store.find_customer = mock.Mock(return_value={})
        user = "testuser"
        self.wos.init_customer(user)
        self.slack_client.api_call.assert_called_once_with(
            'users.info', user=user)
        self.cloudant_store.find_customer.assert_called_once_with(
            test_email_addr)
    def test_init_customer_slack_no_name(self):
        """A profile with empty first/last names is still looked up by email."""
        test_email_addr = 'e@mail'
        self.slack_client.api_call = mock.Mock(
            return_value={'user': {'profile': {'email': test_email_addr,
                                               'first_name': '',
                                               'last_name': '',
                                               }}})
        self.cloudant_store.find_customer = mock.Mock(return_value={})
        user = "testuser"
        self.wos.init_customer(user)
        self.slack_client.api_call.assert_called_once_with(
            'users.info', user=user)
        self.cloudant_store.find_customer.assert_called_once_with(
            test_email_addr)
    @ddt.data(
        ([{'text': '<@UBOTID> suFFix', 'channel': 'C', 'user': 'U'}],
         ('suffix', 'C', 'U')),
        ([{'text': 'prefix <@UBOTID> Suffix', 'channel': 'C', 'user': 'U'}],
         ('prefix suffix', 'C', 'U')),
        ([{'text': 'prefix <@UBOTID> Suffix<@UBOTID>Tail',
           'channel': 'C', 'user': 'U'}],
         ('prefix suffixtail', 'C', 'U')),
        ([{'text': 'prefix <@UBOTID> suffix', 'channel': 'DXXX', 'user': 'U'}],
         ('prefix suffix', 'DXXX', 'U')),
        ([{'text': 'this is a dm', 'channel': 'DXXX', 'user': 'U'}],
         ('this is a dm', 'DXXX', 'U')))
    @ddt.unpack
    def test_parse_slack_output(self, output_list, expected):
        """parse_slack_output() extracts (text, channel, user).

        The expectations encode that the bot mention token is stripped
        wherever it appears, the remaining text is lower-cased, and a
        direct-message channel (name starting with 'D') needs no mention.
        """
        actual = self.wos.parse_slack_output(output_list)
        self.assertEqual(expected, actual)
    @ddt.data([{},  # no text
               {'text': '<@UBOTID> hi', 'user_profile': 'x'},  # has profile
               {'text': 'hello world', 'channel': 'NOTDM'}  # no at and not DM
               ])
    def test_parse_slack_output_to_skip(self, output_list):
        """Outputs that should be ignored yield (None, None, None).

        NOTE(review): ddt.data() is given a single list here, so this
        runs once with all three dicts as one output list (every entry
        must be skipped) -- confirm that is the intent rather than one
        run per dict.
        """
        expected = (None, None, None)
        actual = self.wos.parse_slack_output(output_list)
        self.assertEqual(expected, actual)
def test_setup_conversation_workspace_by_name_default(self):
test_environ = {}
expected_workspace_id = 'this is the one'
self.conv_client.list_workspaces = mock.Mock(return_value={
'workspaces': [{'workspace_id': 'other', 'name': 'foo'},
{'workspace_id': expected_workspace_id,
'name': 'watson-online-store'}]})
wos = watson_online_store.WatsonOnlineStore
actual = wos.setup_conversation_workspace(self.conv_client,
test_environ)
self.conv_client.list_workspaces.assert_called_once()
self.assertEqual(expected_workspace_id, actual)
def test_setup_conversation_workspace_by_name_env(self):
test_environ = {'WORKSPACE_NAME': 'foo name'}
expected_workspace_id = 'this is the one'
self.conv_client.list_workspaces = mock.Mock(return_value={
'workspaces': [{'workspace_id': 'other', 'name': 'foo'},
{'workspace_id': expected_workspace_id,
'name': test_environ['WORKSPACE_NAME']}]})
wos = watson_online_store.WatsonOnlineStore
actual = wos.setup_conversation_workspace(self.conv_client,
test_environ)
self.conv_client.list_workspaces.assert_called_once()
self.assertEqual(expected_workspace_id, actual)
def test_setup_conversation_workspace_by_id(self):
expected_workspace_id = 'testing with a ws ID'
test_environ = {'WORKSPACE_ID': expected_workspace_id}
self.conv_client.list_workspaces = mock.Mock(return_value={
'workspaces': [{'workspace_id': 'other'},
{'workspace_id': expected_workspace_id,
'name': 'foo'}]})
wos = watson_online_store.WatsonOnlineStore
actual = wos.setup_conversation_workspace(
self.conv_client, test_environ)
self.conv_client.list_workspaces.assert_called_once()
self.assertEqual(expected_workspace_id, actual)
    def test_setup_conversation_workspace_by_id_not_found(self):
        """A configured WORKSPACE_ID absent from the listing raises.

        The workspace listing contains only other IDs, so the setup must
        fail rather than silently fall back to another workspace.
        """
        expected_workspace_id = 'testing with a ws ID'
        test_environ = {'WORKSPACE_ID': expected_workspace_id}
        self.conv_client.list_workspaces = mock.Mock(return_value={
            'workspaces': [{'workspace_id': 'other'},
                           {'workspace_id': 'wrong again'}]})
        wos = watson_online_store.WatsonOnlineStore
        self.assertRaises(Exception,
                          wos.setup_conversation_workspace,
                          self.conv_client,
                          test_environ)
        self.conv_client.list_workspaces.assert_called_once()
def test_setup_conversation_workspace_create(self):
expected_workspace_id = 'this was created'
expected_workspace_name = 'and this was its name'
test_environ = {'WORKSPACE_NAME': expected_workspace_name}
self.conv_client.list_workspaces = mock.Mock(return_value={
'workspaces': [{'workspace_id': 'other', 'name': 'any'}]})
self.conv_client.create_workspace = mock.Mock(return_value={
'workspace_id': expected_workspace_id})
wos = watson_online_store.WatsonOnlineStore
ws_json = {
'counterexamples': 'c',
'intents': 'i',
'entities': 'e',
'dialog_nodes': 'd',
'metadata': 'm',
'language': 'en',
}
wos.get_workspace_json = mock.Mock(return_value=ws_json)
actual = wos.setup_conversation_workspace(
self.conv_client, test_environ)
self.conv_client.list_workspaces.assert_called_once()
self.conv_client.create_workspace.assert_called_once_with(
expected_workspace_name,
'Conversation workspace created by watson-online-store.',
ws_json['language'],
intents=ws_json['intents'],
entities=ws_json['entities'],
dialog_nodes=ws_json['dialog_nodes'],
counterexamples=ws_json['counterexamples'],
metadata=ws_json['metadata'])
self.assertEqual(expected_workspace_id, actual)
def test_setup_discovery_environment_by_id(self):
expected_environment_id = 'testing with a env ID'
expected_collection_id = 'testing with a coll ID'
test_environ = {'DISCOVERY_ENVIRONMENT_ID': expected_environment_id,
'DISCOVERY_COLLECTION_ID': expected_collection_id}
self.discovery_client.get_environment = mock.Mock(return_value={
'environment_id': expected_environment_id})
self.discovery_client.get_collection = mock.Mock(return_value={
'collection_id': expected_collection_id})
wos = watson_online_store.WatsonOnlineStore
actual_env, actual_coll = (
wos.setup_discovery_collection(self.discovery_client,
self.fake_data_source,
test_environ))
self.discovery_client.get_environment.assert_called_once()
self.discovery_client.get_collection.assert_called_once()
self.assertEqual(expected_environment_id, actual_env)
self.assertEqual(expected_collection_id, actual_coll)
    def test_setup_discovery_environment_by_name_default(self):
        """With no env config the default Discovery names are matched.

        The environment named 'watson-online-store' and the collection
        named 'ibm-logo-store' must be picked out of the listings.
        """
        test_environ = {}
        expected_environment_id = 'this is the env'
        expected_collection_id = 'this is the coll'
        self.discovery_client.get_environments = mock.Mock(return_value={
            'environments': [{'environment_id': 'other', 'name': 'foo'},
                             {'environment_id': expected_environment_id,
                              'name': 'watson-online-store'}]})
        self.discovery_client.list_collections = mock.Mock(return_value={
            'collections': [{'collection_id': 'other', 'name': 'foo'},
                            {'collection_id': expected_collection_id,
                             'name': 'ibm-logo-store'}]})
        wos = watson_online_store.WatsonOnlineStore
        actual_env, actual_coll = (
            wos.setup_discovery_collection(self.discovery_client,
                                           self.fake_data_source,
                                           test_environ))
        self.discovery_client.get_environments.assert_called_once()
        self.discovery_client.list_collections.assert_called_once()
        self.assertEqual(expected_environment_id, actual_env)
        self.assertEqual(expected_collection_id, actual_coll)
    def test_format_ibm_store_output(self):
        """IBM Store Discovery results are formatted into cart entries.

        The product name comes from the 'text' field and the product ID
        and image URL are scraped out of the 'html' field.
        """
        ibm_product_name = "IBM Shirt"
        ibm_product_id = "012345"
        ibm_image_tag = '<a class="jqzoom" href="'
        ibm_image_url = 'https://www.test.xxx/scale[50]'
        ibm_product_tag = "/ProductDetail.aspx?pid="
        ibm_product_url = ("http://www.logostore-globalid.us" +
                           ibm_product_tag)
        ibm_expected_response = [{
            'cart_number': "1",
            'name': ibm_product_name,
            'url': ibm_product_url + ibm_product_id,
            'image': ibm_image_url
        }, ]
        wos = watson_online_store.WatsonOnlineStore
        # Test IBM Store formatting.
        # Note: use "XXX" to simulate that these tags are not at [0]
        ibm_results = [{
            'text': "XXXProduct:" + ibm_product_name + " Category:",
            'html': ("XXX" + ibm_product_tag + ibm_product_id +
                     ibm_image_tag + ibm_image_url + '"')
        }, ]
        ibm_response = {'results': ibm_results}
        output = wos.format_discovery_response(ibm_response, "IBM_STORE")
        self.assertEqual(ibm_expected_response, output)
def test_format_amazon_store_output(self):
amz_product_name = "Amazon Shirt"
amz_product_tag = '<a href='
amz_product_url = 'http://www.test.xxx'
amz_expected_response = [{
'cart_number': "1",
'name': amz_product_name,
'url': amz_product_url,
'image': amz_product_url
}, ]
wos = watson_online_store.WatsonOnlineStore
# Test Amazon Store formatting.
# Note: use "XXX" to simulate that these tags are not at [0]
amz_results = [{
'extracted_metadata': {
'title': amz_product_name
},
'html': "XXX" + amz_product_tag + " " + amz_product_url + ' >'
}, ]
amz_response = {'results': amz_results}
output = wos.format_discovery_response(amz_response, "AMAZON")
self.assertEqual(amz_expected_response, output)
| 42.083784 | 79 | 0.594053 |
632df0ce18078e1855841acee6b294801e3ae86b | 1,839 | py | Python | SpiffWorkflow/util/__init__.py | LipotesOps/octo-fibula | 14c7a2b5e4929c9f441f81485949d91ef9ab66a0 | [
"MIT"
] | 1 | 2020-12-23T06:14:27.000Z | 2020-12-23T06:14:27.000Z | SpiffWorkflow/util/__init__.py | LipotesOps/octo-fibula | 14c7a2b5e4929c9f441f81485949d91ef9ab66a0 | [
"MIT"
] | 7 | 2020-06-06T01:58:04.000Z | 2022-02-10T14:02:38.000Z | SpiffWorkflow/util/__init__.py | LipotesOps/octo-fibula | 14c7a2b5e4929c9f441f81485949d91ef9ab66a0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import division
from builtins import range
def merge_dictionary(dst, src):
    """Recursively merge *src* into *dst* (vs ``dict.update``, which
    overwrites nested hashes at the root level).

    Note: This updates *dst* in place and also returns it.

    Adapted from checkmate.utils.

    Bug fixes relative to the previous version:

    * list padding used ``append`` (nesting the padding list as a single
      element and raising IndexError when the destination list was two or
      more elements longer than the source) instead of ``extend``;
    * overwrites of non-dict list items were written to a discarded copy
      of the destination list, so they were silently lost.
    """
    stack = [(dst, src)]
    while stack:
        current_dst, current_src = stack.pop()
        for key in current_src:
            source = current_src[key]
            if key not in current_dst:
                current_dst[key] = source
                continue

            dest = current_dst[key]
            if isinstance(source, dict) and isinstance(dest, dict):
                # Merge nested dicts iteratively (no recursion limit).
                stack.append((dest, source))
            elif isinstance(source, list) and isinstance(dest, list):
                # Pad the shorter side with None so both have equal length.
                r = dest[:]
                s = source[:]
                if len(dest) > len(source):
                    s.extend(None for _ in range(len(dest) - len(source)))
                elif len(dest) < len(source):
                    r.extend(None for _ in range(len(source) - len(dest)))
                # Merge element-wise into the result copy ``r``.
                for index, value in enumerate(r):
                    if (not value) and s[index]:
                        # Destination slot is empty: take the source value.
                        r[index] = s[index]
                    elif isinstance(value, dict) and isinstance(s[index], dict):
                        # Both elements are dicts: merge them too.
                        stack.append((r[index], s[index]))
                    elif s[index] is not None:
                        # Source overwrites destination; None means "no
                        # source element" (padding), so keep dest then.
                        r[index] = s[index]
                current_dst[key] = r
            else:
                current_dst[key] = source
    return dst
| 39.12766 | 79 | 0.442088 |
549459c168866d1211b731118b01cbb4bc11e89b | 2,185 | py | Python | dml/fileutils.py | RGBCube/dml | f551821545a062e15aea1f2c2444e6016748ea34 | [
"MIT"
] | 2 | 2022-03-19T19:15:28.000Z | 2022-03-19T19:15:32.000Z | dml/fileutils.py | RGBCube/dml | f551821545a062e15aea1f2c2444e6016748ea34 | [
"MIT"
] | null | null | null | dml/fileutils.py | RGBCube/dml | f551821545a062e15aea1f2c2444e6016748ea34 | [
"MIT"
] | null | null | null | import typing as t
from .encoder import encode, decode
from .errors import DecodeError
from .symbols import symbols as s
__all__ = ("decode_file", "encode_file")
def encode_file(fp: str, *, out: str = None) -> t.Optional[t.Generator[str, None, None]]:
"""Encodes a file to DML.
Arguments:
fp (str): The filepath to the file to encode.
out (str): The filepath to write the encoded file to. If not specified, the encoded text will be returned as a generator.
Returns:
Optional[Generator[str, None, None]]: The encoded text as a generator.
"""
def read_file() -> t.Generator[str, None, None]:
with open(fp) as f_:
while char_ := f_.read(1):
yield char_
if out:
out = out + ".dml" if not out.endswith(".dml") else out
with open(out, "w") as f:
for char in encode(read_file()):
f.write(char)
else:
return encode(read_file())
def decode_file(fp: str, *, out: t.Optional[str] = None) -> t.Optional[t.Generator[str, None, None]]:
    """Decodes a file that has DML encoded in it.

    Arguments:
        fp (str): The filepath to the file to decode.
        out (Optional[str]): The filepath to write the decoded file to; a
            ``.dml`` suffix is appended if missing. If not specified, the
            decoded text will be returned as a generator.

    Returns:
        Optional[Generator[str, None, None]]: The decoded text as a
        generator, or ``None`` when *out* is given.

    Raises:
        DecodeError: If the file is not valid DML.
    """

    def read_file() -> t.Generator[str, None, None]:
        # Accumulate symbols until the stop symbol, then yield the whole
        # token (stop symbol included) for the decoder.
        # NOTE(review): characters after the final stop symbol are
        # silently discarded -- confirm that is intended.
        buffer = ""
        with open(fp) as f_:
            while char_ := f_.read(1):
                if char_ not in s.values():
                    raise DecodeError(f"Invalid character: '{char_}', expected {s['1']}, {s['0']} or {s['stop']}")
                elif char_ != s["stop"]:
                    buffer += char_
                else:
                    yield buffer + s["stop"]
                    buffer = ""

    if out:
        # NOTE(review): the *decoded* output file is also given a .dml
        # suffix here, mirroring encode_file -- verify that is intended.
        out = out + ".dml" if not out.endswith(".dml") else out
        with open(out, "w") as f:
            for char in decode(read_file()):
                f.write(char)
    else:
        return decode(read_file())
| 32.132353 | 130 | 0.561556 |
1323852e084a9966cf10c942f17bd6440185d58b | 4,391 | py | Python | discatpy/internal/events.py | EmreTech/DisCatPy | 3fd7c147b5ca00afab4d9b6bb0767258ca680694 | [
"MIT"
] | 2 | 2022-03-25T03:11:52.000Z | 2022-03-25T03:19:54.000Z | discatpy/internal/events.py | EmreTech/DisCatPy | 3fd7c147b5ca00afab4d9b6bb0767258ca680694 | [
"MIT"
] | null | null | null | discatpy/internal/events.py | EmreTech/DisCatPy | 3fd7c147b5ca00afab4d9b6bb0767258ca680694 | [
"MIT"
] | 1 | 2022-03-27T10:45:43.000Z | 2022-03-27T10:45:43.000Z | """
The MIT License (MIT)
Copyright (c) 2022-present EmreTech
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from typing import List
from .dispatcher import *
__all__ = ("EventsMixin",)
class EventsMixin:
    """
    This mixin adds Discord Gateway events functionality, like listening
    to an event, and lists all the available event types.

    This is for internal use only.
    """

    # The dispatcher that listeners are registered with; presumably set
    # by the class this is mixed into -- confirm against the consumer.
    dispatcher: Dispatcher

    # Every Gateway event name a listener may be registered for.
    # NOTE(review): "on_guild_intergrations_update" spelling is kept
    # as-is; other modules presumably dispatch under that exact name.
    valid_events: List[str] = [
        "on_ready",
        "on_resumed",
        "on_reconnect",
        "on_channel_create",
        "on_channel_update",
        "on_channel_delete",
        "on_channel_pins_update",
        "on_thread_create",
        "on_thread_update",
        "on_thread_delete",
        "on_thread_list_sync",
        "on_thread_member_update",
        "on_thread_members_update",
        "on_guild_create",
        "on_guild_update",
        "on_guild_delete",
        "on_guild_ban_add",
        "on_guild_ban_remove",
        "on_guild_emojis_update",
        "on_guild_stickers_update",
        "on_guild_intergrations_update",
        "on_guild_member_add",
        "on_guild_member_remove",
        "on_guild_member_update",
        "on_guild_members_chunk",
        "on_guild_role_create",
        "on_guild_role_update",
        "on_guild_role_delete",
        "on_guild_scheduled_event_create",
        "on_guild_scheduled_event_update",
        "on_guild_scheduled_event_delete",
        "on_guild_scheduled_event_user_add",
        "on_guild_scheduled_event_user_remove",
        "on_integration_create",
        "on_integration_update",
        "on_integration_delete",
        "on_interaction_create",
        "on_invite_create",
        "on_invite_delete",
        "on_message_create",
        "on_message_update",
        "on_message_delete",
        "on_message_delete_bulk",
        "on_message_reaction_add",
        "on_message_reaction_remove",
        "on_message_reaction_remove_all",
        "on_message_reaction_remove_emoji",
        "on_presence_update",
        "on_stage_instance_create",
        "on_stage_instance_delete",
        "on_stage_instance_update",
        "on_typing_start",
        "on_user_update",
        "on_voice_state_update",
        "on_voice_server_update",
        "on_webhooks_update",
    ]

    def listen(self, event: str):
        """
        Registers a callback for an event. The callback is registered
        under *event* (the decorated function's own name is ignored when
        it differs), after checking that *event* is a valid event name.

        This function is a decorator.

        Parameters
        ----------
        event: :type:`str`
            The event to listen for
        """

        def wrapper(func):
            name = func.__name__
            # Register under the requested event name, not the function
            # name, when they differ.
            if event != name:
                name = event

            if name not in self.valid_events:
                raise ValueError("Event name provided is not a valid event!")

            self.dispatcher.add_listener(func, name=name)
            return func

        return wrapper

    def event(self, func):
        """
        Similar to `EventsMixin.listen()` except that it checks if the
        function name is a valid event, and registers under that name.

        This function is a decorator.
        """
        if func.__name__ not in self.valid_events:
            raise ValueError("Event name provided is not a valid event!")

        self.dispatcher.add_listener(func)
        return func
| 31.589928 | 90 | 0.661808 |
5801f5f3fde273e68be2e02326883061d8c3eb2a | 1,659 | py | Python | scripts/artifacts/crossArtifactTimeline.py | stark4n6/cLeapp | be7f7b2934e1a47b249121eca191be77ccea278b | [
"Apache-2.0"
] | 14 | 2021-05-10T14:35:53.000Z | 2022-02-10T03:05:18.000Z | scripts/artifacts/crossArtifactTimeline.py | stark4n6/cLeapp | be7f7b2934e1a47b249121eca191be77ccea278b | [
"Apache-2.0"
] | 3 | 2021-06-02T21:07:10.000Z | 2022-02-27T06:25:12.000Z | scripts/artifacts/crossArtifactTimeline.py | stark4n6/cLeapp | be7f7b2934e1a47b249121eca191be77ccea278b | [
"Apache-2.0"
] | 1 | 2021-05-10T15:20:46.000Z | 2021-05-10T15:20:46.000Z | import sqlite3
import textwrap
import os
from scripts.artifact_report import ArtifactHtmlReport
from scripts.cleapfuncs import logfunc, tsv, timeline, is_platform_windows, open_sqlite_db_readonly, usergen
def get_crossArtifactTimeline(files_found, report_folder, seeker, wrap_text):
    """Report on the cross-artifact timeline stored in _Timeline/tl.db.

    Reads every row from the ``data`` table of the tl.db database that an
    earlier stage wrote into the sibling ``_Timeline`` directory of the
    report root, and emits an HTML report, a TSV export and a timeline
    entry.  ``files_found``, ``seeker`` and ``wrap_text`` are part of the
    standard artifact signature but are not used here.
    """
    # tl.db lives in the '_Timeline' directory next to this report folder.
    report_folder = report_folder.rstrip('/')
    report_folder = report_folder.rstrip('\\')
    report_folder_base, tail = os.path.split(report_folder)
    udb_report_folder = os.path.join(report_folder_base, '_Timeline')
    udb_database_file = os.path.join(udb_report_folder, 'tl.db')

    db = open_sqlite_db_readonly(udb_database_file)
    try:
        cursor = db.cursor()
        cursor.execute('''
        select key, activity, datalist from data;
        ''')

        all_rows = cursor.fetchall()
        usageentries = len(all_rows)
        if usageentries > 0:
            report = ArtifactHtmlReport('Cross Artifact Timeline')
            report.start_artifact_report(report_folder, 'Cross Artifact Timeline')
            report.add_script()
            data_headers = ('date_time', 'activity', 'data')
            # Rows are (key, activity, datalist); datalist may contain
            # HTML, hence html_escape=False below.
            data_list = [(row[0], row[1], row[2]) for row in all_rows]

            report.write_artifact_data_table(data_headers, data_list, udb_database_file, html_escape=False)
            report.end_artifact_report()

            tsvname = 'Cross Artifact Timeline'
            tsv(report_folder, data_headers, data_list, tsvname)

            tlactivity = 'Cross Artifact Timeline'
            timeline(report_folder, tlactivity, data_list, data_headers)
        else:
            logfunc('No Cross Artifact Timeline data available')
    finally:
        # Close the database even if report generation fails.
        db.close()
| 36.065217 | 108 | 0.700422 |
e741c4c435eafff2a23589109b71f5d40ef950b6 | 30,451 | py | Python | cfdm/domain.py | NCAS-CMS/cfdm | 8e6ac54c1a2966ad5c07cd51ef609005a1fd70cc | [
"MIT"
] | 22 | 2018-11-07T18:16:22.000Z | 2022-03-16T16:05:21.000Z | cfdm/domain.py | davidhassell/cfdm | 8e6ac54c1a2966ad5c07cd51ef609005a1fd70cc | [
"MIT"
] | 119 | 2019-04-08T08:00:24.000Z | 2022-03-22T08:21:22.000Z | cfdm/domain.py | davidhassell/cfdm | 8e6ac54c1a2966ad5c07cd51ef609005a1fd70cc | [
"MIT"
] | 8 | 2019-04-09T10:12:26.000Z | 2021-07-22T02:41:15.000Z | import logging
from . import Constructs, core, mixin
from .decorators import (
_display_or_return,
_inplace_enabled,
_inplace_enabled_define_and_cleanup,
)
logger = logging.getLogger(__name__)
class Domain(
mixin.FieldDomain,
mixin.NetCDFVariable,
mixin.NetCDFGeometry,
mixin.NetCDFGlobalAttributes,
mixin.NetCDFGroupAttributes,
mixin.NetCDFComponents,
mixin.NetCDFUnreferenced,
mixin.Properties,
core.Domain,
):
"""A domain construct of the CF data model.
The domain represents a set of discrete "locations" in what
generally would be a multi-dimensional space, either in the real
world or in a model's simulated world. The data array elements of
a field construct correspond to individual location of a domain.
The domain construct is defined collectively by the following
constructs of the CF data model: domain axis, dimension
coordinate, auxiliary coordinate, cell measure, coordinate
reference, and domain ancillary constructs; as well as properties
to describe the domain.
**NetCDF interface**
{{netCDF variable}}
{{netCDF global attributes}}
{{netCDF variable group}}
{{netCDF group attributes}}
{{netCDF geometry group}}
Some components exist within multiple constructs, but when written
to a netCDF dataset the netCDF names associated with such
components will be arbitrarily taken from one of them. The netCDF
variable, dimension and sample dimension names and group
structures for such components may be set or removed consistently
across all such components with the `nc_del_component_variable`,
`nc_set_component_variable`, `nc_set_component_variable_groups`,
`nc_clear_component_variable_groups`,
`nc_del_component_dimension`, `nc_set_component_dimension`,
`nc_set_component_dimension_groups`,
`nc_clear_component_dimension_groups`,
`nc_del_component_sample_dimension`,
`nc_set_component_sample_dimension`,
`nc_set_component_sample_dimension_groups`,
`nc_clear_component_sample_dimension_groups` methods.
.. versionadded:: (cfdm) 1.7.0
"""
def __new__(cls, *args, **kwargs):
"""This must be overridden in subclasses.
.. versionadded:: (cfdm) 1.7.0
"""
instance = super().__new__(cls)
instance._Constructs = Constructs
return instance
    def __init__(
        self, properties=None, source=None, copy=True, _use_data=True
    ):
        """**Initialisation**

        :Parameters:

            {{init properties: `dict`, optional}}

                *Parameter example:*
                   ``properties={'long_name': 'Domain for model'}``

            source: optional
                Initialise the metadata constructs from those of
                *source*.

                {{init source}}

                A new domain may also be instantiated with the
                `fromconstructs` class method.

            {{init copy: `bool`, optional}}

        """
        super().__init__(
            properties=properties,
            source=source,
            copy=copy,
            _use_data=_use_data,
        )

        # Copy any netCDF interface settings (variable/group names, etc.)
        # over from source
        self._initialise_netcdf(source)

        # Carry over the dataset compliance report, always as a copy
        self._set_dataset_compliance(self.dataset_compliance(), copy=True)
def __repr__(self):
"""Called by the `repr` built-in function.
x.__repr__() <==> repr(x)
"""
shape = sorted(
[
domain_axis.get_size(None)
for domain_axis in self.domain_axes(todict=True).values()
]
)
shape = str(shape)
shape = shape[1:-1]
return f"<{self.__class__.__name__}: {self._one_line_description()}>"
    def __str__(self):
        """Called by the `str` built-in function.

        x.__str__() <==> str(x)

        """

        def _print_item(self, cid, variable, axes):
            """Private function called by __str__.

            Formats one metadata construct as "<identity>(<shape>) =
            <data>", falling back to bounds data or an external-variable
            marker when the construct itself has no data.
            """
            x = [variable.identity(default=f"key%{cid}")]

            if variable.has_data():
                # Describe the shape in terms of the domain axis names
                shape = [axis_names[axis] for axis in axes]
                shape = str(tuple(shape)).replace("'", "")
                shape = shape.replace(",)", ")")
                x.append(shape)
            elif (
                variable.construct_type
                in ("auxiliary_coordinate", "domain_ancillary")
                and variable.has_bounds()
                and variable.bounds.has_data()
            ):
                # Construct has no data but it does have bounds
                shape = [axis_names[axis] for axis in axes]
                shape.extend(
                    [str(n) for n in variable.bounds.data.shape[len(axes) :]]
                )
                shape = str(tuple(shape)).replace("'", "")
                shape = shape.replace(",)", ")")
                x.append(shape)
            elif (
                hasattr(variable, "nc_get_external")
                and variable.nc_get_external()
            ):
                # Construct's data lives in an external file
                ncvar = variable.nc_get_variable(None)
                if ncvar is not None:
                    x.append(f" (external variable: ncvar%{ncvar})")
                else:
                    x.append(" (external variable)")

            if variable.has_data():
                x.append(f" = {variable.data}")
            elif (
                variable.construct_type
                in ("auxiliary_coordinate", "domain_ancillary")
                and variable.has_bounds()
                and variable.bounds.has_data()
            ):
                # Construct has no data but it does have bounds data
                x.append(f" = {variable.bounds.data}")

            return "".join(x)

        string = []

        axis_names = self._unique_domain_axis_identities()

        construct_data_axes = self.constructs.data_axes()

        x = []
        dimension_coordinates = self.dimension_coordinates(todict=True)
        for axis_cid in sorted(self.domain_axes(todict=True)):
            for cid, dim in dimension_coordinates.items():
                if construct_data_axes[cid] == (axis_cid,):
                    # NOTE(review): f"key%{0}" is the constant "key%0";
                    # presumably f"key%{cid}" was intended (as used in
                    # _print_item above) -- confirm.
                    name = dim.identity(default=f"key%{0}")
                    y = f"{name}({dim.get_data().size})"
                    if y != axis_names[axis_cid]:
                        y = f"{name}({axis_names[axis_cid]})"
                    if dim.has_data():
                        y += f" = {dim.get_data()}"

                    x.append(y)

        if x:
            x = "\n : ".join(x)
            string.append(f"Dimension coords: {x}")

        # Auxiliary coordinates
        x = [
            _print_item(self, cid, v, construct_data_axes[cid])
            for cid, v in sorted(
                self.auxiliary_coordinates(todict=True).items()
            )
        ]
        if x:
            x = "\n : ".join(x)
            string.append(f"Auxiliary coords: {x}")

        # Cell measures
        x = [
            _print_item(self, cid, v, construct_data_axes[cid])
            for cid, v in sorted(self.cell_measures(todict=True).items())
        ]
        if x:
            x = "\n : ".join(x)
            string.append(f"Cell measures : {x}")

        # Coordinate references
        x = sorted(
            [
                str(ref)
                for ref in list(
                    self.coordinate_references(todict=True).values()
                )
            ]
        )
        if x:
            x = "\n : ".join(x)
            string.append(f"Coord references: {x}")

        # Domain ancillary variables
        x = [
            _print_item(self, cid, anc, construct_data_axes[cid])
            for cid, anc in sorted(
                self.domain_ancillaries(todict=True).items()
            )
        ]
        if x:
            x = "\n : ".join(x)
            string.append(f"Domain ancils : {x}")

        return "\n".join(string)
# ----------------------------------------------------------------
# Private methods
# ----------------------------------------------------------------
@_display_or_return
def _dump_axes(self, axis_names, display=True, _level=0):
"""Returns a string description of the field's domain axes.
:Parameters:
display: `bool`, optional
If False then return the description as a string. By
default the description is printed.
_level: `int`, optional
:Returns:
`str`
A string containing the description.
"""
indent1 = " " * _level
w = sorted(
[
f"{indent1}Domain Axis: {axis_names[axis]}"
for axis in self.domain_axes(todict=True)
]
)
return "\n".join(w)
def _one_line_description(self, axis_names_sizes=None):
"""Return a one-line description of the domain.
:Returns:
`str`
The description.
"""
if axis_names_sizes is None:
axis_names_sizes = self._unique_domain_axis_identities()
axis_names = ", ".join(sorted(axis_names_sizes.values()))
return f"{self.identity('')}{{{axis_names}}}"
    @_inplace_enabled(default=False)
    def apply_masking(self, inplace=False):
        """Apply masking as defined by the CF conventions.

        Masking is applied to all metadata constructs with data.

        Masking is applied according to any of the following criteria
        that are applicable:

        * where data elements are equal to the value of the
          ``missing_value`` property;

        * where data elements are equal to the value of the
          ``_FillValue`` property;

        * where data elements are strictly less than the value of the
          ``valid_min`` property;

        * where data elements are strictly greater than the value of
          the ``valid_max`` property;

        * where data elements are within the inclusive range specified
          by the two values of ``valid_range`` property.

        If any of the above properties have not been set the no
        masking is applied for that method.

        Elements that are already masked remain so.

        .. note:: If using the `apply_masking` method on a construct
                  that has been read from a dataset with the
                  ``mask=False`` parameter to the `read` function,
                  then the mask defined in the dataset can only be
                  recreated if the ``missing_value``, ``_FillValue``,
                  ``valid_min``, ``valid_max``, and ``valid_range``
                  properties have not been updated.

        .. versionadded:: (cfdm) 1.8.9.0

        .. seealso:: `{{package}}.Data.apply_masking`, `read`, `write`

        :Parameters:

            {{inplace: `bool`, optional}}

        :Returns:

            `Domain` or `None`
                A new domain construct with masked values, or `None`
                if the operation was in-place.

        **Examples:**

        >>> d = {{package}}.example_field(0).domain
        >>> x = d.construct('longitude')
        >>> x.data[[0, -1]] = {{package}}.masked
        >>> print(x.data.array)
        [-- 67.5 112.5 157.5 202.5 247.5 292.5 --]
        >>> {{package}}.write(d, 'masked.nc')
        >>> no_mask = {{package}}.read('masked.nc', domain=True, mask=False)[0]
        >>> no_mask_x = no_mask.construct('longitude')
        >>> print(no_mask_x.data.array)
        [9.96920997e+36 6.75000000e+01 1.12500000e+02 1.57500000e+02
         2.02500000e+02 2.47500000e+02 2.92500000e+02 9.96920997e+36]
        >>> masked = no_mask.apply_masking()
        >>> masked_x = masked.construct('longitude')
        >>> print(masked_x.data.array)
        [-- 67.5 112.5 157.5 202.5 247.5 292.5 --]

        """
        d = _inplace_enabled_define_and_cleanup(self)

        # Apply masking to the metadata constructs
        d._apply_masking_constructs()

        return d
def climatological_time_axes(self):
"""Return all axes which are climatological time axes.
This is ascertained by inspecting the values returned by each
coordinate construct's `is_climatology` method.
.. versionadded:: (cfdm) 1.8.9.0
:Returns:
`set`
The keys of the domain axis constructs that are
climatological time axes.
**Examples:**
>>> d = cfdm.example_field(0)
>>> d.climatological_time_axes()
set()
"""
data_axes = self.constructs.data_axes()
out = []
for ckey, c in self.coordinates(todict=True).items():
if not c.is_climatology():
continue
out.extend(data_axes.get(ckey, ()))
return set(out)
    def creation_commands(
        self,
        representative_data=False,
        namespace=None,
        indent=0,
        string=True,
        name="domain",
        data_name="data",
        header=True,
        _domain=True,
    ):
        """Return the commands that would create the domain construct.

        **Construct keys**

        The *key* parameter of the output `set_construct` commands is
        utilised in order minimise the number of commands needed to
        implement cross-referencing between constructs (e.g. between a
        coordinate reference construct and coordinate
        constructs). This is usually not necessary when building
        domain constructs, as by default the `set_construct` method
        returns a unique construct key for the construct being set.

        .. versionadded:: (cfdm) 1.9.0.0

        .. seealso:: `set_construct`,
                     `{{package}}.Data.creation_commands`,
                     `{{package}}.example_field`

        :Parameters:

            {{representative_data: `bool`, optional}}

            {{namespace: `str`, optional}}

            {{indent: `int`, optional}}

            {{string: `bool`, optional}}

            {{header: `bool`, optional}}

        :Returns:

            {{returns creation_commands}}

        **Examples:**

        >>> f = {{package}}.example_field(0)
        >>> d = f.domain
        >>> print(d.creation_commands())
        #
        # domain:
        domain = {{package}}.Domain()
        #
        # domain_axis: ncdim%lat
        c = {{package}}.DomainAxis()
        c.set_size(5)
        c.nc_set_dimension('lat')
        domain.set_construct(c, key='domainaxis0', copy=False)
        #
        # domain_axis: ncdim%lon
        c = {{package}}.DomainAxis()
        c.set_size(8)
        c.nc_set_dimension('lon')
        domain.set_construct(c, key='domainaxis1', copy=False)
        #
        # domain_axis:
        c = {{package}}.DomainAxis()
        c.set_size(1)
        domain.set_construct(c, key='domainaxis2', copy=False)
        #
        # dimension_coordinate: latitude
        c = {{package}}.DimensionCoordinate()
        c.set_properties({'units': 'degrees_north', 'standard_name': 'latitude'})
        c.nc_set_variable('lat')
        data = {{package}}.Data([-75.0, -45.0, 0.0, 45.0, 75.0], units='degrees_north', dtype='f8')
        c.set_data(data)
        b = {{package}}.Bounds()
        b.nc_set_variable('lat_bnds')
        data = {{package}}.Data([[-90.0, -60.0], [-60.0, -30.0], [-30.0, 30.0], [30.0, 60.0], [60.0, 90.0]], units='degrees_north', dtype='f8')
        b.set_data(data)
        c.set_bounds(b)
        domain.set_construct(c, axes=('domainaxis0',), key='dimensioncoordinate0', copy=False)
        #
        # dimension_coordinate: longitude
        c = {{package}}.DimensionCoordinate()
        c.set_properties({'units': 'degrees_east', 'standard_name': 'longitude'})
        c.nc_set_variable('lon')
        data = {{package}}.Data([22.5, 67.5, 112.5, 157.5, 202.5, 247.5, 292.5, 337.5], units='degrees_east', dtype='f8')
        c.set_data(data)
        b = {{package}}.Bounds()
        b.nc_set_variable('lon_bnds')
        data = {{package}}.Data([[0.0, 45.0], [45.0, 90.0], [90.0, 135.0], [135.0, 180.0], [180.0, 225.0], [225.0, 270.0], [270.0, 315.0], [315.0, 360.0]], units='degrees_east', dtype='f8')
        b.set_data(data)
        c.set_bounds(b)
        domain.set_construct(c, axes=('domainaxis1',), key='dimensioncoordinate1', copy=False)
        #
        # dimension_coordinate: time
        c = {{package}}.DimensionCoordinate()
        c.set_properties({'units': 'days since 2018-12-01', 'standard_name': 'time'})
        c.nc_set_variable('time')
        data = {{package}}.Data([31.0], units='days since 2018-12-01', dtype='f8')
        c.set_data(data)
        domain.set_construct(c, axes=('domainaxis2',), key='dimensioncoordinate2', copy=False)

        >>> print(d.creation_commands(representative_data=True, namespace='',
        ...                           indent=4, header=False))
            domain = Domain()
            c = DomainAxis()
            c.set_size(5)
            c.nc_set_dimension('lat')
            domain.set_construct(c, key='domainaxis0', copy=False)
            c = DomainAxis()
            c.set_size(8)
            c.nc_set_dimension('lon')
            domain.set_construct(c, key='domainaxis1', copy=False)
            c = DomainAxis()
            c.set_size(1)
            domain.set_construct(c, key='domainaxis2', copy=False)
            c = DimensionCoordinate()
            c.set_properties({'units': 'degrees_north', 'standard_name': 'latitude'})
            c.nc_set_variable('lat')
            data = <{{repr}}Data(5): [-75.0, ..., 75.0] degrees_north>  # Representative data
            c.set_data(data)
            b = Bounds()
            b.nc_set_variable('lat_bnds')
            data = <{{repr}}Data(5, 2): [[-90.0, ..., 90.0]] degrees_north>  # Representative data
            b.set_data(data)
            c.set_bounds(b)
            domain.set_construct(c, axes=('domainaxis0',), key='dimensioncoordinate0', copy=False)
            c = DimensionCoordinate()
            c.set_properties({'units': 'degrees_east', 'standard_name': 'longitude'})
            c.nc_set_variable('lon')
            data = <{{repr}}Data(8): [22.5, ..., 337.5] degrees_east>  # Representative data
            c.set_data(data)
            b = Bounds()
            b.nc_set_variable('lon_bnds')
            data = <{{repr}}Data(8, 2): [[0.0, ..., 360.0]] degrees_east>  # Representative data
            b.set_data(data)
            c.set_bounds(b)
            domain.set_construct(c, axes=('domainaxis1',), key='dimensioncoordinate1', copy=False)
            c = DimensionCoordinate()
            c.set_properties({'units': 'days since 2018-12-01', 'standard_name': 'time'})
            c.nc_set_variable('time')
            data = <{{repr}}Data(1): [2019-01-01 00:00:00]>  # Representative data
            c.set_data(data)
            domain.set_construct(c, axes=('domainaxis2',), key='dimensioncoordinate2', copy=False)

        """
        # 'b', 'c', 'mask' and 'i' are variable names used internally by
        # the generated commands, so the domain variable may not shadow
        # them.
        if name in ("b", "c", "mask", "i"):
            raise ValueError(
                f"The 'name' parameter can not have the value {name!r}"
            )

        if name == data_name:
            raise ValueError(
                "The 'name' parameter can not have the same value as "
                f"the 'data_name' parameter: {name!r}"
            )

        # Normalise the namespace so that it is either empty or ends with
        # a dot; 'namespace0' preserves the caller's original value for
        # passing down to the component creation_commands calls.
        namespace0 = namespace
        if namespace is None:
            namespace = self._package() + "."
        elif namespace and not namespace.endswith("."):
            namespace += "."

        if _domain:
            # Commands that create the domain object itself (including
            # its properties)
            out = super().creation_commands(
                indent=indent,
                namespace=namespace,
                string=False,
                name=name,
                header=header,
            )

            nc_global_attributes = self.nc_global_attributes()
            if nc_global_attributes:
                if header:
                    out.append("#")
                    out.append("# netCDF global attributes")

                out.append(
                    f"{name}.nc_set_global_attributes("
                    f"{nc_global_attributes!r})"
                )
        else:
            out = []

        # Domain axis constructs
        for key, c in self.domain_axes(todict=True).items():
            out.extend(
                c.creation_commands(
                    indent=0,
                    string=False,
                    namespace=namespace0,
                    name="c",
                    header=header,
                )
            )
            out.append(f"{name}.set_construct(c, key={key!r}, copy=False)")

        # Metadata constructs with data
        for key, c in self.constructs.filter_by_type(
            "dimension_coordinate",
            "auxiliary_coordinate",
            "cell_measure",
            "domain_ancillary",
        ).items():
            out.extend(
                c.creation_commands(
                    representative_data=representative_data,
                    string=False,
                    indent=0,
                    namespace=namespace0,
                    name="c",
                    data_name=data_name,
                    header=header,
                )
            )
            out.append(
                f"{name}.set_construct("
                f"c, axes={self.get_data_axes(key)}, key={key!r}, copy=False)"
            )

        # Coordinate reference constructs (these cross-reference the
        # coordinate construct keys emitted above)
        for key, c in self.coordinate_references(todict=True).items():
            out.extend(
                c.creation_commands(
                    namespace=namespace0,
                    indent=0,
                    string=False,
                    name="c",
                    header=header,
                )
            )
            out.append(f"{name}.set_construct(c)")

        if string:
            # Join the individual command lines into one indented string
            indent = " " * indent
            out[0] = indent + out[0]
            out = ("\n" + indent).join(out)

        return out
    @_display_or_return
    def dump(
        self,
        display=True,
        _omit_properties=(),
        _prefix="",
        _title=None,
        _create_title=True,
        _level=0,
    ):
        """A full description of the domain construct.

        Returns a description of all properties, including those of
        metadata constructs and their components, and provides
        selected values of all data arrays.

        .. versionadded:: (cfdm) 1.7.0

        :Parameters:

            display: `bool`, optional
                If False then return the description as a string. By
                default the description is printed.

        :Returns:

            {{returns dump}}

        """
        # Each _level of nesting indents the output by four spaces.
        indent = "    "
        indent0 = indent * _level
        if _create_title:
            if _title is None:
                # Build a default title from the identity and/or the
                # netCDF variable name, whichever are available.
                ncvar = self.nc_get_variable(None)
                _title = self.identity(default=None)
                if ncvar is not None:
                    if _title is None:
                        _title = f"ncvar%{ncvar}"
                    else:
                        _title += f" (ncvar%{ncvar})"
                if _title is None:
                    _title = ""
                _title = f"{self.__class__.__name__}: {_title}"
            # Underline/overline the title with dashes of matching length.
            line = f"{indent0}{''.ljust(len(_title), '-')}"
            # Title
            string = [line, indent0 + _title, line]
            # Delegate the property listing to the parent class, with
            # title creation suppressed since we already emitted one.
            properties = super().dump(
                display=False,
                _create_title=False,
                _omit_properties=_omit_properties,
                _prefix=_prefix,
                _title=_title,
                _level=_level - 1,
            )
            string.append(properties)
            string.append("")
        else:
            string = []
        # Human-readable names used to label axes and constructs below.
        axis_to_name = self._unique_domain_axis_identities()
        construct_name = self._unique_construct_names()
        construct_data_axes = self.constructs.data_axes()
        # Domain axes
        axes = self._dump_axes(axis_to_name, display=False, _level=_level)
        if axes:
            string.append(axes)
        # Dimension coordinates
        dimension_coordinates = self.dimension_coordinates(todict=True)
        for cid, value in sorted(dimension_coordinates.items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Dimension coordinate: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Auxiliary coordinates
        auxiliary_coordinates = self.auxiliary_coordinates(todict=True)
        for cid, value in sorted(auxiliary_coordinates.items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Auxiliary coordinate: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Domain ancillaries
        for cid, value in sorted(self.domain_ancillaries(todict=True).items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Domain ancillary: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        # Coordinate references (reference coordinates by construct key,
        # so pass the name map and coordinate key collections along).
        for cid, value in sorted(
            self.coordinate_references(todict=True).items()
        ):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _level=_level,
                    _title=f"Coordinate reference: {construct_name[cid]}",
                    _construct_names=construct_name,
                    _auxiliary_coordinates=tuple(auxiliary_coordinates),
                    _dimension_coordinates=tuple(dimension_coordinates),
                )
            )
        # Cell measures
        for cid, value in sorted(self.cell_measures(todict=True).items()):
            string.append("")
            string.append(
                value.dump(
                    display=False,
                    _key=cid,
                    _level=_level,
                    _title=f"Cell measure: {construct_name[cid]}",
                    _axes=construct_data_axes[cid],
                    _axis_names=axis_to_name,
                )
            )
        string.append("")
        return "\n".join(string)
def get_filenames(self):
"""Return the file names containing the metadata construct data.
:Returns:
`set`
The file names in normalized, absolute form. If all of
the data are in memory then an empty `set` is
returned.
**Examples:**
>>> d = {{package}}.example_field(0).domain
>>> {{package}}.write(d, 'temp_file.nc')
>>> e = {{package}}.read('temp_file.nc', domain=True)[0]
>>> e.get_filenames()
{'temp_file.nc'}
"""
out = set()
for c in self.constructs.filter_by_data().values():
out.update(c.get_filenames())
return out
def identity(self, default=""):
"""Return the canonical identity.
By default the identity is the first found of the following:
* The ``cf_role`` property, preceded by ``'cf_role='``.
* The ``long_name`` property, preceded by ``'long_name='``.
* The netCDF variable name, preceded by ``'ncvar%'``.
* The value of the *default* parameter.
.. versionadded:: (cfdm) 1.9.0.0
.. seealso:: `identities`
:Parameters:
default: optional
If no identity can be found then return the value of
the default parameter.
:Returns:
The identity.
**Examples:**
>>> d = {{package}}.Domain()
>>> d.set_properties({'foo': 'bar',
... 'long_name': 'Domain for model'})
>>> d.nc_set_variable('dom1')
>>> d.identity()
'long_name=Domain for model'
>>> d.del_property('long_name')
'long_name=Domain for model'
>>> d.identity(default='no identity')
'ncvar%dom1'
>>> d.identity()
'ncvar%dom1'
>>> d.nc_del_variable()
'dom1'
>>> d.identity()
''
>>> d.identity(default='no identity')
'no identity'
"""
for prop in ("cf_role", "long_name"):
n = self.get_property(prop, None)
if n is not None:
return f"{prop}={n}"
n = self.nc_get_variable(None)
if n is not None:
return f"ncvar%{n}"
return default
def identities(self):
"""Return all possible identities.
The identities comprise:
* The ``cf_role`` property, preceded by ``'cf_role='``.
* The ``long_name`` property, preceded by ``'long_name='``.
* All other properties, preceded by the property name and a
equals e.g. ``'foo=bar'``.
* The netCDF variable name, preceded by ``'ncvar%'``.
.. versionadded:: (cfdm) 1.9.0.0
.. seealso:: `identity`
:Returns:
`list`
The identities.
**Examples:**
>>> d = {{package}}.Domain()
>>> d.set_properties({'foo': 'bar',
... 'long_name': 'Domain for model'})
>>> d.nc_set_variable('dom1')
>>> d.identities()
['long_name=Domain for model', 'foo=bar', 'ncvar%dom1']
"""
properties = self.properties()
cf_role = properties.pop("cf_role", None)
long_name = properties.pop("long_name", None)
out = []
if cf_role is not None:
out.append(f"cf_role={cf_role}")
if long_name is not None:
out.append(f"long_name={long_name}")
out += [
f"{prop}={value}" for prop, value in sorted(properties.items())
]
n = self.nc_get_variable(None)
if n is not None:
out.append(f"ncvar%{n}")
return out
| 32.394681 | 189 | 0.528817 |
d6cc85dd214813311cf30af0327051242c501063 | 6,276 | py | Python | slixmpp/xmlstream/tostring.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | slixmpp/xmlstream/tostring.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | slixmpp/xmlstream/tostring.py | marconfus/slixmpp | bcf186f42dc31d360e0a0af8a4b3aaf1e0b212aa | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
slixmpp.xmlstream.tostring
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module converts XML objects into Unicode strings and
intelligently includes namespaces only when necessary to
keep the output readable.
Part of Slixmpp: The Slick XMPP Library
:copyright: (c) 2011 Nathanael C. Fritz
:license: MIT, see LICENSE for more details
"""
# Namespace URI reserved for the built-in ``xml:`` prefix (e.g. xml:lang),
# defined by the XML specification; attributes in this namespace are
# serialized with the literal ``xml:`` prefix rather than a declaration.
XML_NS = 'http://www.w3.org/XML/1998/namespace'
def tostring(xml=None, xmlns='', stream=None, outbuffer='',
             top_level=False, open_only=False, namespaces=None):
    """Serialize an XML object to a Unicode string.

    If an outer xmlns is provided using ``xmlns``, then the current element's
    namespace will not be included if it matches the outer namespace. An
    exception is made for elements that have an attached stream, and appear
    at the stream root.

    :param XML xml: The XML object to serialize.
    :param string xmlns: Optional namespace of an element wrapping the XML
                         object.
    :param stream: The XML stream that generated the XML object.
    :param string outbuffer: Optional buffer for storing serializations
                             during recursive calls.
    :param bool top_level: Indicates that the element is the outermost
                           element.
    :param bool open_only: If True, emit only the opening tag and return.
    :param set namespaces: Track which namespaces are in active use so
                           that new ones can be declared when needed.

    :type xml: :py:class:`~xml.etree.ElementTree.Element`
    :type stream: :class:`~slixmpp.xmlstream.xmlstream.XMLStream`

    :rtype: Unicode string
    """
    # Add previous results to the start of the output.
    output = [outbuffer]
    # Extract the element's tag name. ElementTree uses Clark notation,
    # i.e. '{namespace}tag', so split on the closing brace.
    tag_split = xml.tag.split('}', 1)
    tag_name = tag_split[-1]
    # Extract the element's namespace if it is defined.
    if '}' in xml.tag:
        tag_xmlns = tag_split[0][1:]
    else:
        tag_xmlns = ''
    default_ns = ''
    stream_ns = ''
    use_cdata = False
    if stream:
        default_ns = stream.default_ns
        stream_ns = stream.stream_ns
        use_cdata = stream.use_cdata
    # Output the tag name and derived namespace of the element.
    # An explicit xmlns attribute is emitted only when the element's
    # namespace differs from what the context already implies.
    namespace = ''
    if tag_xmlns:
        if top_level and tag_xmlns not in [default_ns, xmlns, stream_ns] \
                or not top_level and tag_xmlns != xmlns:
            namespace = ' xmlns="%s"' % tag_xmlns
        if stream and tag_xmlns in stream.namespace_map:
            mapped_namespace = stream.namespace_map[tag_xmlns]
            if mapped_namespace:
                tag_name = "%s:%s" % (mapped_namespace, tag_name)
    output.append("<%s" % tag_name)
    output.append(namespace)
    # Output escaped attribute values.
    new_namespaces = set()
    for attrib, value in xml.attrib.items():
        value = escape(value, use_cdata)
        if '}' not in attrib:
            output.append(' %s="%s"' % (attrib, value))
        else:
            # Namespaced attribute in Clark notation.
            attrib_split = attrib.split('}')
            attrib_ns = attrib_split[0][1:]
            attrib = attrib_split[1]
            if attrib_ns == XML_NS:
                # The xml: prefix never needs a declaration.
                output.append(' xml:%s="%s"' % (attrib, value))
            elif stream and attrib_ns in stream.namespace_map:
                mapped_ns = stream.namespace_map[attrib_ns]
                if mapped_ns:
                    if namespaces is None:
                        namespaces = set()
                    if attrib_ns not in namespaces:
                        # First use of this namespace in the current
                        # serialization context: declare it here.
                        namespaces.add(attrib_ns)
                        new_namespaces.add(attrib_ns)
                        output.append(' xmlns:%s="%s"' % (
                            mapped_ns, attrib_ns))
                    output.append(' %s:%s="%s"' % (
                        mapped_ns, attrib, value))
    if open_only:
        # Only output the opening tag, regardless of content.
        output.append(">")
        return ''.join(output)
    if len(xml) or xml.text:
        # If there are additional child elements to serialize.
        output.append(">")
        if xml.text:
            output.append(escape(xml.text, use_cdata))
        if len(xml):
            for child in xml:
                output.append(tostring(child, tag_xmlns, stream,
                                       namespaces=namespaces))
        output.append("</%s>" % tag_name)
    elif xml.text:
        # NOTE(review): this branch is unreachable — the condition above
        # already covers any element with text; kept for fidelity.
        # If we only have text content.
        output.append(">%s</%s>" % (escape(xml.text, use_cdata), tag_name))
    else:
        # Empty element.
        output.append(" />")
    if xml.tail:
        # If there is additional text after the element.
        output.append(escape(xml.tail, use_cdata))
    for ns in new_namespaces:
        # Remove namespaces introduced in this context. This is necessary
        # because the namespaces object continues to be shared with other
        # contexts.
        namespaces.remove(ns)
    return ''.join(output)
# Map of XML special characters to their predefined entity references.
# Built once at module level instead of on every call. NOTE: the previous
# table mapped each character to itself (the entity text had been stripped
# by an HTML-decoding pass), which made escaping a no-op — restored here.
_XML_ESCAPES = {'&': '&amp;',
                '<': '&lt;',
                '>': '&gt;',
                "'": '&apos;',
                '"': '&quot;'}


def escape(text, use_cdata=False):
    """Convert special characters in XML to escape sequences.

    :param string text: The XML text to convert.
    :param bool use_cdata: If True, text containing special characters
        is wrapped in CDATA sections instead of entity-escaped.

    :rtype: Unicode string
    """
    if not use_cdata:
        return ''.join(_XML_ESCAPES.get(c, c) for c in text)
    if any(c in _XML_ESCAPES for c in text):
        # A literal "]]>" may not appear inside a CDATA section, so split
        # on it and re-join with a construction that re-emits "]]>".
        escaped = ("<![CDATA[%s]]>" % part for part in text.split("]]>"))
        return "<![CDATA[]]]><![CDATA[]>]]>".join(escaped)
    return text
def _get_highlight():
try:
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import Terminal256Formatter
LEXER = get_lexer_by_name('xml')
FORMATTER = Terminal256Formatter()
class Highlighter:
__slots__ = ['string']
def __init__(self, string):
self.string = string
def __str__(self):
return highlight(str(self.string), LEXER, FORMATTER).strip()
return Highlighter
except ImportError:
return lambda x: x
highlight = _get_highlight()
| 34.295082 | 77 | 0.581581 |
0de45ccb3ad468d68df81d0bf8003eadd4308f66 | 838 | py | Python | test/test_add_group.py | nkoshkina/Python_Training2-Task14 | 8619b2464aef8eab0861e4bf584f8d363da7a0f5 | [
"Apache-2.0"
] | null | null | null | test/test_add_group.py | nkoshkina/Python_Training2-Task14 | 8619b2464aef8eab0861e4bf584f8d363da7a0f5 | [
"Apache-2.0"
] | null | null | null | test/test_add_group.py | nkoshkina/Python_Training2-Task14 | 8619b2464aef8eab0861e4bf584f8d363da7a0f5 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from model.group import Group
def _check_group_added(app, group0):
    """Create *group0* via the app and verify the group list grows by one
    and matches the expected contents (compared order-insensitively by id)."""
    old_groups = app.group.get_groups_list()
    app.group.create(group0)
    # Cheap count check first, before re-fetching the full list.
    assert app.group.count() == len(old_groups) + 1
    new_groups = app.group.get_groups_list()
    old_groups.append(group0)
    assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)


def test_add_group(app):
    """Adding a fully populated group appends it to the group list."""
    _check_group_added(
        app, Group(name="testgroup", header="testheader", footer="testfooter"))


def test_add_empty_group(app):
    """Adding a group with all-empty fields still appends a record."""
    _check_group_added(app, Group(name="", header="", footer=""))
| 29.928571 | 93 | 0.704057 |
85f0fcc9132d69c597f1a68b0a3b90156364cadb | 8,627 | py | Python | Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_protocol_type_conversions_header.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 6 | 2021-07-05T16:09:39.000Z | 2022-03-06T22:44:42.000Z | Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_protocol_type_conversions_header.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | 7 | 2022-03-15T13:25:39.000Z | 2022-03-15T13:25:44.000Z | Source/JavaScriptCore/inspector/scripts/codegen/generate_objc_protocol_type_conversions_header.py | jacadcaps/webkitty | 9aebd2081349f9a7b5d168673c6f676a1450a66d | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
#
# Copyright (c) 2014, 2016 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import string
from string import Template
try:
from .generator import Generator
from .models import EnumType, Frameworks
from .objc_generator import ObjCGenerator
from .objc_generator_templates import ObjCGeneratorTemplates as ObjCTemplates
except ValueError:
from generator import Generator
from models import EnumType, Frameworks
from objc_generator import ObjCGenerator
from objc_generator_templates import ObjCGeneratorTemplates as ObjCTemplates
log = logging.getLogger('global')
def add_newline(lines):
    """Append one empty-string separator to *lines* in place.

    Does nothing if *lines* is empty or already ends with a blank entry,
    so repeated calls never produce more than one trailing separator.
    """
    # Idiomatic emptiness check (was `not len(lines)`).
    if not lines or lines[-1] == '':
        return
    lines.append('')
class ObjCProtocolTypeConversionsHeaderGenerator(ObjCGenerator):
    """Generates the <Protocol>TypeConversions.h header containing inline
    C++ helpers that convert protocol enum values to and from strings."""

    def __init__(self, *args, **kwargs):
        ObjCGenerator.__init__(self, *args, **kwargs)

    def output_filename(self):
        # e.g. "RWIProtocolTypeConversions.h" for protocol "RWIProtocol".
        return '%sTypeConversions.h' % self.protocol_name()

    def domains_to_generate(self):
        # Restrict the base list to domains whose types we emit at all.
        return list(filter(self.should_generate_types_for_domain, Generator.domains_to_generate(self)))

    def generate_output(self):
        """Assemble the full header: license, prelude, standard helpers,
        per-domain enum conversions, and postlude."""
        headers = [
            '"%s.h"' % self.protocol_name(),
            Generator.string_for_file_include('%sArrayConversions.h' % ObjCGenerator.OBJC_STATIC_PREFIX, Frameworks.WebInspector, self.model().framework),
        ]
        headers.sort()
        header_args = {
            'includes': '\n'.join(['#import ' + header for header in headers]),
        }
        domains = self.domains_to_generate()
        sections = []
        sections.append(self.generate_license())
        sections.append(Template(ObjCTemplates.TypeConversionsHeaderPrelude).substitute(None, **header_args))
        sections.append(Template(ObjCTemplates.TypeConversionsHeaderStandard).substitute(None))
        sections.extend(list(map(self._generate_enum_conversion_functions, domains)))
        sections.append(Template(ObjCTemplates.TypeConversionsHeaderPostlude).substitute(None, **header_args))
        return '\n\n'.join(sections)

    def _generate_enum_conversion_functions(self, domain):
        """Emit conversion helpers for every enum in *domain*: named type
        enums, anonymous member enums, and anonymous command/event
        parameter enums — each wrapped in its declaration's guard."""
        lines = []
        # Type enums and member enums.
        for declaration in self.type_declarations_for_domain(domain):
            declaration_lines = []
            if isinstance(declaration.type, EnumType):
                add_newline(declaration_lines)
                declaration_lines.append(self._generate_anonymous_enum_conversion_for_declaration(domain, declaration))
            else:
                for member in declaration.type_members:
                    if (isinstance(member.type, EnumType) and member.type.is_anonymous):
                        add_newline(declaration_lines)
                        declaration_lines.append(self._generate_anonymous_enum_conversion_for_member(domain, declaration, member))
            if len(declaration_lines):
                lines.append(self.wrap_with_guard_for_condition(declaration.condition, '\n\n'.join(declaration_lines)))
        # Anonymous command enums.
        for command in self.commands_for_domain(domain):
            command_lines = []
            for parameter in command.call_parameters:
                if (isinstance(parameter.type, EnumType) and parameter.type.is_anonymous):
                    add_newline(command_lines)
                    command_lines.append(self._generate_anonymous_enum_conversion_for_parameter(domain, command.command_name, parameter))
            for parameter in command.return_parameters:
                if (isinstance(parameter.type, EnumType) and parameter.type.is_anonymous):
                    add_newline(command_lines)
                    command_lines.append(self._generate_anonymous_enum_conversion_for_parameter(domain, command.command_name, parameter))
            if len(command_lines):
                lines.append(self.wrap_with_guard_for_condition(command.condition, '\n\n'.join(command_lines)))
        # Anonymous event enums.
        for event in self.events_for_domain(domain):
            event_lines = []
            for parameter in event.event_parameters:
                if (isinstance(parameter.type, EnumType) and parameter.type.is_anonymous):
                    add_newline(event_lines)
                    event_lines.append(self._generate_anonymous_enum_conversion_for_parameter(domain, event.event_name, parameter))
            if len(event_lines):
                lines.append(self.wrap_with_guard_for_condition(event.condition, '\n\n'.join(event_lines)))
        if not len(lines):
            return ''
        return self.wrap_with_guard_for_condition(domain.condition, '\n\n'.join(lines))

    def _generate_anonymous_enum_conversion_for_declaration(self, domain, declaration):
        # Conversions for an enum declared as a named protocol type.
        objc_enum_name = self.objc_enum_name_for_anonymous_enum_declaration(declaration)
        enum_values = declaration.type.enum_values()
        lines = []
        lines.append(self._generate_enum_objc_to_protocol_string(objc_enum_name, enum_values))
        lines.append(self._generate_enum_from_protocol_string(objc_enum_name, enum_values))
        return '\n\n'.join(lines)

    def _generate_anonymous_enum_conversion_for_member(self, domain, declaration, member):
        # Conversions for an anonymous enum used as an object member.
        objc_enum_name = self.objc_enum_name_for_anonymous_enum_member(declaration, member)
        enum_values = member.type.enum_values()
        lines = []
        lines.append(self._generate_enum_objc_to_protocol_string(objc_enum_name, enum_values))
        lines.append(self._generate_enum_from_protocol_string(objc_enum_name, enum_values))
        return '\n\n'.join(lines)

    def _generate_anonymous_enum_conversion_for_parameter(self, domain, event_or_command_name, parameter):
        # Conversions for an anonymous enum used as a command/event parameter.
        objc_enum_name = self.objc_enum_name_for_anonymous_enum_parameter(domain, event_or_command_name, parameter)
        enum_values = parameter.type.enum_values()
        lines = []
        lines.append(self._generate_enum_objc_to_protocol_string(objc_enum_name, enum_values))
        lines.append(self._generate_enum_from_protocol_string(objc_enum_name, enum_values))
        return '\n\n'.join(lines)

    def _generate_enum_objc_to_protocol_string(self, objc_enum_name, enum_values):
        # Emit: inline String toProtocolString(<Enum> value) { switch ... }
        lines = []
        lines.append('inline String toProtocolString(%s value)' % objc_enum_name)
        lines.append('{')
        lines.append('    switch(value) {')
        for enum_value in enum_values:
            lines.append('    case %s%s:' % (objc_enum_name, Generator.stylized_name_for_enum_value(enum_value)))
            lines.append('        return "%s"_s;' % enum_value)
        lines.append('    }')
        lines.append('}')
        return '\n'.join(lines)

    def _generate_enum_from_protocol_string(self, objc_enum_name, enum_values):
        # Emit: template<> inline Optional<Enum> fromProtocolString(...),
        # returning WTF::nullopt for unrecognized strings.
        lines = []
        lines.append('template<>')
        lines.append('inline Optional<%s> fromProtocolString(const String& value)' % objc_enum_name)
        lines.append('{')
        for enum_value in enum_values:
            lines.append('    if (value == "%s")' % enum_value)
            lines.append('        return %s%s;' % (objc_enum_name, Generator.stylized_name_for_enum_value(enum_value)))
        lines.append('    return WTF::nullopt;')
        lines.append('}')
        return '\n'.join(lines)
| 49.58046 | 154 | 0.704648 |
e505521c981f965c7eba04c77dfef7a46898d14f | 34,479 | py | Python | uf3/representation/bspline.py | henk789/uf3 | 60ad0de74417fa0b1406c261330dfec6b7d57cdb | [
"Apache-2.0"
] | 17 | 2021-10-01T14:52:16.000Z | 2022-02-11T13:10:03.000Z | uf3/representation/bspline.py | henk789/uf3 | 60ad0de74417fa0b1406c261330dfec6b7d57cdb | [
"Apache-2.0"
] | 3 | 2021-10-06T13:39:28.000Z | 2021-12-03T16:39:03.000Z | uf3/representation/bspline.py | henk789/uf3 | 60ad0de74417fa0b1406c261330dfec6b7d57cdb | [
"Apache-2.0"
] | 4 | 2022-01-27T08:49:00.000Z | 2022-03-22T11:55:35.000Z | """
This module provides the BSplineBasis class for defining BSpline basis sets
from knots and/or pair distance constraints.
"""
from typing import List, Dict, Tuple, Any, Collection
import os
import re
import warnings
import numpy as np
from scipy import interpolate
from uf3.data import composition
from uf3.representation import angles
from uf3.regression import regularize
from uf3.util import json_io
class BSplineBasis:
"""
Handler class for BSpline basis sets defined using knot sequences and/or
pair distance constraints. Functions include generating regularizer
matrices and masking basis functions with symmetry.
"""
    def __init__(self,
                 chemical_system,
                 r_min_map=None,
                 r_max_map=None,
                 resolution_map=None,
                 knot_strategy='linear',
                 offset_1b=True,
                 trailing_trim=3,
                 mask_trim=True,
                 knots_map=None):
        """
        Args:
            chemical_system (uf3.data.composition.ChemicalSystem)
            r_min_map (dict): map of minimum pair distance per interaction.
                If unspecified, defaults to 1.0 for all interactions.
                e.g. {(A-A): 2.0, (A-B): 3.0, (B-B): 4.0}
            r_max_map (dict): map of maximum pair distance per interaction.
                If unspecified, defaults to 6.0 angstroms for all
                interactions, which probably encompasses 2nd-nearest neighbors,
            resolution_map (dict): map of resolution (number of knot intervals)
                per interaction. If unspecified, defaults to 20 for all two-
                body interactions and 5 for three-body interactions.
            knot_strategy (str): "linear" for uniform spacing
                or "lammps" for knot spacing by r^2.
            offset_1b (bool): whether to fit one-body (element) offsets.
            trailing_trim (int): number of basis functions at trailing edge
                to suppress. Useful for ensuring smooth cutoffs.
            mask_trim (bool): whether to apply the trailing-edge mask.
            knots_map (dict): pre-generated map of knots.
                Overrides other settings.
        """
        self.chemical_system = chemical_system
        self.knot_strategy = knot_strategy
        self.offset_1b = offset_1b
        self.trailing_trim = trailing_trim
        self.mask_trim = mask_trim
        # Per-interaction settings, keyed by element tuple.
        self.r_min_map = {}
        self.r_max_map = {}
        self.resolution_map = {}
        # Derived basis data, populated by update_knots()/update_basis_functions().
        self.knots_map = {}
        self.knot_subintervals = {}
        self.basis_functions = {}
        self.symmetry = {}
        self.flat_weights = {}
        self.template_mask = {}
        self.templates = {}
        self.partition_sizes = []
        self.frozen_c = []
        self.col_idx = []
        self.r_cut = 0.0
        self.update_knots(r_max_map, r_min_map, resolution_map, knots_map)
        self.knot_spacer = get_knot_spacer(self.knot_strategy)
        self.update_basis_functions()
@staticmethod
def from_config(config):
"""Instantiate from configuration dictionary"""
if "chemical_system" not in config:
raise ValueError("No chemical system specified.")
chemical_system = config["chemical_system"]
basis_settings = dict()
if "knots_path" in config and config["load_knots"]:
knots_fname = config["knots_path"]
if os.path.isfile(knots_fname):
try:
knots_json = json_io.load_interaction_map(knots_fname)
knots_map = knots_json["knots"]
except (ValueError, KeyError, IOError):
knots_map = None
basis_settings["knots_map"] = knots_map
aliases = dict(r_min="r_min_map",
r_max="r_max_map",
resolution="resolution_map",
fit_offsets="offset_1b")
for key, alias in aliases.items():
if key in config:
basis_settings[alias] = config[key]
if alias in config: # higher priority in case of duplicate
basis_settings[alias] = config[alias]
keys = ["trailing_trim", "mask_trim", "knot_strategy"]
basis_settings.update({k: v for k, v in config.items() if k in keys})
bspline_config = BSplineBasis(chemical_system, **basis_settings)
if "knots_path" in config and config["dump_knots"]:
knots_map = bspline_config.knots_map
json_io.dump_interaction_map(dict(knots=knots_map),
filename=config["knots_path"],
write=True)
return bspline_config
    @property
    def degree(self):
        """Highest n-body interaction order of the chemical system (2 or 3)."""
        return self.chemical_system.degree

    @property
    def element_list(self):
        """List of chemical elements in the system."""
        return self.chemical_system.element_list

    @property
    def interactions_map(self):
        """Map of n-body order to element-tuple interactions (pairs, trios)."""
        return self.chemical_system.interactions_map

    @property
    def interactions(self):
        """Flattened collection of all element interactions."""
        return self.chemical_system.interactions

    @property
    def n_feats(self) -> int:
        """Total number of features across all basis-function partitions."""
        return int(np.sum(self.get_feature_partition_sizes()))
    def __repr__(self):
        # Summary: basis-function count plus the chemical system's repr.
        summary = ["BSplineBasis:",
                   f"    Basis functions: {self.n_feats}",
                   self.chemical_system.__repr__()
                   ]
        return "\n".join(summary)

    def __str__(self):
        # str() mirrors repr() for this class.
        return self.__repr__()
def get_cutoff(self):
values = []
for interaction in self.r_max_map:
r_max = self.r_max_map[interaction]
if isinstance(r_max, (float, np.floating, int)):
values.append(r_max)
else:
values.append(r_max[0])
return max(values)
    def update_knots(self,
                     r_max_map: Dict[Tuple, Any] = None,
                     r_min_map: Dict[Tuple, Any] = None,
                     resolution_map: Dict[Tuple, Any] = None,
                     knots_map: Dict[Tuple, Any] = None):
        """Merge new distance/resolution settings, fill defaults, and
        classify three-body symmetry; updates self.r_cut at the end.

        Pre-generated knots_map entries take precedence via
        update_knots_from_dict(). Interaction keys are canonicalized with
        composition.sort_interaction_map before merging.
        """
        # lower and upper distance cutoffs
        if r_min_map is not None:
            r_min_map = composition.sort_interaction_map(r_min_map)
            self.r_min_map.update(r_min_map)
        if r_max_map is not None:
            r_max_map = composition.sort_interaction_map(r_max_map)
            self.r_max_map.update(r_max_map)
        if resolution_map is not None:
            resolution_map = composition.sort_interaction_map(resolution_map)
            self.resolution_map.update(resolution_map)
        # Update with pregenerated knots_map
        if knots_map is not None:
            self.update_knots_from_dict(knots_map)
        # Update with provided and default values
        for pair in self.interactions_map.get(2, []):
            self.r_min_map[pair] = self.r_min_map.get(pair, 1.0)
            self.r_max_map[pair] = self.r_max_map.get(pair, 6.0)
            self.resolution_map[pair] = self.resolution_map.get(pair, 20)
        for trio in self.interactions_map.get(3, []):
            # Three-body settings are [l, m, n] per distance dimension.
            self.r_min_map[trio] = self.r_min_map.get(trio, [1.0, 1.0, 1.0])
            self.r_max_map[trio] = self.r_max_map.get(trio, [4.0, 4.0, 8.0])
            self.resolution_map[trio] = self.resolution_map.get(trio,
                                                                [5, 5, 10])
            # Symmetry classification: 3 = all three dimensions identical,
            # 2 = at most two distinct settings (one mirror plane),
            # 1 = no symmetry.
            min_set = len(set(self.r_min_map[trio]))
            max_set = len(set(self.r_max_map[trio]))
            res_set = len(set(self.resolution_map[trio]))
            if min_set == 1 and max_set == 1 and res_set == 1:
                self.symmetry[trio] = 3
            elif min_set <= 2 and max_set <= 2 and res_set <= 2:
                self.symmetry[trio] = 2
            else:
                self.symmetry[trio] = 1
        self.r_cut = self.get_cutoff()
    def update_knots_from_dict(self, knots_map):
        """Adopt pre-generated knot sequences, deriving r_min/r_max and
        resolution from each sequence (resolution = len(knots) - 7, the
        inverse of the knot-generation convention used in this class).

        Three-body entries may be one sequence (three-fold symmetry), two
        sequences (one mirror plane), or three independent sequences.
        """
        for pair in self.interactions_map.get(2, []):
            if pair in knots_map:
                knot_sequence = knots_map[pair]
                self.knots_map[pair] = knot_sequence
                self.r_min_map[pair] = knot_sequence[0]
                self.r_max_map[pair] = knot_sequence[-1]
                self.resolution_map[pair] = len(knot_sequence) - 7
        for trio in self.interactions_map.get(3, []):
            if trio in knots_map:
                knot_sequence = knots_map[trio]
                # specified one or more knot sequences
                if isinstance(knot_sequence[0], (float, np.floating, int)):
                    # one knot sequence provided (three-fold symmetry)
                    self.symmetry[trio] = 3
                    l_sequence = knot_sequence
                    m_sequence = knot_sequence
                    n_sequence = knot_sequence
                else:  # zero or one mirror plane
                    if len(knot_sequence) == 2:
                        self.symmetry[trio] = 2
                        l_sequence, n_sequence = knot_sequence
                        m_sequence = l_sequence
                    else:
                        if len(knot_sequence) > 3:
                            # Extra sequences beyond three are ignored.
                            warnings.warn(
                                "More than three knot sequences provided "
                                "for {} interaction.".format(trio),
                                RuntimeWarning)
                        self.symmetry[trio] = 1
                        l_sequence = knot_sequence[0]
                        m_sequence = knot_sequence[1]
                        n_sequence = knot_sequence[2]
                self.knots_map[trio] = [l_sequence,
                                        m_sequence,
                                        n_sequence]
                self.r_min_map[trio] = [l_sequence[0],
                                        m_sequence[0],
                                        n_sequence[0]]
                self.r_max_map[trio] = [l_sequence[-1],
                                        m_sequence[-1],
                                        n_sequence[-1]]
                self.resolution_map[trio] = [len(l_sequence) - 7,
                                             len(m_sequence) - 7,
                                             len(n_sequence) - 7]
    def update_basis_functions(self):
        """Regenerate knot subintervals and BSpline basis functions from
        the current knot/resolution maps, then refresh partition sizes
        and frozen-coefficient bookkeeping."""
        # Generate subintervals and basis functions for two-body
        for pair in self.interactions_map.get(2, []):
            if pair not in self.knots_map:  # compute knots if not provided
                r_min = self.r_min_map[pair]
                r_max = self.r_max_map[pair]
                n_intervals = self.resolution_map[pair]
                knot_sequence = self.knot_spacer(r_min, r_max, n_intervals)
                # Avoid exact zeros in the knot sequence.
                knot_sequence[knot_sequence == 0] = 1e-6
                self.knots_map[pair] = knot_sequence
            subintervals = get_knot_subintervals(self.knots_map[pair])
            self.knot_subintervals[pair] = subintervals
            self.basis_functions[pair] = generate_basis_functions(subintervals)
        # Generate subintervals and basis functions for three-body.
        # Maps must contain three entries each.
        if self.degree > 2:
            for trio in self.interactions_map.get(3, []):
                if trio not in self.knots_map:
                    r_min = self.r_min_map[trio]
                    r_max = self.r_max_map[trio]
                    r_resolution = self.resolution_map[trio]
                    knot_sequences = []
                    for i in range(3):  # ij, ik, jk dimensions.
                        knot_sequence = self.knot_spacer(r_min[i],
                                                         r_max[i],
                                                         r_resolution[i])
                        knot_sequence[knot_sequence == 0] = 1e-6
                        knot_sequences.append(knot_sequence)
                    self.knots_map[trio] = knot_sequences
                # One basis set per distance dimension.
                subintervals = []
                basis_functions = []
                for knot_sequence in self.knots_map[trio]:
                    subinterval = get_knot_subintervals(knot_sequence)
                    basis_set = generate_basis_functions(subinterval)
                    subintervals.append(subinterval)
                    basis_functions.append(basis_set)
                self.knot_subintervals[trio] = subintervals
                self.basis_functions[trio] = basis_functions
            self.set_flatten_template_3B()
        self.partition_sizes = self.get_feature_partition_sizes()
        self.col_idx, self.frozen_c = self.generate_frozen_indices(
            offset_1b=self.offset_1b,
            n_trim=self.trailing_trim)
def get_regularization_matrix(self,
ridge_map={},
curvature_map={},
**kwargs):
"""
Args:
ridge_map (dict): n-body term ridge regularizer strengths.
default: {1: 1e-4, 2: 1e-6, 3: 1e-5}
curvature_map (dict): n-body term curvature regularizer strengths.
default: {1: 0.0, 2: 1e-5, 3: 1e-5}
TODO: refactor to break up into smaller, reusable functions
Returns:
combined_matrix (np.ndarray): regularization matrix made up of
individual matrices per n-body interaction.
"""
for k in kwargs:
if k.lower()[0] == 'r':
ridge_map[int(re.sub('[^0-9]', '', k))] = float(kwargs[k])
elif k.lower()[0] == 'c':
curvature_map[int(re.sub('[^0-9]', '', k))] = float(kwargs[k])
ridge_map = {1: 1e-8, 2: 0.0, 3: 0.0, **ridge_map}
curvature_map = {1: 0.0, 2: 1e-8, 3: 1e-8, **curvature_map}
# one-body element terms
n_elements = len(self.chemical_system.element_list)
matrix = regularize.get_regularizer_matrix(n_elements,
ridge=ridge_map[1],
curvature=0.0)
matrices = [matrix]
# two- and three-body terms
for degree in range(2, self.chemical_system.degree + 1):
r = ridge_map[degree]
c = curvature_map[degree]
interactions = self.chemical_system.interactions_map[degree]
for interaction in interactions:
size = self.resolution_map[interaction]
if degree == 2:
matrix = regularize.get_regularizer_matrix(size + 3,
ridge=r,
curvature=c)
elif degree == 3:
matrix = regularize.get_penalty_matrix_3D(size[0] + 3,
size[1] + 3,
size[2] + 3,
ridge=r,
curvature=c)
mask = np.where(self.flat_weights[interaction] > 0)[0]
matrix = matrix[mask[None, :], mask[:, None]]
else:
raise ValueError(
"Four-body terms and beyond are not yet implemented.")
matrices.append(matrix)
combined_matrix = regularize.combine_regularizer_matrices(matrices)
return combined_matrix
def get_feature_partition_sizes(self) -> List:
"""Get partition sizes: one-body, two-body, and three-body terms."""
partition_sizes = [1] * len(self.chemical_system.element_list)
for degree in range(2, self.chemical_system.degree + 1):
interactions = self.chemical_system.interactions_map[degree]
for interaction in interactions:
if degree == 2:
size = self.resolution_map[interaction] + 3
partition_sizes.append(size)
elif degree == 3:
mask = np.where(self.flat_weights[interaction] > 0)[0]
size = len(mask)
partition_sizes.append(size)
else:
raise ValueError(
"Four-body terms and beyond are not yet implemented.")
self.partition_sizes = partition_sizes
return partition_sizes
def get_interaction_partitions(self):
interactions_list = self.interactions
partition_sizes = self.get_feature_partition_sizes()
offsets = np.cumsum(partition_sizes)
offsets = np.insert(offsets, 0, 0)
component_sizes = {}
component_offsets = {}
for j in range(len(interactions_list)):
interaction = interactions_list[j]
component_sizes[interaction] = partition_sizes[j]
component_offsets[interaction] = offsets[j]
return component_sizes, component_offsets
    def generate_frozen_indices(self,
                                offset_1b: bool = True,
                                n_trim: int = 3,
                                value: float = 0.0):
        """Identify coefficient columns to freeze at ``value``.

        Freezes the trailing ``n_trim`` coefficients of every two-body
        term and the trailing faces of every three-body grid (for smooth
        cutoffs); optionally also freezes the one-body offsets.

        Args:
            offset_1b: if False, additionally freeze the one-body
                element-offset columns at 0.
            n_trim: number of trailing basis functions to freeze per edge.
            value: value the frozen coefficients are pinned to.
        Returns:
            col_idx (np.ndarray): column indices to freeze.
            frozen_c (np.ndarray): corresponding frozen values.
        """
        pairs = self.interactions_map.get(2, [])
        trios = self.interactions_map.get(3, [])
        component_sizes, component_offsets = self.get_interaction_partitions()
        col_idx = []
        frozen_c = []
        for pair in pairs:
            offset = component_offsets[pair]
            size = component_sizes[pair]
            # last n_trim columns of this pair's partition
            for trim_idx in range(1, n_trim + 1):
                idx = offset + size - trim_idx
                col_idx.append(idx)
                frozen_c.append(value)
        for trio in trios:
            # mark the n_trim trailing planes along each of the 3 axes
            template = np.zeros_like(self.templates[trio])
            for trim_idx in range(1, n_trim + 1):
                template[-trim_idx, :, :] = 1
                template[:, -trim_idx, :] = 1
                template[:, :, -trim_idx] = 1
            # map marked grid entries to compressed-vector indices.
            # NOTE(review): indices here are relative to the trio's own
            # compressed vector, not offset by component_offsets — confirm
            # downstream consumers expect that.
            template = self.compress_3B(template, trio)
            mask = np.where(template > 0)[0]
            for idx in mask:
                col_idx.append(idx)
                frozen_c.append(value)
        if not offset_1b:
            # freeze the leading one-body element offsets at zero
            for j in range(len(self.element_list)):
                col_idx.insert(0, j)
                frozen_c.insert(0, 0)
        col_idx = np.array(col_idx, dtype=int)
        frozen_c = np.array(frozen_c)
        return col_idx, frozen_c
    def set_flatten_template_3B(self):
        """
        Compute masks for flattening and unflattening 3B grid. The 3B BSpline
        set has three planes of symmetry corresponding to permutation
        of i, j, and k indices. Training is therefore performed with
        only the subset of basis functions corresponding to i < j < k.
        Basis functions on planes of symmetry have reduced weight.

        Populates (per three-body interaction):
            self.template_mask (np.ndarray): flat indices of the nonzero-
                weight basis functions to keep.
            self.flat_weights (np.ndarray): symmetry weights for that subset.
            self.templates (np.ndarray): full L x M x N weight grid, used
                for regenerating the full basis function set.
        """
        # optionally suppress trailing basis functions for smooth cutoffs
        if self.mask_trim:
            trailing_trim = self.trailing_trim
        else:
            trailing_trim = 0
        for trio in self.interactions_map[3]:
            l_space, m_space, n_space = self.knots_map[trio]
            # weight grid encoding the trio's permutation symmetry
            template = angles.get_symmetry_weights(self.symmetry[trio],
                                                   l_space,
                                                   m_space,
                                                   n_space,
                                                   trailing_trim,)
            template_flat = template.flatten()
            # keep only basis functions with nonzero symmetry weight
            template_mask, = np.where(template_flat > 0)
            self.template_mask[trio] = template_mask
            self.flat_weights[trio] = template_flat[template_mask]
            self.templates[trio] = template
    def compress_3B(self, grid, interaction):
        """Flatten a full 3D coefficient grid into the symmetry-reduced
        vector for ``interaction``.

        For symmetry 2 the grid is symmetrized over a swap of the first
        two axes; for symmetry 3 over all six i/j/k permutations; the
        symmetrized grid is then restricted to the template-mask subset
        and scaled by the symmetry weights.

        NOTE(review): the symmetry == 1 path returns the plain flattened
        grid without applying template_mask or flat_weights, and any other
        symmetry value would leave ``vec`` unbound (NameError) — confirm
        symmetry is always in {1, 2, 3}.
        """
        if self.symmetry[interaction] == 1:
            vec = grid.flatten()
        elif self.symmetry[interaction] == 2:
            # symmetrize over swap of the first two axes (i <-> j)
            vec = grid + grid.transpose(1, 0, 2)
            vec = vec.flat[self.template_mask[interaction]]
            vec = vec * self.flat_weights[interaction]
        elif self.symmetry[interaction] == 3:
            # symmetrize over all six permutations of (i, j, k)
            vec = (grid
                   + grid.transpose(0, 2, 1)
                   + grid.transpose(1, 0, 2)
                   + grid.transpose(1, 2, 0)
                   + grid.transpose(2, 0, 1)
                   + grid.transpose(2, 1, 0))
            vec = vec.flat[self.template_mask[interaction]]
            vec = vec * self.flat_weights[interaction]
        return vec
def decompress_3B(self, vec, interaction):
l_space, m_space, n_space = self.knots_map[interaction]
L = len(l_space) - 4
M = len(m_space) - 4
N = len(n_space) - 4
grid = np.zeros((L, M, N))
grid.flat[self.template_mask[interaction]] = vec
return grid
def get_knot_spacer(knot_strategy):
    """
    Map a knot-strategy name to its knot-spacing function.

    Args:
        knot_strategy (str): one of 'lammps', 'linear', 'geometric',
            'inverse', or 'custom'.
    Returns:
        spacing_function (Callable or None): function with signature
            (r_min, r_max, n_intervals) -> knot sequence, or None for
            'custom' (caller supplies knot sequences directly).
    Raises:
        ValueError: if knot_strategy is not one of the recognized names.
    """
    # select knot spacing option
    if knot_strategy == 'lammps':
        spacing_function = generate_lammps_knots
    elif knot_strategy == 'linear':
        spacing_function = generate_uniform_knots
    elif knot_strategy == 'geometric':
        spacing_function = generate_geometric_knots
    elif knot_strategy == 'inverse':
        spacing_function = generate_inv_knots
    elif knot_strategy == 'custom':
        # Bug fix: this branch previously left spacing_function unbound,
        # raising UnboundLocalError at return. 'custom' now explicitly
        # returns None, signalling that knots are supplied by the user.
        spacing_function = None
    else:
        raise ValueError('Invalid value of knot_strategy:', knot_strategy)
    return spacing_function
def generate_basis_functions(knot_subintervals):
    """
    Construct one cubic B-spline basis function per 5-knot subinterval.

    Args:
        knot_subintervals (list): list of knot subintervals,
            e.g. from ufpotential.representation.knots.get_knot_subintervals
    Returns:
        basis_functions (list): list of scipy B-spline basis functions
            (extrapolate=False, so they evaluate to NaN outside support).
    """
    return [interpolate.BSpline.basis_element(subinterval, extrapolate=False)
            for subinterval in knot_subintervals]
def evaluate_basis_functions(points,
                             basis_functions,
                             nu=0,
                             trailing_trim=0,
                             flatten=True,
                             ):
    """
    Evaluate basis functions at the given sample points.

    Args:
        points (np.ndarray): vector of points to sample, e.g. pair distances
        basis_functions (list): list of callable basis functions.
        nu (int): compute n-th derivative of basis function. Default 0.
        trailing_trim (int): number of basis functions at trailing edge
            to suppress. Useful for ensuring smooth cutoffs.
        flatten (bool): whether to flatten values per spline.
    Returns:
        if flatten:
            value_per_spline (np.ndarray): vector of cubic B-spline value,
                summed across queried points, for each knot subinterval.
                Used as a rotation-invariant representation generated
                using a BSpline basis.
        else:
            values_per_spline (list): list of vector of cubic B-spline
                evaluations for each knot subinterval.
    """
    n_splines = len(basis_functions)
    n_active = max(n_splines - trailing_trim, 0)
    # trimmed entries keep the placeholder 0, contributing nothing
    values_per_spline = [0] * n_splines
    for spline_idx in range(n_active):
        evaluations = basis_functions[spline_idx](points, nu=nu)
        # points outside a spline's support evaluate to NaN; zero them
        evaluations[np.isnan(evaluations)] = 0
        values_per_spline[spline_idx] = evaluations
    if not flatten:
        return values_per_spline
    return np.array([np.sum(values) for values in values_per_spline])
def featurize_force_2B(basis_functions,
                       distances,
                       drij_dR,
                       knot_sequence,
                       trailing_trim=0,
                       ):
    """
    Compute two-body force features from B-spline first derivatives.

    Args:
        basis_functions (list): list of callable basis functions.
        distances (np.ndarray): vector of distances of the same length as
            the last dimension of drij_dR.
        drij_dR (np.ndarray): distance-derivatives, e.g. from
            ufpotential.data.two_body.derivatives_by_interaction.
            Shape is (n_atoms, 3, n_distances).
        knot_sequence (np.ndarray): list of knot positions.
        trailing_trim (int): number of basis functions at trailing edge
            to suppress. Useful for ensuring smooth cutoffs.
    Returns:
        x (np.ndarray): rotation-invariant representations generated
            using BSpline basis corresponding to force information.
            Array shape is (n_atoms, 3, n_basis_functions), where the
            second dimension corresponds to the three cartesian directions.
    """
    n_splines = len(basis_functions)
    n_atoms, _, n_distances = drij_dR.shape
    # trimmed trailing splines stay zero-filled
    x = np.zeros((n_atoms, 3, n_splines))
    for bspline_idx in np.arange(n_splines - trailing_trim):
        # loop over number of basis functions
        basis_function = basis_functions[bspline_idx]
        # 5-knot support of this cubic B-spline
        b_knots = knot_sequence[bspline_idx: bspline_idx+5]
        # restrict evaluation to distances inside the spline's support.
        # NOTE(review): inequalities are strict, so distances exactly on
        # a support boundary are excluded — confirm this is intended
        # (the basis value there is zero, so only the derivative at the
        # endpoints could differ).
        mask = np.logical_and(distances > b_knots[0],
                              distances < b_knots[-1])
        # first derivative
        bspline_values = basis_function(distances[mask], nu=1)
        # mask position deltas by distances
        deltas = drij_dR[:, :, mask]
        # broadcast multiplication over atomic and cartesian axis dimensions
        x_splines = np.multiply(bspline_values, deltas)
        x_splines = np.sum(x_splines, axis=-1)
        x[:, :, bspline_idx] = x_splines
    # negate: force is the negative gradient of energy
    x = -x
    return x
def fit_spline_1d(x, y, knot_sequence):
    """
    Utility function for fitting spline coefficients to a sampled 1D function.
    Useful for comparing fit coefficients against true pair potentials.

    Args:
        x (np.ndarray): vector of function inputs.
        y (np.ndarray): vector of corresponding function outputs.
        knot_sequence (np.ndarray): list of knot positions.
    Returns:
        coefficients (np.ndarray): vector of cubic B-spline coefficients.
    """
    # scipy requirement: data must not lie outside of knot range
    b_min = knot_sequence[0]
    b_max = knot_sequence[-1]
    y = y[(x > b_min) & (x < b_max)]
    x = x[(x > b_min) & (x < b_max)]
    # scipy requirement: knot intervals must include at least 1 point each
    lowest_idx = np.argmin(x)
    highest_idx = np.argmax(x)
    x_min = x[lowest_idx]
    y_min = y[lowest_idx]
    x_max = x[highest_idx]
    y_max = y[highest_idx]
    unique_knots = np.unique(knot_sequence)
    n_knots = len(unique_knots)
    for i in range(n_knots-1):
        # loop over knots to ensure each interval has at least one point
        midpoint = 0.5 * (unique_knots[i] + unique_knots[i+1])
        if x_min > unique_knots[i]:  # pad with the lowest sample's value
            x = np.insert(x, 0, midpoint)
            y = np.insert(y, 0, y_min)
        elif x_max < unique_knots[i]:  # pad with the highest sample's value
            x = np.insert(x, -1, midpoint)
            y = np.insert(y, -1, y_max)
    # scipy requirement: samples must be in increasing order
    # (this re-sort also repairs the out-of-order np.insert above)
    x_sort = np.argsort(x)
    x = x[x_sort]
    y = y[x_sort]
    # LSQUnivariateSpline expects only the interior knots: strip the
    # repeated end-knots of a full sequence, or just the two boundary
    # knots of a plain point list
    if knot_sequence[0] == knot_sequence[3]:
        knot_sequence = knot_sequence[4:-4]
    else:
        knot_sequence = knot_sequence[1:-1]
    lsq = interpolate.LSQUnivariateSpline(x,
                                          y,
                                          knot_sequence,
                                          bbox=(b_min, b_max))
    coefficients = lsq.get_coeffs()
    return coefficients
def find_spline_indices(points: np.ndarray,
                        knot_sequence: np.ndarray
                        ) -> Tuple[np.ndarray, np.ndarray]:
    """
    Identify basis functions indices that are non-zero at each point.

    Args:
        points (np.ndarray): list of points.
        knot_sequence (np.ndarray): list of knot positions.
    Returns:
        points (np.ndarray): array of points repeated four times
        idx (np.ndarray): corresponding basis function index for each
            point (four each).
    """
    # identify basis function "center" per point
    unique_knots = np.unique(knot_sequence)
    centers = np.searchsorted(unique_knots, points, side='left') - 1
    # each point lies in the support of four consecutive cubic B-splines
    idx = (centers[:, None] + np.arange(4)[None, :]).ravel()
    points = np.repeat(points, 4)
    return points, idx
def knot_sequence_from_points(knot_points: Collection) -> np.ndarray:
    """
    Repeat endpoints to satisfy knot sequence requirements (i.e. fixing first
    and second derivatives to zero).

    Args:
        knot_points (list or np.ndarray): sorted knot points in
            increasing order.
    Returns:
        knots (np.ndarray): knot sequence with repeated ends.
    """
    head = np.repeat(knot_points[0], 3)
    tail = np.repeat(knot_points[-1], 3)
    return np.concatenate([head, knot_points, tail])
def get_knot_subintervals(knots: np.ndarray) -> List:
    """
    Generate 5-knot subintervals for individual basis functions
    from specified knot sequence.

    Args:
        knots (np.ndarray): knot sequence with repeated ends.
    Returns:
        subintervals (list): list of 5-knot subintervals, one per
            cubic B-spline (len(knots) - 4 in total).
    """
    n_subintervals = len(knots) - 4
    return [knots[start:start + 5] for start in range(n_subintervals)]
def generate_uniform_knots(r_min: float,
                           r_max: float,
                           n_intervals: int,
                           sequence: bool = True
                           ) -> np.ndarray:
    """
    Generate evenly-spaced knot points or knot sequence.

    Args:
        r_min (float): lower-bound for knot points.
        r_max (float): upper-bound for knot points.
        n_intervals (int): number of unique intervals in the knot sequence,
            i.e. n_intervals + 1 samples will be taken between r_min and r_max.
        sequence (bool): whether to repeat ends to yield knot sequence.
    Returns:
        knots (np.ndarray): knot points or knot sequence.
    """
    points = np.linspace(r_min, r_max, n_intervals + 1)
    if not sequence:
        return points
    return knot_sequence_from_points(points)
def generate_inv_knots(r_min: float,
                       r_max: float,
                       n_intervals: int,
                       sequence: bool = True
                       ) -> np.ndarray:
    """
    Generate knot points or knot sequence using an inverse transformation.
    This scheme yields higher resolution at smaller distances.

    Args:
        r_min (float): lower-bound for knot points.
        r_max (float): upper-bound for knot points.
        n_intervals (int): number of unique intervals in the knot sequence,
            i.e. n_intervals + 1 samples will be taken between r_min and r_max.
        sequence (bool): whether to repeat ends to yield knot sequence.
    Returns:
        knots (np.ndarray): knot points or knot sequence.
    """
    # sample uniformly in 1/r, then invert back to r
    points = np.linspace(1/r_min, 1/r_max, n_intervals + 1)**-1
    if not sequence:
        return points
    return knot_sequence_from_points(points)
def generate_geometric_knots(r_min: float,
                             r_max: float,
                             n_intervals: int,
                             sequence: bool = True
                             ) -> np.ndarray:
    """
    Generate knot points or knot sequence using a geometric progression.
    Points are evenly spaced on a log scale. This scheme yields higher
    resolution at smaller distances.

    Args:
        r_min (float): lower-bound for knot points.
        r_max (float): upper-bound for knot points.
        n_intervals (int): number of unique intervals in the knot sequence,
            i.e. n_intervals + 1 samples will be taken between r_min and r_max.
        sequence (bool): whether to repeat ends to yield knot sequence.
    Returns:
        knots (np.ndarray): knot points or knot sequence.
    """
    points = np.geomspace(r_min, r_max, n_intervals + 1)
    if not sequence:
        return points
    return knot_sequence_from_points(points)
def generate_lammps_knots(r_min: float,
                          r_max: float,
                          n_intervals: int,
                          sequence: bool = True
                          ) -> np.ndarray:
    """
    Generate knot points or knot sequence using LAMMPS convention of
    distance^2. This scheme yields somewhat higher resolution at larger
    distances and somewhat lower resolution at smaller distances.
    Since speed is mostly unaffected by the number of basis functions, due
    to the local support, a high value of n_intervals ensures resolution
    while ensuring expected behavior in LAMMPS.

    Args:
        r_min (float): lower-bound for knot points.
        r_max (float): upper-bound for knot points.
        n_intervals (int): number of unique intervals in the knot sequence,
            i.e. n_intervals + 1 samples will be taken between r_min and r_max.
        sequence (bool): whether to repeat ends to yield knot sequence.
    Returns:
        knots (np.ndarray): knot points or knot sequence.
    """
    # sample uniformly in r^2, then map back to r
    points = np.linspace(r_min ** 2, r_max ** 2, n_intervals + 1) ** 0.5
    if not sequence:
        return points
    return knot_sequence_from_points(points)
def parse_knots_file(filename: str,
                     chemical_system: composition.ChemicalSystem) -> Dict:
    """
    Parse a nested dictionary of knot sequences from JSON file.

    Only sequences that look like valid cubic B-spline knot sequences are
    kept: repeated (4x) end knots on both sides and monotonically
    non-decreasing values. NOTE(review): interactions whose sequence fails
    these checks are silently omitted from the result — confirm callers
    treat a missing key as "compute knots instead".

    Args:
        filename (str): path to file.
        chemical_system (composition.ChemicalSystem): chemical system.
    Returns:
        knots_map (dict): map of knots per chemical interaction.
    """
    json_data = json_io.load_interaction_map(filename)
    knots_map = {}
    for d in range(2, chemical_system.degree + 1):
        for interaction in chemical_system.interactions_map[d]:
            if interaction in json_data:
                array = json_data[interaction]
                # ptp == 0 over the first/last four entries means the end
                # knots are repeated; gradient >= 0 means sorted order
                conditions = [np.ptp(array[:4]) == 0,
                              np.ptp(array[-4:]) == 0,
                              np.all(np.gradient(array) >= 0)]
                if all(conditions):
                    knots_map[interaction] = array
    return knots_map
| 41.742131 | 79 | 0.567969 |
a122c699745c0cacee252df3ff5bddefd90c04c7 | 6,719 | py | Python | server/functions.py | PiRobotLm5G/Adeept_RaspTank | e3261645ba2688fac96fac5a58f925ccb5e4b0ef | [
"MIT"
] | null | null | null | server/functions.py | PiRobotLm5G/Adeept_RaspTank | e3261645ba2688fac96fac5a58f925ccb5e4b0ef | [
"MIT"
] | null | null | null | server/functions.py | PiRobotLm5G/Adeept_RaspTank | e3261645ba2688fac96fac5a58f925ccb5e4b0ef | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# File name : servo.py
# Description : Control Functions
# Author : William
# Date : 2020/03/17
import time
import RPi.GPIO as GPIO
import threading
from mpu6050 import mpu6050
import Adafruit_PCA9685
import os
import json
import ultra
import Kalman_filter
import move
move.setup()
kalman_filter_X = Kalman_filter.Kalman_filter(0.01,0.1)
pwm = Adafruit_PCA9685.PCA9685()
pwm.set_pwm_freq(50)
# MPU_connection = 1
# try:
# sensor = mpu6050(0x68)
# print('mpu6050 connected, PT MODE ON')
# except:
# MPU_connection = 0
# print('mpu6050 disconnected, ARM MODE ON')
curpath = os.path.realpath(__file__)
thisPath = "/" + os.path.dirname(curpath)
def num_import_int(initial):  # Call this function to import data from '.txt' file
    """
    Read an integer setting from RPIservo.py (located next to this file)
    by scanning for lines that start with `initial`, e.g. 'init_pwm0 = '.

    Args:
        initial (str): line prefix identifying the setting.
    Returns:
        int: value following the prefix on the last matching line
            (last-match semantics preserved from the original).
    Raises:
        ValueError: if no line starts with `initial` (previously this
            failed with an obscure NameError/UnboundLocalError via the
            `global r` hack, which has been removed).
    """
    matched = None
    with open(thisPath + "/RPIservo.py") as f:
        for line in f.readlines():
            if line.find(initial) == 0:
                matched = line
    if matched is None:
        raise ValueError('setting not found in RPIservo.py: %s' % initial)
    return int(matched[len(initial):])
# Servo channel 0 (radar/pan): direction flag, neutral position read from
# RPIservo.py, and PWM tick limits.
pwm0_direction = 1
pwm0_init = num_import_int('init_pwm0 = ')
pwm0_max = 520
pwm0_min = 100
pwm0_pos = pwm0_init
# Servo channel 1: same layout as channel 0.
pwm1_direction = 1
pwm1_init = num_import_int('init_pwm1 = ')
pwm1_max = 520
pwm1_min = 100
pwm1_pos = pwm1_init
# Servo channel 2 (camera tilt, used by the steady mode).
pwm2_direction = 1
pwm2_init = num_import_int('init_pwm2 = ')
pwm2_max = 520
pwm2_min = 100
pwm2_pos = pwm2_init
# BCM pin numbers of the three line-tracking sensors.
line_pin_right = 19
line_pin_middle = 16
line_pin_left = 20
def pwmGenOut(angleInput):
    """Convert an angle (degrees) to a PWM tick delta (23 ticks per 9 degrees)."""
    ticks_per_degree = 23/9
    return int(round(ticks_per_degree * angleInput))
def setup():
    """Configure the Raspberry Pi GPIO pins used by the line-tracking sensors."""
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)  # use Broadcom pin numbering
    GPIO.setup(line_pin_right,GPIO.IN)
    GPIO.setup(line_pin_middle,GPIO.IN)
    GPIO.setup(line_pin_left,GPIO.IN)
class Functions(threading.Thread):
    """Background worker thread implementing the robot's autonomous modes.

    The thread sleeps on an internal Event flag; calling automatic(),
    trackLine(), or steady() sets the mode and wakes the loop, which then
    repeatedly runs the matching *Processing step. pause() stops it.
    """
    def __init__(self, *args, **kwargs):
        # current mode: 'none', 'Automatic', 'Steady', or 'trackLine'
        self.functionMode = 'none'
        # target PWM position for the steady (camera-stabilizing) mode
        self.steadyGoal = 0
        # radar-scan bookkeeping (mostly used by the commented-out
        # multi-position scan logic below)
        self.scanNum = 3
        self.scanList = [0,0,0]
        self.scanPos = 1
        self.scanDir = 1
        self.rangeKeep = 0.7  # obstacle-avoidance distance threshold (m)
        self.scanRange = 100
        self.scanServo = 1
        self.turnServo = 0
        self.turnWiggle = 200
        setup()  # configure line-sensor GPIO pins
        super(Functions, self).__init__(*args, **kwargs)
        # cleared flag parks the run() loop until a mode is selected
        self.__flag = threading.Event()
        self.__flag.clear()
    def radarScan(self):
        """Sweep the ultrasonic sensor across servo channel 1 and return
        a list of [distance, angle] samples closer than 20 (units per
        ultra.checkdist)."""
        global pwm0_pos
        scan_speed = 3
        result = []
        if pwm0_direction:
            # sweep from max to min servo position
            pwm0_pos = pwm0_max
            pwm.set_pwm(1, 0, pwm0_pos)
            time.sleep(0.8)
            while pwm0_pos>pwm0_min:
                pwm0_pos-=scan_speed
                pwm.set_pwm(1, 0, pwm0_pos)
                dist = ultra.checkdist()
                if dist > 20:
                    continue
                theta = 180 - (pwm0_pos-100)/2.55 # +30 deviation
                result.append([dist, theta])
        else:
            # reversed servo orientation: sweep from min to max
            pwm0_pos = pwm0_min
            pwm.set_pwm(1, 0, pwm0_pos)
            time.sleep(0.8)
            while pwm0_pos<pwm0_max:
                pwm0_pos+=scan_speed
                pwm.set_pwm(1, 0, pwm0_pos)
                dist = ultra.checkdist()
                if dist > 20:
                    continue
                theta = (pwm0_pos-100)/2.55
                result.append([dist, theta])
        # return the servo to its neutral position
        pwm.set_pwm(1, 0, pwm0_init)
        return result
    def pause(self):
        """Stop the current mode, halt the motors, and park the loop."""
        self.functionMode = 'none'
        move.move(80, 'no', 'no', 0.5)
        self.__flag.clear()
    def resume(self):
        """Wake the run() loop."""
        self.__flag.set()
    def automatic(self):
        """Enter obstacle-avoidance mode."""
        self.functionMode = 'Automatic'
        self.resume()
    def trackLine(self):
        """Enter line-following mode."""
        self.functionMode = 'trackLine'
        self.resume()
    def steady(self,goalPos):
        """Enter camera-steady mode, stabilizing around PWM goalPos."""
        self.functionMode = 'Steady'
        self.steadyGoal = goalPos
        self.resume()
    def trackLineProcessing(self):
        """One line-following step: steer toward whichever sensor sees the line."""
        status_right = GPIO.input(line_pin_right)
        status_middle = GPIO.input(line_pin_middle)
        status_left = GPIO.input(line_pin_left)
        #print('R%d M%d L%d'%(status_right,status_middle,status_left))
        if status_middle == 1:
            move.move(100, 'forward', 'no', 1)
        elif status_left == 1:
            move.move(100, 'no', 'right', 1)
        elif status_right == 1:
            move.move(100, 'no', 'left', 1)
        else:
            # line lost on all sensors: back up to reacquire it
            move.move(100, 'backward', 'no', 1)
        time.sleep(0.1)
    def automaticProcessing(self):
        """One obstacle-avoidance step based on the ultrasonic distance."""
        print('automaticProcessing')
        if self.rangeKeep/3 > ultra.checkdist():
            # very close: back away
            move.move(100, 'backward', 'no', 0.5)
        elif self.rangeKeep > ultra.checkdist():
            # within keep-out range: turn left
            move.move(100, 'no', 'left', 0.5)
        else:
            move.move(100, 'forward', 'no', 0.5)
        time.sleep(0.1)
        if self.functionMode == 'none':
            # mode was cancelled mid-step: stop the motors
            move.move(80, 'no', 'no', 0.5)
        # (an earlier multi-position scan-and-steer implementation was
        # removed here; see version control history if needed)
    def steadyProcessing(self):
        """One camera-steady step: read the accelerometer, Kalman-filter
        the x axis, and counter-tilt servo channel 2.

        NOTE(review): this uses the module-level `sensor` (mpu6050), whose
        initialization is commented out at the top of the file — as
        written this raises NameError unless `sensor` is defined
        elsewhere; confirm before enabling Steady mode.
        """
        print('steadyProcessing')
        xGet = sensor.get_accel_data()
        xGet = xGet['x']
        xOut = kalman_filter_X.kalman(xGet)
        pwm.set_pwm(2, 0, self.steadyGoal+pwmGenOut(xOut*9))
        # pwm.set_pwm(2, 0, self.steadyGoal+pwmGenOut(xGet*10))
        time.sleep(0.05)
    def functionGoing(self):
        """Dispatch one processing step for the current mode."""
        if self.functionMode == 'none':
            self.pause()
        elif self.functionMode == 'Automatic':
            self.automaticProcessing()
        elif self.functionMode == 'Steady':
            self.steadyProcessing()
        elif self.functionMode == 'trackLine':
            self.trackLineProcessing()
    def run(self):
        # main thread loop: blocks while paused, otherwise steps the mode
        while 1:
            self.__flag.wait()
            self.functionGoing()
            pass
if __name__ == '__main__':
pass
# fuc=Functions()
# fuc.radarScan()
# fuc.start()
# fuc.automatic()
# # fuc.steady(300)
# time.sleep(30)
# fuc.pause()
# time.sleep(1)
# move.move(80, 'no', 'no', 0.5)
| 24.885185 | 88 | 0.641018 |
ee6a6bfcf4946f69b81fb2b571117385e8ecd832 | 2,619 | py | Python | app/core/models.py | marinswk/recipe-app-api | c829ee4ce2650d6d377b1d04bd267702f315ea72 | [
"MIT"
] | null | null | null | app/core/models.py | marinswk/recipe-app-api | c829ee4ce2650d6d377b1d04bd267702f315ea72 | [
"MIT"
] | null | null | null | app/core/models.py | marinswk/recipe-app-api | c829ee4ce2650d6d377b1d04bd267702f315ea72 | [
"MIT"
] | null | null | null | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
from django.conf import settings
import uuid
import os
def recipe_image_file_path(instance, file_name):
    """Generate a unique upload path for a new recipe image, keeping the
    original file extension (instance is unused; required by Django)."""
    extension = file_name.split('.')[-1]
    unique_name = f'{uuid.uuid4()}.{extension}'
    return os.path.join('uploads/recipe/', unique_name)
class UserManager(BaseUserManager):
    """Manager for the email-based User model."""
    def create_user(self, email, password=None, **extra_field):
        """creates and saves a new user"""
        if not email:
            raise ValueError('Users must have an email address')
        # normalize_email lowercases the domain part of the address
        user = self.model(email=self.normalize_email(email), **extra_field)
        user.set_password(password)  # store hashed, never plain text
        user.save(using=self._db)
        return user
    def create_superuser(self, email, password):
        """creates and saves a new superuser"""
        user = self.create_user(email, password)
        user.is_staff = True
        user.is_superuser = True
        user.save(using=self._db)
        return user
class User(AbstractBaseUser, PermissionsMixin):
    """custom user model that support using email instead of username"""
    email = models.EmailField(max_length=255, unique=True)  # login identifier
    name = models.CharField(max_length=255)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)  # admin-site access
    objects = UserManager()
    # authenticate with email rather than the default username
    USERNAME_FIELD = 'email'
class Tag(models.Model):
    """tag to be used for a recipe"""
    name = models.CharField(max_length=255)
    # owning user; tags are removed when their user is deleted
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    def __str__(self):
        return self.name
class Ingredient(models.Model):
    """Ingredient to be used in a recipe"""
    name = models.CharField(max_length=255)
    # owning user; ingredients are removed when their user is deleted
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    def __str__(self):
        return self.name
class Recipe(models.Model):
    """Recipe object"""
    # owning user; recipes are removed when their user is deleted
    user = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.CASCADE,
    )
    title = models.CharField(max_length=255)
    time_minutes = models.IntegerField()  # preparation time
    price = models.DecimalField(max_digits=5, decimal_places=2)
    link = models.CharField(max_length=255, blank=True)  # optional source URL
    ingredients = models.ManyToManyField('Ingredient')
    tags = models.ManyToManyField('Tag')
    # uploaded under a UUID-based name; see recipe_image_file_path
    image = models.ImageField(null=True, upload_to=recipe_image_file_path)
    def __str__(self):
        return self.title
dd08aa972995545c465823b4e7303fc544db9e4a | 1,391 | py | Python | spotty/commands/download.py | Inculus/spotty | 56863012668a6c13ad13c2a04f900047e229fbe6 | [
"MIT"
] | 1 | 2020-07-17T07:02:09.000Z | 2020-07-17T07:02:09.000Z | spotty/commands/download.py | Inculus/spotty | 56863012668a6c13ad13c2a04f900047e229fbe6 | [
"MIT"
] | null | null | null | spotty/commands/download.py | Inculus/spotty | 56863012668a6c13ad13c2a04f900047e229fbe6 | [
"MIT"
] | null | null | null | from argparse import Namespace, ArgumentParser
from spotty.commands.abstract_config_command import AbstractConfigCommand
from spotty.commands.writers.abstract_output_writrer import AbstractOutputWriter
from spotty.providers.abstract_instance_manager import AbstractInstanceManager
class DownloadCommand(AbstractConfigCommand):
    """CLI sub-command that downloads project files from a running instance."""
    name = 'download'
    description = 'Download files from the running instance'
    def configure(self, parser: ArgumentParser):
        """Register the sub-command's CLI arguments."""
        super().configure(parser)
        parser.add_argument('-i', '--include', metavar='PATTERN', action='append', type=str, required=True,
                            help='Download all files that matches the specified pattern (see Include Filters '
                                 'for the "aws s3 sync" command). Paths must be relative to your project directory, '
                                 'they cannot be absolute.')
        parser.add_argument('--dry-run', action='store_true', help='Show files to be downloaded')
    def _run(self, instance_manager: AbstractInstanceManager, args: Namespace, output: AbstractOutputWriter):
        """Run the download: exclude everything, then re-include the
        user-specified patterns (aws s3 sync filter semantics)."""
        filters = [
            {'exclude': ['*']},
            {'include': args.include}
        ]
        dry_run = args.dry_run
        # prefix output lines so dry runs are clearly marked
        with output.prefix('[dry-run] ' if dry_run else ''):
            instance_manager.download(filters, output, dry_run)
        output.write('Done')
| 44.870968 | 117 | 0.670022 |
949d97edef568b4e8d0bd1fc05f5670af24a1168 | 1,183 | py | Python | setup.py | ivanpustogarov/manticore | f17410b8427ddbd5d751d8824bdf10ce33c9f3ce | [
"Apache-2.0"
] | null | null | null | setup.py | ivanpustogarov/manticore | f17410b8427ddbd5d751d8824bdf10ce33c9f3ce | [
"Apache-2.0"
] | null | null | null | setup.py | ivanpustogarov/manticore | f17410b8427ddbd5d751d8824bdf10ce33c9f3ce | [
"Apache-2.0"
] | null | null | null | import os
from setuptools import setup, find_packages
on_rtd = os.environ.get('READTHEDOCS') == 'True'
def rtd_dependent_deps():
# RTD tries to build z3, ooms, and fails to build.
if on_rtd:
return []
else:
return ['z3-solver']
# Package metadata and dependency declaration for setuptools.
setup(
    name='manticore',
    description='Manticore is a symbolic execution tool for analysis of binaries and smart contracts.',
    url='https://github.com/trailofbits/manticore',
    author='Trail of Bits',
    version='0.1.10',
    packages=find_packages(),
    # z3-solver is appended only off Read the Docs (it OOMs RTD builds)
    install_requires=[
        'capstone>=3.0.5rc2',
        'pyelftools',
        'unicorn',
        'ply',
        'pysha3',
        'functools32',
    ] + rtd_dependent_deps(),
    dependency_links=[
        'https://github.com/aquynh/capstone/archive/next.zip#egg=capstone-4&subdirectory=bindings/python',
    ],
    # optional extras: `pip install manticore[dev]` / `manticore[redis]`
    extras_require={
        'dev': [
            'keystone-engine',
            'coverage',
            'nose',
            'Sphinx',
            'redis',
        ],
        'redis': [
            'redis',
        ]
    },
    entry_points={
        'console_scripts': [
            'manticore = manticore.__main__:main'
        ]
    }
)
ce371675dd9914752d82c1d920e2697c89b2f6da | 7,770 | py | Python | api/categories.py | JinShiyin/sast_backend | b2e282d393497da1d300d83c1a045c9f78f854ea | [
"MIT"
] | null | null | null | api/categories.py | JinShiyin/sast_backend | b2e282d393497da1d300d83c1a045c9f78f854ea | [
"MIT"
] | null | null | null | api/categories.py | JinShiyin/sast_backend | b2e282d393497da1d300d83c1a045c9f78f854ea | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@author: Shanda Lau 刘祥德
@license: (C) Copyright 2019-now, Node Supply Chain Manager Corporation Limited.
@contact: shandalaulv@gmail.com
@software:
@file: categories.py
@time: 2020/8/12 10:46
@version 1.0
@desc:
"""
import json
import os
from flask_restplus import Namespace, Resource, reqparse
from flask_login import login_required, current_user
from mongoengine.errors import NotUniqueError
import datetime
from config import Config
# REST namespace for category endpoints; categories are persisted as
# per-category JSON files under Config.CATEGORIES_DIRECTORY.
api = Namespace('category', description='Category related operations')
os.makedirs(Config.CATEGORIES_DIRECTORY, exist_ok=True)
# Request parser for POST /category/ (category creation payload).
create_category = reqparse.RequestParser()
create_category.add_argument('name', required=True, location='json')
create_category.add_argument('supercategory', location='json')
create_category.add_argument('color', location='json')
create_category.add_argument('metadata', type=dict, location='json')
create_category.add_argument(
    'keypoint_edges', type=list, default=[], location='json')
create_category.add_argument(
    'keypoint_labels', type=list, default=[], location='json')
create_category.add_argument(
    'keypoint_colors', type=list, default=[], location='json')
# Request parser for PUT /category/<id> (category update payload).
update_category = reqparse.RequestParser()
update_category.add_argument('name', required=True, location='json')
update_category.add_argument('supercategory', location='json')
update_category.add_argument('color', location='json')
update_category.add_argument('metadata', type=dict, location='json')
update_category.add_argument('keypoint_edges', type=list, location='json')
update_category.add_argument('keypoint_labels', type=list, location='json')
update_category.add_argument('keypoint_colors', type=list, location='json')
# Pagination query parameters shared by list endpoints.
page_data = reqparse.RequestParser()
page_data.add_argument('page', default=1, type=int)
page_data.add_argument('limit', default=20, type=int)
@api.route('/')
class Category(Resource):
    """Collection endpoint: list all categories / create a category."""
    def get(self):
        """ Returns all categories """
        # return query_util.fix_ids(current_user.categories.all())
        # each file under CATEGORIES_DIRECTORY holds one category as JSON
        category_ids = os.listdir(Config.CATEGORIES_DIRECTORY)
        categories = []
        for c in category_ids:
            categories.append(json.load(open(os.path.join(Config.CATEGORIES_DIRECTORY, c))))
        return categories
    @api.expect(create_category)
    def post(self):
        """ Creates a category """
        args = create_category.parse_args()
        name = args.get('name')
        supercategory = args.get('supercategory')
        metadata = args.get('metadata', {})
        color = args.get('color')
        keypoint_edges = args.get('keypoint_edges')
        keypoint_labels = args.get('keypoint_labels')
        keypoint_colors = args.get('keypoint_colors')
        # NOTE(review): using the current file count as the new id will
        # collide with an existing id after any deletion, and is racy
        # under concurrent requests — confirm/replace with a persistent
        # counter or uuid.
        category_id = len(os.listdir(Config.CATEGORIES_DIRECTORY))
        try:
            category = {
                'name': name,
                'supercategory': supercategory,
                'color': color,
                'metadata': metadata,
                'keypoint_edges': keypoint_edges,
                'keypoint_labels': keypoint_labels,
                'keypoint_colors': keypoint_colors,
            }
            with open(os.path.join(Config.CATEGORIES_DIRECTORY, f'{category_id}.json'), 'w') as f:
                json.dump(category, f)
        # NOTE(review): NotUniqueError is a mongoengine exception; json.dump
        # cannot raise it, so this handler looks unreachable — likely a
        # leftover from the previous database-backed implementation.
        except NotUniqueError as e:
            return {'message': 'Category already exists. Check the undo tab to fully delete the category.'}, 400
        return category
@api.route('/<int:category_id>')
class Category(Resource):
def get(self, category_id):
""" Returns a category by ID """
# category = current_user.categories.filter(id=category_id).first()
category = json.load(open(os.path.join(Config.CATEGORIES_DIRECTORY, category_id)))
if category is None:
return {'success': False}, 400
# return query_util.fix_ids(category)
return category
def delete(self, category_id):
""" Deletes a category by ID """
category = current_user.categories.filter(id=category_id).first()
if category is None:
return {"message": "Invalid image id"}, 400
if not current_user.can_delete(category):
return {"message": "You do not have permission to delete this category"}, 403
category.update(set__deleted=True,
set__deleted_date=datetime.datetime.now())
return {'success': True}
@api.expect(update_category)
def put(self, category_id):
""" Updates a category name by ID """
category = current_user.categories.filter(id=category_id).first()
# check if the id exits
if category is None:
return {"message": "Invalid category id"}, 400
args = update_category.parse_args()
name = args.get('name')
supercategory = args.get('supercategory', category.supercategory)
color = args.get('color', category.color)
metadata = args.get('metadata', category.metadata)
keypoint_edges = args.get('keypoint_edges', category.keypoint_edges)
keypoint_labels = args.get('keypoint_labels', category.keypoint_labels)
keypoint_colors = args.get('keypoint_colors', category.keypoint_colors)
# check if there is anything to update
if category.name == name \
and category.supercategory == supercategory \
and category.color == color \
and category.keypoint_edges == keypoint_edges \
and category.keypoint_labels == keypoint_labels \
and category.keypoint_colors == keypoint_colors:
return {"message": "Nothing to update"}, 200
# check if the name is empty
if not name:
return {"message": "Invalid category name to update"}, 400
# update name of the category
# check if the name to update exits already in db
# @ToDo: Is it necessary to allow equal category names among different creators?
category.name = name
category.supercategory = supercategory
category.color = color
category.keypoint_edges = keypoint_edges
category.keypoint_labels = keypoint_labels
category.keypoint_colors = keypoint_colors
try:
category.update(
name=category.name,
supercategory=category.supercategory,
color=category.color,
metadata=category.metadata,
keypoint_edges=category.keypoint_edges,
keypoint_labels=category.keypoint_labels,
keypoint_colors=category.keypoint_colors,
)
except NotUniqueError:
# it is only triggered when the name already exists and the creator is the same
return {"message": "Category '" + name_to_update + "' already exits"}, 400
return {"success": True}
@api.route('/data')
class CategoriesData(Resource):
@api.expect(page_data)
def get(self):
""" Endpoint called by category viewer client """
pass
# args = page_data.parse_args()
# limit = args['limit']
# page = args['page']
#
# categories = current_user.categories.filter(deleted=False)
#
# pagination = Pagination(categories.count(), limit, page)
# categories = query_util.fix_ids(
# categories[pagination.start:pagination.end])
#
# for category in categories:
# category['numberAnnotations'] = AnnotationModel.objects(
# deleted=False, category_id=category.get('id')).count()
#
# return {
# "pagination": pagination.export(),
# "page": page,
# "categories": categories
# }
| 37.355769 | 112 | 0.649163 |
564c743577f0921319ab39f516afbce881cb165c | 2,273 | py | Python | application/workprogramsapp/educational_program/general_prof_competencies/models.py | oooooooooooooooosip/analytics_backend | 4006b736c2824af6308b414e32a53a77f805d4cb | [
"MIT"
] | 1 | 2021-01-24T21:25:04.000Z | 2021-01-24T21:25:04.000Z | application/workprogramsapp/educational_program/general_prof_competencies/models.py | oooooooooooooooosip/analytics_backend | 4006b736c2824af6308b414e32a53a77f805d4cb | [
"MIT"
] | 35 | 2020-06-06T01:48:56.000Z | 2022-03-09T08:59:48.000Z | application/workprogramsapp/educational_program/general_prof_competencies/models.py | oooooooooooooooosip/analytics_backend | 4006b736c2824af6308b414e32a53a77f805d4cb | [
"MIT"
] | 31 | 2020-04-26T13:12:53.000Z | 2022-03-28T13:13:35.000Z | from django.db import models
"""
Ключевые компетенции
"""
class GroupOfGeneralProfCompetencesInGeneralCharacteristic(models.Model):
"""
Группа общепрофессиональных компетенций в общей характеристике
"""
name = models.CharField(max_length=512, verbose_name="трудовая функция")
general_characteristic = models.ForeignKey('GeneralCharacteristics', on_delete=models.CASCADE,
verbose_name="Общая характеристика",
related_name = "group_of_general_prof_competences")
def __str__(self):
return str(self.name) + '/' + str(self.general_characteristic)
class GeneralProfCompetencesInGroupOfGeneralCharacteristic(models.Model):
"""
общепрофессиональная компетенция в общей характеристике
"""
group_of_pk = models.ForeignKey('GroupOfGeneralProfCompetencesInGeneralCharacteristic', on_delete=models.CASCADE,
verbose_name="Группа общепрофессиональных компетенций в ОХ",
related_name = "competence_in_group_of_general_prof_competences")
#labor_functions = models.CharField(max_length=512, verbose_name="Трудовая функция")
competence = models.ForeignKey('Competence', on_delete=models.CASCADE, verbose_name="Компетенция",
blank=True, null=True)
def __str__(self):
return str(self.group_of_pk) + '/' + str(self.competence)
class IndicatorInGeneralProfCompetenceInGeneralCharacteristic(models.Model):
"""
Индикатор компетенции в общей характеристике
"""
competence_in_group_of_pk = models.ForeignKey('GeneralProfCompetencesInGroupOfGeneralCharacteristic',
on_delete=models.CASCADE,
verbose_name="Группа общепрофессиональных компетенций в ОХ",
related_name = "indicator_of_competence_in_group_of_general_prof_competences")
indicator = models.ForeignKey('Indicator', on_delete=models.CASCADE, verbose_name="Индикатор ПК компетенции в ОХ")
def __str__(self):
return str(self.competence_in_group_of_pk) + '/' + str(self.indicator)
| 43.711538 | 128 | 0.66432 |
1f43548fe9ab2ded34f884bb5adbed20645fdf98 | 2,174 | py | Python | ambari-client/src/main/python/ambari_client/model/paths.py | flipkart-incubator/incubator-ambari | bf747346312170834c6beb89a60c8624b47aa288 | [
"Apache-2.0"
] | 2 | 2015-07-29T22:50:10.000Z | 2021-11-10T16:05:59.000Z | ambari-client/src/main/python/ambari_client/model/paths.py | flipkart-incubator/incubator-ambari | bf747346312170834c6beb89a60c8624b47aa288 | [
"Apache-2.0"
] | 1 | 2021-11-04T13:31:30.000Z | 2021-11-04T13:31:30.000Z | ambari-client/src/main/python/ambari_client/model/paths.py | isabella232/incubator-ambari | bf747346312170834c6beb89a60c8624b47aa288 | [
"Apache-2.0"
] | 9 | 2016-01-08T21:11:06.000Z | 2021-11-10T16:05:51.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
CLUSTERS_PATH = "/clusters"
CLUSTERS_CONFIG_PATH = "/clusters/%s"
CLUSTER_HOSTS_PATH = "/clusters/%s/hosts"
CLUSTER_HOST_PATH = "/clusters/%s/hosts/%s"
CLUSTER_START_ALL_SERVICES = "/clusters/%s/services?ServiceInfo/state=INSTALLED"
CLUSTER_STOP_ALL_SERVICES = "/clusters/%s/services?ServiceInfo"
SERVICES_PATH = "/clusters/%s/services"
SERVICE_PATH = "/clusters/%s/services/%s"
SERVICE_CREATE_PATH = "/clusters/%s/services/?ServiceInfo/service_name=%s"
SERVICE_COMPONENTS_PATH = "/clusters/%s/services/%s/components?fields=*"
SERVICE_COMPONENT_PATH = "/clusters/%s/services/%s/components/%s"
HOST_PATH = "/hosts/%s"
HOSTS_PATH = "/hosts"
HOSTS_CREATE_PATH = "/clusters/%s/hosts"
HOSTS_COMPONENTS_PATH = "/clusters/%s/hosts/%s/host_components?fields=HostRoles/state"
HOSTS_COMPONENT_PATH = "/clusters/%s/hosts/%s/host_components/%s"
HOSTS_ASSIGN_ROLE = "/clusters/%s/hosts?Hosts/host_name=%s"
BOOTSTRAP_PATH = "/bootstrap"
REQUEST_STATUS_PATH = "/clusters/%s/requests/%s?fields=tasks/Tasks/status"
REQUEST_PATH = "clusters/%s/requests/%s"
CONFIGURATION_PATH = "/clusters/%s/configurations?type=%s&tag=%s"
CREATE_CONFIGURATION_PATH = "/clusters/%s/configurations"
STACK_SERVICES_COMPONENTS_PATH = "/stacks2/HDP/versions/%s/stackServices/%s/serviceComponents?fields=*"
STACK_SERVICES_CONFIG_PATH = "/stacks2/HDP/versions/%s/stackServices/%s/configurations?fields=*"
| 44.367347 | 103 | 0.770009 |
8c61368e653f5a71dcdc810024da47e3a809748d | 1,484 | py | Python | Computer Engineering/Third Year/Operating Systems/First-Come, First-Served Scheduling Non Pre-emptive/fcfs.py | jatin-eleven/Somaiya-University | a839fe9b8c60bd1e0949bc47986ba6655a23617d | [
"MIT"
] | 7 | 2020-10-01T09:33:52.000Z | 2021-10-05T13:42:16.000Z | Computer Engineering/Third Year/Operating Systems/First-Come, First-Served Scheduling Non Pre-emptive/fcfs.py | jatin-eleven/Somaiya-University | a839fe9b8c60bd1e0949bc47986ba6655a23617d | [
"MIT"
] | null | null | null | Computer Engineering/Third Year/Operating Systems/First-Come, First-Served Scheduling Non Pre-emptive/fcfs.py | jatin-eleven/Somaiya-University | a839fe9b8c60bd1e0949bc47986ba6655a23617d | [
"MIT"
] | 9 | 2020-10-01T04:40:32.000Z | 2021-10-01T19:09:59.000Z | # Implementation of First Come First Serve Algorithm for non-preemptive processes
a = []
burst_time = []
waiting_time = []
turn_around_time = []
total_waiting_time = 0
total_turnaround_time = 0
sum = 0
n = int(input("Enter the number of processes"))
for i in range (n):
a.append(input("Enter process id "))
burst_time.append(int(input("Enetr corresponding burst time")))
for i in range (n):
for j in range (i,n):
if(a[i]>=a[j]):
temp1 = a[i]
a[i] = a[j]
a[j] = temp1
temp1 = burst_time[i]
burst_time[i]=burst_time[j]
burst_time[j] = temp1
print("The turn-around times are ")
for i in range (n):
sum = sum + burst_time[i]
turn_around_time.append(sum)
total_turnaround_time = total_turnaround_time + sum
print(str(a[i])+" "+str(sum))
print("The waiting times are ")
for i in range (n):
temp = turn_around_time[i]-burst_time[i]
print(str(a[i])+" "+str(temp))
waiting_time.append(temp)
total_waiting_time = total_waiting_time + temp
print( "Processes Burst time " + " Waiting time " + " Turn around time")
for i in range (n):
print(" " + str(a[i]) + "\t\t" +
str(burst_time[i]) + "\t\t" +
str(waiting_time[i]) + "\t\t " +
str(turn_around_time[i]))
print("The average turnaround time is ")
print(total_turnaround_time/n)
print("The average waiting time is ")
print(total_waiting_time/n)
| 30.916667 | 81 | 0.607817 |
b0c83f4ab8ff0051a96db78f3729f85df6f04e9d | 3,492 | py | Python | colour/plotting/tm3018/tests/test_report.py | wenh06/colour | 445fdad2711ae39c95b4375166905568d24a95f4 | [
"BSD-3-Clause"
] | 1 | 2021-09-09T01:53:40.000Z | 2021-09-09T01:53:40.000Z | colour/plotting/tm3018/tests/test_report.py | wenh06/colour | 445fdad2711ae39c95b4375166905568d24a95f4 | [
"BSD-3-Clause"
] | null | null | null | colour/plotting/tm3018/tests/test_report.py | wenh06/colour | 445fdad2711ae39c95b4375166905568d24a95f4 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Defines unit tests for :mod:`colour.plotting.tm3018.report` module.
"""
from __future__ import division, unicode_literals
import unittest
from matplotlib.pyplot import Axes, Figure
from colour.colorimetry import SDS_ILLUMINANTS
from colour.plotting.tm3018.report import (
plot_single_sd_colour_rendition_report_full,
plot_single_sd_colour_rendition_report_intermediate,
plot_single_sd_colour_rendition_report_simple,
plot_single_sd_colour_rendition_report)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2020 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-developers@colour-science.org'
__status__ = 'Production'
__all__ = [
'TestPlotSingleSdColourRenditionReportFull',
'TestPlotSingleSdColourRenditionReportIntermediate',
'TestPlotSingleSdColourRenditionReportSimple',
'TestPlotSingleSdColourRenditionReport'
]
class TestPlotSingleSdColourRenditionReportFull(unittest.TestCase):
"""
Defines :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_full` definition unit tests methods.
"""
def test_plot_single_sd_colour_rendition_report_full(self):
"""
Tests :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_full` definition.
"""
figure, axes = plot_single_sd_colour_rendition_report_full(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSingleSdColourRenditionReportIntermediate(unittest.TestCase):
"""
Defines :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_intermediate` definition unit tests
methods.
"""
def test_plot_single_sd_colour_rendition_report_intermediate(self):
"""
Tests :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_intermediate` definition.
"""
figure, axes = plot_single_sd_colour_rendition_report_intermediate(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSingleSdColourRenditionReportSimple(unittest.TestCase):
"""
Defines :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_simple` definition unit tests methods.
"""
def test_plot_color_vector_graphic(self):
"""
Tests :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report_simple` definition.
"""
figure, axes = plot_single_sd_colour_rendition_report_simple(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
class TestPlotSingleSdColourRenditionReport(unittest.TestCase):
"""
Defines :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report` definition unit tests methods.
"""
def test_plot_single_sd_colour_rendition_report(self):
"""
Tests :func:`colour.plotting.tm3018.report.\
plot_single_sd_colour_rendition_report` definition.
"""
figure, axes = plot_single_sd_colour_rendition_report(
SDS_ILLUMINANTS['FL2'])
self.assertIsInstance(figure, Figure)
self.assertIsInstance(axes, Axes)
if __name__ == '__main__':
unittest.main()
| 31.178571 | 78 | 0.749714 |
69f3aeeb35e965a8a1d4da708567ab677d39092d | 8,062 | py | Python | code/core_nn/nn_irl.py | naivety77/SAIL | 452c63cd8b80dd29921fa166ebbc3b3943c1d3c9 | [
"MIT"
] | 8 | 2022-02-21T08:59:22.000Z | 2022-02-28T02:14:39.000Z | code/core_nn/nn_irl.py | naivety77/SAIL | 452c63cd8b80dd29921fa166ebbc3b3943c1d3c9 | [
"MIT"
] | null | null | null | code/core_nn/nn_irl.py | naivety77/SAIL | 452c63cd8b80dd29921fa166ebbc3b3943c1d3c9 | [
"MIT"
] | null | null | null | from my_utils import *
class Discriminator(nn.Module):
def __init__(self, state_dim, action_dim, num_outputs=1, hidden_size=(100, 100), activation='tanh', normalization=None, clip=0):
super().__init__()
if activation == 'tanh':
self.activation = torch.tanh
elif activation == 'relu':
self.activation = F.relu
elif activation == 'sigmoid':
self.activation = F.sigmoid
elif activation == "leakyrelu":
self.activation = F.leaky_relu
self.clip = clip
self.affine_layers = nn.ModuleList()
last_dim = state_dim + action_dim
for nh in hidden_size:
if normalization == "spectral":
self.affine_layers.append(nn.utils.spectral_norm(nn.Linear(last_dim, nh)))
elif normalization == "weight":
self.affine_layers.append(nn.utils.weight_norm(nn.Linear(last_dim, nh)))
else:
self.affine_layers.append(nn.Linear(last_dim, nh))
last_dim = nh
self.score_out = nn.Linear(last_dim, num_outputs)
self.score_out.weight.data.mul_(0.1)
self.score_out.bias.data.mul_(0.0)
if normalization == "spectral":
self.score_out = nn.utils.spectral_norm(self.score_out)
elif normalization == "weight_norm":
self.score_out = nn.utils.weight_norm(self.score_out)
## use by GP regularization code. Take x as (s,a) or s.
def forward(self, x):
for affine in self.affine_layers:
x = self.activation(affine(x))
x = self.score_out(x)
if self.clip > 0: # if clip, we use sigmoid to bound in (0, clip) (positive)
x = torch.sigmoid(x) * self.clip
if self.clip < 0: # tanh to bound in (clip, -clip)
x = torch.tanh(x) * -self.clip
return x
def noclip_forward(self, x):
for affine in self.affine_layers:
x = self.activation(affine(x))
x = self.score_out(x)
return x
def get_noclip_reward(self, s, a=None):
x = torch.cat((s,a), 1) if a is not None else s
score = self.forward(x)
return score
## used for reward.
def get_reward(self, s, a=None):
x = torch.cat((s,a), 1) if a is not None else s
score = self.forward(x)
return score
class VDB_discriminator(nn.Module):
def __init__(self, state_dim, action_dim, encode_dim=128, num_outputs=1, hidden_size=(100, 100), activation='tanh', normalization=None, clip=0):
super().__init__()
if activation == 'tanh':
self.activation = torch.tanh
elif activation == 'relu':
self.activation = F.relu
elif activation == 'sigmoid':
self.activation = F.sigmoid
elif activation == "leakyrelu":
self.activation = F.leaky_relu
self.clip = clip
self.affine_layers = nn.ModuleList()
last_dim = state_dim + action_dim
for nh in hidden_size:
if normalization == "spectral":
self.affine_layers.append(nn.utils.spectral_norm(nn.Linear(last_dim, nh)))
elif normalization == "weight":
self.affine_layers.append(nn.utils.weight_norm(nn.Linear(last_dim, nh)))
else:
self.affine_layers.append(nn.Linear(last_dim, nh))
last_dim = nh
self.encoder_mean = nn.Linear(last_dim, encode_dim)
self.encoder_mean.weight.data.mul_(0.1)
self.encoder_mean.bias.data.mul_(0.0)
self.encoder_logstd = nn.Linear(last_dim, encode_dim)
self.encoder_logstd.weight.data.mul_(0.1)
self.encoder_logstd.bias.data.mul_(0.0)
self.score_out = nn.Linear(encode_dim, num_outputs)
self.score_out.weight.data.mul_(0.1)
self.score_out.bias.data.mul_(0.0)
if normalization == "spectral":
self.score_out = nn.utils.spectral_norm(self.score_out)
self.encoder_mean = nn.utils.spectral_norm(self.encoder_mean)
self.encoder_logstd = nn.utils.spectral_norm(self.encoder_logstd)
elif normalization == "weight_norm":
self.score_out = nn.utils.weight_norm(self.score_out)
self.encoder_mean = nn.utils.weight_norm(self.encoder_mean)
self.encoder_logstd = nn.utils.weight_norm(self.encoder_logstd)
## use by GP regularization code Take x as (s,a) or s.
def forward(self, x):
for affine in self.affine_layers:
x = self.activation(affine(x))
z_mean = self.encoder_mean(x)
z_logstd = self.encoder_logstd(x)
z = z_mean + torch.exp(z_logstd) * torch.randn_like(z_logstd)
score = self.score_out(z)
if self.clip > 0: # if clip, we use sigmoid to bound in (0, clip) (positive argument)
score = torch.sigmoid(score) * self.clip
if self.clip < 0: # tanh to bound in (clip, -clip)
score = torch.tanh(score) * -self.clip
return score
## used for reward. Sigmoid is applied in the main code
def get_reward(self, s, a=None):
x = torch.cat((s,a), 1) if a is not None else s
for affine in self.affine_layers:
x = self.activation(affine(x))
z_mean = self.encoder_mean(x)
score = self.score_out(z_mean)
return score
def get_full(self, s, a=None):
x = torch.cat((s,a), 1) if a is not None else s
for affine in self.affine_layers:
x = self.activation(affine(x))
z_mean = self.encoder_mean(x)
z_logstd = self.encoder_logstd(x)
score = self.score_out(z_mean + torch.exp(z_logstd) * torch.randn_like(z_logstd) )
return score, z_mean, z_logstd
class Posterior(nn.Module):
def __init__(self, input_dim, encode_dim, hidden_size=(256, 256), activation='relu'):
super().__init__()
if activation == 'tanh':
self.activation = torch.tanh
elif activation == 'relu':
self.activation = F.relu
elif activation == 'sigmoid':
self.activation = F.sigmoid
elif activation == "leakyrelu":
self.activation = F.leaky_relu
self.encode_dim = encode_dim
self.affine_layers = nn.ModuleList()
last_dim = input_dim
for nh in hidden_size:
self.affine_layers.append(nn.Linear(last_dim, nh))
last_dim = nh
self.score_head = nn.Linear(last_dim, encode_dim)
self.score_head.weight.data.mul_(0.1)
self.score_head.bias.data.mul_(0.0)
def forward(self, x):
for affine in self.affine_layers:
x = self.activation(affine(x))
x = self.score_head(x)
return x
def get_logposterior(self, states, actions, latent_code):
x = torch.cat((states, actions), 1)
p = F.log_softmax(self.forward(x), dim=1) # [batch_size, code_dim]
## Gather output according to the latent code, which should have size [batch_size, 1] and have discrete value in range [0, code_dim-1]
latent_code_e = latent_code.view(-1,1).expand(-1, self.encode_dim ).long().to(device) ## [batch_size, 1] to [batch_size, code_dim]
p_gather = p.gather(1, latent_code_e)[:,0].unsqueeze(-1)
return p_gather
def sample_code(self):
return torch.randint(0, self.encode_dim, size=(1,1))
class Reward(nn.Module):
def __init__(self, state_dim, action_dim, hidden_size=256):
super(Reward, self).__init__()
self.state_dim = state_dim
self.action_dim = action_dim
self.fc1 = nn.Linear(self.state_dim+self.action_dim, hidden_size)
self.fc2 = nn.Linear(hidden_size, hidden_size)
self.fc3 = nn.Linear(hidden_size, 1)
def forward(self, traj):
x = F.relu(self.fc1(traj))
x = F.relu(self.fc2(x))
x = self.fc3(x)
# x = torch.sigmoid(x) * 5
return x
| 38.390476 | 148 | 0.601464 |
1da8d017e3a8746ee3506e04501838692cd3036c | 2,595 | py | Python | research/object_detection/utils/category_util.py | hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | [
"Apache-2.0"
] | 1 | 2021-05-22T12:50:50.000Z | 2021-05-22T12:50:50.000Z | object_detection/utils/category_util.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | [
"MIT"
] | null | null | null | object_detection/utils/category_util.py | DemonDamon/mask-detection-based-on-tf2odapi | 192ae544169c1230c21141c033800aa1bd94e9b6 | [
"MIT"
] | 1 | 2021-09-14T15:04:34.000Z | 2021-09-14T15:04:34.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions for importing/exporting Object Detection categories."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import tensorflow.compat.v1 as tf
def load_categories_from_csv_file(csv_path):
"""Loads categories from a csv file.
The CSV file should have one comma delimited numeric category id and string
category name pair per line. For example:
0,"cat"
1,"dog"
2,"bird"
...
Args:
csv_path: Path to the csv file to be parsed into categories.
Returns:
categories: A list of dictionaries representing all possible categories.
The categories will contain an integer 'id' field and a string
'name' field.
Raises:
ValueError: If the csv file is incorrectly formatted.
"""
categories = []
with tf.gfile.Open(csv_path, 'r') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
for row in reader:
if not row:
continue
if len(row) != 2:
raise ValueError('Expected 2 fields per row in csv: %s' % ','.join(row))
category_id = int(row[0])
category_name = row[1]
categories.append({'id': category_id, 'name': category_name})
return categories
def save_categories_to_csv_file(categories, csv_path):
"""Saves categories to a csv file.
Args:
categories: A list of dictionaries representing categories to save to file.
Each category must contain an 'id' and 'name' field.
csv_path: Path to the csv file to be parsed into categories.
"""
categories.sort(key=lambda x: x['id'])
with tf.gfile.Open(csv_path, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=',', quotechar='"')
for category in categories:
writer.writerow([category['id'], category['name']])
| 33.269231 | 81 | 0.660886 |
c0b07c4ca697f8b11c8267b27bf1ee56a83a1d0e | 5,177 | py | Python | mailchimp_marketing_asyncio/models/members_to_addremove_tofrom_a_static_segment.py | john-parton/mailchimp-asyncio | 3865ca0867bec8f537dc1e3256aa3a160c00f8a2 | [
"Apache-2.0"
] | null | null | null | mailchimp_marketing_asyncio/models/members_to_addremove_tofrom_a_static_segment.py | john-parton/mailchimp-asyncio | 3865ca0867bec8f537dc1e3256aa3a160c00f8a2 | [
"Apache-2.0"
] | null | null | null | mailchimp_marketing_asyncio/models/members_to_addremove_tofrom_a_static_segment.py | john-parton/mailchimp-asyncio | 3865ca0867bec8f537dc1e3256aa3a160c00f8a2 | [
"Apache-2.0"
] | 1 | 2022-03-09T14:52:22.000Z | 2022-03-09T14:52:22.000Z | # coding: utf-8
"""
Mailchimp Marketing API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 3.0.74
Contact: apihelp@mailchimp.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MembersToAddremoveTofromAStaticSegment(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'members_to_add': 'list[str]',
'members_to_remove': 'list[str]'
}
attribute_map = {
'members_to_add': 'members_to_add',
'members_to_remove': 'members_to_remove'
}
def __init__(self, members_to_add=None, members_to_remove=None): # noqa: E501
"""MembersToAddremoveTofromAStaticSegment - a model defined in Swagger""" # noqa: E501
self._members_to_add = None
self._members_to_remove = None
self.discriminator = None
if members_to_add is not None:
self.members_to_add = members_to_add
if members_to_remove is not None:
self.members_to_remove = members_to_remove
@property
def members_to_add(self):
"""Gets the members_to_add of this MembersToAddremoveTofromAStaticSegment. # noqa: E501
An array of emails to be used for a static segment. Any emails provided that are not present on the list will be ignored. A maximum of 500 members can be sent. # noqa: E501
:return: The members_to_add of this MembersToAddremoveTofromAStaticSegment. # noqa: E501
:rtype: list[str]
"""
return self._members_to_add
@members_to_add.setter
def members_to_add(self, members_to_add):
"""Sets the members_to_add of this MembersToAddremoveTofromAStaticSegment.
An array of emails to be used for a static segment. Any emails provided that are not present on the list will be ignored. A maximum of 500 members can be sent. # noqa: E501
:param members_to_add: The members_to_add of this MembersToAddremoveTofromAStaticSegment. # noqa: E501
:type: list[str]
"""
self._members_to_add = members_to_add
@property
def members_to_remove(self):
"""Gets the members_to_remove of this MembersToAddremoveTofromAStaticSegment. # noqa: E501
An array of emails to be used for a static segment. Any emails provided that are not present on the list will be ignored. A maximum of 500 members can be sent. # noqa: E501
:return: The members_to_remove of this MembersToAddremoveTofromAStaticSegment. # noqa: E501
:rtype: list[str]
"""
return self._members_to_remove
@members_to_remove.setter
def members_to_remove(self, members_to_remove):
"""Sets the members_to_remove of this MembersToAddremoveTofromAStaticSegment.
An array of emails to be used for a static segment. Any emails provided that are not present on the list will be ignored. A maximum of 500 members can be sent. # noqa: E501
:param members_to_remove: The members_to_remove of this MembersToAddremoveTofromAStaticSegment. # noqa: E501
:type: list[str]
"""
self._members_to_remove = members_to_remove
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(MembersToAddremoveTofromAStaticSegment, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, MembersToAddremoveTofromAStaticSegment):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 35.458904 | 181 | 0.639173 |
ade3db1022b6a682d41f3f488b52af369d57ab1c | 410 | py | Python | repertoire_manager/migrations/0003_auto_20180808_2336.py | vevurka/squirrel-songer | 3672aafda0842cdd787e16f61c0595992d9d6112 | [
"MIT"
] | 2 | 2018-08-31T22:50:08.000Z | 2018-09-07T23:58:41.000Z | repertoire_manager/migrations/0003_auto_20180808_2336.py | vevurka/squirrel-songer | 3672aafda0842cdd787e16f61c0595992d9d6112 | [
"MIT"
] | 9 | 2020-05-01T11:52:15.000Z | 2021-09-22T17:44:15.000Z | repertoire_manager/migrations/0003_auto_20180808_2336.py | vevurka/squirrel-songer | 3672aafda0842cdd787e16f61c0595992d9d6112 | [
"MIT"
] | 1 | 2020-05-09T19:01:54.000Z | 2020-05-09T19:01:54.000Z | # Generated by Django 2.0.7 on 2018-08-08 23:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('repertoire_manager', '0002_piecemodel_active'),
]
operations = [
migrations.AlterField(
model_name='piecemodel',
name='level',
field=models.IntegerField(blank=True, null=True),
),
]
| 21.578947 | 61 | 0.614634 |
c900797eed016fbcd42d4f1ae5a44612011fb85b | 8,175 | py | Python | m/expressions.py | minersoft/miner | 247ae1ffb27a4ce3203ac236afd2ed145b31a465 | [
"BSD-3-Clause"
] | 1 | 2015-04-18T16:48:48.000Z | 2015-04-18T16:48:48.000Z | m/expressions.py | minersoft/miner | 247ae1ffb27a4ce3203ac236afd2ed145b31a465 | [
"BSD-3-Clause"
] | null | null | null | m/expressions.py | minersoft/miner | 247ae1ffb27a4ce3203ac236afd2ed145b31a465 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright Michael Groys, 2012-2014
#
#
# This file implements expressions used in commands
# Two Expressions classes are available:
# Expression - general Expression which are translated as is to python expressions
# MatchExpression - uses regular expressions for match statements
#
# Classification tags stored in Expression.myType.  The string values are
# domain names used by the miner tool (presumably categories of expression
# results/streams -- confirm against the code that reads myType); the empty
# string means the type has not been determined yet.
EXP_TYPE_UNKNOWN = ""
EXP_TYPE_ATOMIC = "atomic"
EXP_TYPE_COAL = "coal"
EXP_TYPE_ACCUMULATED = "accumulated"
EXP_TYPE_DIAMOND = "diamond"
class Expression:
    """Incrementally builds a Python source-code expression as text.

    Each set* method renders one syntactic construct into ``self.exp`` (the
    generated Python text) and accumulates, in ``self.myGlobalExpressions``,
    every sub-expression that must contribute to the generated module's
    global section (see getGlobalSection on subclasses).
    """
    def __init__(self):
        self.name = ""                      # optional symbolic name (e.g. assignment target)
        self.exp = ""                       # generated Python expression text
        self.myType = EXP_TYPE_UNKNOWN      # informational type tag
        self.myGlobalExpressions = []       # expressions needing global-section code
    def setDeref(self, expParent, childName):
        # attribute access: parent.child
        self.name = childName
        self.exp = expParent.exp + "." + childName
        self.myGlobalExpressions = expParent.getGlobalExpressions()
    def setBinary(self, left, op, right):
        # binary operator: left <op> right
        self.exp = "%s %s %s" % (left.exp, op, right.exp)
        self.myGlobalExpressions = left.getGlobalExpressions() + right.getGlobalExpressions()
    def setUnary(self, op, exp):
        # unary operator: <op> exp
        self.name = ""
        self.exp = op + " " + exp.exp
        self.myGlobalExpressions = exp.getGlobalExpressions()
    def setListAccess(self, exp, indexExp):
        # subscript: exp[indexExp]; inherits the index expression's name
        self.name = indexExp.getName()
        self.exp = "%s[%s]" % (exp.exp, indexExp.exp)
        self.myGlobalExpressions = exp.getGlobalExpressions() + indexExp.getGlobalExpressions()
    def setListRange(self, exp, fromExp, toExp):
        # slice: exp[from:to], either bound optional
        self.name = ""
        self.myGlobalExpressions = exp.getGlobalExpressions()
        if fromExp and toExp:
            self.exp = "%s[%s:%s]" % (exp.exp, fromExp.getValue(), toExp.getValue())
            self.myGlobalExpressions += fromExp.getGlobalExpressions() + toExp.getGlobalExpressions()
        elif fromExp:
            self.exp = "%s[%s:]" % (exp.exp, fromExp.getValue())
            self.myGlobalExpressions += fromExp.getGlobalExpressions()
        elif toExp:
            self.exp = "%s[:%s]" % (exp.exp, toExp.getValue())
            self.myGlobalExpressions += toExp.getGlobalExpressions()
        else:
            self.exp = "%s[:]" % exp.exp
    def setFunctionCall(self, exp, parameters, namedParameters):
        # call: exp(positional..., named...)
        self.name = ""
        if not parameters and not namedParameters:
            self.exp = "%s()" % exp.getValue()
        elif not namedParameters:
            self.exp = "%s(%s)" % (exp.getValue(), ", ".join(e.getValue() for e in parameters))
        elif not parameters:
            self.exp = "%s(%s)" % (exp.getValue(), ", ".join(e.getValue() for e in namedParameters))
        else:
            self.exp = "%s(%s, %s)" % (exp.getValue(),
                                       ", ".join(e.getValue() for e in parameters),
                                       ", ".join(e.getValue() for e in namedParameters))
        self.myGlobalExpressions = exp.getGlobalExpressions()
        if parameters:
            # NOTE: the loop variable shadows the 'exp' parameter (already consumed above)
            for exp in parameters:
                self.myGlobalExpressions.extend(exp.getGlobalExpressions())
        if namedParameters:
            for exp in namedParameters:
                self.myGlobalExpressions.extend(exp.getGlobalExpressions())
    def setList(self, listOfExpressions):
        # list literal: [e1, e2, ...]
        self.name = ""
        for exp in listOfExpressions:
            self.myGlobalExpressions.extend(exp.getGlobalExpressions())
        listStr = ", ".join(exp.getValue() for exp in listOfExpressions)
        self.exp = '[' + listStr + ']'
    def setAssignment(self, name, exp):
        # assignment statement: name = exp
        self.exp = "%s = %s" % (name, exp.getValue())
        self.name = name
        self.myGlobalExpressions = exp.getGlobalExpressions()
    def setId(self, id):
        # bare identifier
        self.name = id
        self.exp = id
    def setValue(self, value):
        # literal value, already rendered as source text
        self.name = ""
        self.exp = value
    def getName(self):
        return self.name
    def setName(self, name):
        self.name = name
    def getValue(self):
        return self.exp
    def __str__(self):
        return self.exp
    def setBracketExpression(self, exp):
        # parenthesized sub-expression: ( exp )
        self.exp = "( " + exp.exp + " )"
        self.name = exp.name
        self.myGlobalExpressions = exp.getGlobalExpressions()
    def getGlobalExpressions(self):
        return self.myGlobalExpressions
    def getGlobalSection(self):
        # plain expressions contribute nothing to the generated global section
        return ""
    def setConditional(self, ifExp, thenExp, elseExp):
        # conditional expression: (then if cond else else)
        self.name = ""
        self.exp = "(%s if %s else %s)" % (thenExp.getValue(), ifExp.getValue(), elseExp.getValue())
        self.myGlobalExpressions = ifExp.getGlobalExpressions() + thenExp.getGlobalExpressions() + elseExp.getGlobalExpressions()
    def setTupleWithComa(self, expressions):
        # tuple with trailing commas, e.g. (a, b, ) -- guarantees tuple-ness for 1 element
        self.name = ""
        self.exp = "(" + "".join("%s, " % e.getValue() for e in expressions) + ")"
        for e in expressions:
            self.myGlobalExpressions += e.getGlobalExpressions()
    def setTupleWithoutComa(self, expressions):
        # tuple without trailing comma, e.g. (a, b)
        self.name = ""
        self.exp = "(" + ", ".join(e.getValue() for e in expressions) + ")"
        for e in expressions:
            self.myGlobalExpressions += e.getGlobalExpressions()
    def setListComprehension(self, itemExpression, iteratorId, listExpression):
        # comprehension body: item for iteratorId in listExpression
        self.name = ""
        self.exp = "%s for %s in %s" % (itemExpression.getValue(), iteratorId, listExpression.getValue())
        self.myGlobalExpressions = itemExpression.getGlobalExpressions() + listExpression.getGlobalExpressions()
    def setDictionaryItems(self, itemsList):
        # dict literal: { k1: v1, ... }; keys/values are Expressions (uses __str__)
        s = ", ".join("%s: %s" % (key, value) for (key, value) in itemsList)
        self.name = ""
        self.exp = "{ " + s + " }"
        self.myGlobalExpressions = []
        for (key, value) in itemsList:
            self.myGlobalExpressions.extend(key.getGlobalExpressions())
            self.myGlobalExpressions.extend(value.getGlobalExpressions())
    def setLambda(self, paramList, expression):
        # lambda literal: lambda p1, p2: body
        # NOTE(review): does not propagate expression.getGlobalExpressions() --
        # confirm this is intentional
        self.exp = "lambda " + ", ".join(paramList) + ": "
        self.exp += expression.getValue()
        self.name = ""
class MatchExpression(Expression):
    """Expression that tests a value against a compiled regular expression.

    Each instance gets a unique id; the compiled pattern is emitted once
    into the generated module's global section as _regExp<id>.
    """
    _expressionId = 1
    def __init__(self, exp, regExp, negate=False):
        Expression.__init__(self)
        self.name = ""
        self.myRegExp = regExp
        self.myId = MatchExpression._expressionId
        MatchExpression._expressionId += 1
        self.myGlobalExpressions = (exp.getGlobalExpressions() +
                                    regExp.getGlobalExpressions() +
                                    [self])
        search_call = "_regExp%d.search(%s)" % (self.myId, exp.exp)
        self.exp = "(not %s)" % search_call if negate else search_call
    def getGlobalSection(self):
        # compile the pattern once, up front, in the generated module
        return " _regExp%d = re.compile(%s)\n" % (self.myId, self.myRegExp.exp)
class CounterExpression(Expression):
    """Expression wrapping a named _runtime.Counter instance.

    The counter object itself is declared in the generated global section;
    the mutator methods below rewrite self.exp into the matching method call.
    """
    def __init__(self, name):
        Expression.__init__(self)
        self.name = name
        self.exp = "%s.val" % self.name     # default: read the counter's value
        self.myGlobalExpressions = [self]
    def getGlobalSection(self):
        # declares the counter once in the generated module
        return " %s = _runtime.Counter()\n" % self.name
    def preIncr(self):
        self.exp = "%s.preIncr()" % self.name
    def postIncr(self):
        self.exp = "%s.postIncr()" % self.name
    def preDecr(self):
        self.exp = "%s.preDecr()" % self.name
    def postDecr(self):
        self.exp = "%s.postDecr()" % self.name
    def add(self, exp):
        self.exp = "%s.add(%s)" % (self.name, exp.getValue())
        self.myGlobalExpressions += exp.getGlobalExpressions()
    def sub(self, exp):
        self.exp = "%s.sub(%s)" % (self.name, exp.getValue())
        self.myGlobalExpressions += exp.getGlobalExpressions()
    def method(self, methodName, expressionList):
        # arbitrary counter method call with rendered arguments
        self.exp = "%s.%s(%s)" % (self.name, methodName, ", ".join([e.getValue() for e in expressionList]))
        for exp in expressionList:
            self.myGlobalExpressions += exp.getGlobalExpressions()
class DictCounterExpression(CounterExpression):
    """Counter addressed by a runtime index expression, backed by a defaultdict."""
    def __init__(self, name, indexExp):
        self.realName = name
        # the effective counter "name" is a dict-subscript expression
        CounterExpression.__init__(self, "_%s_dict[%s]" % (name, indexExp.getValue()))
        self.indexExp = indexExp
        self.myGlobalExpressions = [self] + indexExp.getGlobalExpressions()
    def getName(self):
        # report the user-visible name, not the generated dict subscript
        return self.realName
    def getGlobalSection(self):
        return " _%s_dict = collections.defaultdict(_runtime.Counter)\n" % self.realName
| 41.497462 | 129 | 0.617615 |
be5b049a67b198ea3e5771bdb4e943a08e28db11 | 11,299 | py | Python | utilities/Gnip-Analysis-Pipeline/measurements.py | compston/TAP-Workshop | 84a7987081f71d45e057b30cc8e0cbc1b6af90d7 | [
"MIT"
] | null | null | null | utilities/Gnip-Analysis-Pipeline/measurements.py | compston/TAP-Workshop | 84a7987081f71d45e057b30cc8e0cbc1b6af90d7 | [
"MIT"
] | null | null | null | utilities/Gnip-Analysis-Pipeline/measurements.py | compston/TAP-Workshop | 84a7987081f71d45e057b30cc8e0cbc1b6af90d7 | [
"MIT"
] | null | null | null | import collections
import operator
import sys
# master list of class definitions
m = []
"""
This file is just a bunch of class definitions,
each of which defines a particular measurement..
All classes appended to "m" will be run on all tweets;
by calling the 'add_tweet' method. This method is defined
in MeasurementBase and applies any filters
as described below. If a tweet passes all filters, it is
passed to the 'update' method.
All classes inheriting from MeasurementBase must
define or inherit the methods:
- update:
updates internal data store; no return value
- get:
returns a representation of internal data store
Measurements can be selectively applied to tweets by
defining the class member 'filters', which is a list of 3-tuples:
([list of JSON key names to access the Tweet element]
, comparison_function
, comparison_value).
Tweets will only be parsed if comparison_function(Tweet_element,comparison_value)
is true.
"""
#
#Helper functions:
#
def term_comparator(term1, term2):
    """True if the two terms are equal ignoring case and surrounding spaces."""
    def _norm(term):
        return term.lower().strip(' ').rstrip(' ')
    return _norm(term1) == _norm(term2)
try:
    from simple_n_grams.stop_words import StopWords
    stop_words = StopWords()
except ImportError:
    # BUG FIX: the fallback must support string subscripting the way a
    # StopWords instance does (token_ok does `stop_words[token]`); the
    # original fallback `[]` raised TypeError on any string index.
    # defaultdict(bool) returns a falsy value for every token.
    stop_words = collections.defaultdict(bool)
def token_ok(token):
    """Accept tokens that are at least 3 characters and not stop words."""
    if len(token) < 3:
        return False
    # a truthy stop_words[token] marks a stop word
    return not stop_words[token]
#
# parent classes:
#
# naming convention: classes that inherit from MeasurementBase
# also have names that end in "Base".
# other parent classes do not
#
class MeasurementBase(object):
    """
    Base class for all measurement objects.
    It implements 'get_name' and 'add_tweet'.
    Note that 'add_tweet' calls 'update',
    which must be defined in a derived class."""
    def get_name(self):
        # the class name doubles as the measurement's name
        return self.__class__.__name__
    def add_tweet(self, tweet):
        """ this method is called by the aggregator script, for each enriched tweet """
        def get_element(data, key_path):
            """ recursive helper function to get tweet elements """
            key = key_path[0]
            if len(key_path) == 1:
                return data[key]
            else:
                new_key_path = key_path[1:]
                obj = data[key]
                if isinstance(obj, list):
                    # fan out over list elements, collecting one result each
                    results = []
                    for o in obj:
                        results.append(get_element(o, new_key_path))
                    return results
                else:
                    return get_element(obj, new_key_path)
        # return before calling 'update' if tweet fails any filter
        if hasattr(self, "filters"):
            # each filter is ([key path], comparator, value); the tweet
            # passes only if comparator(element, value) is truthy
            for key_path, comparator, value in self.filters:
                data = get_element(tweet, key_path)
                if not comparator(data, value):
                    return
        self.update(tweet)
class SimpleCounterBase(MeasurementBase):
    """Counts every tweet that passes the filters; get() returns the total."""
    def __init__(self):
        self.counter = 0
    def update(self, tweet):
        self.counter = self.counter + 1
    def get(self):
        return self.counter
class SimpleCountersBase(MeasurementBase):
    """ base class for multiple integer counters """
    def __init__(self):
        # key -> count, missing keys start at 0
        self.counters = collections.defaultdict(int)
    def get(self):
        return self.counters
# these classes provide 'get_tokens' methods for
# various tweet components
class TokenizedBody(object):
    """Provides get_tokens() for tokens in the tweet body.

    Assumes the Stanford NLP enrichment ('BodyNLPEnrichment') was run on the
    tweet body; returns [] when the enrichment is absent or malformed.
    """
    def get_tokens(self, tweet):
        tokens = []
        try:
            for sentence in tweet['enrichments']['BodyNLPEnrichment']['sentences']:
                for token in sentence["tokens"]:
                    if token_ok(token):
                        tokens.append(token)
        except (TypeError, KeyError):
            # BUG FIX: the original `except TypeError,KeyError:` is legacy
            # py2 syntax meaning `except TypeError as KeyError` -- it never
            # caught KeyError, so a tweet without the enrichment crashed.
            pass
        return tokens
class TokenizedBio(object):
    """Provides get_tokens() for tokens in the user bio.

    Assumes the Stanford NLP enrichment ('BioNLPEnrichment') was run on the
    user bio; returns [] when the enrichment is absent or malformed.
    """
    def get_tokens(self, tweet):
        tokens = []
        try:
            for sentence in tweet['enrichments']['BioNLPEnrichment']['sentences']:
                for token in sentence["tokens"]:
                    if token_ok(token):
                        tokens.append(token)
        except (TypeError, KeyError):
            # BUG FIX: `except TypeError,KeyError:` only caught TypeError
            # (binding it to the name KeyError); missing enrichments raised.
            pass
        return tokens
# these classes provide specialized 'get' methods
# for classes with 'counters' members
class TopCounts(object):
    """Mixin 'get' returning the 20 highest-count (key, count) pairs.

    Requires a `self.counters` mapping on the concrete class.
    """
    def get(self):
        top_n = 20
        ranked = sorted(self.counters.items(), key=operator.itemgetter(1))
        ranked.reverse()
        return ranked[:top_n]
class CutoffTopCounts(TopCounts):
    """Like TopCounts, but first drops items with fewer than 20 counts."""
    def get(self):
        # iterate over a snapshot: deleting from the dict while iterating
        # its live items() view raises RuntimeError on Python 3
        for token, count in list(self.counters.items()):
            if count < 20:
                del self.counters[token]
        return super(CutoffTopCounts, self).get()
# term counter bases
class BodyTermCountersBase(SimpleCountersBase, TokenizedBody):
    """ provides an update method that counts instances of tokens in body """
    def update(self, tweet):
        for token in self.get_tokens(tweet):
            self.counters[token] += 1
class BioTermCountersBase(SimpleCountersBase, TokenizedBio):
    """ provides an update method that counts instances of tokens in bio """
    def update(self, tweet):
        for token in self.get_tokens(tweet):
            self.counters[token] += 1
class SpecifiedBodyTermCountersBase(SimpleCountersBase, TokenizedBody):
    """ base class for integer counts of specified body terms
    derived classes must define 'term_list' """
    def update(self, tweet):
        for token in self.get_tokens(tweet):
            # case/space-insensitive match against the configured terms
            for term in self.term_list:
                if term_comparator(token, term):
                    self.counters[term] += 1
class SpecifiedBioTermCountersBase(SimpleCountersBase, TokenizedBio):
    """ base class for integer counts of specified bio terms
    derived classes must define 'term_list' """
    def update(self, tweet):
        for token in self.get_tokens(tweet):
            # case/space-insensitive match against the configured terms
            for term in self.term_list:
                if term_comparator(token, term):
                    self.counters[term] += 1
# top body term parent classes
class ExactTopBodyTerms(TopCounts, BodyTermCountersBase):
    # top-20 body terms, no minimum-count cutoff
    pass
class CutoffTopBodyTerms(CutoffTopCounts, ExactTopBodyTerms):
    # top body terms, dropping low-count entries (see CutoffTopCounts)
    pass
class ExactTopBioTerms(TopCounts, BioTermCountersBase):
    # top-20 bio terms, no minimum-count cutoff
    pass
class CutoffTopBioTerms(CutoffTopCounts, ExactTopBioTerms):
    # top bio terms, dropping low-count entries (see CutoffTopCounts)
    pass
#
# simple global counters
#
class TweetCounter(SimpleCounterBase):
    # total number of tweets seen
    pass
m.append(TweetCounter)
# filter tuple: keep only tweets whose "verb" equals "share" (i.e. retweets)
retweet_filter = (["verb"], operator.eq, "share")
class ReTweetCounter(SimpleCounterBase):
    # total number of retweets
    def __init__(self):
        self.filters = [retweet_filter]
        super(ReTweetCounter, self).__init__()
m.append(ReTweetCounter)
#
# user mentions
#
class MentionCounter(TopCounts, SimpleCountersBase):
    """Counts @-mentions, keyed by the mentioned user's display name."""
    def update(self, tweet):
        for mention in tweet["twitter_entities"]["user_mentions"]:
            self.counters[mention["name"]] += 1
class CutoffTopMentions(CutoffTopCounts, MentionCounter):
    # top mentions, dropping low-count entries (see CutoffTopCounts)
    pass
class CutoffTopBioTermsUniqUser(CutoffTopCounts, SimpleCountersBase, TokenizedBio):
    """Counts bio tokens, at most once per unique author; reports cutoff top terms."""
    def __init__(self):
        # PERF FIX: a set gives O(1) membership tests; the original list made
        # each update O(number of users already seen)
        self.users = set()
        super(CutoffTopBioTermsUniqUser, self).__init__()
    def update(self, tweet):
        user_id = tweet['actor']['id']
        if user_id not in self.users:
            for token in self.get_tokens(tweet):
                self.counters[token] += 1
            self.users.add(user_id)
#
# NLP
#
class POSCounter(SimpleCounterBase):
    """Base for counters of tokens with a particular part-of-speech tag."""
    def get_pos(self, rep, pos="NN"):
        # rep: a *NLPEnrichment dict. Assumes each sentence carries parallel
        # "tokens"/"pos"/"lemmas" lists of equal length -- TODO confirm
        ans = []
        for s in rep["sentences"]:
            for i in range(len(s["tokens"])):
                if s["pos"][i] == pos:
                    ans.append(s["lemmas"][i])
        return ans
class BodyNNCounter(POSCounter):
    """Counts singular-noun (NN) lemmas across tweet bodies."""
    def update(self, tweet):
        nlp = tweet["enrichments"]["BodyNLPEnrichment"]
        self.counter += len(self.get_pos(nlp, pos="NN"))
class BodyNNPCounter(POSCounter):
    """Counts proper-noun (NNP) lemmas across tweet bodies."""
    def update(self, tweet):
        nlp = tweet["enrichments"]["BodyNLPEnrichment"]
        self.counter += len(self.get_pos(nlp, pos="NNP"))
class BodyDTCounter(POSCounter):
    """Counts determiner (DT) lemmas across tweet bodies."""
    def update(self, tweet):
        nlp = tweet["enrichments"]["BodyNLPEnrichment"]
        self.counter += len(self.get_pos(nlp, pos="DT"))
## Non-targeted term counters
class AllBodyTermsCounter(SimpleCountersBase, TokenizedBody):
    """Counts every acceptable body token (no top-N truncation)."""
    def update(self, tweet):
        for token in self.get_tokens(tweet):
            self.counters[token] += 1
class AllBioTermsUniqUserCounter(SimpleCountersBase, TokenizedBio):
    """Counts every acceptable bio token, at most once per unique author."""
    def __init__(self):
        # PERF FIX: set membership is O(1); the original list scan made each
        # update O(number of users already seen)
        self.users = set()
        super(AllBioTermsUniqUserCounter, self).__init__()
    def update(self, tweet):
        user_id = tweet['actor']['id']
        if user_id not in self.users:
            for token in self.get_tokens(tweet):
                self.counters[token] += 1
            self.users.add(user_id)
##
# generate measurements per rule, per topic model, etc.
##
# tuples of (rule tag, rule name)
# the rule name is arbitrary and will be included in the measurement class name
rules = [
    ## (RULE_TAG, RULE_NAME)
]
# specified terms to count occurrences of (used by the Specified*TermCounters)
term_list = []
##
# loop over PowerTrack rules
##
# For every (tag, name) pair in `rules`, synthesize a family of measurement
# classes restricted (via a gnip matching-rules filter) to tweets matching
# that rule, expose them at module scope, and register them in `m`.
for rule_tag, rule_name in rules:
    rule_filter = (["gnip", "matching_rules", "tag"], operator.contains, rule_tag)
    # tweet count
    cls_name = rule_name + "RuleCounter"
    cls_def = type(cls_name, (SimpleCounterBase,), {"filters": [rule_filter]})
    globals()[cls_name] = cls_def
    m.append(cls_def)
    ## retweet counts
    cls_name = rule_name + "RuleRetweetCounter"
    cls_def = type(cls_name,
                   (SimpleCounterBase,),
                   {"filters": [rule_filter, retweet_filter]}
                   )
    globals()[cls_name] = cls_def
    m.append(cls_def)
    # top mentions
    cls_name = rule_name + "RuleCutoffTopMentions"
    cls_def = type(cls_name, (CutoffTopMentions,), {"filters": [rule_filter]})
    globals()[cls_name] = cls_def
    m.append(cls_def)
    # top body terms
    cls_name = rule_name + "RuleCutoffTopBodyTerms"
    cls_def = type(cls_name, (CutoffTopBodyTerms,), {"filters": [rule_filter]})
    globals()[cls_name] = cls_def
    m.append(cls_def)
    # top bio terms per uniq user
    cls_name = rule_name + "RuleCutoffTopBioTermsUniqUser"
    cls_def = type(cls_name, (CutoffTopBioTermsUniqUser,), {"filters": [rule_filter]})
    globals()[cls_name] = cls_def
    m.append(cls_def)
    # parts of speech in body
    cls_name = rule_name + "RuleBodyNNCounter"
    cls_def = type(cls_name, (BodyNNCounter,), {"filters": [rule_filter]})
    globals()[cls_name] = cls_def
    m.append(cls_def)
    cls_name = rule_name + "RuleBodyNNPCounter"
    cls_def = type(cls_name, (BodyNNPCounter,), {"filters": [rule_filter]})
    globals()[cls_name] = cls_def
    m.append(cls_def)
    cls_name = rule_name + "RuleBodyDTCounter"
    cls_def = type(cls_name, (BodyDTCounter,), {"filters": [rule_filter]})
    globals()[cls_name] = cls_def
    m.append(cls_def)
cac8ede23743334d5a75eca494e622e4663331dd | 5,793 | py | Python | OLD/NeuralNetworks.py | WenyueDai/Notes | f4108d092741cfb7446f2e88b4fc32d3bf4be648 | [
"MIT"
] | 23 | 2019-11-20T00:50:02.000Z | 2022-02-04T07:13:43.000Z | OLD/NeuralNetworks.py | WenyueDai/Notes | f4108d092741cfb7446f2e88b4fc32d3bf4be648 | [
"MIT"
] | 2 | 2020-04-25T18:52:34.000Z | 2020-07-07T18:02:35.000Z | OLD/NeuralNetworks.py | WenyueDai/Notes | f4108d092741cfb7446f2e88b4fc32d3bf4be648 | [
"MIT"
] | 44 | 2019-04-02T17:58:20.000Z | 2022-02-04T10:11:22.000Z | import numpy , pandas
from sklearn import model_selection
from sklearn import neural_network
#--------------------------------------------------
''' Representation '''
data = pandas.read_csv('sonar.csv')
X = data[data.columns[0:60]]
Y = data[data.columns[60]]
X , Y = sklearn.utils.shuffle(X , Y , random_state = 1)
X_train , X_test , Y_train , Y_test = model_selection.train_test_split(X , Y , random_state = 0)
prediction = [[0.0260,0.0363,0.0136,0.0272,0.0214,0.0338,0.0655,0.1400,0.1843,0.2354,0.2720,0.2442,0.1665,0.0336,0.1302,0.1708,0.2177,0.3175,0.3714,0.4552,0.5700,0.7397,0.8062,0.8837,0.9432,1.0000,0.9375,0.7603,0.7123,0.8358,0.7622,0.4567,0.1715,0.1549,0.1641,0.1869,0.2655,0.1713,0.0959,0.0768,0.0847,0.2076,0.2505,0.1862,0.1439,0.1470,0.0991,0.0041,0.0154,0.0116,0.0181,0.0146,0.0129,0.0047,0.0039,0.0061,0.0040,0.0036,0.0061,0.0115]]
#--------------------------------------------------
''' Neural Network '''
ML = neural_network.MLPClassifier(hidden_layer_sizes = (100 , 100 , 100) , alpha = 3 , random_state = 0).fit(X_train , Y_train)
#ML = neural_network.MLPRegressor(hidden_layer_sizes = (10 , 100 , 1) , random_state = 0).fit(X_train , Y_train)
''' default values
hidden_layer_sizes = (100 , ) #Number of hidden layers and number of units in each layer
activation = 'relu' #Activation function: 'identity' , 'logistic' , 'tanh' , 'relu'
solver = 'adam' #The solver for weight optimization: 'lbfgs' , 'sgd' , 'adam'
alpha = 0.0001 #L2 Regularisation (lower = less regularisation)
batch_size = 'auto' #Size of minibatches for stochastic optimizers. If the solver is 'lbfgs', the classifier will not use minibatch. When set to 'auto', batch_size = min(200 , n_samples)
learning_rate = 'constant' #Learning rate schedule for weight updates 'constant' , 'invscaling' , 'adaptive'
learning_rate_init = 0.001 #The initial learning rate used. It controls the step-size in updating the weights. Only used when solver = 'sgd' or 'adam'
power_t = 0.5 #The exponent for inverse scaling learning rate. It is used in updating effective learning rate when the learning_rate is set to 'invscaling'. Only used when solver = 'sgd'
max_iter = 200 #Maximum number of iterations. The solver iterates until convergence (determined by ‘tol’) or this number of iterations. For stochastic solvers (‘sgd’, ‘adam’), note that this determines the number of epochs (how many times each data point will be used), not the number of gradient steps
shuffle = True #Whether to shuffle samples in each iteration. Only used when solver = 'sgd' or 'adam'
random_state = None #If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by np.random
tol = 0.0001 #Tolerance for the optimization. When the loss or score is not improving by at least tol for two consecutive iterations, unless learning_rate is set to ‘adaptive’, convergence is considered to be reached and training stops
verbose = False #Whether to print progress messages to stdout
warm_start = False #When set to True, reuse the solution of the previous call to fit as initialization, otherwise, just erase the previous solution
momentum = 0.9 #Momentum for gradient descent update. Should be between 0 and 1. Only used when solver=’sgd’
nesterovs_momentum = True #Whether to use Nesterov’s momentum. Only used when solver=’sgd’ and momentum > 0
early_stopping = False #Whether to use early stopping to terminate training when validation score is not improving. If set to true, it will automatically set aside 10% of training data as validation and terminate training when validation score is not improving by at least tol for two consecutive epochs. Only effective when solver=’sgd’ or ‘adam’
validation_fraction = 0.1 #The proportion of training data to set aside as validation set for early stopping. Must be between 0 and 1. Only used if early_stopping is True
beta_1 = 0.9 #Exponential decay rate for estimates of first moment vector in adam, should be in [0, 1). Only used when solver=’adam’
beta_2 = 0.999 #Exponential decay rate for estimates of second moment vector in adam, should be in [0, 1). Only used when solver=’adam’
epsilon = 0.00000001 #Value for numerical stability in adam. Only used when solver=’adam’
---------------------------------------------------------------------------------------------------------------------------------
.classes_ #Class labels for each output
.loss_ #The current loss computed with the loss function
.coefs_ #The ith element in the list represents the weight matrix corresponding to layer i
.intercepts_ #The ith element in the list represents the bias vector corresponding to layer i + 1
.n_iter_ #The number of iterations the solver has ran
.n_layers_ #Number of layers
.n_outputs_ #Number of outputs
.out_activation_ #Name of the output activation function
---------------------------------------------------------------------------------------------------------------------------------
.fit(X , Y) #Fit the model to data matrix X and target(s) y
.get_params([deep]) #Get parameters for this estimator
.predict(X) #Predict using the multi-layer perceptron classifier
.predict_log_proba(X) #Return the log of probability estimates
.predict_proba(X) #Probability estimates
.score(X , Y) #Returns the mean accuracy on the given test data and labels
.set_params(**params) #Set the parameters of this estimator
'''
#--------------------------------------------------
''' Evaluate '''
print(ML.score(X_train , Y_train))
print(ML.score(X_test , Y_test))
#--------------------------------------------------
''' Prediction '''
print(ML.predict(prediction))
| 91.952381 | 436 | 0.690489 |
1c091033e26fb5482fe2071d70fb7898f7a7bba5 | 19,186 | py | Python | bin/scp.py | TioWang/stash | a214cbc4c8a2be2cfe44977947f02bc37068329b | [
"MIT"
] | null | null | null | bin/scp.py | TioWang/stash | a214cbc4c8a2be2cfe44977947f02bc37068329b | [
"MIT"
] | null | null | null | bin/scp.py | TioWang/stash | a214cbc4c8a2be2cfe44977947f02bc37068329b | [
"MIT"
] | 1 | 2019-01-07T09:32:22.000Z | 2019-01-07T09:32:22.000Z | """
Secure Copy - Copy files between local and remote
usage:
GET
scp [user@host:dir/file] [files/dir]
PUT
scp [file/dir] [file/dir] [user@host:dir]
"""
import argparse
# scp.py
# Copyright (C) 2008 James Bardin <j.bardin@gmail.com>
"""
Created by jbardin
https://github.com/jbardin/scp.py
Utilities for sending files over ssh using the scp1 protocol.
"""
__version__ = '0.8.0'
import locale
import os
import sys
import re
from socket import timeout as SocketTimeout
from distutils.version import StrictVersion
def install_module_from_github(username, package_name, version):
    """
    Install python module from github zip files

    Downloads https://github.com/<username>/<package_name>/archive/<version>.zip,
    unzips it into $TMPDIR and moves the package directory into $STASH_ROOT/lib.
    Runs via the StaSh shell callable registered in globals() as '_stash'.
    """
    cmd_string = """
echo Installing {1} {2} ...
wget https://github.com/{0}/{1}/archive/{2}.zip -o $TMPDIR/{1}.zip
mkdir $TMPDIR/{1}_src
unzip $TMPDIR/{1}.zip -d $TMPDIR/{1}_src
rm -f $TMPDIR/{1}.zip
mv $TMPDIR/{1}_src/{1} $STASH_ROOT/lib/
rm -rf $TMPDIR/{1}_src
echo Done
""".format(username,
           package_name,
           version
           )
    globals()['_stash'](cmd_string)
import paramiko

# paramiko < 1.15 has a bug affecting scp; self-upgrade to 1.16.0 and ask
# the user to restart so the new version is importable
if StrictVersion(paramiko.__version__) < StrictVersion('1.15'):
    # Install paramiko 1.16.0 to fix a bug with version < 1.15
    install_module_from_github('paramiko', 'paramiko', 'v1.16.0')
    print 'Please restart Pythonista for changes to take full effect'
    sys.exit(0)

DEBUG = False
APP_DIR = os.environ['STASH_ROOT']
# this is quote from the shlex module, added in py3.3
_find_unsafe = re.compile(br'[^\w@%+=:,./~-]').search
def _sh_quote(s):
"""Return a shell-escaped version of the string `s`."""
if not s:
return b""
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return b"'" + s.replace(b"'", b"'\"'\"'") + b"'"
# Unicode conversion functions; assume UTF-8
def asbytes(s):
    """Turns unicode into bytes, if needed.

    Assumes UTF-8.
    """
    return s if isinstance(s, bytes) else s.encode('utf-8')
def asunicode(s):
    """Turns bytes into unicode, if needed.

    Uses UTF-8; undecodable bytes become U+FFFD.
    """
    return s.decode('utf-8', 'replace') if isinstance(s, bytes) else s
# os.path.sep is unicode on Python 3, no matter the platform
# bytes form of the path separator, used when joining/splitting bytes paths
bytes_sep = asbytes(os.path.sep)
# Unicode conversion function for Windows
# Used to convert local paths if the local machine is Windows
def asunicode_win(s):
    """Turns bytes into unicode using the locale's preferred encoding."""
    if not isinstance(s, bytes):
        return s
    return s.decode(locale.getpreferredencoding())
class SCPClient(object):
"""
An scp1 implementation, compatible with openssh scp.
Raises SCPException for all transport related errors. Local filesystem
and OS errors pass through.
Main public methods are .put and .get
The get method is controlled by the remote scp instance, and behaves
accordingly. This means that symlinks are resolved, and the transfer is
halted after too many levels of symlinks are detected.
The put method uses os.walk for recursion, and sends files accordingly.
Since scp doesn't support symlinks, we send file symlinks as the file
(matching scp behaviour), but we make no attempt at symlinked directories.
"""
    def __init__(self, transport, buff_size=16384, socket_timeout=5.0,
                 progress=None, sanitize=_sh_quote):
        """
        Create an scp1 client.

        @param transport: an existing paramiko L{Transport}
        @type transport: L{Transport}
        @param buff_size: size of the scp send buffer.
        @type buff_size: int
        @param socket_timeout: channel socket timeout in seconds
        @type socket_timeout: float
        @param progress: callback - called with (filename, size, sent) during
            transfers
        @param sanitize: function - called with filename, should return
            safe or escaped string. Uses _sh_quote by default.
        @type progress: function(string, int, int)
        """
        self.transport = transport
        self.buff_size = buff_size
        self.socket_timeout = socket_timeout
        self.channel = None          # opened lazily by put()/get()
        self.preserve_times = False  # set per-transfer by put()
        self._progress = progress
        self._recv_dir = b''         # local destination dir for get()
        self._rename = False         # True when get() targets a single new filename
        self._utime = None           # (atime, mtime) parsed from a 'T' directive
        self.sanitize = sanitize
        self._dirtimes = {}          # dir path -> times, applied after transfer
    def put(self, files, remote_path=b'.',
            recursive=False, preserve_times=False):
        """
        Transfer files to remote host.

        @param files: A single path, or a list of paths to be transfered.
            recursive must be True to transfer directories.
        @type files: string OR list of strings
        @param remote_path: path in which to receive the files on the remote
            host. defaults to '.'
        @type remote_path: str
        @param recursive: transfer files and directories recursively
        @type recursive: bool
        @param preserve_times: preserve mtime and atime of transfered files
            and directories.
        @type preserve_times: bool
        """
        self.preserve_times = preserve_times
        self.channel = self.transport.open_session()
        self.channel.settimeout(self.socket_timeout)
        # 'scp -t' puts the remote side into sink (receive) mode
        scp_command = (b'scp -t ', b'scp -r -t ')[recursive]
        self.channel.exec_command(scp_command +
                                  self.sanitize(asbytes(remote_path)))
        self._recv_confirm()
        if not isinstance(files, (list, tuple)):
            files = [files]
        if recursive:
            self._send_recursive(files)
        else:
            self._send_files(files)
        if self.channel:
            self.channel.close()
    def get(self, remote_path, local_path='',
            recursive=False, preserve_times=False):
        """
        Transfer files from remote host to localhost

        @param remote_path: path to retreive from remote host. since this is
            evaluated by scp on the remote host, shell wildcards and
            environment variables may be used.
        @type remote_path: str
        @param local_path: path in which to receive files locally
        @type local_path: str
        @param recursive: transfer files and directories recursively
        @type recursive: bool
        @param preserve_times: preserve mtime and atime of transfered files
            and directories.
        @type preserve_times: bool
        """
        if not isinstance(remote_path, (list, tuple)):
            remote_path = [remote_path]
        remote_path = [self.sanitize(asbytes(r)) for r in remote_path]
        self._recv_dir = local_path or os.getcwd()
        # a single remote source aimed at a non-directory local path means
        # "download then rename to that path"
        self._rename = (len(remote_path) == 1 and
                        not os.path.isdir(os.path.abspath(local_path)))
        if len(remote_path) > 1:
            # multiple sources require an existing local directory target
            if not os.path.exists(self._recv_dir):
                raise SCPException("Local path '%s' does not exist" %
                                   asunicode(self._recv_dir))
            elif not os.path.isdir(self._recv_dir):
                raise SCPException("Local path '%s' is not a directory" %
                                   asunicode(self._recv_dir))
        rcsv = (b'', b' -r')[recursive]
        prsv = (b'', b' -p')[preserve_times]
        self.channel = self.transport.open_session()
        self.channel.settimeout(self.socket_timeout)
        # 'scp -f' puts the remote side into source (send) mode
        self.channel.exec_command(b"scp" +
                                  rcsv +
                                  prsv +
                                  b" -f " +
                                  b' '.join(remote_path))
        self._recv_all()
        if self.channel:
            self.channel.close()
def _read_stats(self, name):
"""return just the file stats needed for scp"""
stats = os.stat(name)
mode = oct(stats.st_mode)[-4:]
size = stats.st_size
atime = int(stats.st_atime)
mtime = int(stats.st_mtime)
return (mode, size, mtime, atime)
    def _send_files(self, files):
        # each file: send "C<mode> <size> <name>\n", wait for ACK, stream
        # the raw bytes, terminate with \x00, wait for final ACK
        for name in files:
            basename = asbytes(os.path.basename(name))
            (mode, size, mtime, atime) = self._read_stats(name)
            if self.preserve_times:
                self._send_time(mtime, atime)
            file_hdl = open(name, 'rb')
            # The protocol can't handle \n in the filename.
            # Quote them as the control sequence \^J for now,
            # which is how openssh handles it.
            self.channel.sendall(("C%s %d " % (mode, size)).encode('ascii') +
                                 basename.replace(b'\n', b'\\^J') + b"\n")
            self._recv_confirm()
            file_pos = 0
            if self._progress:
                if size == 0:
                    # avoid divide-by-zero
                    self._progress(basename, 1, 1)
                else:
                    self._progress(basename, size, 0)
            buff_size = self.buff_size
            chan = self.channel
            while file_pos < size:
                chan.sendall(file_hdl.read(buff_size))
                file_pos = file_hdl.tell()
                if self._progress:
                    self._progress(basename, size, file_pos)
            # end-of-file marker
            chan.sendall('\x00')
            file_hdl.close()
            self._recv_confirm()
    def _chdir(self, from_dir, to_dir):
        # Emit the D/E directives needed to move the remote cwd from
        # from_dir to to_dir.
        # Pop until we're one level up from our next push.
        # Push *once* into to_dir.
        # This is dependent on the depth-first traversal from os.walk
        # add path.sep to each when checking the prefix, so we can use
        # path.dirname after
        common = os.path.commonprefix([from_dir + bytes_sep,
                                       to_dir + bytes_sep])
        # now take the dirname, since commonprefix is character based,
        # and we either have a seperator, or a partial name
        common = os.path.dirname(common)
        cur_dir = from_dir.rstrip(bytes_sep)
        while cur_dir != common:
            cur_dir = os.path.split(cur_dir)[0]
            self._send_popd()
        # now we're in our common base directory, so on
        self._send_pushd(to_dir)
    def _send_recursive(self, files):
        for base in files:
            if not os.path.isdir(base):
                # filename mixed into the bunch
                self._send_files([base])
                continue
            last_dir = asbytes(base)
            # depth-first walk; _chdir translates the tree moves into
            # matching pushd/popd directives on the wire
            for root, dirs, fls in os.walk(base):
                self._chdir(last_dir, asbytes(root))
                self._send_files([os.path.join(root, f) for f in fls])
                last_dir = asbytes(root)
            # back out of the directory
            for i in range(len(os.path.split(last_dir))):
                self._send_popd()
    def _send_pushd(self, directory):
        # announce a directory: "D<mode> 0 <basename>\n"
        (mode, size, mtime, atime) = self._read_stats(directory)
        basename = asbytes(os.path.basename(directory))
        if self.preserve_times:
            self._send_time(mtime, atime)
        self.channel.sendall(('D%s 0 ' % mode).encode('ascii') +
                             basename.replace(b'\n', b'\\^J') + b'\n')
        self._recv_confirm()
    def _send_popd(self):
        # 'E' directive: leave the current remote directory
        self.channel.sendall('E\n')
        self._recv_confirm()
    def _send_time(self, mtime, atime):
        # 'T' directive: mtime/atime (whole seconds) for the next file or dir
        self.channel.sendall(('T%d 0 %d 0\n' % (mtime, atime)).encode('ascii'))
        self._recv_confirm()
def _recv_confirm(self):
# read scp response
msg = b''
try:
msg = self.channel.recv(512)
except SocketTimeout:
raise SCPException('Timout waiting for scp response')
# slice off the first byte, so this compare will work in python2 and python3
if msg and msg[0:1] == b'\x00':
return
elif msg and msg[0:1] == b'\x01':
raise SCPException(asunicode(msg[1:]))
elif self.channel.recv_stderr_ready():
msg = self.channel.recv_stderr(512)
raise SCPException(asunicode(msg))
elif not msg:
raise SCPException('No response from server')
else:
raise SCPException('Invalid response from server', msg)
def _recv_all(self):
# loop over scp commands, and receive as necessary
command = {b'C': self._recv_file,
b'T': self._set_time,
b'D': self._recv_pushd,
b'E': self._recv_popd}
while not self.channel.closed:
# wait for command as long as we're open
self.channel.sendall('\x00')
msg = self.channel.recv(1024)
if not msg: # chan closed while recving
break
assert msg[-1:] == b'\n'
msg = msg[:-1]
code = msg[0:1]
try:
command[code](msg[1:])
except KeyError:
raise SCPException(str(msg).strip())
# directory times can't be set until we're done writing files
self._set_dirtimes()
def _set_time(self, cmd):
try:
times = cmd.split(b' ')
mtime = int(times[0])
atime = int(times[2]) or mtime
except:
self.channel.send(b'\x01')
raise SCPException('Bad time format')
# save for later
self._utime = (atime, mtime)
def _recv_file(self, cmd):
chan = self.channel
parts = cmd.strip().split(b' ', 2)
try:
mode = int(parts[0], 8)
size = int(parts[1])
if self._rename:
path = self._recv_dir
self._rename = False
elif os.name == 'nt':
path = os.path.join(asunicode_win(self._recv_dir),
parts[2].decode('utf-8'))
else:
path = os.path.join(asbytes(self._recv_dir),
parts[2])
except:
chan.send('\x01')
chan.close()
raise SCPException('Bad file format')
try:
file_hdl = open(path, 'wb')
except IOError as e:
chan.send(b'\x01' + str(e).encode('utf-8'))
chan.close()
raise
if self._progress:
if size == 0:
# avoid divide-by-zero
self._progress(path, 1, 1)
else:
self._progress(path, size, 0)
buff_size = self.buff_size
pos = 0
chan.send(b'\x00')
try:
while pos < size:
# we have to make sure we don't read the final byte
if size - pos <= buff_size:
buff_size = size - pos
file_hdl.write(chan.recv(buff_size))
pos = file_hdl.tell()
if self._progress:
self._progress(path, size, pos)
msg = chan.recv(512)
if msg and msg[0:1] != b'\x00':
raise SCPException(msg[1:])
except SocketTimeout:
chan.close()
raise SCPException('Error receiving, socket.timeout')
file_hdl.truncate()
try:
os.utime(path, self._utime)
self._utime = None
os.chmod(path, mode)
# should we notify the other end?
finally:
file_hdl.close()
# '\x00' confirmation sent in _recv_all
def _recv_pushd(self, cmd):
parts = cmd.split(b' ', 2)
try:
mode = int(parts[0], 8)
if self._rename:
path = self._recv_dir
self._rename = False
elif os.name == 'nt':
path = os.path.join(asunicode_win(self._recv_dir),
parts[2].decode('utf-8'))
else:
path = os.path.join(asbytes(self._recv_dir),
parts[2])
except:
self.channel.send(b'\x01')
raise SCPException('Bad directory format')
try:
if not os.path.exists(path):
os.mkdir(path, mode)
elif os.path.isdir(path):
os.chmod(path, mode)
else:
raise SCPException('%s: Not a directory' % path)
self._dirtimes[path] = (self._utime)
self._utime = None
self._recv_dir = path
except (OSError, SCPException) as e:
self.channel.send(b'\x01' + asbytes(str(e)))
raise
def _recv_popd(self, *cmd):
self._recv_dir = os.path.split(self._recv_dir)[0]
def _set_dirtimes(self):
try:
for d in self._dirtimes:
os.utime(d, self._dirtimes[d])
finally:
self._dirtimes = {}
class SCPException(Exception):
    """Raised for protocol or transfer errors in the SCP client."""
############################################
def find_ssh_keys():
    """Return paths of key files (names without a dot) under APP_DIR/.ssh."""
    key_dir = APP_DIR + '/.ssh'
    try:
        entries = os.listdir(key_dir)
    except OSError:
        # .ssh directory missing or unreadable: no keys available
        return []
    return [key_dir + '/' + name for name in entries if '.' not in name]
def parse_host(arg):
    """Split a 'user@host:path' spec into the tuple (host, user, path)."""
    user, remainder = arg.split('@')
    host, path = remainder.split(':')
    return host, user, path
def scp_callback(filename, size, sent):
    """Progress callback for SCPClient: print each file once it is complete.

    Uses the print() function form, which is valid on both Python 2 and
    Python 3; the original `print filename` statement is a syntax error
    under Python 3.
    """
    if size == sent:
        print(filename)
if __name__ == '__main__':
    # Command-line entry point: a minimal scp replacement.
    # NOTE(review): this script is Python-2-only (print statements and
    # raw_input below), while b'' literals elsewhere suggest py3 intent.
    files = []
    ap = argparse.ArgumentParser()
    ap.add_argument('--password', help='login password')
    ap.add_argument('-p', '--port', action='store', default=22, type=int,
                    help='port for ssh default: 22')
    ap.add_argument('files', nargs='*', help='file or module name')
    args = ap.parse_args()
    #scp_mode 0 put 1 get
    if '@' in args.files[0]:
        scp_mode = 1
    else:
        scp_mode = 0
    # split arguments into the remote spec (user@host:path) and local files
    for file in args.files:
        if '@' in file:
            host,user,host_path = parse_host(file)
        else:
            files.append(file)
    ssh = paramiko.SSHClient()
    #ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    key_filename = find_ssh_keys()
    if args.password is not None:
        ssh.connect(host, username=user, password=args.password, port=args.port)
    else:
        if len(key_filename) == 0: # no key file found
            password = raw_input('Enter passsword:')
            ssh.connect(host, username=user, password=password, port=args.port)
        else:
            ssh.connect(host, username=user, key_filename=key_filename, port=args.port)
    # SCPCLient takes a paramiko transport as its only argument
    scp = SCPClient(ssh.get_transport(),progress=scp_callback)
    #scp.put('stash',remote_path='stash/',recursive=True)
    if scp_mode:
        print 'Copying from server...'
        scp.get(host_path, local_path=files[0], recursive=True)
    else:
        print 'Copying to server...'
        scp.put(files, recursive=True, remote_path=host_path)
    ssh.close()
| 33.541958 | 87 | 0.559835 |
90290676810869e480a0589424483ad09f4fbc0c | 5,575 | py | Python | src/tvl/__init__.py | ashwhall/tvl | 78fa8d2908d8eac8a032273d3142ab530cee1a33 | [
"Apache-2.0"
] | 21 | 2019-02-28T02:58:21.000Z | 2021-06-02T03:36:34.000Z | src/tvl/__init__.py | ashwhall/tvl | 78fa8d2908d8eac8a032273d3142ab530cee1a33 | [
"Apache-2.0"
] | 25 | 2019-02-22T11:39:34.000Z | 2021-06-02T00:12:26.000Z | src/tvl/__init__.py | ashwhall/tvl | 78fa8d2908d8eac8a032273d3142ab530cee1a33 | [
"Apache-2.0"
] | 6 | 2019-05-31T02:06:14.000Z | 2021-07-14T06:27:17.000Z | import importlib
import os
from contextlib import contextmanager
from threading import RLock, Condition
from typing import Dict, Sequence, Iterator, Union
import torch
import tvl.backend
from tvl.backend import BackendFactory
# Explicitly set backends for particular device types.
_device_backends: Dict[str, BackendFactory] = {}
# Known backends. These will be searched if a device type does not have a backend factory
# set explicitly.
_known_backends = {
'cpu': [
# 'tvl_backends.fffr.FffrBackendFactory', # PyPI package: tvl-backends-fffr
'tvl_backends.pyav.PyAvBackendFactory', # PyPI package: tvl-backends-pyav
'tvl_backends.opencv.OpenCvBackendFactory', # PyPI package: tvl-backends-opencv
],
'cuda': [
'tvl_backends.fffr.FffrBackendFactory', # PyPI package: tvl-backends-fffr
'tvl_backends.nvdec.NvdecBackendFactory', # PyPI package: tvl-backends-nvdec
],
}
def set_backend_factory(device_type, backend_factory):
    """Set the backend factory to be used for a particular device type.

    Overrides any automatically selected backend for *device_type*
    (e.g. 'cpu' or 'cuda').
    """
    _device_backends[device_type] = backend_factory
def _auto_set_backend_factory(device_type):
    """Attempt to automatically set the backend for `device_type` if not set already."""
    if device_type in _device_backends and _device_backends[device_type] is not None:
        return
    if device_type in _known_backends:
        # try the candidates in their listed priority order
        for backend_name in _known_backends[device_type]:
            try:
                module_name, class_name = backend_name.rsplit('.', 1)
                module = importlib.import_module(module_name)
                set_backend_factory(device_type, getattr(module, class_name)())
                return
            except ImportError:
                # backend package not installed; try the next candidate
                pass
def get_backend_factory(device_type) -> BackendFactory:
    """Get the backend factory which will be used for a particular device type.

    Tries auto-detection first; raises if no factory can be found.
    """
    _auto_set_backend_factory(device_type)
    if device_type in _device_backends:
        return _device_backends[device_type]
    raise Exception(f'failed to find a backend factory for device type: {device_type}')
class VideoLoader:
    """Reads frames from a video file via the backend registered for *device*."""

    def __init__(self, filename, device: Union[torch.device, str], dtype=torch.float32, backend_opts=None):
        """Open *filename* with the backend for *device* (torch.device or its string name)."""
        if isinstance(device, str):
            device = torch.device(device)
        filename = os.fspath(filename)
        self.backend = get_backend_factory(device.type).create(filename, device, dtype, backend_opts)

    def seek(self, time_secs):
        """Seek to the given time (in seconds)."""
        self.backend.seek(time_secs)

    def seek_to_frame(self, frame_index):
        """Seek to the frame with the given index."""
        self.backend.seek_to_frame(frame_index)

    def read_frame(self):
        """Read and return the next frame (raises EOFError past the end)."""
        return self.backend.read_frame()

    def read_frames(self, n):
        """Read the next *n* frames."""
        return self.backend.read_frames(n)

    @property
    def duration(self):
        # delegated to the backend
        return self.backend.duration

    @property
    def frame_rate(self):
        return self.backend.frame_rate

    @property
    def n_frames(self):
        return self.backend.n_frames

    @property
    def width(self):
        return self.backend.width

    @property
    def height(self):
        return self.backend.height

    def remaining_frames(self):
        """Iterate sequentially over remaining frames in the video."""
        more_frames = True
        while more_frames:
            try:
                yield self.read_frame()
            except EOFError:
                # end of video: stop the generator
                more_frames = False

    def read_all_frames(self):
        """Iterate over all frames in the video."""
        self.seek_to_frame(0)
        return self.remaining_frames()

    def select_frames(self, frame_indices):
        """Iterate over frames selected by frame index.

        Frames will be yielded in ascending order of frame index, regardless of the way
        `frame_indices` is ordered. Duplicate frame indices will be ignored.

        Args:
            frame_indices (Sequence of int): Indices of frames to read.

        Returns:
            Iterator[torch.Tensor]: An iterator of image tensors.
        """
        return self.backend.select_frames(frame_indices)

    def select_frame(self, frame_index):
        """Read a single frame by frame index.

        Args:
            frame_index (int): Index of frame to read.

        Returns:
            torch.Tensor: Frame image tensor.
        """
        return self.backend.select_frame(frame_index)
class VideoLoaderPool:
    """Hands out VideoLoader instances, bounded by per-device slot counts."""

    def __init__(self, slots: Dict[str, int]):
        # slots maps a device string to the number of loaders that may be
        # open on that device at the same time
        self.slots = slots
        self.condition = Condition(RLock())

    def peek_slot(self):
        """Return a device that has a free slot, or None if all are busy."""
        for device, available in self.slots.items():
            if available > 0:
                return device
        return None

    def remove_slot(self):
        """Claim a free slot and return its device; raise if none is free."""
        device = self.peek_slot()
        if device is None:
            raise Exception('No slots available')
        self.slots[device] -= 1
        return device

    def add_slots(self, device, n=1):
        """Return *n* slots to *device* (creating the entry if needed)."""
        available = self.slots.get(device, 0)
        self.slots[device] = available + n

    @contextmanager
    def loader(self, filename, dtype=torch.float32, backend_opts_by_device=None):
        """Block until a slot is free, yield a VideoLoader on that device,
        and release the slot (notifying waiters) when the block exits."""
        with self.condition:
            while self.peek_slot() is None:
                self.condition.wait()
            device = self.remove_slot()
        if backend_opts_by_device is None:
            backend_opts_by_device = {}
        try:
            yield VideoLoader(filename, device, dtype, backend_opts_by_device.get(device, None))
        finally:
            with self.condition:
                self.add_slots(device, 1)
                self.condition.notify()
| 32.04023 | 107 | 0.655426 |
f5ebae3e774ce9b3b010d4075840cd3a679001a7 | 596 | py | Python | oauth/migrations/0002_oauthuser_user.py | Stuff7/stuff7 | c4210ad99c7d745ded3742a645cc9173243946b1 | [
"MIT"
] | null | null | null | oauth/migrations/0002_oauthuser_user.py | Stuff7/stuff7 | c4210ad99c7d745ded3742a645cc9173243946b1 | [
"MIT"
] | null | null | null | oauth/migrations/0002_oauthuser_user.py | Stuff7/stuff7 | c4210ad99c7d745ded3742a645cc9173243946b1 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-05-08 04:14
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the OAuthUser.user foreign key linking OAuth identities to the
    # configured Django user model (nullable, cascading on delete).

    initial = True

    dependencies = [
        ('oauth', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.AddField(
            model_name='oauthuser',
            name='user',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| 24.833333 | 121 | 0.666107 |
41c5b7389776ce86268585878f3a4145eca58429 | 1,723 | py | Python | vint/linting/policy/prohibit_unused_variable.py | tmsanrinsha/vint | 8c34196252b43d7361d0f58cb78cf2d3e4e4fbd0 | [
"MIT"
] | 2 | 2021-06-15T15:07:28.000Z | 2021-10-05T12:23:23.000Z | vint/linting/policy/prohibit_unused_variable.py | tmsanrinsha/vint | 8c34196252b43d7361d0f58cb78cf2d3e4e4fbd0 | [
"MIT"
] | null | null | null | vint/linting/policy/prohibit_unused_variable.py | tmsanrinsha/vint | 8c34196252b43d7361d0f58cb78cf2d3e4e4fbd0 | [
"MIT"
] | null | null | null | from vint.ast.node_type import NodeType
from vint.linting.level import Level
from vint.linting.policy.abstract_policy import AbstractPolicy
from vint.linting.policy_registry import register_policy
from vint.ast.plugin.scope_plugin import ScopeVisibility
@register_policy
class ProhibitUnusedVariable(AbstractPolicy):
    """Warn about declared variables that are never used (see :help E738)."""

    def __init__(self):
        super(ProhibitUnusedVariable, self).__init__()
        self.reference = ':help E738'
        self.level = Level.WARNING

    def listen_node_types(self):
        """This policy inspects identifier nodes only."""
        return [NodeType.IDENTIFIER]

    def is_valid(self, identifier, lint_context):
        """ Whether the variables are used.
        This policy cannot determine the following node types:
        - Global identifier like nodes
          - ENV
          - REG
          - OPTION
        - Dynamic variables
          - CURLYNAME
          - SLICE
          - DOT
          - SUBSCRIPT
        """
        scope_plugin = lint_context['plugins']['scope']
        is_unused = scope_plugin.is_unused_declarative_identifier(identifier)
        visibility = scope_plugin.get_objective_scope_visibility(identifier)
        # global-like, builtin and unanalyzable identifiers are exempt
        is_exempt = visibility in (ScopeVisibility.GLOBAL_LIKE,
                                   ScopeVisibility.BUILTIN,
                                   ScopeVisibility.UNANALYZABLE)
        if is_unused and not is_exempt:
            self._make_description(identifier)
            return False
        return True

    def _make_description(self, identifier):
        template = 'Unused variable: {var_name}'
        self.description = template.format(var_name=identifier['value'])
30dae15177299c887d63f2666589df5f03778960 | 1,309 | py | Python | mlpf/timing/evaluate_timing.py | edcuba/particleflow | 1c6189f499ae4807ecca42d459e363fd5b3a12ab | [
"Apache-2.0"
] | 12 | 2019-09-29T21:24:18.000Z | 2022-02-22T13:20:38.000Z | mlpf/timing/evaluate_timing.py | edcuba/particleflow | 1c6189f499ae4807ecca42d459e363fd5b3a12ab | [
"Apache-2.0"
] | 39 | 2019-10-03T18:21:01.000Z | 2021-12-07T11:58:57.000Z | mlpf/timing/evaluate_timing.py | edcuba/particleflow | 1c6189f499ae4807ecca42d459e363fd5b3a12ab | [
"Apache-2.0"
] | 19 | 2019-09-29T21:24:27.000Z | 2022-03-31T12:17:04.000Z | #!/use/bin/env python3
#on manivald: singularity exec /home/software/singularity/base.simg:latest python3 test/evaluate_timing.py
import torch
import torch_geometric
from torch_geometric.data import Data, DataLoader, DataListLoader, Batch
from graph_data import PFGraphDataset, elem_to_id, class_to_id, class_labels
dataset_path = "/home/joosep/particleflow/data/TTbar_14TeV_TuneCUETP8M1_cfi"
#Goal: measure the evaluation cost of the MLPF model as a function of input multiplicity
if __name__ == "__main__":
    # Iterate over the dataset and report the input multiplicity
    # (number of input elements) of each event.
    full_dataset = PFGraphDataset(dataset_path)
    #events in bunches of 5
    for data_items in full_dataset:
        #loop over each event in the bunch
        for data in data_items:
            #get the input matrix
            input_matrix = data.x
            print("input_matrix.shape=", input_matrix.shape)
            #this is the number of input elements in the event
            input_multiplicity = input_matrix.shape[0]
            #task 1: plot the distribution of the input multiplicities across the events using numpy.histogram and matplotlib.histogram
            #task 2: save the `data` object using torch.save(data, "data/TTbar_14TeV_TuneCUETP8M1_cfi/bin_i/file_j.pt") to
            #subfolders based on the input multiplicity binning
d1083873d3b35dd8541554001f5416252f389d4d | 21,978 | py | Python | py_src/lingtree/conll.py | yv/lingtree | 7356baa6792db0f88aa7b4f0ab4c2e32907741d6 | [
"MIT"
] | 1 | 2021-08-06T14:16:42.000Z | 2021-08-06T14:16:42.000Z | py_src/lingtree/conll.py | yv/lingtree | 7356baa6792db0f88aa7b4f0ab4c2e32907741d6 | [
"MIT"
] | null | null | null | py_src/lingtree/conll.py | yv/lingtree | 7356baa6792db0f88aa7b4f0ab4c2e32907741d6 | [
"MIT"
] | 1 | 2021-08-06T14:16:44.000Z | 2021-08-06T14:16:44.000Z | # Copyright 2008-2020 Yannick Versley
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
# associated documentation files (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
# TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import print_function
import sys
import re
import codecs
import optparse
from gzip import GzipFile
from six.moves import zip_longest
from .tree import Tree, TerminalNode
from .folds import do_recombine
def detect_encoding(fname):
    """Guess the character encoding of *fname* from its first 100k bytes.

    Uses cchardet when available; otherwise falls back to trying UTF-8
    and assuming ISO-8859-15 on failure. Transparently reads .gz files.
    """
    try:
        from cchardet import detect
        method = 'cchardet'
    except ImportError:
        # crude fallback: UTF-8 if the sample decodes, Latin-9 otherwise
        def detect(s):
            try:
                s.decode('UTF-8')
                return {'encoding':'UTF-8'}
            except UnicodeDecodeError:
                return {'encoding':'ISO-8859-15'}
        method = 'stupid [no cchardet detected]'
    if fname.endswith('.gz'):
        open_fn = GzipFile
    else:
        open_fn = open
    f = open_fn(fname, 'rb')
    data = f.read(100000)
    f.close()
    val = detect(data)
    encoding = val['encoding']
    print("%s: %s detected encoding %s"%(
        fname, method, encoding), file=sys.stderr)
    return encoding
# codecs whose canonical names are treated as interchangeable Latin-1 variants
latin1_encodings = {'iso8859-1', 'iso8859-15', 'cp1252'}


def encoding_equivalent(enc1, enc2):
    """Return True if *enc1* and *enc2* name the same codec, treating the
    Latin-1 family (Latin-1/Latin-9/CP1252) as mutually equivalent.
    A value of None is only equivalent to None."""
    if enc1 is None or enc2 is None:
        return enc1 is None and enc2 is None
    name1 = codecs.lookup(enc1).name
    name2 = codecs.lookup(enc2).name
    if name1 == name2:
        return True
    return name1 in latin1_encodings and name2 in latin1_encodings
sno_re = re.compile('<s ([^ >]*)>(.*)</s>')
def _guess_conll_fmt(fname, open_fn):
    """Guess the CoNLL dialect from the column count of the first line."""
    with open_fn(fname, "rb") as f_guess:
        first_line = f_guess.readline()
    num_fields = len(first_line.strip().split())
    if num_fields in (10, 8, 6):
        # 10 = full CoNLL-X; 8/6 = CoNLL-X with trailing columns missing
        return 'conll06'
    elif num_fields == 14:
        return 'conll09'
    print("Cannot guess format of %s (%d columns)"%(
        fname, num_fields), file=sys.stderr)
    raise ValueError()


def _conll_column_indices(use_fmt, use_pdep):
    """Return (cat, morph, lemma, governor, label) column indices for a
    CoNLL dialect; raise ValueError for an unknown format name."""
    if use_fmt == 'conll06':
        if use_pdep:
            return (4, 5, 2, 8, 9)
        return (4, 5, 2, 6, 7)
    elif use_fmt == 'conll09':
        if use_pdep:
            return (5, 7, 3, 9, 11)
        return (5, 7, 3, 8, 10)
    elif use_fmt == 'conll09g':
        if use_pdep:
            return (4, 6, 2, 9, 11)
        return (4, 6, 2, 8, 10)
    print("Unknown format: %s"%(use_fmt,), file=sys.stderr)
    raise ValueError()


def _make_conll_sentence(lines, old_line_no, col_idx, encode_fn,
                         result_encoding, f_sno, sno_words):
    """Build one dependency Tree from the token lines of a single sentence.

    *old_line_no* is the file line number before the sentence (used in
    error messages); *col_idx* is the tuple from _conll_column_indices.
    """
    cat_idx, mor_idx, lem_idx, gov_idx, lbl_idx = col_idx
    t = Tree()
    t.encoding = result_encoding
    nodes = []
    for i, item in enumerate(lines):
        if len(item) < lem_idx:
            print("[line %d] conll format error: not enough fields in %s"%(
                old_line_no+i, item,), file=sys.stderr)
            break
        n = TerminalNode(encode_fn(item[cat_idx]), encode_fn(item[1]))
        n.start = i
        n.end = i+1
        n.morph = encode_fn(item[mor_idx])
        n.lemma = encode_fn(item[lem_idx])
        nodes.append(n)
    for n, item in zip(nodes, lines):
        # pad short rows so the label column is always addressable
        if len(item) < lbl_idx:
            item += ['_']*(lbl_idx-len(item))
        try:
            parent_id = item[gov_idx]
        except IndexError:
            parent_id = '0'
        if parent_id in ['0', '_']:
            # 0 / '_' denote the (virtual) root
            n.syn_parent = None
        else:
            try:
                n.syn_parent = nodes[int(parent_id)-1]
            except (ValueError, IndexError):
                print("[line %d] conll format error: %s is not a node reference"%(
                    old_line_no+n.start, parent_id,), file=sys.stderr)
                n.syn_parent = None
        try:
            n.syn_label = encode_fn(item[lbl_idx])
        except IndexError:
            n.syn_label = '_'
    t.terminals = nodes
    t.roots = nodes[:]
    if f_sno:
        # attach sentence number and surface words from the Charniak-style file
        l_sno = f_sno.readline()
        m = sno_re.match(l_sno)
        assert m
        t.sent_no = m.group(1)
        words = m.group(2).strip().split(' ')
        assert len(words) == len(t.terminals)
        if sno_words:
            for w, n in zip(words, t.terminals):
                n.word = w
        else:
            for w, n in zip(words, t.terminals):
                n.sno_word = w
    return t


def read_conll(fname, encoding=None, use_fmt=None,
               tree_encoding=None,
               f_sno=None, sno_words=True,
               use_pdep=False,
               error_treatment='strict'):
    '''
    reads in a conll file, possibly autodetecting
    the format, and returns a sequence of trees.

    If f_sno is a file (in Charniak-like format with
    <s ID> words words words </s>
    the sentence numbers are attached to the trees returned
    and the words are stored in the orig_word attribute.

    With the use_pdep argument, the predicted (instead of gold)
    dependencies would be read.

    The original implementation duplicated the tree-building code for
    the EOF case with diverging behavior (no field-count guard, no
    IndexError handling on governor ids, and a missing encode_fn on the
    dependency label); both paths now share _make_conll_sentence, using
    the more robust variant.
    '''
    if tree_encoding is None:
        encode_fn = lambda x: x
    else:
        encode_fn = lambda x: x.encode(tree_encoding)
    if encoding is None:
        encoding = detect_encoding(fname)
    if fname.endswith('.gz'):
        open_fn = GzipFile
    else:
        open_fn = open
    if use_fmt is None:
        use_fmt = _guess_conll_fmt(fname, open_fn)
    col_idx = _conll_column_indices(use_fmt, use_pdep)
    # encoding recorded on the produced trees
    result_encoding = encoding if tree_encoding is None else tree_encoding
    reader_fn = codecs.getreader(encoding)
    lines = []
    line_no = old_line_no = 0
    for l in reader_fn(open_fn(fname, "rb"), error_treatment):
        line = l.strip().split()
        line_no += 1
        if not line:
            # like the original code, a tree is yielded for *every* blank
            # line, even when the sentence in front of it is empty
            yield _make_conll_sentence(lines, old_line_no, col_idx, encode_fn,
                                       result_encoding, f_sno, sno_words)
            lines = []
            old_line_no = line_no
        else:
            lines.append(line)
    if lines:
        # file did not end with a blank line: flush the last sentence
        yield _make_conll_sentence(lines, old_line_no, col_idx, encode_fn,
                                   result_encoding, f_sno, sno_words)
def _make_tabular_sentence(lines, old_line_no, att_columns,
                           cat_idx, word_idx, encode_fn, result_encoding):
    """Build one Tree (terminals only) from the token lines of a sentence."""
    t = Tree()
    t.encoding = result_encoding
    nodes = []
    for i, item in enumerate(lines):
        if len(item) < len(att_columns):
            print("[line %d] tabular format error: not enough fields in %s"%(
                old_line_no+i, item,), file=sys.stderr)
        n = TerminalNode(encode_fn(item[cat_idx]), encode_fn(item[word_idx]))
        n.start = i
        n.end = i+1
        # copy the named columns onto the node as attributes
        for j, att_name in enumerate(att_columns):
            if att_name is not None:
                setattr(n, att_name, encode_fn(item[j]))
        nodes.append(n)
    t.terminals = nodes
    t.roots = nodes[:]
    return t


def read_tabular(fname, att_columns, encoding=None,
                 tree_encoding=None,
                 error_treatment='strict'):
    '''
    reads a (generic) tabular format into trees.

    :param att_columns: a list of property names, or None if the column
        does not correspond to a property

    The original EOF branch duplicated the tree-building code with two
    defects: it constructed a throw-away Tree() twice and used item[1]
    instead of item[word_idx] for the word; both paths now share
    _make_tabular_sentence.
    '''
    cat_idx = att_columns.index('cat')
    word_idx = att_columns.index('word')
    if tree_encoding is None:
        encode_fn = lambda x: x
    else:
        encode_fn = lambda x: x.encode(tree_encoding)
    if encoding is None:
        encoding = detect_encoding(fname)
    # encoding recorded on the produced trees
    result_encoding = encoding if tree_encoding is None else tree_encoding
    if fname.endswith('.gz'):
        open_fn = GzipFile
    else:
        open_fn = open
    reader_fn = codecs.getreader(encoding)
    lines = []
    line_no = old_line_no = 0
    for l in reader_fn(open_fn(fname, "rb"), error_treatment):
        line = l.strip().split()
        line_no += 1
        if not line:
            # like the original code, a tree is yielded for *every* blank
            # line, even when the sentence in front of it is empty
            yield _make_tabular_sentence(lines, old_line_no, att_columns,
                                         cat_idx, word_idx, encode_fn,
                                         result_encoding)
            lines = []
            old_line_no = line_no
        else:
            lines.append(line)
    if lines:
        # file did not end with a blank line: flush the last sentence
        yield _make_tabular_sentence(lines, old_line_no, att_columns,
                                     cat_idx, word_idx, encode_fn,
                                     result_encoding)
def from_unicode(s, encoding):
    """Encode *s* with *encoding* unless it already is a byte string."""
    if isinstance(s, bytes):
        return s
    return s.encode(encoding)
class TabularWriter(object):
    '''
    writes dependency trees in CoNLL format. In contrast to
    pytree_totext, this can also write the dependency column
    and not just text attributes...
    '''

    def __init__(self, f, att_columns, dep_idx=None,
                 id_idx=0):
        # f: output file; att_columns: node attribute per column (None = '_');
        # dep_idx: column of the governor id (label goes to dep_idx+1);
        # id_idx: column that receives the 1-based token id
        self.f = f
        self.att_columns = att_columns
        n_cols = len(att_columns)
        if dep_idx is not None and dep_idx >= n_cols:
            n_cols = dep_idx + 1
        self.dep_idx = dep_idx
        self.id_idx = id_idx
        self.n_cols = n_cols

    def write_tree(self, t):
        '''
        writes one tree in the selected format
        '''
        encoding = t.encoding
        nodes = t.terminals
        cols = []
        id_idx = self.id_idx
        dep_idx = self.dep_idx
        att_columns = self.att_columns
        f = self.f
        # build the output column by column, then transpose into rows
        for i in range(self.n_cols):
            if i == id_idx:
                # token ids are 1-based
                cols.append([str(j+1) for j in range(len(nodes))])
            elif i == dep_idx:
                # governor column: 0 denotes the (virtual) root
                parents = []
                for n in nodes:
                    if n.syn_parent is None:
                        parents.append('0')
                    else:
                        parents.append(str(n.syn_parent.start + 1))
                cols.append(parents)
            elif i == dep_idx + 1:
                # NOTE(review): with dep_idx=None this raises TypeError
                # (None + 1) — confirm dep_idx is always set by callers
                dep_labels = [from_unicode(getattr(n, 'syn_label', '_'), encoding)
                              for n in nodes]
                cols.append(dep_labels)
            else:
                att = att_columns[i]
                if att is None:
                    cols.append(['_' for n in nodes])
                else:
                    cols.append([from_unicode(getattr(n, att, '_'), encoding) for n in nodes])
        for row in zip(*cols):
            try:
                print('\t'.join(row), file=f)
            except:
                # dump the offending row for diagnosis, then re-raise
                print(row, file=sys.stderr)
                raise
        print(file=f)

    def write_trees(self, trees):
        '''
        writes a number of trees in the selected format
        '''
        for t in trees:
            self.write_tree(t)
def read_generic(fname, encoding=None,
                 tree_encoding=None,
                 error_treatment='strict'):
    '''
    reads any generic tabular format into a sequence of tables
    (one list of field-lists per blank-line-separated sentence).
    '''
    lines = []
    if tree_encoding is None:
        encode_fn = lambda x: x
    else:
        encode_fn = lambda x: x.encode(tree_encoding)
    if encoding is None:
        encoding = detect_encoding(fname)
    if encoding_equivalent(encoding, tree_encoding):
        # encodings agree: skip the decode/encode round trip entirely
        encode_fn = lambda x: x
        reader_fn = lambda x,y : x
    else:
        reader_fn = codecs.getreader(encoding)
    if fname.endswith('.gz'):
        open_fn = GzipFile
    else:
        open_fn = open
    # NOTE(review): old_line_no is tracked but never updated/used here
    line_no = old_line_no = 0
    for l in reader_fn(open_fn(fname, "rb"), error_treatment):
        line = [encode_fn(x) for x in l.strip().split()]
        line_no += 1
        if not line:
            if lines:
                yield lines
                lines = []
        else:
            lines.append(line)
    if lines:
        # file did not end with a blank line: flush the last table
        yield lines
def write_generic_single(f, lines):
    """Write one sentence as tab-separated rows, followed by a blank line."""
    body = ''.join('\t'.join(fields) + '\n' for fields in lines)
    f.write(body + '\n')
def do_merge(fname_orig, fname_merge, preproc_atts,
             cpos_map=None, use_words=False, fmt_orig=None):
    # Merge token attributes (tags/lemmas/morph) from the preprocessed
    # file into the dependency trees of the original CoNLL file and
    # yield the updated trees.  Exits the process on sentence-count or
    # sentence-length mismatches.
    #print("fmt_orig:", fmt_orig, file=sys.stderr)
    trees_orig = read_conll(fname_orig, use_fmt=fmt_orig)
    trees_merge = read_tabular(fname_merge, preproc_atts)
    # cpos has to be derived from cat if the preproc file has no cpos column
    need_cpos = not ('cpos' in preproc_atts)
    for t_orig, t_merge in zip_longest(trees_orig, trees_merge):
        if t_orig is None:
            words2 = [n.word for n in t_merge.terminals]
            print("more trees in merge: %s"%(
                words2,), file=sys.stderr)
            sys.exit(1)
        elif t_merge is None:
            words1 = [n.word for n in t_orig.terminals]
            print("more trees in original: %s"%(
                words1,), file=sys.stderr)
            sys.exit(1)
        words1 = [n.word for n in t_orig.terminals]
        words2 = [n.word for n in t_merge.terminals]
        if len(words1) != len(words2):
            print("Sequences do not match: %s vs %s"%(
                words1, words2), file=sys.stderr)
            sys.exit(1)
        elif words1 != words2:
            # same length but different tokens: warn and merge anyway
            print("Sequences differ: %s vs %s"%(
                words1, words2), file=sys.stderr)
        for n, n_merge in zip(t_orig.terminals, t_merge.terminals):
            for att in preproc_atts:
                if att is not None:
                    # the word column is only copied when use_words is set
                    if att != 'word' or use_words==True:
                        setattr(n, att, getattr(n_merge, att))
            # assign cpos if a pos map is given
            if cpos_map is not None:
                n.cpos = cpos_map.get(n.cat, n.cat)
            elif need_cpos:
                n.cpos = n.cat
        yield t_orig
def make_conllx_writer(fname):
    '''
    creates a TabularWriter instance suitable for writing CoNLL-X format.
    '''
    # NOTE(review): the file handle is owned by the returned writer and is
    # never explicitly closed anywhere — confirm this is intentional
    f = open(fname, 'w')
    w = TabularWriter(f,
                      [None, 'word', 'lemma', 'cpos', 'cat', 'morph',
                       None, 'syn_label', None, None],
                      dep_idx=6)
    return w
def read_mapping(fname, encoding='UTF-8'):
    """Read a whitespace-separated two-column file into a dict (col1 -> col2)."""
    mapping = {}
    with open(fname, 'r', encoding=encoding) as f_in:
        for raw_line in f_in:
            fields = raw_line.strip().split()
            mapping[fields[0]] = fields[1]
    return mapping
# command-line interface of the merge tool (see merge_main below)
oparse_merge = optparse.OptionParser(
    usage="usage: %prog [options] src.conll preproc.txt dest.conll")
oparse_merge.add_option('-F', dest='preproc_fmt',
                        choices=['plain', 'txt',
                                 'conllx', 'conll09', 'conll09g'],
                        default='plain')
oparse_merge.add_option('--fmt-orig', dest='fmt_orig',
                        choices=['conllx', 'conll09', 'conll09g'],
                        default=None)
oparse_merge.add_option('--unimap', dest='cpos_map')
oparse_merge.add_option('--use-words', dest='use_words',
                        default=False, action='store_true')

# column layouts of the supported preprocessing formats
# (None marks a column that does not map to a node attribute)
PREPROC_COLUMNS = {
    'plain': [None, 'word', 'lemma', 'cat', 'morph'],
    'txt': ['word', 'cat'],
    'conll09': [None, 'word', None, 'lemma', None, 'cat', None, 'morph'],
    'conll09g': [None, 'word', 'lemma', None, 'cat', None, 'morph'],
    'conllx': [None, 'word', 'lemma', 'cpos', 'cat', 'morph']
}
def merge_main(argv=None):
    """Entry point of the merge tool: merge a preprocessed tag/lemma file
    into src.conll and write the result as CoNLL-X to dest.conll."""
    opts, args = oparse_merge.parse_args(argv)
    if len(args) != 3:
        oparse_merge.print_help()
        sys.exit(1)
    preproc_atts = PREPROC_COLUMNS[opts.preproc_fmt]
    if opts.cpos_map is None:
        cpos_map = None
    else:
        # optional mapping from fine-grained to coarse POS tags
        cpos_map = read_mapping(opts.cpos_map)
    trees = do_merge(args[0], args[1], preproc_atts,
                     fmt_orig=opts.fmt_orig,
                     cpos_map=cpos_map)
    # TODO: heuristic fix for tag assignment?
    # TODO: add word-specific part of uniset features
    # TODO: add generic filtering mechanism
    w = make_conllx_writer(args[2])
    w.write_trees(trees)
def merge_trees_generic(trees, fname_merge,
                        fmt_preproc='conllx',
                        fmt_orig=None,
                        use_words=False,
                        cpos_map=None):
    # Like do_merge, but takes an in-memory sequence of trees instead of a
    # CoNLL file name.  Exits the process on sentence-count or length
    # mismatches; differing tokens of equal length are silently accepted.
    # NOTE(review): fmt_orig is accepted but never used here
    preproc_atts = PREPROC_COLUMNS[fmt_preproc]
    trees_merge = read_tabular(fname_merge, preproc_atts)
    for t_orig, t_merge in zip_longest(trees, trees_merge):
        if t_orig is None:
            words2 = [n.word for n in t_merge.terminals]
            print("more trees in merge: %s"%(
                words2,), file=sys.stderr)
            sys.exit(1)
        elif t_merge is None:
            words1 = [n.word for n in t_orig.terminals]
            print("more trees in original: %s"%(
                words1,), file=sys.stderr)
            sys.exit(1)
        words1 = [n.word for n in t_orig.terminals]
        words2 = [n.word for n in t_merge.terminals]
        if len(words1) != len(words2):
            print("Sequences do not match: %s vs %s"%(
                words1, words2), file=sys.stderr)
            sys.exit(1)
        elif words1 != words2:
            pass
            #print >>sys.stderr, "Sequences differ: %s vs %s"%(
            #    words1, words2)
        for n, n_merge in zip(t_orig.terminals, t_merge.terminals):
            for att in preproc_atts:
                if att is not None:
                    if att != 'word' or use_words==True:
                        setattr(n, att, getattr(n_merge, att))
            # assign cpos if a pos map is given
            if cpos_map is not None:
                n.cpos = cpos_map.get(n.cat, n.cat)
            elif hasattr(n, 'cpos'):
                # NOTE(review): this overwrites an *existing* cpos with cat;
                # do_merge only sets cpos when it is missing — confirm which
                # behavior is intended
                n.cpos = n.cat
        yield t_orig
# command-line interface of the fold-recombination tool
oparse_recombine = optparse.OptionParser(
    usage="usage: %prog [options] N_FOLDS TEMPLATE dest.conll")


def recombine_main(argv=None):
    """Recombine N_FOLDS per-fold output files (TEMPLATE must contain
    %(fold)s) into a single file in document order."""
    opts, args = oparse_recombine.parse_args(argv)
    if len(args) != 3:
        oparse_recombine.print_help()
        sys.exit(1)
    n_folds = int(args[0])
    template = args[1]
    assert '%(fold)s' in template
    tree_seqs = []
    for i in range(n_folds):
        # folds are numbered starting from 1
        fname = template%{'fold': i+1}
        tree_seqs.append(read_generic(fname))
    trees = do_recombine(tree_seqs)
    with open(args[2], 'w', encoding='UTF-8') as f_out:
        for lines in trees:
            write_generic_single(f_out, lines)
| 35.563107 | 115 | 0.532487 |
c45f7bc5e118e5adf0ba308fa59bf4453d93c20c | 9,634 | py | Python | test/python/algorithms/test_grover.py | WiFisunset/qiskit-terra | e122c9c0cef78d1ba4ac57442cd03fb0363ba93c | [
"Apache-2.0"
] | 1 | 2021-06-09T11:22:21.000Z | 2021-06-09T11:22:21.000Z | test/python/algorithms/test_grover.py | WiFisunset/qiskit-terra | e122c9c0cef78d1ba4ac57442cd03fb0363ba93c | [
"Apache-2.0"
] | 1 | 2022-02-07T21:30:36.000Z | 2022-02-07T21:30:36.000Z | test/python/algorithms/test_grover.py | WiFisunset/qiskit-terra | e122c9c0cef78d1ba4ac57442cd03fb0363ba93c | [
"Apache-2.0"
] | null | null | null | # This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Test Grover's algorithm."""
import unittest
from test.python.algorithms import QiskitAlgorithmsTestCase
import itertools
import numpy as np
from ddt import ddt, data
from qiskit import BasicAer, QuantumCircuit
from qiskit.utils import QuantumInstance
from qiskit.algorithms import Grover, AmplificationProblem
from qiskit.circuit.library import GroverOperator, PhaseOracle
from qiskit.quantum_info import Operator, Statevector
@ddt
class TestAmplificationProblem(QiskitAlgorithmsTestCase):
    """Test the amplification problem.

    Data-driven (ddt): each @data decorator runs its test once per listed
    variant string.
    """
    def setUp(self):
        super().setUp()
        # A two-qubit CZ oracle marks the |11> state.
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        self._expected_grover_op = GroverOperator(oracle=oracle)
    @data("oracle_only", "oracle_and_stateprep")
    def test_groverop_getter(self, kind):
        """Test the default construction of the Grover operator."""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        if kind == "oracle_only":
            problem = AmplificationProblem(oracle, is_good_state=["11"])
            expected = GroverOperator(oracle)
        else:
            # Variant with an explicit (non-uniform) state preparation.
            stateprep = QuantumCircuit(2)
            stateprep.ry(0.2, [0, 1])
            problem = AmplificationProblem(
                oracle, state_preparation=stateprep, is_good_state=["11"]
            )
            expected = GroverOperator(oracle, stateprep)
        # Compare as Operators so circuit-level differences don't matter.
        self.assertEqual(Operator(expected), Operator(problem.grover_operator))
    @data("list_str", "list_int", "statevector", "callable")
    def test_is_good_state(self, kind):
        """Test is_good_state works on different input types."""
        if kind == "list_str":
            is_good_state = ["01", "11"]
        elif kind == "list_int":
            is_good_state = [1]  # means bitstr[1] == '1'
        elif kind == "statevector":
            # Uniform superposition over the two good states |01> and |11>.
            is_good_state = Statevector(np.array([0, 1, 0, 1]) / np.sqrt(2))
        else:
            def is_good_state(bitstr):
                # same as ``bitstr in ['01', '11']``
                return bitstr[1] == "1"
        # All four 2-bit strings: '00', '01', '10', '11'.
        possible_states = [
            "".join(list(map(str, item))) for item in itertools.product([0, 1], repeat=2)
        ]
        oracle = QuantumCircuit(2)
        problem = AmplificationProblem(oracle, is_good_state=is_good_state)
        expected = [state in ["01", "11"] for state in possible_states]
        # pylint: disable=not-callable
        actual = [problem.is_good_state(state) for state in possible_states]
        self.assertListEqual(expected, actual)
class TestGrover(QiskitAlgorithmsTestCase):
    """Test for the functionality of Grover.

    Uses seeded BasicAer simulators so measurement outcomes are
    deterministic across runs.
    """
    def setUp(self):
        super().setUp()
        # Exact-amplitude backend for analytic checks.
        self.statevector = QuantumInstance(
            BasicAer.get_backend("statevector_simulator"), seed_simulator=12, seed_transpiler=32
        )
        # Shot-based backend for sampling checks.
        self.qasm = QuantumInstance(
            BasicAer.get_backend("qasm_simulator"), seed_simulator=12, seed_transpiler=32
        )
    def test_implicit_phase_oracle_is_good_state(self):
        """Test implicit default for is_good_state with PhaseOracle."""
        grover = Grover(iterations=2, quantum_instance=self.statevector)
        oracle = PhaseOracle("x | x")
        problem = AmplificationProblem(oracle)
        result = grover.amplify(problem)
        self.assertEqual(result.top_measurement, "0")
    def test_fixed_iterations(self):
        """Test the iterations argument"""
        grover = Grover(iterations=2, quantum_instance=self.statevector)
        problem = AmplificationProblem(Statevector.from_label("111"), is_good_state=["111"])
        result = grover.amplify(problem)
        self.assertEqual(result.top_measurement, "111")
    def test_multiple_iterations(self):
        """Test the algorithm for a list of iterations."""
        grover = Grover(iterations=[1, 2, 3], quantum_instance=self.statevector)
        problem = AmplificationProblem(Statevector.from_label("111"), is_good_state=["111"])
        result = grover.amplify(problem)
        self.assertEqual(result.top_measurement, "111")
    def test_iterator(self):
        """Test running the algorithm on an iterator."""
        # step-function iterator: yields 1, 1, 1, 2, 2, 2, 3, ...
        def iterator():
            wait, value, count = 3, 1, 0
            while True:
                yield value
                count += 1
                if count % wait == 0:
                    value += 1
        grover = Grover(iterations=iterator(), quantum_instance=self.statevector)
        problem = AmplificationProblem(Statevector.from_label("111"), is_good_state=["111"])
        result = grover.amplify(problem)
        self.assertEqual(result.top_measurement, "111")
    def test_growth_rate(self):
        """Test running the algorithm on a growth rate"""
        grover = Grover(growth_rate=8 / 7, quantum_instance=self.statevector)
        problem = AmplificationProblem(Statevector.from_label("111"), is_good_state=["111"])
        result = grover.amplify(problem)
        self.assertEqual(result.top_measurement, "111")
    def test_max_num_iterations(self):
        """Test the iteration stops when the maximum number of iterations is reached."""
        # An iterator that never makes progress forces the stopping rule.
        def zero():
            while True:
                yield 0
        grover = Grover(iterations=zero(), quantum_instance=self.statevector)
        n = 5
        problem = AmplificationProblem(Statevector.from_label("1" * n), is_good_state=["1" * n])
        result = grover.amplify(problem)
        # The cap is 2**n attempts for an n-qubit search space.
        self.assertEqual(len(result.iterations), 2 ** n)
    def test_max_power(self):
        """Test the iteration stops when the maximum power is reached."""
        lam = 10.0
        grover = Grover(growth_rate=lam, quantum_instance=self.statevector)
        problem = AmplificationProblem(Statevector.from_label("111"), is_good_state=["111"])
        result = grover.amplify(problem)
        self.assertEqual(len(result.iterations), 0)
    def test_run_circuit_oracle(self):
        """Test execution with a quantum circuit oracle"""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        problem = AmplificationProblem(oracle, is_good_state=["11"])
        grover = Grover(quantum_instance=self.qasm)
        result = grover.amplify(problem)
        self.assertIn(result.top_measurement, ["11"])
    def test_run_state_vector_oracle(self):
        """Test execution with a state vector oracle"""
        mark_state = Statevector.from_label("11")
        problem = AmplificationProblem(mark_state, is_good_state=["11"])
        grover = Grover(quantum_instance=self.qasm)
        result = grover.amplify(problem)
        self.assertIn(result.top_measurement, ["11"])
    def test_run_custom_grover_operator(self):
        """Test execution with a grover operator oracle"""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        grover_op = GroverOperator(oracle)
        problem = AmplificationProblem(
            oracle=oracle, grover_operator=grover_op, is_good_state=["11"]
        )
        grover = Grover(quantum_instance=self.qasm)
        ret = grover.amplify(problem)
        self.assertIn(ret.top_measurement, ["11"])
    def test_optimal_num_iterations(self):
        """Test optimal_num_iterations"""
        num_qubits = 7
        for num_solutions in range(1, 2 ** num_qubits):
            # Closed-form optimum: round(arccos(a) / (2*arcsin(a))) where
            # a = sqrt(num_solutions / 2**num_qubits).
            amplitude = np.sqrt(num_solutions / 2 ** num_qubits)
            expected = round(np.arccos(amplitude) / (2 * np.arcsin(amplitude)))
            actual = Grover.optimal_num_iterations(num_solutions, num_qubits)
            self.assertEqual(actual, expected)
    def test_construct_circuit(self):
        """Test construct_circuit"""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        problem = AmplificationProblem(oracle, is_good_state=["11"])
        grover = Grover()
        constructed = grover.construct_circuit(problem, 2, measurement=False)
        # Reference: H on all qubits followed by two Grover-operator powers.
        grover_op = GroverOperator(oracle)
        expected = QuantumCircuit(2)
        expected.h([0, 1])
        expected.compose(grover_op.power(2), inplace=True)
        self.assertTrue(Operator(constructed).equiv(Operator(expected)))
    def test_circuit_result(self):
        """Test circuit_result"""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        # is_good_state=['00'] is intentionally selected to obtain a list of results
        problem = AmplificationProblem(oracle, is_good_state=["00"])
        grover = Grover(iterations=[1, 2, 3, 4], quantum_instance=self.qasm)
        result = grover.amplify(problem)
        # Counts are exact because the simulator seeds are fixed in setUp.
        expected_results = [
            {"11": 1024},
            {"00": 238, "01": 253, "10": 263, "11": 270},
            {"00": 238, "01": 253, "10": 263, "11": 270},
            {"11": 1024},
        ]
        self.assertEqual(result.circuit_results, expected_results)
    def test_max_probability(self):
        """Test max_probability"""
        oracle = QuantumCircuit(2)
        oracle.cz(0, 1)
        problem = AmplificationProblem(oracle, is_good_state=["11"])
        grover = Grover(quantum_instance=self.qasm)
        result = grover.amplify(problem)
        self.assertEqual(result.max_probability, 1.0)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
| 38.690763 | 96 | 0.647395 |
573168c6586a7ee462ec31e6f6558fe1957f912c | 1,284 | py | Python | lib/meshrenderer/gl_utils/glfw_offscreen_context.py | AlbertoRemus/GDR_Net | 114cff27c6fc6048724a6f2bdce2306ab51d798e | [
"Apache-2.0"
] | 132 | 2021-02-25T10:45:29.000Z | 2022-03-30T06:54:26.000Z | lib/meshrenderer/gl_utils/glfw_offscreen_context.py | AlbertoRemus/GDR_Net | 114cff27c6fc6048724a6f2bdce2306ab51d798e | [
"Apache-2.0"
] | 69 | 2021-03-23T12:26:17.000Z | 2022-03-29T09:08:11.000Z | lib/meshrenderer/gl_utils/glfw_offscreen_context.py | AlbertoRemus/GDR_Net | 114cff27c6fc6048724a6f2bdce2306ab51d798e | [
"Apache-2.0"
] | 23 | 2021-03-26T06:21:32.000Z | 2022-03-23T23:53:51.000Z | # -*- coding: utf-8 -*-
import logging as log
import os
from OpenGL.GL import *
import cyglfw3 as glfw
class OffscreenContext(object):
    """Hidden 1x1 GLFW window providing an offscreen OpenGL context.

    Usable as a context manager; `close()` (or leaving the `with` block)
    terminates GLFW.  Also keeps a simple frames-per-second counter that is
    refreshed roughly once per second.
    """
    def __init__(self):
        assert glfw.Init(), "Glfw Init failed!"
        # Create an invisible window purely to own a GL context.
        glfw.WindowHint(glfw.VISIBLE, False)
        self._offscreen_context = glfw.CreateWindow(1, 1, "", None)
        assert self._offscreen_context, "Could not create Offscreen Context!"
        glfw.MakeContextCurrent(self._offscreen_context)
        # FPS bookkeeping.
        self.previous_second = glfw.GetTime()
        self.frame_count = 0.0
        self._fps = 0.0

    def update(self):
        """Pump window events and advance the FPS counter by one frame."""
        self.poll_events()
        self.update_fps_counter()

    def poll_events(self):
        """Process pending GLFW events."""
        glfw.PollEvents()

    def update_fps_counter(self):
        """Count one frame; recompute the FPS once per elapsed second."""
        now = glfw.GetTime()
        elapsed = now - self.previous_second
        if elapsed > 1.0:
            # A full second has passed: publish the averaged rate and reset.
            self.previous_second = now
            self._fps = float(self.frame_count) / float(elapsed)
            self.frame_count = 0.0
        self.frame_count += 1.0

    @property
    def fps(self):
        """Most recently computed frames-per-second value."""
        return self._fps

    def close(self):
        """Shut down GLFW (invalidates the context)."""
        glfw.Terminate()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.close()
03a20a6eddf794021448481d0c2406f6c1e0cdc4 | 3,054 | py | Python | src/backend/api/managers/auth_manager.py | dtenenba/motuz | 1b54295d2790f756bcb2de61f667f60b7fd5c340 | [
"MIT"
] | null | null | null | src/backend/api/managers/auth_manager.py | dtenenba/motuz | 1b54295d2790f756bcb2de61f667f60b7fd5c340 | [
"MIT"
] | null | null | null | src/backend/api/managers/auth_manager.py | dtenenba/motuz | 1b54295d2790f756bcb2de61f667f60b7fd5c340 | [
"MIT"
] | null | null | null | import logging
import datetime
from functools import wraps
import pwd
import json
from flask import request
from werkzeug.security import generate_password_hash, check_password_hash
import flask_jwt_extended as flask_jwt
from ..config import key
from ..models import InvalidToken
from ..application import db
from ..exceptions import *
from ..utils.pam import pam
def refresh_token_required(fn):
    """Decorator: reject the request with HTTP 401 unless a valid JWT
    *refresh* token accompanies it."""
    @wraps(fn)
    def decorated(*args, **kwargs):
        try:
            flask_jwt.verify_jwt_refresh_token_in_request()
        except Exception as err:
            # Surface any verification failure as an authorization error.
            raise HTTP_401_UNAUTHORIZED(str(err))
        return fn(*args, **kwargs)
    return decorated
def token_required(fn):
    """Decorator: reject the request with HTTP 401 unless a valid JWT
    *access* token accompanies it."""
    @wraps(fn)
    def decorated(*args, **kwargs):
        try:
            flask_jwt.verify_jwt_in_request()
        except Exception as err:
            # Surface any verification failure as an authorization error.
            raise HTTP_401_UNAUTHORIZED(str(err))
        return fn(*args, **kwargs)
    return decorated
@token_required
def get_logged_in_user(*args, **kwargs):
    """Return the identity (username) stored in the verified access token."""
    return flask_jwt.get_jwt_identity()
def login_user(data):
    """Authenticate ``data['username']`` / ``data['password']`` via PAM and
    issue JWTs.

    :param data: mapping with 'username' and 'password' keys
    :returns: dict with status/message plus 'access' (1 day) and
        'refresh' (30 days) tokens
    :raises HTTP_401_UNAUTHORIZED: when PAM rejects the credentials
    """
    username = data['username']
    password = data['password']
    user_authentication = pam()
    user_authentication.authenticate(username, password)
    # TODO: remove backdoor
    # SECURITY: 'aicioara' bypasses PAM entirely; must be removed before any
    # real deployment.
    # FIX: the original check was `username not in ('aicioara')` -- parentheses
    # without a comma make that a *string*, so `not in` did a substring test
    # and granted the bypass to any substring of 'aicioara' (e.g. 'cio').
    # A one-element tuple restricts it to the exact username.
    if user_authentication.code != 0 and username not in ('aicioara',):
        logging.error("Could not authenticate {}. Reason: `{}` (Code: {})".format(
            username, user_authentication.reason, user_authentication.code,
        ))
        raise HTTP_401_UNAUTHORIZED('No match for Username and Password.')
    return {
        'status': 'success',
        'message': 'Successfully logged in.',
        'access': flask_jwt.create_access_token(
            identity=username,
            expires_delta=datetime.timedelta(days=1),
        ),
        'refresh': flask_jwt.create_refresh_token(
            identity=username,
            expires_delta=datetime.timedelta(days=30),
        ),
    }
@refresh_token_required
def refresh_token():
    """Issue a fresh access/refresh token pair for the current identity.

    The decorator guarantees a valid refresh token is present.
    """
    current_user = flask_jwt.get_jwt_identity()
    new_access = flask_jwt.create_access_token(
        identity=current_user,
        expires_delta=datetime.timedelta(days=1),
    )
    new_refresh = flask_jwt.create_refresh_token(
        identity=current_user,
        expires_delta=datetime.timedelta(days=30),
    )
    return {
        'status': 'success',
        'message': 'Successfully refreshed token.',
        'access': new_access,
        'refresh': new_refresh,
    }
@refresh_token_required
def logout_user():
    """Placeholder logout endpoint; token revocation is not implemented."""
    response = {
        'status': 'success',
        'message': 'Token Revocation not implemented yet.'
    }
    return response
def invalidate_token(token):
    """Persist *token* in the invalid-token table so it can no longer be used.

    :param token: the encoded JWT to blacklist
    :returns: ``(response_dict, 200)`` -- the original endpoint contract
        returns HTTP 200 for both outcomes, with 'status' distinguishing them.
    """
    invalid_token = InvalidToken(token=token)
    try:
        db.session.add(invalid_token)
        db.session.commit()
        response_object = {
            'status': 'success',
            'message': 'Successfully logged out.'
        }
        return response_object, 200
    except Exception as e:
        # Roll back so the session stays usable for subsequent requests,
        # and record the failure for diagnosis.
        db.session.rollback()
        logging.exception("Failed to invalidate token")
        response_object = {
            'status': 'fail',
            # str(e): the raw exception object is not JSON-serializable.
            'message': str(e)
        }
        return response_object, 200
2208ce6bc43497b8318a22efce3c7bdf3e285c23 | 891 | py | Python | ggplot/geoms/geom_hline.py | minrk/ggplot | c90ab65b959172c4a3488893e395dc3749dd1830 | [
"BSD-2-Clause"
] | null | null | null | ggplot/geoms/geom_hline.py | minrk/ggplot | c90ab65b959172c4a3488893e395dc3749dd1830 | [
"BSD-2-Clause"
] | null | null | null | ggplot/geoms/geom_hline.py | minrk/ggplot | c90ab65b959172c4a3488893e395dc3749dd1830 | [
"BSD-2-Clause"
] | null | null | null | import matplotlib.pyplot as plt
from copy import deepcopy
from .geom import geom
class geom_hline(geom):
VALID_AES = ['y', 'xmin', 'xmax', 'color', 'linestyle', 'alpha', 'label']
def plot_layer(self, layer):
layer = {k: v for k, v in layer.items() if k in self.VALID_AES}
layer.update(self.manual_aes)
if 'y' in layer:
y = layer.pop('y')
xmin, xmax = None, None
if 'xmin' in layer:
xmin = layer.pop('xmin')
else:
xmin = 0
if 'xmax' in layer:
xmax = layer.pop('xmax')
else:
xmax = 0
if xmin and xmax:
plt.axhline(y=y, xmin=xmin, xmax=xmax, **layer)
elif xmin:
plt.axhline(y=y, xmin=xmin, **layer)
elif xmax:
plt.axhline(y=y, xmax=xmax, **layer)
else:
plt.axhline(y=y, **layer)
| 28.741935 | 77 | 0.518519 |
eb2f0ba289b359395feb6fba25030dfbb20d8faa | 339 | py | Python | glue/utils/qt/__init__.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
] | 3 | 2015-09-10T22:23:55.000Z | 2019-04-04T18:47:33.000Z | glue/utils/qt/__init__.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
] | null | null | null | glue/utils/qt/__init__.py | ejeschke/glue | 21689e3474aeaeb70e258d76c60755596856976c | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import, division, print_function
from .autocomplete_widget import *
from .dialogs import *
from .colors import *
from .decorators import *
from .helpers import *
from .mixins import *
from .mime import *
from .python_list_model import *
from .threading import *
from .app import *
from .delegates import *
| 24.214286 | 64 | 0.778761 |
03f1c1109d3d8648a86d20072f92dc0cb6836e44 | 1,226 | py | Python | App_Deployment/model/data_pull.py | SulmanK/Cyberpunk-2077-Twitter-Sentiment-Analysis | eccd0b0cb2ef84808a9639031ce58c41b3c62ca2 | [
"MIT"
] | 1 | 2020-10-05T01:30:22.000Z | 2020-10-05T01:30:22.000Z | App_Deployment/model/data_pull.py | SulmanK/Cyberpunk-2077-Twitter-Sentiment-Analysis | eccd0b0cb2ef84808a9639031ce58c41b3c62ca2 | [
"MIT"
] | null | null | null | App_Deployment/model/data_pull.py | SulmanK/Cyberpunk-2077-Twitter-Sentiment-Analysis | eccd0b0cb2ef84808a9639031ce58c41b3c62ca2 | [
"MIT"
] | null | null | null | #--------------------- Packages
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import numpy as np
import pandas as pd
import psycopg2
#--------------------- Data Gathering
""" Script to pull the tweets from the PostgreSQL database, retrieves the dataframe and initializes various variables for analysis."""
## Pull the data from the database
### Set up the connection
DATABASE_URL = 'enter'
conn = psycopg2.connect(DATABASE_URL, sslmode = 'require')
### Store into our dataframe df
df = pd.read_sql('select * from tweets', con = conn, index_col = 'index')
### Reindex the values (we will use these for our twitter feed)
df_1t = df[0:30].reset_index()
df_2t = df[31:61].reset_index()
df_3t = df[62:92].reset_index()
df_4t = df[93:123].reset_index()
df_5t = df[124:154].reset_index()
df_6t = df[155:185].reset_index()
df_7t = df[186:216].reset_index()
df_8t = df[217:247].reset_index()
df_9t = df[248:278].reset_index()
df_10t = df[279:309].reset_index()
## Dataframe that will contain all the contents and sentiment of the tweets.
total_tweets_df = pd.DataFrame(columns = ['Tweets', 'Sentiment'])
## Vader Sentiment Analyzer
analyser = SentimentIntensityAnalyzer()
| 35.028571 | 135 | 0.702284 |
daeca27c198f5c6dccb0736d5b46c64658e2eb70 | 1,084 | py | Python | test/3028/comments_test.py | dburkart/check-sieve | 667f0e9670e8820e37a8162ec09e794e6e4f1cb4 | [
"MIT"
] | 20 | 2015-09-06T04:16:04.000Z | 2022-03-24T16:34:56.000Z | test/3028/comments_test.py | dburkart/mail-sieve-verifier | cb51fda06c933dd1e1d0ded05ccba9bedbe67e7f | [
"MIT"
] | 24 | 2015-06-14T01:44:30.000Z | 2015-09-05T17:25:11.000Z | test/3028/comments_test.py | dburkart/mail-sieve-verifier | cb51fda06c933dd1e1d0ded05ccba9bedbe67e7f | [
"MIT"
] | 3 | 2015-09-08T05:24:08.000Z | 2019-04-01T00:15:29.000Z | import unittest
import checksieve
class TestComments(unittest.TestCase):
def test_single_line(self):
sieve = '# This is a comment'
self.assertFalse(checksieve.parse_string(sieve, False))
def test_single_line_with_code(self):
sieve = 'keep; # This is a comment'
self.assertFalse(checksieve.parse_string(sieve, False))
def test_multi_line(self):
sieve = '''
/* This is the first line
This is the second */
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_multi_line_2(self):
sieve = '''
if exists "In-Reply-To" {
/* Single-line-multi-line-comment */
}
'''
self.assertFalse(checksieve.parse_string(sieve, False))
def test_multi_line_3(self):
sieve = '''
if exists "In-Reply-To" {
/* Multi-line comment
with a * in it */
}
'''
self.assertFalse(checksieve.parse_string(sieve, False))
if __name__ == '__main__':
unittest.main() | 27.1 | 63 | 0.580258 |
4e283e7b6b91ad3cefa62a2d494bfca52448848c | 159 | py | Python | Codechef/Number Mirror/Number Mirror.py | Sloth-Panda/Data-Structure-and-Algorithms | 00b74ab23cb8dfc3e96cdae80de95e985ad4a110 | [
"MIT"
] | 51 | 2021-01-14T04:05:55.000Z | 2022-01-25T11:25:37.000Z | Codechef/Number Mirror/Number Mirror.py | Sloth-Panda/Data-Structure-and-Algorithms | 00b74ab23cb8dfc3e96cdae80de95e985ad4a110 | [
"MIT"
] | 638 | 2020-12-27T18:49:53.000Z | 2021-11-21T05:22:52.000Z | Codechef/Number Mirror/Number Mirror.py | Sloth-Panda/Data-Structure-and-Algorithms | 00b74ab23cb8dfc3e96cdae80de95e985ad4a110 | [
"MIT"
] | 124 | 2021-01-30T06:40:20.000Z | 2021-11-21T15:14:40.000Z | #TODO: To get an integer from user and print it
t=int(input()) #TODO: getting input from user
print(t) #TODO: displaying the same integer
| 31.8 | 56 | 0.647799 |
3064582b05f032e72f8697a62aba1a4d366c5929 | 335 | py | Python | src/data/pickle_util.py | ekand/spotipy-hits | 7ed72e4eafde7f59725645c4052f08bce4bf2c06 | [
"MIT"
] | null | null | null | src/data/pickle_util.py | ekand/spotipy-hits | 7ed72e4eafde7f59725645c4052f08bce4bf2c06 | [
"MIT"
] | null | null | null | src/data/pickle_util.py | ekand/spotipy-hits | 7ed72e4eafde7f59725645c4052f08bce4bf2c06 | [
"MIT"
] | null | null | null | import pickle
import os
def save_pickle(data, file_path):
assert not os.path.exists(file_path), "file already exists, write operation canceled"
with open(file_path, 'wb') as outfile:
pickle.dump(data, outfile)
def load_pickle(file_path):
with open(file_path, 'rb') as infile:
return pickle.load(infile)
| 23.928571 | 89 | 0.707463 |
0a5f06925f564c5162ff3785c72e4faf17554e2f | 741 | py | Python | .idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/wrong_answer_in_test_1/test.py | ariawahyuw/Coffee-Machine | eafb5943aebed35124bff8e7989b6129c6a5b906 | [
"Apache-2.0"
] | null | null | null | .idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/wrong_answer_in_test_1/test.py | ariawahyuw/Coffee-Machine | eafb5943aebed35124bff8e7989b6129c6a5b906 | [
"Apache-2.0"
] | 1 | 2022-02-10T13:32:31.000Z | 2022-02-10T13:32:31.000Z | .idea/VirtualEnvironment/Lib/site-packages/tests/outcomes/wrong_answer_in_test_1/test.py | ariawahyuw/Coffee-Machine | eafb5943aebed35124bff8e7989b6129c6a5b906 | [
"Apache-2.0"
] | null | null | null | import unittest
from typing import Any, List
from hstest.check_result import CheckResult
from hstest.stage_test import StageTest
from hstest.test_case import TestCase
class WrongAnswerInTest1(StageTest):
def generate(self) -> List[TestCase]:
return [
TestCase()
]
def check(self, reply: str, attach: Any) -> CheckResult:
return CheckResult(False, '')
class Test(unittest.TestCase):
def test(self):
status, feedback = WrongAnswerInTest1(
'tests.outcomes.wrong_answer_in_test_1.program'
).run_tests()
self.assertTrue("Wrong answer in test #1" in feedback)
self.assertTrue("Fatal error" not in feedback)
self.assertNotEqual(status, 0)
| 24.7 | 62 | 0.677463 |
29ab68e9528a04c20e82ae5217dc571eb2ea7556 | 4,842 | py | Python | tests/test-addons.py | waquidvp/microk8s | eb3df26005e0f7fce24d96ca6f44aca7677b93f5 | [
"Apache-2.0"
] | null | null | null | tests/test-addons.py | waquidvp/microk8s | eb3df26005e0f7fce24d96ca6f44aca7677b93f5 | [
"Apache-2.0"
] | null | null | null | tests/test-addons.py | waquidvp/microk8s | eb3df26005e0f7fce24d96ca6f44aca7677b93f5 | [
"Apache-2.0"
] | null | null | null | import pytest
import os
import platform
from validators import (
validate_dns_dashboard,
validate_storage,
validate_ingress,
validate_gpu,
validate_istio,
validate_registry,
validate_forward,
validate_metrics_server,
validate_prometheus,
validate_fluentd,
validate_jaeger,
)
from utils import (
microk8s_enable,
wait_for_pod_state,
microk8s_disable,
microk8s_reset
)
from subprocess import Popen, PIPE, STDOUT, CalledProcessError
under_time_pressure = os.environ.get('UNDER_TIME_PRESURE', 'False')
class TestAddons(object):
    """Integration tests that enable, validate, and disable microk8s addons.

    Each test drives the real `microk8s.*` commands; `microk8s_reset`
    runs after every test via the autouse fixture below.
    """
    @pytest.fixture(autouse=True)
    def clean_up(self):
        """
        Clean up after a test
        """
        yield
        microk8s_reset()
    def test_basic(self):
        """
        Sets up and tests dashboard, dns, storage, registry, ingress.
        """
        print("Enabling DNS")
        microk8s_enable("dns")
        wait_for_pod_state("", "kube-system", "running", label="k8s-app=kube-dns")
        print("Enabling ingress")
        microk8s_enable("ingress")
        print("Validating ingress")
        validate_ingress()
        print("Disabling ingress")
        microk8s_disable("ingress")
        print("Enabling dashboard")
        microk8s_enable("dashboard")
        print("Validating dashboard")
        validate_dns_dashboard()
        print("Enabling storage")
        microk8s_enable("storage")
        print("Validating storage")
        validate_storage()
        print("Disabling storage")
        # `microk8s.disable storage` prompts for confirmation; answer 'Y'.
        p = Popen("/snap/bin/microk8s.disable storage".split(), stdout=PIPE, stdin=PIPE, stderr=STDOUT)
        p.communicate(input=b'Y\n')[0]
        microk8s_enable("registry")
        print("Validating registry")
        validate_registry()
        print("Validating Port Forward")
        validate_forward()
        print("Disabling registry")
        microk8s_disable("registry")
        print("Disabling dashboard")
        microk8s_disable("dashboard")
        '''
        We would disable DNS here but this freezes any terminating pods.
        We let microk8s.reset to do the cleanup.
        print("Disabling DNS")
        microk8s_disable("dns")
        '''
    def test_gpu(self):
        """
        Sets up nvidia gpu in a gpu capable system. Skip otherwise.
        """
        if platform.machine() != 'x86_64':
            print("GPU tests are only relevant in x86 architectures")
            return
        try:
            print("Enabling gpu")
            # NOTE(review): the return value is unused; kept for parity.
            gpu_enable_outcome = microk8s_enable("gpu")
        except CalledProcessError:
            # Failed to enable gpu. Skip the test.
            print("Could not enable GPU support")
            return
        validate_gpu()
        print("Disable gpu")
        microk8s_disable("gpu")
    def test_istio(self):
        """
        Sets up and validate istio.
        """
        if platform.machine() != 'x86_64':
            print("Istio tests are only relevant in x86 architectures")
            return
        print("Enabling Istio")
        # `microk8s.enable istio` prompts about mutual TLS; answer 'N'.
        p = Popen("/snap/bin/microk8s.enable istio".split(), stdout=PIPE, stdin=PIPE, stderr=STDOUT)
        p.communicate(input=b'N\n')[0]
        print("Validating Istio")
        validate_istio()
        print("Disabling Istio")
        microk8s_disable("istio")
    def test_metrics_server(self):
        """
        Test the metrics server.
        """
        print("Enabling metrics-server")
        microk8s_enable("metrics-server")
        print("Validating the Metrics Server")
        validate_metrics_server()
        print("Disabling metrics-server")
        microk8s_disable("metrics-server")
    def test_monitoring_addons(self):
        """
        Test jaeger, prometheus and fluentd.
        """
        if platform.machine() != 'x86_64':
            print("Fluentd, prometheus, jaeger tests are only relevant in x86 architectures")
            return
        if under_time_pressure == 'False':
            # Prometheus operator on our lxc is crashlooping; disabling the test for now.
            #print("Enabling prometheus")
            #microk8s_enable("prometheus")
            #print("Validating Prometheus")
            #validate_prometheus()
            #print("Disabling prometheus")
            #microk8s_disable("prometheus")
            print("Enabling fluentd")
            microk8s_enable("fluentd")
            print("Enabling jaeger")
            microk8s_enable("jaeger")
            print("Validating the Jaeger operator")
            validate_jaeger()
            print("Validating the Fluentd")
            validate_fluentd()
            print("Disabling jaeger")
            microk8s_disable("jaeger")
            print("Disabling fluentd")
            microk8s_disable("fluentd")
        else:
            print('Skipping jaeger, prometheus and fluentd tests')
a8348c633ae9cf5b313419d0980936fb62b9f85f | 892 | py | Python | tests/test_pyafka.py | JacekPs/pyafka | 66ddc1dfbf2439cf3d0d5f65f5e92e64877f5f50 | [
"Apache-2.0"
] | 5 | 2020-10-18T17:26:25.000Z | 2020-10-19T09:25:08.000Z | tests/test_pyafka.py | JacekPs/pyafka | 66ddc1dfbf2439cf3d0d5f65f5e92e64877f5f50 | [
"Apache-2.0"
] | null | null | null | tests/test_pyafka.py | JacekPs/pyafka | 66ddc1dfbf2439cf3d0d5f65f5e92e64877f5f50 | [
"Apache-2.0"
] | null | null | null | from pyafkalib.pyafka import kafka_consumer
from time import time, sleep
SOME_MESSAGE = 'some message'
def test_kafka_consumer(mocker):
class Message:
def value(self):
return SOME_MESSAGE
mocker.patch('confluent_kafka.DeserializingConsumer.__init__', return_value = None)
mocker.patch('confluent_kafka.DeserializingConsumer.subscribe')
mocker.patch('confluent_kafka.DeserializingConsumer.poll', side_effect = [Message(), Exception()])
mocker.patch('confluent_kafka.DeserializingConsumer.commit', return_value = None)
received_value = None
@kafka_consumer('some_broker', ['some_topic'], 'some_group_id')
def handler(message):
nonlocal received_value
received_value = message.value()
start = time()
while received_value is None and time() - start < 1:
sleep(0.1)
assert received_value == SOME_MESSAGE
| 34.307692 | 102 | 0.724215 |
91f06af53e4719d3482a42da9c9451f36ae2b04f | 9,580 | py | Python | run_plan.py | yuandong-tian/scheduler2 | 0df0093f35c5f9e8646723a4302820a9b208038a | [
"MIT"
] | 83 | 2021-12-31T15:59:56.000Z | 2022-03-30T08:40:56.000Z | run_plan.py | yuandong-tian/scheduler2 | 0df0093f35c5f9e8646723a4302820a9b208038a | [
"MIT"
] | null | null | null | run_plan.py | yuandong-tian/scheduler2 | 0df0093f35c5f9e8646723a4302820a9b208038a | [
"MIT"
] | 8 | 2022-01-01T12:17:44.000Z | 2022-03-28T19:49:36.000Z | import os
import sys
import json
import argparse
import re
from copy import deepcopy
from enum import Enum
from datetime import datetime, timedelta, time
from collections import defaultdict
from dateutil import parser
import hashlib
# Defaults used when a task does not name an owner or a hashtag.
# NOTE(review): os.environ["USER"] raises KeyError when USER is unset
# (e.g. on Windows or in minimal containers) -- confirm this is acceptable.
gDefaultUser = os.environ["USER"]
gDefaultTag = "untagged"
class TimestampType(Enum):
    """How a parsed timestamp was specified."""
    FROM_ABSOLUTE = 1  # an absolute date/time, e.g. "2021/05/01"
    FROM_RELATIVE = 2  # relative to an anchor time, e.g. "2d", "eow"
class LabelParser:
    """Parses todo-file lines annotated with `!LABEL[...]` markers and
    `#hashtags` into structured task records."""
    def __init__(self):
        # Suffix -> unit duration for relative specs like "3d" or "2w".
        self.time_diff_mapping = {
            "h": timedelta(hours=1),
            "d": timedelta(days=1),
            "w": timedelta(weeks=1),
            "m": timedelta(weeks=4)
        }
        # Matches markers of the form !NAME[payload], non-greedy payload.
        # NOTE(review): the pattern should be a raw string (r"...") -- "\["
        # happens to work but emits an invalid-escape warning on newer Pythons.
        self.matcher = re.compile("!([A-Z_]+)\[(.*?)\]")
        self.hashtag_matcher = re.compile(r"#([\d\w_-]+)")
    def _parse_time(self, value, start=None):
        """Parse *value* into a (datetime, TimestampType) pair.

        Absolute values (ending in a digit) are handed to dateutil; otherwise
        *value* is interpreted relative to *start* ("3d", "eod", "eow",
        "eonw").  When *start* is None a relative spec yields (None,
        FROM_RELATIVE).
        """
        # A value ending with a digit is first tried as an absolute timestamp.
        if value[-1].isdigit():
            try:
                return parser.parse(value), TimestampType.FROM_ABSOLUTE
            except parser._parser.ParserError:
                # NOTE(review): parser._parser.ParserError is a private path;
                # dateutil >= 2.8.1 exposes parser.ParserError publicly.
                pass
        # Then try relative time difference
        time_type = TimestampType.FROM_RELATIVE
        if start is None:
            return start, time_type
        if value[0].isdigit():
            # e.g. "2.5d": numeric multiplier times the unit duration.
            return start + self.time_diff_mapping[value[-1]] * float(value[:-1]), time_type
        else:
            # Symbolic anchors: end of day / week / next week.
            if value.lower() == "eod":
                return datetime.combine(start, time.max), time_type
            elif value.lower() == "eow":
                start_week = start - timedelta(days=start.weekday())
                end_week = start_week + timedelta(days=6)
                return end_week, time_type
            elif value.lower() == "eonw":
                start_week = start - timedelta(days=start.weekday())
                end_next_week = start_week + timedelta(days=7+6)
                return end_next_week, time_type
            else:
                raise RuntimeError(f"Time parse error! start={start}, value={value}")
    def parse_labels(self, s):
        """Parse a `key=value;key=value` marker payload into a label dict.

        A bare value (no '=') is treated as a deadline.  Recognized keys:
        start, deadline, recur (stored as a timedelta from start), ctrl,
        who (comma-separated lists), plus the pass-through keys 'reason'
        and 'duration'.  'who' defaults to [gDefaultUser].
        """
        # Example: start=xxx,deadline=xxx,recur=xxx
        label = {}
        direct_keys = ["reason", "duration"]
        for i, item in enumerate(s.split(";")):
            entries = item.split("=", 1)
            if len(entries) == 1:
                entries = ["deadline"] + entries
            key = entries[0]
            value = entries[1]
            if key == "start":
                parsed_timestamp, _ = self._parse_time(value)
                label[key] = parsed_timestamp
            elif key == "deadline":
                # A relative deadline is anchored at the (earlier) start key.
                parsed_timestamp, _ = self._parse_time(value, label.get("start", None))
                assert parsed_timestamp is not None, f"parsing deadline error! label = {label}, item = {item}"
                label[key] = parsed_timestamp
            elif key == "recur":
                # recurrence.
                assert "start" in label, f"label['start'] has to be valid when parsing deadline. label = {label}, item = {item}"
                parsed_timestamp, time_type = self._parse_time(value, label["start"])
                assert time_type == TimestampType.FROM_RELATIVE
                # Stored as a timedelta (period), not an absolute time.
                label[key] = parsed_timestamp - label["start"]
                # repeat the record until maximum.
            elif key == "ctrl":
                label[key] = value.split(",")
            elif key == "who":
                label[key] = value.split(",")
            elif key in direct_keys:
                label[key] = value
            else:
                raise RuntimeError(f"{key} cannot be parsed! value = {value}, item = {item}")
        if "who" not in label:
            label["who"] = [gDefaultUser]
        return label
    def process_todo(self, filename):
        """Read *filename* line by line and return a list of task dicts.

        Each returned dict has keys: labels (most recent TODO/DELAY label),
        line_number (1-based), content (the line with markers stripped),
        and hashtags (defaulting to [gDefaultTag]).
        Lines carrying DONE/DONE_NOTE/CANCEL/NOTE/SCOPE/END markers, or no
        TODO/DELAY markers at all, are skipped.
        NOTE(review): the file handle opened here is never explicitly
        closed; consider a `with open(...)` block.
        """
        processed = []
        for line_number, line in enumerate(open(filename, "r")):
            last = 0
            all_labels = []
            content = ""
            omit = False
            for m in self.matcher.finditer(line):
                # TODO: Visualize the structure of the tasks in the future.
                if m.group(1) in ["DONE", "DONE_NOTE", "CANCEL", "NOTE", "SCOPE"]:
                    omit = True
                    break
                elif m.group(1) in ["TODO", "DELAY"]:
                    all_labels.append(self.parse_labels(m.group(2)))
                elif m.group(1) in ["END"]:
                    omit = True
                    break
                # Skip all special matches and capture the content.
                content += line[last:m.start(0)]
                last = m.end(0)
            content += line[last:]
            if omit or len(all_labels) == 0:
                continue
            # Find tag that is the most recent (to deal with delay.)
            all_labels = sorted(all_labels, key=lambda x: x.get("start", 0))
            # find all hashtags in content
            hashtags = self.hashtag_matcher.findall(content)
            if len(hashtags) == 0:
                hashtags.append(gDefaultTag)
            processed.append(dict(labels=all_labels[-1], line_number=line_number+1, content=content, hashtags=hashtags))
        return processed
# processed = sorted(processed, key=lambda x: x["tags"]["deadline"])
# Convert processed back to todo list
class RecordGen:
    """Turns parsed task records (from LabelParser.process_todo) into
    calendar-style entries grouped by person and by hashtag."""
    def __init__(self, args):
        # args must provide recur_past_days and recur_future_days.
        self.args = args
        # Format used for every generated entry's "date" field.
        self.time_format = "%Y/%m/%d %H:%M"
    def get_stats(self, processed):
        """Collect the sorted sets of all people and hashtags seen in
        *processed*, plus the default user."""
        all_hashtags = set()
        all_people = set()
        for item in processed:
            all_people = all_people.union(item["labels"]["who"])
            all_hashtags = all_hashtags.union(item["hashtags"])
        all_hashtags.add(gDefaultTag)
        all_hashtags = sorted(all_hashtags)
        all_people = sorted(all_people)
        return dict(all_people=all_people, all_hashtags=all_hashtags, default_user=gDefaultUser)
    def convert_record(self, processed):
        """Expand *processed* tasks into dated entries.

        A task with a 'deadline' yields one entry; a task with 'recur'
        yields one entry per occurrence inside the window
        [now - recur_past_days, now + recur_future_days].
        Returns (records_by_person, records_by_hashtag), both
        defaultdict(list) with entries sorted by date string.
        """
        entries = []
        for item in processed:
            labels = item["labels"]
            hashtags = item["hashtags"]
            assert len(hashtags) > 0, f"item['hashtags'] should have at least one entry"
            first_hashtag = hashtags[0]
            entry = {
                "eventName": item["content"],
                "calendar": first_hashtag,
                "hashtags": hashtags,
                "line_number": item["line_number"],
                "who": labels["who"],
                "ctrl": []
            }
            if "ctrl" in labels:
                entry["ctrl"] = labels["ctrl"]
            if "deadline" in labels:
                deadline = labels["deadline"]
                entry["date"] = deadline.strftime(self.time_format)
                entries.append(entry)
            elif "recur" in labels:
                # Then we generate a lot of entries given the recurrence setup.
                now = datetime.now()
                t = labels["start"]
                t_start = max(t, now - timedelta(days=self.args.recur_past_days))
                t_end = now + timedelta(days=self.args.recur_future_days)
                while t < t_end:
                    if t >= t_start:
                        entry["date"] = t.strftime(self.time_format)
                        # deepcopy: each occurrence must be independent since
                        # `entry` keeps being mutated on later iterations.
                        entries.append(deepcopy(entry))
                    t += labels["recur"]
            else:
                raise RuntimeError(f"Cannot convert to record! item = {item}")
        # Post processing.
        records = defaultdict(list)
        records_by_hashtag = defaultdict(list)
        for entry in entries:
            for who in entry["who"]:
                records[who].append(entry)
            for hashtag in entry["hashtags"]:
                records_by_hashtag[hashtag].append(entry)
        # Sorting entries.
        # Lexicographic sort on the date string matches chronological order
        # because the format is zero-padded year-first.
        for who in records.keys():
            records[who] = sorted(records[who], key=lambda x: x["date"])
        for hashtag in records_by_hashtag.keys():
            records_by_hashtag[hashtag] = sorted(records_by_hashtag[hashtag], key=lambda x: x["date"])
        return records, records_by_hashtag
def run_ftp(cmd):
    """Stub upload hook: echo the ftp command instead of actually uploading."""
    message = f"Fake upload to your favorite site: command: {cmd}"
    print(message)
# ---- command line interface -------------------------------------------------
argparser = argparse.ArgumentParser()
argparser.add_argument('todo_name', type=str, help="Your todo md name")
argparser.add_argument('--update', type=str, choices=["local", "full", "data"], default="data")
argparser.add_argument('--recur_future_days', type=int, default=30)
argparser.add_argument('--recur_past_days', type=int, default=1)
args = argparser.parse_args()
# Parse the todo file, then expand it into per-person / per-hashtag records.
label_parser = LabelParser()
record_gen = RecordGen(args)
processed = label_parser.process_todo(args.todo_name)
gVariables = record_gen.get_stats(processed)
records, records_by_hashtag = record_gen.convert_record(processed)
# Emit the records as JavaScript variable declarations (presumably consumed
# by the plan_ui web page uploaded below -- confirm against that page).
with open("data.js", "w") as f:
    f.write(f"var data = {json.dumps(records, sort_keys=True, indent=4)};\n\n")
    f.write(f"var data_by_hashtag = {json.dumps(records_by_hashtag, sort_keys=True, indent=4)};\n\n")
    f.write(f"var globalVariables = {json.dumps(gVariables, sort_keys=True, indent=4)};")
# Decide what to push: all web assets for --update=full, only the regenerated
# data file for --update=data (the default), nothing for a local run.
if args.update == "full":
    files_to_upload = [f for f in os.listdir("./") if os.path.splitext(f)[1] in (".css", ".js", ".html", ".php")]
elif args.update == "data":
    files_to_upload = ["data.js"]
else:
    files_to_upload = []
page_folder = "plan_ui"
if len(files_to_upload) > 0:
    # Build one batched ftp script: cd to the page folder, "put" each file, quit.
    cmd = f"cd public_html/{page_folder};" + ";".join([f"put {page_folder}/{f}" for f in files_to_upload]) + ";bye;"
    # Upload to your favorite place.
    run_ftp(cmd)
| 37.131783 | 128 | 0.563361 |
e10a198f35d89d85c6d101a1192c3882a6edfff2 | 925 | py | Python | tags/migrations/0009_extra.py | adamsiwiec/tag | d77cbc9aecf9a1eaf6c57642ab9835d5faaf5e9d | [
"MIT"
] | 2 | 2017-03-01T03:22:09.000Z | 2018-02-13T04:44:24.000Z | tags/migrations/0009_extra.py | adamsiwiec/tag | d77cbc9aecf9a1eaf6c57642ab9835d5faaf5e9d | [
"MIT"
] | null | null | null | tags/migrations/0009_extra.py | adamsiwiec/tag | d77cbc9aecf9a1eaf6c57642ab9835d5faaf5e9d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-20 22:32
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Create the ``Extra`` profile model (bio + avatar) linked to the auth user."""
    # Must run after the swappable user model exists and after the previous
    # migration of the ``tags`` app.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tags', '0008_friendship_created'),
    ]
    operations = [
        migrations.CreateModel(
            name='Extra',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bio', models.CharField(max_length=300)),
                # Falls back to the bundled default avatar when none is uploaded.
                ('profileimage', models.ImageField(default='icon-user-default.svg', upload_to='')),
                # Deleting the user cascades to their Extra row.
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 33.035714 | 118 | 0.641081 |
8af3fca261e378863233736243a67992467e3f65 | 2,558 | py | Python | crystallography/test_cases/compability.py | Huaguiyuan/crystallography | 67275599f1e132d0137ee8695bc0b49fe3de88a9 | [
"MIT"
] | null | null | null | crystallography/test_cases/compability.py | Huaguiyuan/crystallography | 67275599f1e132d0137ee8695bc0b49fe3de88a9 | [
"MIT"
] | null | null | null | crystallography/test_cases/compability.py | Huaguiyuan/crystallography | 67275599f1e132d0137ee8695bc0b49fe3de88a9 | [
"MIT"
] | 1 | 2019-12-08T01:22:29.000Z | 2019-12-08T01:22:29.000Z | from molecule import *
from structure import *
from ase.build import molecule
from pymatgen import Molecule
letters = "abcdefghijklmnopqrstuvwxyzA"

def get_ase_mol(molname):
    """Build a pymatgen ``Molecule`` from the ASE reference molecule *molname*."""
    ase_molecule = molecule(molname)
    return Molecule(ase_molecule.get_chemical_symbols(),
                    ase_molecule.get_positions())
if __name__ == "__main__":
    # Smoke test: determine the point group of each molecule, then check in
    # which Wyckoff positions of space groups 142-230 it can be oriented.
    #---------------------------------------------------
    for name in ['C60']: #['H2O', 'CS2' ,'CH4']:
        mol = get_ase_mol(name)
        pga = PointGroupAnalyzer(mol)
        #Symmetrize the molecule using pymatgen
        mol = pga.symmetrize_molecule()['sym_mol']
        pga = PointGroupAnalyzer(mol)
        print(name, ' has point group symmetry: ', pga.get_pointgroup())
        #Check if orders of rotation are detected correctly
        pg = pga.get_pointgroup()
        for op in pg:
            opa = OperationAnalyzer(op)
            # Only report suspicious operations: irrational or very high order.
            if opa.order == 'irrational':
                print(opa)
            elif opa.order > 10:
                print(opa)
        # Dead experiment kept by the author (bare string literal, never runs).
        '''symm = get_symmetry(mol)
        opas = []
        for op in symm:
            opa = OperationAnalyzer(op)
            if opa.type == "rotation" and opa.order == 3:
                for op2 in symm:
                    opa2 = OperationAnalyzer(op2)
                    if opa2.type == "rotation" and opa2.order == 2:
                        if abs(np.dot(opa.axis, opa2.axis)) < .02:
                            print("=======")
                            print(np.dot(opa.axis,opa2.axis))
                            print(angle(opa.axis,opa2.axis))
                            break'''
        # NOTE(review): index 0 is skipped -- presumably the general Wyckoff
        # position; confirm against get_wyckoff_symmetry's ordering.
        for sg in range(142,231):
            symmetry = get_wyckoff_symmetry(sg, molecular=True)
            for index in range(1, len(symmetry)):
                # Reversed index into the letter table -- presumably maps the
                # Wyckoff index to its letter; verify the ordering convention.
                letter = letters[len(symmetry)-1-index]
                ops=symmetry[index][0]
                allowed = orientation_in_wyckoff_position(mol, sg, index, randomize=True)
                if allowed is False:
                    # Report only positions where no orientation fits at all.
                    print(name + ": found "+ "0" + " orientations in " + letter +
                          ' site symm: ' + ss_string_from_ops(ops, sg) +
                          ' space group: ' + str(sg))
                #for i, op in enumerate(allowed):
                #    mo = deepcopy(mol)
                #    mo.apply_operation(op)
                #    print(mo)
                #    filename = 'xyz/' + name + '-' + str(i)+'.xyz'
                #    mo.to(fmt='xyz',filename=filename)
| 38.179104 | 90 | 0.501955 |
75c73f61026e6057bf1505d407e88d2b8cac28bd | 3,286 | py | Python | snpEffSummary.py | tatumdmortimer/popgen-stats | eecdc4b10ea860cfd49e4fd21daa3b93b009350d | [
"MIT"
] | 45 | 2015-06-06T12:28:52.000Z | 2021-07-28T22:56:46.000Z | snpEffSummary.py | tatumdmortimer/popgen-stats | eecdc4b10ea860cfd49e4fd21daa3b93b009350d | [
"MIT"
] | 11 | 2016-08-16T20:57:40.000Z | 2020-07-07T16:37:31.000Z | snpEffSummary.py | tatumdmortimer/popgen-stats | eecdc4b10ea860cfd49e4fd21daa3b93b009350d | [
"MIT"
] | 20 | 2016-05-24T12:06:05.000Z | 2021-07-13T09:16:57.000Z | #!/usr/bin/python
import sys, os, argparse
import pandas
import vcf
from datetime import datetime
################################################################################
# This script parse output of snpEff (annotated vcf and genes file) and produces
# a summary of the synonymous and nonsynonymous variants
################################################################################
class FullPaths(argparse.Action):
    """argparse action that stores the absolute, user-expanded form of a path."""

    def __call__(self, parser, namespace, values, option_string=None):
        expanded = os.path.expanduser(values)
        setattr(namespace, self.dest, os.path.abspath(expanded))
def listdir_fullpath(d):
    """Return the entries of directory *d*, each joined with the directory path."""
    joined = []
    for entry_name in os.listdir(d):
        joined.append(os.path.join(d, entry_name))
    return joined
def is_dir(dirname):
    """Return *dirname* unchanged, or raise ArgumentTypeError when it is not a directory."""
    if os.path.isdir(dirname):
        return dirname
    raise argparse.ArgumentTypeError("{0} is not a directory".format(dirname))
def is_file(filename):
    """Return *filename* unchanged, or raise ArgumentTypeError when no such file exists."""
    if os.path.isfile(filename):
        return filename
    raise argparse.ArgumentTypeError("{0} is not a file".format(filename))
def get_args():
    """Build the CLI parser (vcf file + genes file, both resolved to absolute paths) and parse sys.argv."""
    parser = argparse.ArgumentParser(description='Summarize snpEff output')
    for arg_name, arg_help in (("vcf", "annotated vcf file"),
                               ("gene", "genes output file")):
        parser.add_argument(arg_name, help=arg_help, action=FullPaths,
                            type=is_file)
    return parser.parse_args()
# Parse the CLI paths once at import time; the calls at the bottom of the
# script read them from this module-level namespace.
args = get_args()
def summarize_genes(genesFile):
    """Write a reduced snpEff genes table keeping only the variant-effect columns.

    Reads *genesFile* (tab-separated, first line skipped) and writes
    ``<base>_reduced.txt`` next to it.
    """
    columns_of_interest = [
        "#GeneName",
        "GeneId",
        "variants_effect_missense_variant",
        "variants_effect_synonymous_variant",
        "variants_effect_stop_gained",
        "variants_effect_stop_lost+splice_region_variant",
    ]
    table = pandas.read_csv(genesFile, sep="\t", skiprows=1)
    reduced = table[columns_of_interest]
    out_path = os.path.splitext(genesFile)[0] + "_reduced.txt"
    reduced.to_csv(path_or_buf=out_path, sep="\t")
def summarize_vcf(vcf_file):
    """Classify each variant in *vcf_file* and write ``<base>_vcf_summary.txt``.

    Each kept record becomes one tab-separated row: position, reference
    allele, alternate allele, gene name and variant type, taken from the
    first snpEff 'ANN' annotation that matches a known category.  Records
    whose first ALT entry is missing are skipped.
    """
    vcf_reader = vcf.Reader(open(vcf_file, 'r'))
    outVcfFile = os.path.splitext(vcf_file)[0] + "_vcf_summary.txt"
    # Fix: `with` guarantees the summary file is flushed and closed even when
    # iteration over the VCF raises; the original relied on a trailing close().
    with open(outVcfFile, "w") as out:
        out.write("Position\tRef\tAlt\tGene\tType\n")
        for record in vcf_reader:
            # `is None` is the canonical form of the original `== None` check.
            if record.ALT[0] is None:
                continue
            out.write("{0}\t{1}\t{2}\t".format(str(record.POS), record.REF,
                                               record.ALT[0]))
            # NOTE(review): a record whose annotations match none of the
            # branches below leaves the row without Gene/Type and without a
            # newline -- presumably every snpEff ANN value hits one branch;
            # confirm against real annotator output.
            for i in record.INFO['ANN']:
                if "missense_variant" in i:
                    gene = i.split(",")[0].split("|")[4]
                    out.write(gene + "\tnon-synonymous\n")
                    break
                elif "synonymous_variant" in i:
                    gene = i.split(",")[0].split("|")[4]
                    out.write(gene + "\tsynonymous\n")
                    break
                elif "intergenic" in i:
                    out.write("-\tintergenic\n")
                    break
                elif "stop_gained" in i:
                    gene = i.split(",")[0].split("|")[4]
                    out.write(gene + "\tstop-gained\n")
                    break
                elif "stop_lost" in i:
                    gene = i.split(",")[0].split("|")[4]
                    out.write(gene + "\tstop-lost\n")
                    break
# Entry point: produce both summaries from the command-line inputs.
summarize_genes(args.gene)
summarize_vcf(args.vcf)
| 34.957447 | 80 | 0.580037 |
6b112de583aec94b3ffeb3f20df93d1799297e18 | 8,714 | py | Python | cmdb_extend_sdk/model/health_assessment/health_assessment_result_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | 5 | 2019-07-31T04:11:05.000Z | 2021-01-07T03:23:20.000Z | cmdb_extend_sdk/model/health_assessment/health_assessment_result_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | cmdb_extend_sdk/model/health_assessment/health_assessment_result_pb2.py | easyopsapis/easyops-api-python | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: health_assessment_result.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from cmdb_extend_sdk.model.health_assessment import health_assessment_related_resource_event_score_pb2 as cmdb__extend__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__event__score__pb2
from cmdb_extend_sdk.model.health_assessment import health_assessment_event_score_config_item_pb2 as cmdb__extend__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='health_assessment_result.proto',
package='health_assessment',
syntax='proto3',
serialized_options=_b('ZKgo.easyops.local/contracts/protorepo-models/easyops/model/health_assessment'),
serialized_pb=_b('\n\x1ehealth_assessment_result.proto\x12\x11health_assessment\x1a\\cmdb_extend_sdk/model/health_assessment/health_assessment_related_resource_event_score.proto\x1aWcmdb_extend_sdk/model/health_assessment/health_assessment_event_score_config_item.proto\"\xa6\x03\n\x16HealthAssessmentResult\x12\x12\n\ninstanceId\x18\x01 \x01(\t\x12\x0e\n\x06ruleId\x18\x02 \x01(\t\x12\x15\n\rruleVersionId\x18\x03 \x01(\t\x12\x10\n\x08objectId\x18\x04 \x01(\t\x12\x12\n\neventScore\x18\x05 \x01(\x05\x12\x15\n\rrelationScore\x18\x06 \x01(\x05\x12\x13\n\x0bhealthScore\x18\x07 \x01(\x05\x12`\n\x1arelatedResourceEventScores\x18\x08 \x03(\x0b\x32<.health_assessment.HealthAssessmentRelatedResourceEventScore\x12\x18\n\x10\x65ventScoreWeight\x18\t \x01(\x05\x12\x1d\n\x15relatedResourceWeight\x18\n \x01(\x05\x12\x11\n\ttimestamp\x18\x0b \x01(\x05\x12Q\n\x10\x65ventScoreConfig\x18\x0c \x03(\x0b\x32\x37.health_assessment.HealthAssessmentEventScoreConfigItemBMZKgo.easyops.local/contracts/protorepo-models/easyops/model/health_assessmentb\x06proto3')
,
dependencies=[cmdb__extend__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__event__score__pb2.DESCRIPTOR,cmdb__extend__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2.DESCRIPTOR,])
_HEALTHASSESSMENTRESULT = _descriptor.Descriptor(
name='HealthAssessmentResult',
full_name='health_assessment.HealthAssessmentResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instanceId', full_name='health_assessment.HealthAssessmentResult.instanceId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ruleId', full_name='health_assessment.HealthAssessmentResult.ruleId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ruleVersionId', full_name='health_assessment.HealthAssessmentResult.ruleVersionId', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='objectId', full_name='health_assessment.HealthAssessmentResult.objectId', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eventScore', full_name='health_assessment.HealthAssessmentResult.eventScore', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relationScore', full_name='health_assessment.HealthAssessmentResult.relationScore', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='healthScore', full_name='health_assessment.HealthAssessmentResult.healthScore', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relatedResourceEventScores', full_name='health_assessment.HealthAssessmentResult.relatedResourceEventScores', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eventScoreWeight', full_name='health_assessment.HealthAssessmentResult.eventScoreWeight', index=8,
number=9, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='relatedResourceWeight', full_name='health_assessment.HealthAssessmentResult.relatedResourceWeight', index=9,
number=10, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='timestamp', full_name='health_assessment.HealthAssessmentResult.timestamp', index=10,
number=11, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='eventScoreConfig', full_name='health_assessment.HealthAssessmentResult.eventScoreConfig', index=11,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=237,
serialized_end=659,
)
_HEALTHASSESSMENTRESULT.fields_by_name['relatedResourceEventScores'].message_type = cmdb__extend__sdk_dot_model_dot_health__assessment_dot_health__assessment__related__resource__event__score__pb2._HEALTHASSESSMENTRELATEDRESOURCEEVENTSCORE
_HEALTHASSESSMENTRESULT.fields_by_name['eventScoreConfig'].message_type = cmdb__extend__sdk_dot_model_dot_health__assessment_dot_health__assessment__event__score__config__item__pb2._HEALTHASSESSMENTEVENTSCORECONFIGITEM
DESCRIPTOR.message_types_by_name['HealthAssessmentResult'] = _HEALTHASSESSMENTRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HealthAssessmentResult = _reflection.GeneratedProtocolMessageType('HealthAssessmentResult', (_message.Message,), {
'DESCRIPTOR' : _HEALTHASSESSMENTRESULT,
'__module__' : 'health_assessment_result_pb2'
# @@protoc_insertion_point(class_scope:health_assessment.HealthAssessmentResult)
})
_sym_db.RegisterMessage(HealthAssessmentResult)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 56.584416 | 1,050 | 0.793206 |
19bdb26b9d835f0f22a8f37599b456a3f8e1a27c | 1,128 | py | Python | src/sentinel/azext_sentinel/vendored_sdks/security_insights/models/operations_list_py3.py | hpsan/azure-cli-extensions | be1589bb6dd23837796e088d28e65e873050171e | [
"MIT"
] | null | null | null | src/sentinel/azext_sentinel/vendored_sdks/security_insights/models/operations_list_py3.py | hpsan/azure-cli-extensions | be1589bb6dd23837796e088d28e65e873050171e | [
"MIT"
] | null | null | null | src/sentinel/azext_sentinel/vendored_sdks/security_insights/models/operations_list_py3.py | hpsan/azure-cli-extensions | be1589bb6dd23837796e088d28e65e873050171e | [
"MIT"
] | 1 | 2020-07-16T23:49:49.000Z | 2020-07-16T23:49:49.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationsList(Model):
    """Lists the operations available in the SecurityInsights RP.
    All required parameters must be populated in order to send to Azure.
    :param next_link: URL to fetch the next set of operations.
    :type next_link: str
    :param value: Required. Array of operations
    :type value: list[~securityinsights.models.Operation]
    """
    # msrest validation metadata: `value` is a required attribute.
    _validation = {
        'value': {'required': True},
    }
    # Maps Python attribute names to their wire (JSON) keys and msrest types.
    _attribute_map = {
        'next_link': {'key': 'nextLink', 'type': 'str'},
        'value': {'key': 'value', 'type': '[Operation]'},
    }
    def __init__(self, *, value, next_link: str=None, **kwargs) -> None:
        super(OperationsList, self).__init__(**kwargs)
        self.next_link = next_link
        self.value = value
| 32.228571 | 76 | 0.569149 |
2ffa02235395108057145e0162597968fe4f91c9 | 11,294 | py | Python | dl/dataset/create_goods_data.py | huachao2017/goodsdl | 3616d53b90696a97a5d56a064e2a14d484b821d7 | [
"Apache-2.0"
] | 3 | 2018-10-16T09:36:12.000Z | 2019-04-15T03:12:49.000Z | dl/dataset/create_goods_data.py | huachao2017/goodsdl | 3616d53b90696a97a5d56a064e2a14d484b821d7 | [
"Apache-2.0"
] | null | null | null | dl/dataset/create_goods_data.py | huachao2017/goodsdl | 3616d53b90696a97a5d56a064e2a14d484b821d7 | [
"Apache-2.0"
] | null | null | null | import logging
import math
import os
import time
import cv2
import django
import numpy as np
from dl import common
from tradition.matcher.matcher import Matcher
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "main.settings")
django.setup()
from goods.models import ExportAction
from django.conf import settings
import tensorflow as tf
# from datasets import dataset_utils
def rotate_image(src, angle, scale=1.):
    """Rotate image *src* by *angle* degrees (optionally scaled), growing the
    canvas so no pixel is clipped.  Uncovered areas are filled with the grey
    background colour used for the desk samples."""
    height, width = src.shape[0], src.shape[1]
    theta = np.deg2rad(angle)
    # Bounding-box size of the rotated (and scaled) image.
    new_w = (abs(np.sin(theta) * height) + abs(np.cos(theta) * width)) * scale
    new_h = (abs(np.cos(theta) * height) + abs(np.sin(theta) * width)) * scale
    # Rotation about the centre of the enlarged canvas.
    transform = cv2.getRotationMatrix2D((new_w * 0.5, new_h * 0.5), angle, scale)
    # Shift so the old image centre lands on the new canvas centre; only the
    # translation column of the affine matrix changes.
    shift = np.dot(transform, np.array([(new_w - width) * 0.5, (new_h - height) * 0.5, 0]))
    transform[0, 2] += shift[0]
    transform[1, 2] += shift[1]
    return cv2.warpAffine(
        src, transform,
        (int(math.ceil(new_w)), int(math.ceil(new_h))),
        flags=cv2.INTER_LANCZOS4,
        borderValue=(160, 160, 160))  # desk-sample background colour
def solves_one_image(image_path,
                     class_name,
                     output_class_dir,
                     session_step1,
                     image_tensor_step1,
                     detection_boxes,
                     detection_scores,
                     ):
    """Detect, crop and rotation-augment one sample image.

    The image is rotated ``augment_size`` times (8 by default; once when
    augmentation is disabled or for the two ``*-stand`` classes), each
    rotation is run through the step-1 detector, and the selected box is
    cropped and saved into *output_class_dir*.  The un-rotated crop becomes
    the matcher baseline; rotated crops that do not match it are removed.

    Returns:
        (augment_total, augment_total_error): number of attempted rotations
        and how many of them failed detection or matching.
    """
    augment_total = 0
    augment_total_error = 0
    img = cv2.imread(image_path)
    augment_size = 8
    if not FLAGS.augment:
        augment_size = 1
    if class_name == 'ziptop-drink-stand' or class_name == 'bottled-drink-stand':
        augment_size = 1
    matcher = None
    for i in range(augment_size):
        angle = int(i * 360 / augment_size)
        output_image_path_augment = os.path.join(output_class_dir, "{}_augment{}.jpg".format(
            os.path.split(os.path.splitext(image_path)[0])[1], angle))
        if tf.gfile.Exists(output_image_path_augment):
            # If the output already exists do not regenerate it, so that
            # incremental generation is supported.
            break
        logging.info("image:{} rotate {}.".format(output_image_path_augment, angle))
        if angle > 0:
            rotated_img = rotate_image(img, angle)
        else:
            rotated_img = img
        im_height = rotated_img.shape[0]
        im_width = rotated_img.shape[1]
        image_np = np.asarray(rotated_img).reshape(
            (im_height, im_width, 3)).astype(np.uint8)
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        # Actual detection.
        # logging.info("begin detect...")
        (boxes, scores) = session_step1.run(
            [detection_boxes, detection_scores],
            feed_dict={image_tensor_step1: image_np_expanded})
        # logging.info("end detect...")
        # data solving
        boxes = np.squeeze(boxes)
        # classes = np.squeeze(classes).astype(np.int32)
        scores_step1 = np.squeeze(scores)
        augment_total += 1
        # Keep only confident detections (normalized [ymin, xmin, ymax, xmax]).
        new_boxes = []
        for j in range(boxes.shape[0]):
            if scores_step1[j] > 0.7:
                new_boxes.append(boxes[j])
        if len(new_boxes) <= 0:
            # Nothing detected: count the failure; at angle 0 there is no
            # baseline to match against, so further rotations are pointless.
            augment_total_error += 1
            logging.error("not detected error! image:{} ,rotate:{}.".format(image_path, angle))
            if angle == 0:
                break
        elif len(new_boxes) == 1:
            # Single confident box: crop it directly.
            ymin, xmin, ymax, xmax = new_boxes[0]
            ymin = int(ymin * im_height)
            xmin = int(xmin * im_width)
            ymax = int(ymax * im_height)
            xmax = int(xmax * im_width)
            augment_newimage = rotated_img[ymin:ymax, xmin:xmax]
            cv2.imwrite(output_image_path_augment, augment_newimage)
            if angle == 0:
                # The un-rotated crop is the reference the other angles must match.
                matcher = Matcher()
                matcher.add_baseline_image(output_image_path_augment, class_name)
            else:
                if not matcher.is_find_match(output_image_path_augment):
                    os.remove(output_image_path_augment)
                    augment_total_error += 1
                    logging.error("match error! image:{} ,rotate:{}.".format(image_path, angle))
        else:
            # Several boxes: take the detected object with the largest area,
            # ignoring boxes covering more than 90% of the frame (background).
            index = -1
            area = 0
            filter_area = im_height * im_width * .9
            for j in range(len(new_boxes)):
                # Pick the detected object with the largest area.
                ymin, xmin, ymax, xmax = new_boxes[j]
                ymin = int(ymin * im_height)
                xmin = int(xmin * im_width)
                ymax = int(ymax * im_height)
                xmax = int(xmax * im_width)
                if area < (ymax - ymin) * (xmax - xmin) and filter_area > (ymax - ymin) * (xmax - xmin):
                    area = (ymax - ymin) * (xmax - xmin)
                    index = j
            if index >= 0:
                ymin, xmin, ymax, xmax = new_boxes[index]
                ymin = int(ymin * im_height)
                xmin = int(xmin * im_width)
                ymax = int(ymax * im_height)
                xmax = int(xmax * im_width)
                # augment_newimage = augment_image.crop((xmin, ymin, xmax, ymax))
                augment_newimage = rotated_img[ymin:ymax, xmin:xmax]
                cv2.imwrite(output_image_path_augment, augment_newimage)
                if angle == 0:
                    matcher = Matcher()
                    matcher.add_baseline_image(output_image_path_augment, class_name)
                else:
                    if not matcher.is_find_match(output_image_path_augment):
                        os.remove(output_image_path_augment)
                        augment_total_error += 1
                        logging.error("match error! image:{} ,rotate:{}.".format(image_path, angle))
            else:
                # Every box was filtered out (all too large): treat as not detected.
                augment_total_error += 1
                logging.error("not detected error! image:{} ,rotate:{}.".format(output_image_path_augment, angle))
                if angle == 0:
                    break
    return augment_total, augment_total_error
def create_step2_goods_V2(data_dir, output_dir, step1_model_path, dir_day_hour=None):
    """Run the frozen step-1 detector over every class directory in *data_dir*
    and write cropped/augmented samples into the mirrored *output_dir* layout.

    Args:
        data_dir: root directory containing one sub-directory per goods class.
        output_dir: destination root; one sub-directory per class is created.
        step1_model_path: path to the frozen step-1 detection graph (.pb).
        dir_day_hour: optional '%d%H' stamp; when given, only class
            directories whose mtime matches it are processed (incremental mode).
    """
    # Load the frozen detection graph into a dedicated tf.Graph.
    graph_step1 = tf.Graph()
    with graph_step1.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(step1_model_path, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    # Grow GPU memory on demand instead of grabbing it all up front.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    # config.gpu_options.per_process_gpu_memory_fraction = 0.5  # use 50% of GPU memory
    session_step1 = tf.Session(graph=graph_step1, config=config)
    # Definite input and output Tensors for detection_graph
    image_tensor_step1 = graph_step1.get_tensor_by_name('image_tensor:0')
    # Each box represents a part of the image where a particular object was detected.
    detection_boxes = graph_step1.get_tensor_by_name('detection_boxes:0')
    # Each score represent how level of confidence for each of the objects.
    # Score is shown on the result image, together with the class label.
    detection_scores = graph_step1.get_tensor_by_name('detection_scores:0')
    # class_names = get_class_names(os.path.join(os.path.dirname(step1_model_path), dataset_utils.LABELS_FILENAME))
    # (stray string literal left by the author; means "return all image file paths")
    """返回所有图片文件路径"""
    augment_total = 0
    augment_total_error = 0
    dirlist = os.listdir(data_dir)  # every entry (dirs and files) under data_dir
    for i in range(0, len(dirlist)):
        # Formerly filtered classes through the step-1 label map; no longer
        # needed since the step-1 detector should generalize.
        # if dirlist[i] not in class_names:
        #     continue
        class_name = dirlist[i]
        class_dir = os.path.join(data_dir, class_name)
        if os.path.isdir(class_dir):
            if dir_day_hour is not None:
                # Incremental mode: skip classes not modified at the given day+hour.
                cur_dir_day_hour = time.strftime('%d%H', time.localtime(os.path.getmtime(class_dir)))
                if cur_dir_day_hour != dir_day_hour:
                    logging.info('skip class:{}({})'.format(class_name, cur_dir_day_hour))
                    continue
            logging.info('solve class:{}'.format(class_name))
            output_class_dir = os.path.join(output_dir, class_name)
            if not tf.gfile.Exists(output_class_dir):
                tf.gfile.MakeDirs(output_class_dir)
            filelist = os.listdir(class_dir)
            for j in range(0, len(filelist)):
                image_path = os.path.join(class_dir, filelist[j])
                prefix = filelist[j].split('_')[0]
                example, ext = os.path.splitext(image_path)
                # Only plain .jpg samples; 'visual_*' files are debug renderings.
                if ext == ".jpg" and prefix != 'visual':
                    logging.info('solve image:{}'.format(image_path))
                    one_augment_total, one_augment_total_error = solves_one_image(
                        image_path,
                        class_name,
                        output_class_dir,
                        session_step1,
                        image_tensor_step1,
                        detection_boxes,
                        detection_scores
                    )
                    augment_total += one_augment_total
                    augment_total_error += one_augment_total_error
    logging.info("augment complete: {}/{}".format(augment_total_error, augment_total))
    session_step1.close()
# Command-line flags (tf.app.flags).  `step` chooses the output prefix
# ('step2' or 'step2S'); the serial suffixes select the source/destination
# dataset directories; `day_hour` optionally restricts processing to class
# directories modified at that day+hour (see create_step2_goods_V2).
tf.app.flags.DEFINE_string(
    'step', 'step2',
    'step type')
tf.app.flags.DEFINE_string(
    'source_dir_serial', '',
    'source dir serial')
tf.app.flags.DEFINE_string(
    'dest_dir_serial', '',
    'dest dir serial')
tf.app.flags.DEFINE_string(
    'day_hour', None,
    'day and hour')
tf.app.flags.DEFINE_string(
    'device', "0",
    'device id')
tf.app.flags.DEFINE_boolean(
    'augment', True,
    'augment or not')
# Singleton namespace holding the parsed flag values.
FLAGS = tf.app.flags.FLAGS
def main(_):
    """Resolve source/output directories from flags, pick the latest exported
    step-1 model from the database, and run the augmentation pipeline."""
    # if not FLAGS.day_hour:
    #     raise ValueError('You must supply day and hour --day_hour')
    # Pin detection to the requested GPU.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.device
    logger = logging.getLogger()
    logger.setLevel('INFO')
    dataset_dir = os.path.join(settings.MEDIA_ROOT, settings.DATASET_DIR_NAME)
    # Source is 'data_new' or 'data_new_<serial>' depending on the flag.
    source_dir = os.path.join(dataset_dir, 'data_new{}'.format(FLAGS.source_dir_serial if FLAGS.source_dir_serial=='' else '_'+FLAGS.source_dir_serial))
    # NOTE(review): any --step value other than 'step2'/'step2S' leaves
    # output_dir unbound and crashes below.
    if FLAGS.step == 'step2':
        output_dir = os.path.join(dataset_dir,
                                  common.STEP2_PREFIX if FLAGS.dest_dir_serial=='' else common.STEP2_PREFIX+'_'+FLAGS.dest_dir_serial)
    elif FLAGS.step == 'step2S':
        output_dir = os.path.join(dataset_dir,
                                  common.STEP2S_PREFIX if FLAGS.dest_dir_serial == '' else common.STEP2S_PREFIX + '_' + FLAGS.dest_dir_serial)
    # Most recently updated 'T1' export decides which frozen graph to load.
    export1s = ExportAction.objects.filter(train_action__action='T1').order_by('-update_time')[:1]
    step1_model_path = os.path.join(settings.BASE_DIR, 'dl', 'model', str(export1s[0].pk), 'frozen_inference_graph.pb')
    create_step2_goods_V2(source_dir, output_dir, step1_model_path, dir_day_hour=FLAGS.day_hour)
if __name__ == '__main__':
    tf.app.run()
| 41.522059 | 152 | 0.602975 |
36b942a195d2b35d6957c2d4af1b44dcbcd0d839 | 21,552 | py | Python | setup.py | ppunktw/web3-livepeer-bot | bec63674e4d9bb07150c115fe86055981865f489 | [
"MIT"
] | 1 | 2022-02-21T21:48:09.000Z | 2022-02-21T21:48:09.000Z | setup.py | ppunktw/web3-livepeer-bot | bec63674e4d9bb07150c115fe86055981865f489 | [
"MIT"
] | 2 | 2021-10-02T02:12:24.000Z | 2022-03-17T13:13:46.000Z | setup.py | ppunktw/web3-livepeer-bot | bec63674e4d9bb07150c115fe86055981865f489 | [
"MIT"
] | 1 | 2022-02-22T18:54:24.000Z | 2022-02-22T18:54:24.000Z | import requests
# Web3
# Websocket RPC endpoints -- replace the <...-ID> placeholders with real keys.
WS_MAINNET_INFURA = "wss://mainnet.infura.io/ws/v3/<INFURA-ID>"
WS_ARBITRUM_ALCHEMY = "wss://arb-mainnet.g.alchemy.com/v2/<ALCHEMY-ID>"
# Telegram
# NOTE(review): <CHAT-ID> is a bare placeholder, not valid Python -- the
# module does not import until it is replaced with a numeric chat id.
MY_TELEGRAM_ID = <CHAT-ID>
TEL_TOKEN = "<TELEGRAM-BOT-TOKEN>"
TEL_URL = "https://api.telegram.org/bot{}/".format(TEL_TOKEN)
def send_message(text, chat_id):
    """Send *text* to Telegram chat *chat_id* via the Bot API (best effort).

    Failures are printed and swallowed so that a Telegram outage never
    crashes the caller.
    """
    try:
        # Let `requests` build the query string: this URL-encodes `text`,
        # so messages containing '&', '#', '+', etc. are no longer truncated
        # or mangled as they were with naive string interpolation.
        requests.get(
            TEL_URL + "sendMessage",
            params={"text": text, "chat_id": chat_id, "parse_mode": "markdown"},
            timeout=10,  # never hang the bot forever on a stuck connection
        )
    except Exception as ex:
        print(ex)
# Contracts
ROUND_MANAGER_PROXY = "0xdd6f56DcC28D3F5f27084381fE8Df634985cc39f"
ROUND_MANAGER_ABI = '[{"inputs":[{"internalType":"address","name":"_controller","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"uint256","name":"round","type":"uint256"},{"indexed":false,"internalType":"bytes32","name":"blockHash","type":"bytes32"}],"name":"NewRound","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"param","type":"string"}],"name":"ParameterUpdate","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"controller","type":"address"}],"name":"SetController","type":"event"},{"inputs":[{"internalType":"uint256","name":"_block","type":"uint256"}],"name":"blockHash","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_round","type":"uint256"}],"name":"blockHashForRound","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"blockNum","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"controller","outputs":[{"internalType":"contract 
IController","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"currentRound","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"currentRoundInitialized","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"currentRoundLocked","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"currentRoundStartBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"initializeRound","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"lastInitializedRound","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"lastRoundLengthUpdateRound","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"lastRoundLengthUpdateStartBlock","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"","type":"uint256"}],"name":"lipUpgradeRound","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"roundLength","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"roundLockAmount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_controller","type":"address"}],"name":"setController","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_lip","type":"uint256"},{"internalType":"
uint256","name":"_round","type":"uint256"}],"name":"setLIPUpgradeRound","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_roundLength","type":"uint256"}],"name":"setRoundLength","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_roundLockAmount","type":"uint256"}],"name":"setRoundLockAmount","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"targetContractId","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"}]'
BONDING_MANAGER_PROXY = "0x35Bcf3c30594191d53231E4FF333E8A770453e40"
BONDING_MANAGER_ABI = '[{"inputs":[{"internalType":"address","name":"_controller","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"newDelegate","type":"address"},{"indexed":true,"internalType":"address","name":"oldDelegate","type":"address"},{"indexed":true,"internalType":"address","name":"delegator","type":"address"},{"indexed":false,"internalType":"uint256","name":"additionalAmount","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"bondedAmount","type":"uint256"}],"name":"Bond","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"delegate","type":"address"},{"indexed":true,"internalType":"address","name":"delegator","type":"address"},{"indexed":false,"internalType":"uint256","name":"rewards","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"fees","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"startRound","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"endRound","type":"uint256"}],"name":"EarningsClaimed","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"string","name":"param","type":"string"}],"name":"ParameterUpdate","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"delegate","type":"address"},{"indexed":true,"internalType":"address","name":"delegator","type":"address"},{"indexed":false,"internalType":"uint256","name":"unbondingLockId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"Rebond","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transcoder","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"Reward","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"address","name":"controller","type":"address
"}],"name":"SetController","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transcoder","type":"address"},{"indexed":false,"internalType":"uint256","name":"activationRound","type":"uint256"}],"name":"TranscoderActivated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transcoder","type":"address"},{"indexed":false,"internalType":"uint256","name":"deactivationRound","type":"uint256"}],"name":"TranscoderDeactivated","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transcoder","type":"address"},{"indexed":false,"internalType":"address","name":"finder","type":"address"},{"indexed":false,"internalType":"uint256","name":"penalty","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"finderReward","type":"uint256"}],"name":"TranscoderSlashed","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"transcoder","type":"address"},{"indexed":false,"internalType":"uint256","name":"rewardCut","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"feeShare","type":"uint256"}],"name":"TranscoderUpdate","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"oldDelegator","type":"address"},{"indexed":true,"internalType":"address","name":"newDelegator","type":"address"},{"indexed":false,"internalType":"uint256","name":"oldUnbondingLockId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"newUnbondingLockId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"TransferBond","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"delegate","type":"address"},{"indexed":true,"internalType":"address","name":"delegator","type":"address"},{"indexed":false,"internalType":"uint256","name":"unbondingLockId","type":"uint256"},{"indexed":false,"internalTyp
e":"uint256","name":"amount","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"withdrawRound","type":"uint256"}],"name":"Unbond","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"delegator","type":"address"},{"indexed":false,"internalType":"address","name":"recipient","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"}],"name":"WithdrawFees","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"delegator","type":"address"},{"indexed":false,"internalType":"uint256","name":"unbondingLockId","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"withdrawRound","type":"uint256"}],"name":"WithdrawStake","type":"event"},{"inputs":[{"internalType":"uint256","name":"_amount","type":"uint256"},{"internalType":"address","name":"_to","type":"address"}],"name":"bond","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_amount","type":"uint256"},{"internalType":"address","name":"_owner","type":"address"},{"internalType":"address","name":"_to","type":"address"},{"internalType":"address","name":"_oldDelegateNewPosPrev","type":"address"},{"internalType":"address","name":"_oldDelegateNewPosNext","type":"address"},{"internalType":"address","name":"_currDelegateNewPosPrev","type":"address"},{"internalType":"address","name":"_currDelegateNewPosNext","type":"address"}],"name":"bondForWithHint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_amount","type":"uint256"},{"internalType":"address","name":"_to","type":"address"},{"internalType":"address","name":"_oldDelegateNewPosPrev","type":"address"},{"internalType":"address","name":"_oldDelegateNewPosNext","type":"address"},{"internalType":"address","name":"_currDelegateNewPosPrev","type":"addres
s"},{"internalType":"address","name":"_currDelegateNewPosNext","type":"address"}],"name":"bondWithHint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_endRound","type":"uint256"}],"name":"claimEarnings","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"controller","outputs":[{"internalType":"contract IController","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"currentRoundTotalActiveStake","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_delegator","type":"address"}],"name":"delegatorStatus","outputs":[{"internalType":"enum BondingManager.DelegatorStatus","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_delegator","type":"address"}],"name":"getDelegator","outputs":[{"internalType":"uint256","name":"bondedAmount","type":"uint256"},{"internalType":"uint256","name":"fees","type":"uint256"},{"internalType":"address","name":"delegateAddress","type":"address"},{"internalType":"uint256","name":"delegatedAmount","type":"uint256"},{"internalType":"uint256","name":"startRound","type":"uint256"},{"internalType":"uint256","name":"lastClaimRound","type":"uint256"},{"internalType":"uint256","name":"nextUnbondingLockId","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_delegator","type":"address"},{"internalType":"uint256","name":"_unbondingLockId","type":"uint256"}],"name":"getDelegatorUnbondingLock","outputs":[{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint256","name":"withdrawRound","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getFirstTranscoderInPool","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"v
iew","type":"function"},{"inputs":[{"internalType":"address","name":"_transcoder","type":"address"}],"name":"getNextTranscoderInPool","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getTotalBonded","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_transcoder","type":"address"}],"name":"getTranscoder","outputs":[{"internalType":"uint256","name":"lastRewardRound","type":"uint256"},{"internalType":"uint256","name":"rewardCut","type":"uint256"},{"internalType":"uint256","name":"feeShare","type":"uint256"},{"internalType":"uint256","name":"lastActiveStakeUpdateRound","type":"uint256"},{"internalType":"uint256","name":"activationRound","type":"uint256"},{"internalType":"uint256","name":"deactivationRound","type":"uint256"},{"internalType":"uint256","name":"activeCumulativeRewards","type":"uint256"},{"internalType":"uint256","name":"cumulativeRewards","type":"uint256"},{"internalType":"uint256","name":"cumulativeFees","type":"uint256"},{"internalType":"uint256","name":"lastFeeRound","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_transcoder","type":"address"},{"internalType":"uint256","name":"_round","type":"uint256"}],"name":"getTranscoderEarningsPoolForRound","outputs":[{"internalType":"uint256","name":"totalStake","type":"uint256"},{"internalType":"uint256","name":"transcoderRewardCut","type":"uint256"},{"internalType":"uint256","name":"transcoderFeeShare","type":"uint256"},{"internalType":"uint256","name":"cumulativeRewardFactor","type":"uint256"},{"internalType":"uint256","name":"cumulativeFeeFactor","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getTranscoderPoolMaxSize","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inpu
ts":[],"name":"getTranscoderPoolSize","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_transcoder","type":"address"}],"name":"isActiveTranscoder","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_transcoder","type":"address"}],"name":"isRegisteredTranscoder","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_delegator","type":"address"},{"internalType":"uint256","name":"_unbondingLockId","type":"uint256"}],"name":"isValidUnbondingLock","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"nextRoundTotalActiveStake","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_delegator","type":"address"},{"internalType":"uint256","name":"_endRound","type":"uint256"}],"name":"pendingFees","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_delegator","type":"address"},{"internalType":"uint256","name":"_endRound","type":"uint256"}],"name":"pendingStake","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_unbondingLockId","type":"uint256"}],"name":"rebond","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"address"},{"internalType":"uint256","name":"_unbondingLockId","type":"uint256"}],"name":"rebondFromUnbonded","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_to","type":"ad
dress"},{"internalType":"uint256","name":"_unbondingLockId","type":"uint256"},{"internalType":"address","name":"_newPosPrev","type":"address"},{"internalType":"address","name":"_newPosNext","type":"address"}],"name":"rebondFromUnbondedWithHint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_unbondingLockId","type":"uint256"},{"internalType":"address","name":"_newPosPrev","type":"address"},{"internalType":"address","name":"_newPosNext","type":"address"}],"name":"rebondWithHint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"reward","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_newPosPrev","type":"address"},{"internalType":"address","name":"_newPosNext","type":"address"}],"name":"rewardWithHint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_controller","type":"address"}],"name":"setController","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"setCurrentRoundTotalActiveStake","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_numActiveTranscoders","type":"uint256"}],"name":"setNumActiveTranscoders","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint64","name":"_unbondingPeriod","type":"uint64"}],"name":"setUnbondingPeriod","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transcoder","type":"address"},{"internalType":"address","name":"_finder","type":"address"},{"internalType":"uint256","name":"_slashAmount","type":"uint256"},{"internalType":"uint256","name":"_finderFee","type":"uint256"}],"name":"slashTranscoder","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"targetContractId","outputs":[{"internalType":"bytes32","name"
:"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_rewardCut","type":"uint256"},{"internalType":"uint256","name":"_feeShare","type":"uint256"}],"name":"transcoder","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_transcoder","type":"address"}],"name":"transcoderStatus","outputs":[{"internalType":"enum BondingManager.TranscoderStatus","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_transcoder","type":"address"}],"name":"transcoderTotalStake","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"_rewardCut","type":"uint256"},{"internalType":"uint256","name":"_feeShare","type":"uint256"},{"internalType":"address","name":"_newPosPrev","type":"address"},{"internalType":"address","name":"_newPosNext","type":"address"}],"name":"transcoderWithHint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_delegator","type":"address"},{"internalType":"uint256","name":"_amount","type":"uint256"},{"internalType":"address","name":"_oldDelegateNewPosPrev","type":"address"},{"internalType":"address","name":"_oldDelegateNewPosNext","type":"address"},{"internalType":"address","name":"_newDelegateNewPosPrev","type":"address"},{"internalType":"address","name":"_newDelegateNewPosNext","type":"address"}],"name":"transferBond","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"unbond","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_amount","type":"uint256"},{"internalType":"address","name":"_newPosPrev","type":"address"},{"internalType":"address","name":"_newPosNext","type":"address"}],"name":"unbon
dWithHint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"unbondingPeriod","outputs":[{"internalType":"uint64","name":"","type":"uint64"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_transcoder","type":"address"},{"internalType":"uint256","name":"_fees","type":"uint256"},{"internalType":"uint256","name":"_round","type":"uint256"}],"name":"updateTranscoderWithFees","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address payable","name":"_recipient","type":"address"},{"internalType":"uint256","name":"_amount","type":"uint256"}],"name":"withdrawFees","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"_unbondingLockId","type":"uint256"}],"name":"withdrawStake","outputs":[],"stateMutability":"nonpayable","type":"function"}]'
TICKET_BROKER_PROXY = "0xa8bB618B1520E284046F3dFc448851A1Ff26e41B"
| 798.222222 | 16,912 | 0.689402 |
f1b81dcdec7d015fbb25e851e867a5f3acafcd67 | 272 | py | Python | URI JUDGE ONLINE/1177.py | mee-akie/Desafios-de-programacao-JAVA | 8bfdd490ea7e17599c8cd3070b5750bb5c5c38a3 | [
"MIT"
] | null | null | null | URI JUDGE ONLINE/1177.py | mee-akie/Desafios-de-programacao-JAVA | 8bfdd490ea7e17599c8cd3070b5750bb5c5c38a3 | [
"MIT"
] | null | null | null | URI JUDGE ONLINE/1177.py | mee-akie/Desafios-de-programacao-JAVA | 8bfdd490ea7e17599c8cd3070b5750bb5c5c38a3 | [
"MIT"
] | null | null | null | T = int(input())
# The original code built the descending list [T-1 .. 0], reversed it into
# [0 .. T-1], tiled it 1000 times and printed the first 1000 entries.
# Entry k of that tiling is simply k % T, so we can print directly without
# the unused T2 variable or the O(T * 1000) intermediate lists.
for k in range(1000):
    print(f"N[{k}] = {k % T}")
83ab5d1cd97414bdbcad5a34310a0373dd79ce1b | 5,312 | py | Python | docs/conf.py | tannewt/Adafruit_CircuitPython_TMP007 | e19423ac685c03992185d7f911a9db1e91299b5c | [
"MIT"
] | null | null | null | docs/conf.py | tannewt/Adafruit_CircuitPython_TMP007 | e19423ac685c03992185d7f911a9db1e91299b5c | [
"MIT"
] | null | null | null | docs/conf.py | tannewt/Adafruit_CircuitPython_TMP007 | e19423ac685c03992185d7f911a9db1e91299b5c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
]
# TODO: Please Read!
# Uncomment the below if you use native CircuitPython modules such as
# digitalio, micropython and busio. List the modules you use. Without it, the
# autodoc module docs will fail to generate with a warning.
# autodoc_mock_imports = ["micropython", "adafruit_bus_device"]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'BusDevice': ('https://circuitpython.readthedocs.io/projects/busdevice/en/latest/', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Adafruit TMP007 Library'
copyright = u'2018 Jerry Needell'
author = u'Jerry Needell'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '.env', 'CODE_OF_CONDUCT.md']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# If this is True, todo emits a warning for each TODO entries. The default is False.
todo_emit_warnings = True
napoleon_numpy_docstring = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# Read the Docs injects READTHEDOCS=True into its build environment; in that
# case the theme is provided by the platform. Locally we prefer the RTD theme
# when it is installed and degrade gracefully to Sphinx's default otherwise.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:  # only import and set the theme if we're building docs locally
    try:
        import sphinx_rtd_theme
        html_theme = 'sphinx_rtd_theme'
        html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
    except ImportError:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; only a missing theme is expected here.
        html_theme = 'default'
        html_theme_path = ['.']
else:
    html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = '_static/favicon.ico'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitTmp007Librarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AdafruitTMP007Library.tex', u'AdafruitTMP007 Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'AdafruitTMP007library', u'Adafruit TMP007 Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AdafruitTMP007Library', u'Adafruit TMP007 Library Documentation',
author, 'AdafruitTMP007Library', 'One line description of project.',
'Miscellaneous'),
]
| 32.993789 | 236 | 0.686747 |
321fd8df700d05739d2c9e2c88a605759308795d | 6,370 | py | Python | mergify_engine/engine/queue_runner.py | Zerdanes/mergify-engine | 36e379c8513821d1a74c355bf8be7477afbdc9c6 | [
"Apache-2.0"
] | 1 | 2021-09-09T12:52:15.000Z | 2021-09-09T12:52:15.000Z | mergify_engine/engine/queue_runner.py | Zerdanes/mergify-engine | 36e379c8513821d1a74c355bf8be7477afbdc9c6 | [
"Apache-2.0"
] | null | null | null | mergify_engine/engine/queue_runner.py | Zerdanes/mergify-engine | 36e379c8513821d1a74c355bf8be7477afbdc9c6 | [
"Apache-2.0"
] | null | null | null | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import typing
from first import first
from mergify_engine import check_api
from mergify_engine import context
from mergify_engine import delayed_refresh
from mergify_engine import github_types
from mergify_engine import rules
from mergify_engine.actions import merge_base
from mergify_engine.queue import merge_train
async def have_unexpected_draft_pull_request_changes(
    ctxt: context.Context, car: merge_train.TrainCar
) -> bool:
    """Tell whether the draft pull request backing *car* changed unexpectedly.

    Three situations disqualify the car: the base branch sha moved since the
    car was created, someone pushed to (synchronized) the draft pull request,
    or a closed/reopened pull_request event was received for it.
    """
    if ctxt.pull["base"]["sha"] != car.initial_current_base_sha:
        ctxt.log.info(
            "train car has an unexpected base sha change",
            base_sha=ctxt.pull["base"]["sha"],
            initial_current_base_sha=car.initial_current_base_sha,
        )
        return True
    if await ctxt.has_been_synchronized_by_user():
        ctxt.log.info(
            "train car has unexpectedly been synchronized",
        )
        return True
    # Scan the received events for a pull_request closed/reopened action.
    for source in ctxt.sources:
        if source["event_type"] != "pull_request":
            continue
        event = typing.cast(github_types.GitHubEventPullRequest, source["data"])
        if event["action"] not in ("closed", "reopened"):
            continue
        ctxt.log.debug(
            "train car received an unexpected event",
            unexpected_event=source,
        )
        return True
    return False
async def handle(queue_rules: rules.QueueRules, ctxt: context.Context) -> None:
    """Process an event received on a merge-train temporary (draft) pull request.

    Re-evaluates the queue rule for the pull requests embarked in the train
    car backing this draft PR, publishes the resulting status on the car's
    summaries, and resets the whole train when an unexpected change
    invalidates the car.
    """
    # FIXME: Maybe create a command to force the retesting to put back the PR in the queue?
    if ctxt.closed:
        ctxt.log.info(
            "train car temporary pull request has been closed", sources=ctxt.sources
        )
        return
    train = await merge_train.Train.from_context(ctxt)
    car = train.get_car_by_tmp_pull(ctxt)
    if not car:
        # Draft PR is open but no car references it anymore (e.g. stale event).
        ctxt.log.warning(
            "train car not found for an opened merge queue pull request",
            sources=ctxt.sources,
        )
        return
    if car.queue_pull_request_number is None:
        # Invariant: a car matched by its tmp pull must carry that pull number.
        raise RuntimeError(
            "Got draft pull request event on car without queue_pull_request_number"
        )
    ctxt.log.info(
        "handling train car temporary pull request event",
        sources=ctxt.sources,
        gh_pulls_queued=[
            ep.user_pull_request_number for ep in car.still_queued_embarked_pulls
        ],
    )
    # All embarked pulls of a car share the same queue configuration.
    queue_name = car.still_queued_embarked_pulls[0].config["name"]
    try:
        queue_rule = queue_rules[queue_name]
    except KeyError:
        # The queue was removed from the configuration since the car embarked.
        ctxt.log.warning(
            "queue_rule not found for this train car",
            gh_pulls_queued=[
                ep.user_pull_request_number for ep in car.still_queued_embarked_pulls
            ],
            queue_rules=queue_rules,
            queue_name=queue_name,
        )
        return
    pull_requests = await car.get_pull_requests_to_evaluate()
    evaluated_queue_rule = await queue_rule.get_pull_request_rule(
        ctxt.repository, ctxt.pull["base"]["ref"], pull_requests
    )
    # Schedule future re-evaluations (e.g. for time-based conditions).
    for pull_request in pull_requests:
        await delayed_refresh.plan_next_refresh(
            ctxt, [evaluated_queue_rule], pull_request
        )
    if not ctxt.sources:
        # NOTE(sileht): Only comment/command, don't need to go further
        return None
    # Determine whether something invalidated this car: either the draft PR
    # itself changed, or the train fell out of sync with the base branch.
    unexpected_changes: typing.Optional[merge_train.UnexpectedChange] = None
    if await have_unexpected_draft_pull_request_changes(ctxt, car):
        unexpected_changes = merge_train.UnexpectedDraftPullRequestChange(
            car.queue_pull_request_number
        )
    else:
        current_base_sha = await train.get_base_sha()
        if not await train.is_synced_with_the_base_branch(current_base_sha):
            unexpected_changes = merge_train.UnexpectedBaseBranchChange(
                current_base_sha
            )
    if unexpected_changes is None:
        # NOTE(review): `pull_request` below is the leftover loop variable from
        # the plan_next_refresh loop above (its last element) — presumably
        # intended, but confirm it should not be `pull_requests`.
        real_status = status = await merge_base.get_rule_checks_status(
            ctxt.log,
            [pull_request],
            evaluated_queue_rule,
            unmatched_conditions_return_failure=False,
        )
        if real_status == check_api.Conclusion.FAILURE and (
            not car.has_previous_car_status_succeeded()
            or len(car.initial_embarked_pulls) != 1
        ):
            # NOTE(sileht): we can't set it as failed as we don't know
            # yet which pull request is responsible for the failure.
            # * one of the batch ?
            # * one of the parent car ?
            status = check_api.Conclusion.PENDING
    else:
        real_status = status = check_api.Conclusion.PENDING
    ctxt.log.info(
        "train car temporary pull request evaluation",
        gh_pull_queued=[
            ep.user_pull_request_number for ep in car.still_queued_embarked_pulls
        ],
        evaluated_queue_rule=evaluated_queue_rule.conditions.get_summary(),
        unexpected_changes=unexpected_changes,
        temporary_status=status,
        real_status=real_status,
        event_types=[se["event_type"] for se in ctxt.sources],
    )
    # Publish both the reported status (possibly held back to PENDING) and
    # the real one, then persist the train state.
    await car.update_summaries(
        status,
        real_status,
        evaluated_queue_rule=evaluated_queue_rule,
        unexpected_change=unexpected_changes,
    )
    await train.save()
    if unexpected_changes:
        # The car can no longer be trusted: rebuild the train from scratch
        # and tell the user why on the original pull request.
        ctxt.log.info(
            "train will be reset",
            gh_pull_queued=[
                ep.user_pull_request_number for ep in car.still_queued_embarked_pulls
            ],
            unexpected_changes=unexpected_changes,
        )
        await train.reset(unexpected_changes)
        await ctxt.client.post(
            f"{ctxt.base_url}/issues/{ctxt.pull['number']}/comments",
            json={
                "body": f"This pull request has unexpected changes: {unexpected_changes}. The whole train will be reset."
            },
        )
| 34.247312 | 121 | 0.660283 |
94aadbc79c9bf7442ebe79a00954e1a5210b97ee | 9,019 | py | Python | network/modeling.py | SUMIN97/deeplab_pac | b32a64692c4d0411e1709b834cbc174f065dfc6a | [
"MIT"
] | null | null | null | network/modeling.py | SUMIN97/deeplab_pac | b32a64692c4d0411e1709b834cbc174f065dfc6a | [
"MIT"
] | null | null | null | network/modeling.py | SUMIN97/deeplab_pac | b32a64692c4d0411e1709b834cbc174f065dfc6a | [
"MIT"
] | null | null | null | from .utils import IntermediateLayerGetter
from ._deeplab import DeepLabHead, DeepLabHeadV3Plus, DeepLabV3, PanopticDeepLab
from .backbone import resnet
from .backbone import mobilenetv2
from .backbone import hrnetv2
def _segm_hrnet(name, backbone_name, num_classes, pretrained_backbone):
backbone = hrnetv2.__dict__[backbone_name](pretrained_backbone)
# HRNetV2 config:
# the final output channels is dependent on highest resolution channel config (c).
# output of backbone will be the inplanes to assp:
hrnet_channels = int(backbone_name.split('_')[-1])
inplanes = sum([hrnet_channels * 2 ** i for i in range(4)])
low_level_planes = 256 # all hrnet version channel output from bottleneck is the same
aspp_dilate = [12, 24, 36] # If follow paper trend, can put [24, 48, 72].
if name=='deeplabv3plus':
return_layers = {'stage4': 'out', 'layer1': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3':
return_layers = {'stage4': 'out'}
classifier = DeepLabHead(inplanes, num_classes, aspp_dilate)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers, hrnet_flag=True)
model = DeepLabV3(backbone, classifier)
return model
def _segm_resnet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
if output_stride==8:
replace_stride_with_dilation=[False, True, True]
aspp_dilate = [12, 24, 36]
else:
replace_stride_with_dilation=[False, False, True]
aspp_dilate = [6, 12, 18]
backbone = resnet.__dict__[backbone_name](
pretrained=pretrained_backbone,
replace_stride_with_dilation=replace_stride_with_dilation)
inplanes = 2048
low_level_planes = 256
if name=='deeplabv3plus':
return_layers = {'layer4': 'out', 'layer1': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3':
return_layers = {'layer4': 'out'}
classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
model = DeepLabV3(backbone, classifier)
return model
def _segm_mobilenet(name, backbone_name, num_classes, output_stride, pretrained_backbone):
if output_stride==8:
aspp_dilate = [12, 24, 36]
else:
aspp_dilate = [6, 12, 18]
backbone = mobilenetv2.mobilenet_v2(pretrained=pretrained_backbone, output_stride=output_stride)
# rename layers
backbone.low_level_features = backbone.features[0:4]
backbone.high_level_features = backbone.features[4:-1]
backbone.features = None
backbone.classifier = None
inplanes = 320
low_level_planes = 24
if name=='deeplabv3plus':
return_layers = {'high_level_features': 'out', 'low_level_features': 'low_level'}
classifier = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
elif name=='deeplabv3':
return_layers = {'high_level_features': 'out'}
classifier = DeepLabHead(inplanes , num_classes, aspp_dilate)
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
model = DeepLabV3(backbone, classifier)
return model
def _load_model(arch_type, backbone, num_classes, output_stride, pretrained_backbone):
if backbone=='mobilenetv2':
model = _segm_mobilenet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
elif backbone.startswith('resnet'):
model = _segm_resnet(arch_type, backbone, num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
elif backbone.startswith('hrnetv2'):
model = _segm_hrnet(arch_type, backbone, num_classes, pretrained_backbone=pretrained_backbone)
else:
raise NotImplementedError
return model
# Deeplab v3
def deeplabv3_hrnetv2_48(num_classes=21, output_stride=4, pretrained_backbone=False): # no pretrained backbone yet
return _load_model('deeplabv3', 'hrnetv2_48', output_stride, num_classes, pretrained_backbone=pretrained_backbone)
def deeplabv3_hrnetv2_32(num_classes=21, output_stride=4, pretrained_backbone=True):
return _load_model('deeplabv3', 'hrnetv2_32', output_stride, num_classes, pretrained_backbone=pretrained_backbone)
def deeplabv3_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True, **kwargs):
"""Constructs a DeepLabV3 model with a MobileNetv2 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
# Deeplab v3+
def deeplabv3plus_hrnetv2_48(num_classes=21, output_stride=4, pretrained_backbone=False): # no pretrained backbone yet
return _load_model('deeplabv3plus', 'hrnetv2_48', num_classes, output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_hrnetv2_32(num_classes=21, output_stride=4, pretrained_backbone=True):
return _load_model('deeplabv3plus', 'hrnetv2_32', num_classes, output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_resnet50(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'resnet50', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_resnet50_two_branch(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3 model with a ResNet-50 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
if output_stride == 8:
replace_stride_with_dilation = [False, True, True]
aspp_dilate = [12, 24, 36]
else:
replace_stride_with_dilation = [False, False, True]
aspp_dilate = [6, 12, 18]
backbone = resnet.__dict__['resnet50'](
pretrained=pretrained_backbone,
replace_stride_with_dilation=replace_stride_with_dilation)
inplanes = 2048
low_level_planes = 256
num_neighbors = 56
return_layers = {'layer4': 'out', 'layer1': 'low_level'}
backbone = IntermediateLayerGetter(backbone, return_layers=return_layers)
semantic_branch = DeepLabHeadV3Plus(inplanes, low_level_planes, num_classes, aspp_dilate)
neighbor_branch = DeepLabHeadV3Plus(inplanes, low_level_planes, num_neighbors, aspp_dilate)
model = PanopticDeepLab(backbone, semantic_branch, neighbor_branch)
return model
def deeplabv3plus_resnet101(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a ResNet-101 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'resnet101', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone)
def deeplabv3plus_mobilenet(num_classes=21, output_stride=8, pretrained_backbone=True):
"""Constructs a DeepLabV3+ model with a MobileNetv2 backbone.
Args:
num_classes (int): number of classes.
output_stride (int): output stride for deeplab.
pretrained_backbone (bool): If True, use the pretrained backbone.
"""
return _load_model('deeplabv3plus', 'mobilenetv2', num_classes, output_stride=output_stride, pretrained_backbone=pretrained_backbone) | 43.360577 | 137 | 0.741657 |
1ac9e88f4f43dd613109e266d9f6dcf4c9da149d | 6,307 | py | Python | KosarajuSCC.py | fxie520/Coursera-Algo-Specialization | 4ae744ec2a578ac19465305077427eb0fa44a7e9 | [
"MIT"
] | null | null | null | KosarajuSCC.py | fxie520/Coursera-Algo-Specialization | 4ae744ec2a578ac19465305077427eb0fa44a7e9 | [
"MIT"
] | null | null | null | KosarajuSCC.py | fxie520/Coursera-Algo-Specialization | 4ae744ec2a578ac19465305077427eb0fa44a7e9 | [
"MIT"
] | null | null | null | import sys
from collections import deque
from collections import Counter
import time
# Raise the interpreter recursion limit (default 1000) so the recursive
# DFS variant can handle deeper graphs without RecursionError.
sys.setrecursionlimit(10**4)
class Node:
    """A graph vertex holding forward and reversed adjacency lists.

    Attributes:
        label: integer identifier of the vertex.
        neighbors: successor vertices in the original graph.
        neighbors_reversed: successor vertices in the reversed graph.
        explored: DFS visitation flag.
        leader: SCC leader assigned during Kosaraju's second pass.
    """

    def __init__(self, label: int):
        self.label: int = label
        self.neighbors: list = []
        self.neighbors_reversed: list = []
        self.explored: bool = False
        # Fixed annotation: `Node or None` evaluated to just `Node`;
        # a string union is correct and needs no typing import.
        self.leader: "Node | None" = None

    def add_neighbor(self, neighbor: "Node") -> None:
        """Record an edge self -> neighbor in the original graph."""
        self.neighbors.append(neighbor)

    def add_neighbor_reversed(self, neighbor_reversed: "Node") -> None:
        """Record an edge self -> neighbor_reversed in the reversed graph."""
        self.neighbors_reversed.append(neighbor_reversed)
# Adjacency list representation of a directed graph. Space complexity = O(m + n).
# Adjacency list representation of a directed graph. Space complexity = O(m + n).
class Graph:
    """Directed graph supporting Kosaraju's two-pass SCC computation.

    Keeps DFS bookkeeping on the instance: ``t`` is the current finishing
    time and ``s`` the current leader vertex of the ongoing DFS pass.
    """
    def __init__(self, nb_vertex: int = 0):
        self.nb_vertex = nb_vertex
        self.vertices: list = []
        # Vertices arranged in increasing finishing times
        self.vertices_increasing_f: list = [None]*nb_vertex
        # Current finishing time
        self.t: int = 0
        # Current leader node
        self.s: "Node | None" = None

    def add_node(self, node: Node):
        # Vertices are expected to be added in label order (label == index).
        self.vertices.append(node)

    # Depth-first search using recursion.
    # Only use it for small graph. Otherwise, the algorithm can hit Python's max recursion depth (1000).
    def dfs(self, node: Node, first_pass: bool):
        node.explored = True
        if first_pass:
            # First pass walks the REVERSED graph to compute finishing times.
            neighbors = node.neighbors_reversed
        else:
            neighbors = node.neighbors
            # Second pass: everything reachable from self.s is in its SCC.
            node.leader = self.s
        for neighbor in neighbors:
            if not neighbor.explored:
                self.dfs(neighbor, first_pass=first_pass)
        if first_pass:
            # Post-order position of this vertex == its finishing time.
            self.vertices_increasing_f[self.t] = node
            self.t += 1

    # Depth-first search using stack
    def dfs_stack(self, node: Node, first_pass: bool):
        node.explored = True
        stack = deque()  # Stack
        stack.append(node)
        current_node_arr = []
        while stack:
            v = stack.pop()  # v is the current node
            if not first_pass:
                v.leader = self.s
            current_node_arr.append(v)
            neighbors = v.neighbors_reversed if first_pass else v.neighbors
            for neighbor in neighbors:
                if not neighbor.explored:
                    neighbor.explored = True
                    stack.append(neighbor)
        if first_pass:
            # NOTE(review): the reversed pop order is used as a stand-in for
            # true DFS finishing order — worth verifying it matches the
            # recursive version on graphs beyond the bundled test case.
            nb_nodes_explored = len(current_node_arr)
            self.vertices_increasing_f[self.t:self.t + nb_nodes_explored] = reversed(current_node_arr)
            self.t += nb_nodes_explored

    # First pass = False means this is the second pass
    def dfs_loop(self, first_pass: bool, recursion_version: bool = False):
        if first_pass:
            self.t = 0
            vertices = self.vertices
        else:
            self.s = None
            vertices = self.vertices_increasing_f
        # Process vertices from last to first (i.e. decreasing finishing
        # time in the second pass).
        for node in reversed(vertices):
            if not node.explored:
                self.s = node
                if recursion_version:
                    self.dfs(node=node, first_pass=first_pass)
                else:
                    self.dfs_stack(node=node, first_pass=first_pass)

    # Restoration of class states
    def restore_states(self):
        self.t = 0
        self.s = None
        for node in self.vertices:
            node.explored = False
            node.leader = None

    # Kosaraju's algorithm to compute SCCs.
    # Time complexity = O(m + n) where m is the number of vertices and n is the number of nodes.
    def kosaraju(self, recursion_dfs: bool = False):
        """Return, for each vertex in order, the label of its SCC leader."""
        self.dfs_loop(first_pass=True, recursion_version=recursion_dfs)
        self.restore_states()
        self.dfs_loop(first_pass=False, recursion_version=recursion_dfs)
        leader_arr = []
        for node in self.vertices:
            leader_arr.append(node.leader.label)
        self.restore_states()
        return leader_arr
if __name__ == '__main__':
    # --- Small hand-built sanity check, run with the recursive DFS. ---
    nb_nodes = 9
    graph = Graph(nb_nodes)
    for i in range(nb_nodes):
        graph.add_node(Node(i))
    # Directed edges of the toy graph; each edge is mirrored into the
    # reversed adjacency lists used by Kosaraju's first pass.
    toy_edges = [(0, 8), (8, 7), (7, 0), (7, 5), (5, 4), (6, 5),
                 (4, 6), (4, 2), (2, 1), (1, 3), (3, 2)]
    for src, dst in toy_edges:
        graph.vertices[src].neighbors.append(graph.vertices[dst])
        graph.vertices[dst].neighbors_reversed.append(graph.vertices[src])
    leaders = graph.kosaraju(recursion_dfs=True)
    print(f"Test code with recursion version of DFS: leaders = {leaders}")

    # --- Full programming-assignment graph (iterative DFS to stay within
    # the recursion limit). ---
    nb_nodes = 875714
    graph = Graph(nb_nodes)
    for i in range(nb_nodes):
        graph.add_node(Node(i))
    with open("AssignmentData/Data_SCC.txt", 'r') as f:
        for line in f:
            # Each line holds "src dst" as 1-based vertex labels.
            data = line.strip().split(' ')
            src_node, dst_node = int(data[0]) - 1, int(data[1]) - 1
            graph.vertices[src_node].neighbors.append(graph.vertices[dst_node])
            graph.vertices[dst_node].neighbors_reversed.append(graph.vertices[src_node])
    start = time.time()
    leaders = graph.kosaraju(recursion_dfs=False)
    print(f"Programming assignment: time consumed = {round(time.time() - start, 2)}s")
    print(f"Programming assignment with stack version of DFS: 5 largest SCCs = {Counter(leaders).most_common(5)}")
1b61de65ca4bfc032487f7575520a429a91e4590 | 3,068 | py | Python | gen_kml.py | sean/delivery-routes | 482edec3499dadd2526ea2577f6962d15d62e979 | [
"MIT"
] | 6 | 2017-06-30T10:39:55.000Z | 2022-02-20T01:46:05.000Z | gen_kml.py | sean/delivery-routes | 482edec3499dadd2526ea2577f6962d15d62e979 | [
"MIT"
] | null | null | null | gen_kml.py | sean/delivery-routes | 482edec3499dadd2526ea2577f6962d15d62e979 | [
"MIT"
] | 1 | 2017-09-15T21:04:25.000Z | 2017-09-15T21:04:25.000Z | #!/usr/bin/env python
import simplekml
import argparse
import colorsys
import numpy
import simplejson
import csv
import os
from lib.config import Config
def RGBToHTMLColor(rgb_tuple):
    """Convert an (R, G, B) tuple of floats in [0, 1] to an 'rrggbbaa' hex string.

    The alpha byte is always fully opaque ('ff'), as expected by the
    simplekml icon-style color strings this feeds.
    """
    # Scale to 0-255 and clamp: the old `int(c * 256)` overflowed to 256
    # for a component of exactly 1.0, producing a 3-digit hex field and a
    # malformed color string.
    r, g, b = (min(255, max(0, int(round(c * 255)))) for c in rgb_tuple[:3])
    return '%02x%02x%02xff' % (r, g, b)
def get_colors(num_colors):
    """Generate `num_colors` visually distinct HTML color strings.

    Hues are spread evenly around the color wheel while lightness and
    saturation receive a small random jitter, so adjacent hues stay
    distinguishable on the map.
    """
    palette = []
    step = 360. / num_colors
    for angle in numpy.arange(0., 360., step):
        hue = angle / 360.
        lightness = (50 + numpy.random.rand() * 10) / 100.
        saturation = (90 + numpy.random.rand() * 10) / 100.
        rgb = colorsys.hls_to_rgb(hue, lightness, saturation)
        palette.append(RGBToHTMLColor(rgb))
    return palette
def load_data(savefile):
    """Read and return the routes structure stored as JSON in `savefile`.

    Invalid JSON is reported on stdout and the parser's ValueError is
    re-raised to the caller.
    """
    with open(savefile, 'r') as json_data:
        try:
            return simplejson.load(json_data)
        except ValueError as e:
            print('invalid json: %s' % e)
            raise
def save_routes_to_csv(config, routes):
    """Write every delivery of every route to <output_dir>/master.csv.

    Args:
        config: object exposing `output_dir` (str) and `verbose` (bool).
        routes: list of routes; each route is a list of delivery dicts
            with keys 'id', 'name', 'address', 'count' and 'comments'.
    """
    savefile = "{}/master.csv".format(config.output_dir)
    with open(savefile, "w") as csvfile:
        f = csv.writer(csvfile)
        # Header row ("Bags" mirrors the delivery 'count' field).
        # BUG FIX: the last column header was misspelled "Coments".
        f.writerow(["ID", "Name", "Address", "Bags", "Route", "Comments"])
        for idx, route in enumerate(routes):
            for d in route:
                f.writerow([d['id'], d['name'], d['address'], d['count'],
                            "route-{}".format(idx + 1), d['comments']])
    if config.verbose:
        print("Saved {} routes to {}".format(len(routes), savefile))
if __name__ == '__main__':
    import sys  # BUG FIX: `sys.exit` is called below but `sys` was never imported.

    parser = argparse.ArgumentParser(description='Generate a KML file based on the passed in routes.json file.')
    parser.add_argument('filename', type=str, help='the routes.json file containing the deliveries')
    parser.add_argument('-v', '--verbose', action='store_true', help='increase the output verbosity')
    args = parser.parse_args()

    config = Config()
    config.verbose = args.verbose

    if not os.path.isfile(args.filename):
        print("There is no such file {}!".format(args.filename))
        sys.exit(-1)

    routes = load_data(args.filename)
    savefile = "{}/deliveries.kml".format(config.output_dir)
    if config.verbose:
        print("Loaded {} routes from {}".format(len(routes), args.filename))

    # 100 pre-generated colors; routes are colored by 1-based index, so
    # more than 99 routes would raise an IndexError below.
    colors = get_colors(100)
    kml = simplekml.Kml(open=1)
    num_routes = 0
    num_orders = 0
    for idx, route in enumerate(routes):
        num_routes = idx + 1
        for delivery in route:
            pnt = kml.newpoint()
            pnt.name = "{} {} ({} bags)".format(delivery['id'], delivery['address'], delivery['count'])
            pnt.description = "route-{}".format(num_routes)
            pnt.coords = [(delivery['lon'], delivery['lat'])]
            pnt.style.iconstyle.color = colors[num_routes]
            pnt.style.iconstyle.icon.href = None
            num_orders = num_orders + 1
            if config.verbose:
                print("Added point for {} (route-{})".format(delivery['id'], num_routes))
    kml.save(savefile)
    if config.verbose:
        print("Created {} points, one per order.".format(num_orders))
    save_routes_to_csv(config, routes)
| 30.68 | 110 | 0.661995 |
b32598d1feec5af52b78c97d470be317b104c55b | 5,506 | py | Python | applications/init/models/db.py | himelpdas/pastebeam_web_server | 607fe5aaab571fadd99e100fdd774fc953384cf1 | [
"BSD-3-Clause"
] | null | null | null | applications/init/models/db.py | himelpdas/pastebeam_web_server | 607fe5aaab571fadd99e100fdd774fc953384cf1 | [
"BSD-3-Clause"
] | null | null | null | applications/init/models/db.py | himelpdas/pastebeam_web_server | 607fe5aaab571fadd99e100fdd774fc953384cf1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#########################################################################
## This scaffolding model makes your app work on Google App Engine too
## File is released under public domain and you can use without limitations
#########################################################################
## if SSL/HTTPS is properly configured and you want all HTTP requests to
## be redirected to HTTPS, uncomment the line below:
# request.requires_https()
## app configuration made easy. Look inside private/appconfig.ini
from gluon.contrib.appconfig import AppConfig
## once in production, remove reload=True to gain full speed
# AppConfig reads private/appconfig.ini; reload=True re-parses it on every
# request (handy in development, slow in production).
myconf = AppConfig(reload=True)
if not request.env.web2py_runtime_gae:
    ## if NOT running on Google App Engine use SQLite or other DB
    db = DAL(myconf.take('db.uri'), pool_size=myconf.take('db.pool_size', cast=int), check_reserved=['all'])
else:
    ## connect to Google BigTable (optional 'google:datastore://namespace')
    db = DAL('google:datastore+ndb')
    ## store sessions and tickets there
    # On GAE the filesystem is not writable, so sessions go into the DB.
    session.connect(request, response, db=db)
    ## or store session in Memcache, Redis, etc.
    ## from gluon.contrib.memdb import MEMDB
    ## from google.appengine.api.memcache import Client
    ## session.connect(request, response, db = MEMDB(Client()))

## by default give a view/generic.extension to all actions from localhost
## none otherwise. a pattern can be 'controller/function.extension'
response.generic_patterns = ['*'] if request.is_local else []
## choose a style for forms
response.formstyle = myconf.take('forms.formstyle')  # or 'bootstrap3_stacked' or 'bootstrap2' or other
response.form_label_separator = myconf.take('forms.separator')
## (optional) optimize handling of static files
# response.optimize_css = 'concat,minify,inline'
# response.optimize_js = 'concat,minify,inline'
## (optional) static assets folder versioning
# response.static_version = '0.0.0'
#########################################################################
## Here is sample code if you need for
## - email capabilities
## - authentication (registration, login, logout, ... )
## - authorization (role based authorization)
## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss)
## - old style crud actions
## (more options discussed in gluon/tools.py)
#########################################################################
from gluon.tools import Auth, Service, PluginManager

# Core web2py service objects: authentication, RPC services and plugins.
auth = Auth(db)
service = Service()
plugins = PluginManager()

## create all tables needed by auth if not custom tables
auth.define_tables(username=False, signature=False)

## configure email
# The 'logging' mail backend is used locally so no real mail is sent in dev.
mail = auth.settings.mailer
mail.settings.server = 'logging' if request.is_local else myconf.take('smtp.server')
mail.settings.sender = myconf.take('smtp.sender')
mail.settings.login = myconf.take('smtp.login')

## configure auth policy
# Accounts are usable immediately; only password resets require email.
auth.settings.registration_requires_verification = False
auth.settings.registration_requires_approval = False
auth.settings.reset_password_requires_verification = True
#########################################################################
## Define your tables below (or better in another model file) for example
##
## >>> db.define_table('mytable',Field('myfield','string'))
##
## Fields can be 'string','text','password','integer','double','boolean'
## 'date','time','datetime','blob','upload', 'reference TABLENAME'
## There is an implicit 'id integer autoincrement' field
## Consult manual for more options, validators, etc.
##
## More API examples for controllers:
##
## >>> db.mytable.insert(myfield='value')
## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL)
## >>> for row in rows: print row.id, row.myfield
#########################################################################
## after defining tables, uncomment below to enable auditing
# auth.enable_record_versioning(db)
def initializeAccount(form):
"this is run after db insert and form.vars has actual values in the db"
#form value from: on_register OR logged_in_user #already accepted by db, so these values are guaranteed to be user's password
my_password = form.vars.password_two or form.vars.new_password2 or 1/0 #the unencrypted password on register is password_two and new_password2 when logged in #the raw password in the password verification field
my_id = db._adapter.object_id(form.vars.id or auth.user_id or 1/0) #http://stackoverflow.com/questions/26614981/mongodb-web2py-working-with-objectids
print my_password, my_id
rsa_keys = SecureRSAKeyPair(my_password, pbkdf2 = True)
print rsa_keys.public_key, rsa_keys.private_key
MONGO_ACCOUNTS.update_one({"_id":my_id},
{"$set":{
"contacts_list":[],
"rsa_public_key": rsa_keys.public_key,
"rsa_private_key": rsa_keys.private_key,
"rsa_pbkdf2_salt": Binary(rsa_keys.salt),
}}
)
def killAllAccountWebSockets(form):
kill_id = db._adapter.object_id(auth.user_id)
kill_account = MONGO_ACCOUNTS.find_one({"_id":kill_id})
kill_email = kill_account["email"].lower()
publisher_socket.send_string(u"%s %s"%(kill_email, u"kill" ) )
#print form.vars.new_password2
auth.settings.register_onaccept = [initializeAccount]
auth.settings.profile_onaccept = [killAllAccountWebSockets]
auth.settings.change_password_onaccept = [initializeAccount, killAllAccountWebSockets] #do killall last to prevent race with websocket #HIDDEN FROM DOCS!!! Also change_password_onvalidation
| 44.764228 | 214 | 0.682528 |
eebef13ac637626b93137cc44557e82782fe91ea | 62,781 | py | Python | ludwig/models/model.py | Martinouj/ludwig | 48acbf6cb63a859d06eb39b2f7ec27577394251d | [
"Apache-2.0"
] | 1 | 2020-04-23T01:50:22.000Z | 2020-04-23T01:50:22.000Z | ludwig/models/model.py | Gerzer/ludwig | 71ca2189bcee7a2667c428aeb1bf738697cbe83d | [
"Apache-2.0"
] | null | null | null | ludwig/models/model.py | Gerzer/ludwig | 71ca2189bcee7a2667c428aeb1bf738697cbe83d | [
"Apache-2.0"
] | 1 | 2020-01-30T21:11:57.000Z | 2020-01-30T21:11:57.000Z | #! /usr/bin/env python
# coding=utf-8
# Copyright (c) 2019 Uber Technologies, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module contains the class and auxiliary methods of a model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import os
import re
import signal
import sys
import threading
import time
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from tabulate import tabulate
from tensorflow.python import debug as tf_debug
from tensorflow.python.saved_model import builder as saved_model_builder
from tqdm import tqdm
from ludwig.constants import *
from ludwig.contrib import contrib_command
from ludwig.features.feature_registries import output_type_registry
from ludwig.features.feature_utils import SEQUENCE_TYPES
from ludwig.globals import MODEL_HYPERPARAMETERS_FILE_NAME
from ludwig.globals import MODEL_WEIGHTS_FILE_NAME
from ludwig.globals import MODEL_WEIGHTS_PROGRESS_FILE_NAME
from ludwig.globals import TRAINING_PROGRESS_FILE_NAME
from ludwig.globals import is_on_master
from ludwig.globals import is_progressbar_disabled
from ludwig.models.combiners import get_build_combiner
from ludwig.models.inputs import build_inputs, dynamic_length_encoders
from ludwig.models.modules.loss_modules import regularizer_registry
from ludwig.models.modules.measure_modules import get_improved_fun
from ludwig.models.modules.measure_modules import get_initial_validation_value
from ludwig.models.modules.optimization_modules import optimize
from ludwig.models.outputs import build_outputs
from ludwig.utils import time_utils
from ludwig.utils.batcher import Batcher
from ludwig.utils.batcher import BucketedBatcher
from ludwig.utils.batcher import DistributedBatcher
from ludwig.utils.data_utils import load_json, save_json
from ludwig.utils.defaults import default_random_seed
from ludwig.utils.defaults import default_training_params
from ludwig.utils.math_utils import learning_rate_warmup_distributed, \
learning_rate_warmup
from ludwig.utils.misc import set_random_seed
from ludwig.utils.misc import sum_dicts
from ludwig.utils.tf_utils import get_tf_config
logger = logging.getLogger(__name__)
class Model:
"""
Model is a class that builds the model that Ludwig uses
"""
    def __init__(
            self,
            input_features,
            output_features,
            combiner,
            training,
            preprocessing,
            use_horovod=False,
            random_seed=default_random_seed,
            debug=False,
            **kwargs
    ):
        """Store hyperparameters and build the TensorFlow graph.

        :param input_features: list of input feature definitions.
        :param output_features: list of output feature definitions.
        :param combiner: combiner definition (dict with at least 'type').
        :param training: training hyperparameters (regularizer, optimizer, ...).
        :param preprocessing: preprocessing parameters (stored in the
            hyperparameters, not used directly here).
        :param use_horovod: enable Horovod-based distributed training.
        :param random_seed: graph-level TF random seed.
        :param debug: wrap the session in the TF CLI debugger.
        """
        self.horovod = None
        if use_horovod:
            # Imported lazily so horovod/mpi4py are only required when
            # distributed training is actually requested.
            import horovod.tensorflow
            self.horovod = horovod.tensorflow
            from mpi4py import MPI
            self.comm = MPI.COMM_WORLD

        self.debug = debug
        self.weights_save_path = None
        self.hyperparameters = {}
        self.session = None
        self.epochs = None
        # Set to True by a SIGINT handler to stop training gracefully.
        self.received_sigint = False

        self.__build(
            input_features,
            output_features,
            combiner,
            training,
            preprocessing,
            random_seed,
            **kwargs
        )
    def __build(
            self,
            input_features,
            output_features,
            combiner,
            training,
            preprocessing,
            random_seed,
            **kwargs
    ):
        """Construct the TF1 computation graph.

        Wires inputs -> combiner -> outputs, sets up losses, the optimizer,
        summaries and a Saver, and stores the resulting graph on `self`.
        """
        self.hyperparameters['input_features'] = input_features
        self.hyperparameters['output_features'] = output_features
        self.hyperparameters['combiner'] = combiner
        self.hyperparameters['training'] = training
        self.hyperparameters['preprocessing'] = preprocessing
        self.hyperparameters['random_seed'] = random_seed
        self.hyperparameters.update(kwargs)

        if self.horovod:
            self.horovod.init()

        tf.reset_default_graph()
        graph = tf.Graph()
        with graph.as_default():
            # ================ Setup ================
            tf.set_random_seed(random_seed)

            self.global_step = tf.Variable(0, trainable=False)

            # Regularization strength is fed at run time so it can vary
            # between training and evaluation.
            self.regularization_lambda = tf.placeholder(
                tf.float32,
                name='regularization_lambda'
            )
            regularizer = regularizer_registry[training['regularizer']]
            self.regularizer = regularizer(self.regularization_lambda)

            self.learning_rate = tf.placeholder(
                tf.float32,
                name='learning_rate'
            )
            self.dropout_rate = tf.placeholder(tf.float32, name='dropout_rate')
            self.is_training = tf.placeholder(tf.bool, [], name='is_training')

            # ================ Inputs ================
            feature_encodings = build_inputs(
                input_features,
                self.regularizer,
                self.dropout_rate,
                is_training=self.is_training
            )

            # Expose each feature's placeholder as an attribute so
            # feed_dict() can look it up via getattr(self, name).
            for fe_name, fe_properties in feature_encodings.items():
                setattr(self, fe_name, fe_properties['placeholder'])

            # ================ Model ================
            logger.debug('- Combiner {}'.format(combiner['type']))
            build_combiner = get_build_combiner(combiner['type'])(**combiner)
            hidden, hidden_size = build_combiner(
                feature_encodings,
                self.regularizer,
                self.dropout_rate,
                is_training=self.is_training,
                **kwargs
            )

            # ================ Outputs ================
            outs = build_outputs(
                output_features,
                hidden,
                hidden_size,
                regularizer=self.regularizer,
                dropout_rate=self.dropout_rate,
                is_training=self.is_training
            )
            (
                self.train_reg_mean_loss,
                self.eval_combined_loss,
                self.regularization_loss,
                output_tensors
            ) = outs

            # Expose per-output tensors (predictions, losses, ...) as
            # attributes, mirroring the input placeholders above.
            for ot_name, ot in output_tensors.items():
                setattr(self, ot_name, ot)

            # ================ Optimizer ================
            self.optimize, self.learning_rate = optimize(
                self.train_reg_mean_loss,
                training,
                self.learning_rate,
                self.global_step,
                self.horovod
            )

            tf.summary.scalar('train_reg_mean_loss', self.train_reg_mean_loss)

            self.merged_summary = tf.summary.merge_all()
            self.graph = graph
            # Created inside the graph context so it captures this
            # graph's variables.
            self.graph_initialize = tf.global_variables_initializer()
            if self.horovod:
                self.broadcast_op = self.horovod.broadcast_global_variables(0)
            self.saver = tf.train.Saver()
def initialize_session(self, gpus=None, gpu_fraction=1):
if self.session is None:
self.session = tf.Session(
config=get_tf_config(gpus, gpu_fraction, self.horovod),
graph=self.graph
)
self.session.run(self.graph_initialize)
if self.debug:
session = tf_debug.LocalCLIDebugWrapperSession(self.session)
session.add_tensor_filter(
'has_inf_or_nan',
tf_debug.has_inf_or_nan
)
return self.session
def close_session(self):
if self.session is not None:
self.session.close()
self.session = None
def feed_dict(
self,
batch,
regularization_lambda=default_training_params[
'regularization_lambda'],
learning_rate=default_training_params['learning_rate'],
dropout_rate=default_training_params['dropout_rate'],
is_training=True
):
input_features = self.hyperparameters['input_features']
output_features = self.hyperparameters['output_features']
feed_dict = {
self.is_training: is_training,
self.regularization_lambda: regularization_lambda,
self.learning_rate: learning_rate,
self.dropout_rate: dropout_rate
}
for input_feature in input_features:
feed_dict[getattr(self, input_feature['name'])] = batch[
input_feature['name']]
for output_feature in output_features:
if output_feature['name'] in batch:
feed_dict[getattr(self, output_feature['name'])] = batch[
output_feature['name']]
return feed_dict
    def train(
            self,
            training_set,
            validation_set=None,
            test_set=None,
            validation_field=None,
            validation_measure=None,
            save_path='model',
            regularization_lambda=0.0,
            epochs=100,
            learning_rate=0.001,
            batch_size=128,
            eval_batch_size=0,
            bucketing_field=None,
            dropout_rate=0.0,
            early_stop=20,
            reduce_learning_rate_on_plateau=0,
            reduce_learning_rate_on_plateau_patience=5,
            reduce_learning_rate_on_plateau_rate=0.5,
            increase_batch_size_on_plateau=0,
            increase_batch_size_on_plateau_patience=5,
            increase_batch_size_on_plateau_rate=2,
            increase_batch_size_on_plateau_max=512,
            learning_rate_warmup_epochs=1,
            resume=False,
            skip_save_model=False,
            skip_save_progress=False,
            skip_save_log=False,
            gpus=None,
            gpu_fraction=1,
            random_seed=default_random_seed,
            **kwargs
    ):
        """Trains a model with a set of hyperparameters listed below. Customizable
        :param training_set: The training set
        :param validation_set: The validation dataset
        :param test_set: The test dataset
        :param validation_field: The first output feature, by default it is set
               as the same field of the first output feature.
        :param validation_measure: Measure used on the validation field, it is
               accuracy by default
        :type validation_measure:
        :param save_path: The path to save the file
        :type save_path: filepath (str)
        :param regularization_lambda: Strength of the $L2$ regularization
        :type regularization_lambda: Integer
        :param epochs: Number of epochs the algorithm is intended to be run over
        :type epochs: Integer
        :param learning_rate: Learning rate for the algorithm, represents how
               much to scale the gradients by
        :type learning_rate: Integer
        :param batch_size: Size of batch to pass to the model for training.
        :type batch_size: Integer
        :param eval_batch_size: Size of batch to pass to the model for
               evaluation. If 0, ``batch_size`` is used.
        :type eval_batch_size: Integer
        :param bucketing_field: when batching, buckets datapoints based the
               length of a field together. Bucketing on text length speeds up
               training of RNNs consistently, 30% in some cases
        :type bucketing_field:
        :param dropout_rate: dropout_rate probability (probability of dropping
               a neuron in a given layer)
        :type dropout_rate: Float
        :param early_stop: How many epochs without any improvement in the
               validation_measure triggers the algorithm to stop
        :type early_stop: Integer
        :param reduce_learning_rate_on_plateau: Reduces the learning rate when
               the algorithm hits a plateau (i.e. the performance on the
               validation does not improve)
        :type reduce_learning_rate_on_plateau: Float
        :param reduce_learning_rate_on_plateau_patience: How many epochs have
               to pass before the learning rate reduces
        :type reduce_learning_rate_on_plateau_patience: Float
        :param reduce_learning_rate_on_plateau_rate: Rate at which we reduce
               the learning rate
        :type reduce_learning_rate_on_plateau_rate: Float
        :param increase_batch_size_on_plateau: Increase the batch size on a
               plateau
        :type increase_batch_size_on_plateau: Integer
        :param increase_batch_size_on_plateau_patience: How many epochs to wait
               for before increasing the batch size
        :type increase_batch_size_on_plateau_patience: Integer
        :param increase_batch_size_on_plateau_rate: The rate at which the batch
               size increases.
        :type increase_batch_size_on_plateau_rate: Float
        :param increase_batch_size_on_plateau_max: The maximum size of the batch
        :type increase_batch_size_on_plateau_max: Integer
        :param learning_rate_warmup_epochs: The number of epochs to warmup the
               learning rate for.
        :type learning_rate_warmup_epochs: Integer
        :param resume: Resume training a model that was being trained.
        :type resume: Boolean
        :param skip_save_model: disables
               saving model weights and hyperparameters each time the model
               improves. By default Ludwig saves model weights after each epoch
               the validation measure improves, but if the model is really big
               that can be time consuming if you do not want to keep
               the weights and just find out what performance can a model get
               with a set of hyperparameters, use this parameter to skip it,
               but the model will not be loadable later on.
        :type skip_save_model: Boolean
        :param skip_save_progress: disables saving progress each epoch.
               By default Ludwig saves weights and stats after each epoch
               for enabling resuming of training, but if the model is
               really big that can be time consuming and will use twice
               as much space, use this parameter to skip it, but training
               cannot be resumed later on
        :type skip_save_progress: Boolean
        :param skip_save_log: Disables saving TensorBoard
               logs. By default Ludwig saves logs for the TensorBoard, but if it
               is not needed turning it off can slightly increase the
               overall speed.
        :type skip_save_log: Boolean
        :param gpus: List of gpus to use
        :type gpus: List
        :param gpu_fraction: Percentage of the GPU that is intended to be used
        :type gpu_fraction: Float
        :param random_seed: Default initialization for the random seeds
        :type: Float
        """
        # ====== General setup =======
        output_features = self.hyperparameters['output_features']
        self.epochs = epochs
        digits_per_epochs = len(str(self.epochs))
        self.received_sigint = False
        # Only use signals when on the main thread to avoid issues with CherryPy: https://github.com/uber/ludwig/issues/286
        if threading.current_thread() == threading.main_thread():
            signal.signal(signal.SIGINT, self.set_epochs_to_1_or_quit)
        should_validate = validation_set is not None and validation_set.size > 0
        if eval_batch_size < 1:
            # fall back to the training batch size for evaluation
            eval_batch_size = batch_size
        stat_names = self.get_stat_names(output_features)
        if self.horovod:
            # scale the learning rate linearly with the number of workers
            learning_rate *= self.horovod.size()
        # ====== Setup file names =======
        os.makedirs(save_path, exist_ok=True)
        model_weights_path = os.path.join(save_path, MODEL_WEIGHTS_FILE_NAME)
        model_weights_progress_path = os.path.join(
            save_path,
            MODEL_WEIGHTS_PROGRESS_FILE_NAME
        )
        model_hyperparameters_path = os.path.join(
            save_path,
            MODEL_HYPERPARAMETERS_FILE_NAME
        )
        # ====== Setup session =======
        session = self.initialize_session(gpus, gpu_fraction)
        if self.weights_save_path:
            self.restore(session, self.weights_save_path)
        train_writer = None
        if is_on_master():
            # the TensorBoard writer exists only on the master worker
            if not skip_save_log:
                train_writer = tf.summary.FileWriter(
                    os.path.join(save_path, 'log', 'train'),
                    session.graph
                )
        if self.debug:
            session = tf_debug.LocalCLIDebugWrapperSession(session)
            session.add_tensor_filter(
                'has_inf_or_nan',
                tf_debug.has_inf_or_nan
            )
        # ================ Resume logic ================
        if resume:
            progress_tracker = self.resume_training(
                save_path,
                model_weights_path
            )
            if is_on_master():
                self.resume_session(
                    session,
                    save_path,
                    model_weights_path,
                    model_weights_progress_path
                )
        else:
            (
                train_stats,
                vali_stats,
                test_stats
            ) = self.initialize_training_stats(output_features)
            progress_tracker = ProgressTracker(
                batch_size=batch_size,
                epoch=0,
                steps=0,
                last_improvement_epoch=0,
                learning_rate=learning_rate,
                best_valid_measure=get_initial_validation_value(
                    validation_measure
                ),
                num_reductions_lr=0,
                num_increases_bs=0,
                train_stats=train_stats,
                vali_stats=vali_stats,
                test_stats=test_stats
            )
        # horovod broadcasting after init or restore
        if self.horovod:
            session.run(self.broadcast_op)
        set_random_seed(random_seed)
        batcher = self.initialize_batcher(
            training_set,
            batch_size,
            bucketing_field
        )
        # ================ Training Loop ================
        while progress_tracker.epoch < self.epochs:
            # epoch init
            start_time = time.time()
            if is_on_master():
                logger.info(
                    '\nEpoch {epoch:{digits}d}'.format(
                        epoch=progress_tracker.epoch + 1,
                        digits=digits_per_epochs
                    )
                )
            # needed because batch size may change
            batcher.batch_size = progress_tracker.batch_size
            # ================ Train ================
            if is_on_master():
                progress_bar = tqdm(
                    desc='Training',
                    total=batcher.steps_per_epoch,
                    file=sys.stdout,
                    disable=is_progressbar_disabled()
                )
            # training step loop
            while not batcher.last_batch():
                batch = batcher.next_batch()
                if self.horovod:
                    current_learning_rate = learning_rate_warmup_distributed(
                        progress_tracker.learning_rate,
                        progress_tracker.epoch,
                        learning_rate_warmup_epochs,
                        self.horovod.size(),
                        batcher.step,
                        batcher.steps_per_epoch
                    ) * self.horovod.size()
                else:
                    current_learning_rate = learning_rate_warmup(
                        progress_tracker.learning_rate,
                        progress_tracker.epoch,
                        learning_rate_warmup_epochs,
                        batcher.step,
                        batcher.steps_per_epoch
                    )
                readout_nodes = {'optimize': self.optimize}
                if not skip_save_log:
                    readout_nodes['summary'] = self.merged_summary
                output_values = session.run(
                    readout_nodes,
                    feed_dict=self.feed_dict(
                        batch,
                        regularization_lambda=regularization_lambda,
                        learning_rate=current_learning_rate,
                        dropout_rate=dropout_rate,
                        is_training=True
                    )
                )
                if is_on_master():
                    if not skip_save_log:
                        # it is initialized only on master
                        train_writer.add_summary(output_values['summary'],
                                                 progress_tracker.steps)
                progress_tracker.steps += 1
                if is_on_master():
                    progress_bar.update(1)
            # post training
            if is_on_master():
                progress_bar.close()
            progress_tracker.epoch += 1
            batcher.reset()  # todo this may be useless, doublecheck
            # ================ Eval ================
            # init tables
            tables = OrderedDict()
            for output_feature in output_features:
                field_name = output_feature['name']
                tables[field_name] = [
                    [field_name] + stat_names[field_name]]
            tables['combined'] = [['combined', LOSS, ACCURACY]]
            # eval measures on train set
            self.evaluation(
                session,
                training_set,
                'train',
                regularization_lambda,
                progress_tracker.train_stats,
                tables,
                eval_batch_size,
                bucketing_field
            )
            if validation_set is not None and validation_set.size > 0:
                # eval measures on validation set
                self.evaluation(
                    session,
                    validation_set,
                    'vali',
                    regularization_lambda,
                    progress_tracker.vali_stats,
                    tables,
                    eval_batch_size,
                    bucketing_field
                )
            if test_set is not None and test_set.size > 0:
                # eval measures on test set
                self.evaluation(
                    session,
                    test_set,
                    'test',
                    regularization_lambda,
                    progress_tracker.test_stats,
                    tables,
                    eval_batch_size,
                    bucketing_field
                )
            # mbiu and end of epoch prints
            elapsed_time = (time.time() - start_time) * 1000.0
            if is_on_master():
                logger.info('Took {time}'.format(
                    time=time_utils.strdelta(elapsed_time)))
            # stat prints; the 'combined' table is shown only when there is
            # more than one output feature
            for output_feature, table in tables.items():
                if (
                        output_feature != 'combined' or
                        (output_feature == 'combined' and
                         len(output_features) > 1)
                ):
                    if is_on_master():
                        logger.info(
                            tabulate(
                                table,
                                headers='firstrow',
                                tablefmt='fancy_grid',
                                floatfmt='.4f'
                            )
                        )
            if should_validate:
                should_break = self.check_progress_on_validation(
                    progress_tracker,
                    validation_field,
                    validation_measure,
                    session,
                    model_weights_path,
                    model_hyperparameters_path,
                    reduce_learning_rate_on_plateau,
                    reduce_learning_rate_on_plateau_patience,
                    reduce_learning_rate_on_plateau_rate,
                    increase_batch_size_on_plateau_patience,
                    increase_batch_size_on_plateau,
                    increase_batch_size_on_plateau_max,
                    increase_batch_size_on_plateau_rate,
                    early_stop,
                    skip_save_model
                )
                if should_break:
                    break
            else:
                # there's no validation, so we save the model at each iteration
                if is_on_master():
                    if not skip_save_model:
                        self.save_weights(session, model_weights_path)
                        self.save_hyperparameters(
                            self.hyperparameters,
                            model_hyperparameters_path
                        )
            # ========== Save training progress ==========
            if is_on_master():
                if not skip_save_progress:
                    self.save_weights(session, model_weights_progress_path)
                    progress_tracker.save(
                        os.path.join(
                            save_path,
                            TRAINING_PROGRESS_FILE_NAME
                        )
                    )
                    if skip_save_model:
                        self.save_hyperparameters(
                            self.hyperparameters,
                            model_hyperparameters_path
                        )
            if is_on_master():
                contrib_command("train_epoch_end", progress_tracker)
                logger.info('')
        if train_writer is not None:
            train_writer.close()
        return (
            progress_tracker.train_stats,
            progress_tracker.vali_stats,
            progress_tracker.test_stats
        )
def train_online(
self,
dataset,
batch_size=128,
learning_rate=0.01,
regularization_lambda=0,
dropout_rate=0,
bucketing_field=None,
gpus=None,
gpu_fraction=1
):
session = self.initialize_session(gpus, gpu_fraction)
batcher = self.initialize_batcher(dataset, batch_size, bucketing_field)
# training step loop
progress_bar = tqdm(
desc='Trainining online',
total=batcher.steps_per_epoch,
file=sys.stdout,
disable=is_progressbar_disabled()
)
while not batcher.last_batch():
batch = batcher.next_batch()
_ = session.run(
[self.optimize],
feed_dict=self.feed_dict(
batch,
regularization_lambda=regularization_lambda,
learning_rate=learning_rate,
dropout_rate=dropout_rate,
is_training=True
)
)
progress_bar.update(1)
progress_bar.close()
def evaluation(
self,
session,
dataset,
dataset_name,
regularization_lambda,
stats,
tables,
batch_size=128,
bucketing_field=None
):
results = self.batch_evaluation(
session,
dataset,
batch_size,
bucketing_field=bucketing_field,
regularization_lambda=regularization_lambda,
is_training=False,
name=dataset_name
)
for output_feature in self.hyperparameters['output_features']:
field_name = output_feature['name']
scores = [dataset_name]
for stat in stats[field_name]:
stats[field_name][stat].append(results[field_name][stat])
scores.append(results[field_name][stat])
tables[field_name].append(scores)
stats['combined'][LOSS].append(results['combined'][LOSS])
stats['combined'][ACCURACY].append(results['combined'][ACCURACY])
tables['combined'].append(
[
dataset_name,
results['combined'][LOSS],
results['combined'][ACCURACY]
]
)
return stats, tables
    def batch_evaluation(
            self,
            session,
            dataset,
            batch_size,
            bucketing_field=None,
            regularization_lambda=0.0,
            is_training=False,
            collect_predictions=False,
            only_predictions=False,
            name=None
    ):
        """Run the model over ``dataset`` in batches and aggregate stats.

        Fetches measure and/or prediction tensors per batch, accumulates
        them via update_output_stats_batch, then normalizes/finalizes via
        update_output_stats. Under Horovod, per-worker partials are merged
        before normalization. The regularization term is added to the
        combined loss at the end (it is constant w.r.t. the data).

        :param name: optional dataset name shown in the progress bar
        :return: dict of aggregated stats per output feature (+ 'combined')
        """
        output_nodes = self.get_output_nodes(
            collect_predictions,
            only_predictions
        )
        output_stats = self.get_outputs_stats()
        set_size = dataset.size
        if set_size == 0:
            # nothing to evaluate; return the empty/initial accumulators
            if is_on_master():
                logger.warning('No datapoints to evaluate on.')
            return output_stats
        # per-field counters of sequence elements, used later to normalize
        # SEQ_SUM-aggregated stats
        seq_set_size = {output_feature['name']: {} for output_feature in
                        self.hyperparameters['output_features'] if
                        output_feature['type'] in SEQUENCE_TYPES}
        batcher = self.initialize_batcher(
            dataset,
            batch_size,
            bucketing_field,
            should_shuffle=False
        )
        if is_on_master():
            progress_bar = tqdm(
                desc='Evaluation' if name is None
                else 'Evaluation {0: <5.5}'.format(name),
                total=batcher.steps_per_epoch,
                file=sys.stdout,
                disable=is_progressbar_disabled()
            )
        while not batcher.last_batch():
            batch = batcher.next_batch()
            result = session.run(
                output_nodes,
                feed_dict=self.feed_dict(
                    batch,
                    regularization_lambda=regularization_lambda,
                    dropout_rate=0.0,
                    is_training=is_training
                )
            )
            output_stats, seq_set_size = self.update_output_stats_batch(
                output_stats,
                seq_set_size,
                collect_predictions,
                only_predictions,
                result
            )
            if is_on_master():
                progress_bar.update(1)
        if is_on_master():
            progress_bar.close()
        if self.horovod:
            output_stats, seq_set_size = self.merge_workers_outputs(
                output_stats,
                seq_set_size
            )
        output_stats = self.update_output_stats(
            output_stats,
            set_size,
            seq_set_size,
            collect_predictions,
            only_predictions
        )
        if 'combined' in output_stats and LOSS in output_stats['combined']:
            # add the (data-independent) regularization term to the loss
            regularization = session.run(
                [self.regularization_loss],
                feed_dict={self.regularization_lambda: regularization_lambda}
            )[0]
            output_stats['combined'][LOSS] += regularization
        return output_stats
def merge_workers_outputs(self, output_stats, seq_set_size):
# gather outputs from all workers
all_workers_output_stats = self.comm.allgather(output_stats)
all_workers_seq_set_size = self.comm.allgather(seq_set_size)
# merge them into a single one
merged_output_stats = sum_dicts(
all_workers_output_stats,
dict_type=OrderedDict
)
merged_seq_set_size = sum_dicts(all_workers_seq_set_size)
return merged_output_stats, merged_seq_set_size
def batch_collect_activations(
self,
session,
dataset,
batch_size,
tensor_names,
bucketing_field=None
):
output_nodes = {tensor_name: self.graph.get_tensor_by_name(tensor_name)
for tensor_name in tensor_names}
collected_tensors = {tensor_name: [] for tensor_name in tensor_names}
batcher = self.initialize_batcher(
dataset,
batch_size,
bucketing_field,
should_shuffle=False
)
progress_bar = tqdm(
desc='Collecting Tensors',
total=batcher.steps_per_epoch,
file=sys.stdout,
disable=is_progressbar_disabled()
)
while not batcher.last_batch():
batch = batcher.next_batch()
result = session.run(
output_nodes,
feed_dict=self.feed_dict(
batch,
is_training=False
)
)
for tensor_name in result:
for row in result[tensor_name]:
collected_tensors[tensor_name].append(row)
progress_bar.update(1)
progress_bar.close()
return collected_tensors
def get_output_nodes(self, collect_predictions, only_predictions=False):
output_features = self.hyperparameters['output_features']
output_nodes = {}
for output_feature in output_features:
field_name = output_feature['name']
feature_type = output_feature['type']
output_nodes[field_name] = {}
output_config = output_type_registry[feature_type].output_config
for stat in output_config:
output_name = output_config[stat]['output']
output_type = output_config[stat]['type']
if ((output_type == PREDICTION and
(collect_predictions or only_predictions)) or
(output_type == MEASURE and not only_predictions)):
output_nodes[field_name][output_name] = getattr(
self,
output_name + '_' + field_name
)
if not only_predictions:
output_nodes['eval_combined_loss'] = getattr(
self,
'eval_combined_loss'
)
return output_nodes
def get_outputs_stats(self):
output_features = self.hyperparameters['output_features']
output_stats = OrderedDict()
for output_feature in output_features:
field_name = output_feature['name']
feature_type = output_feature['type']
output_stats[field_name] = {}
output_config = output_type_registry[feature_type].output_config
for stat in output_config:
output_value = output_config[stat]['value']
if isinstance(output_value, list):
output_stats[field_name][stat] = []
else:
output_stats[field_name][stat] = output_value
output_stats['combined'] = {LOSS: 0, ACCURACY: 0}
return output_stats
    def update_output_stats_batch(
            self,
            output_stats,
            seq_set_size,
            collect_predictions,
            only_predictions,
            result
    ):
        """Accumulate one batch's fetched results into the running stats.

        Each stat is folded in according to its configured aggregation:
        SUM/AVG_EXP/SEQ_SUM add the batch sum (SEQ_SUM also counts rows
        in ``seq_set_size`` for later normalization), APPEND collects the
        raw batch arrays. Also tracks datapoint-level combined accuracy
        as the logical AND of correctness across all output features.
        Returns the mutated ``output_stats`` and ``seq_set_size``.
        """
        output_features = self.hyperparameters['output_features']
        combined_correct_predictions = None
        for i, output_feature in enumerate(output_features):
            field_name = output_feature['name']
            feature_type = output_feature['type']
            output_config = output_type_registry[feature_type].output_config
            for stat in output_config:
                stat_config = output_config[stat]
                output_type = output_config[stat]['type']
                if ((output_type == PREDICTION and
                     (collect_predictions or only_predictions)) or
                        (output_type == MEASURE and not only_predictions)):
                    aggregation_method = stat_config['aggregation']
                    if aggregation_method == SUM:
                        output_stats[field_name][stat] += (
                            result[field_name][stat_config['output']].sum()
                        )
                    elif aggregation_method == SEQ_SUM:
                        output_stats[field_name][stat] += (
                            result[field_name][stat_config['output']].sum()
                        )
                        # count rows so update_output_stats can divide by the
                        # number of sequences actually seen
                        seq_set_size[field_name][stat] = (
                                seq_set_size[field_name].get(stat, 0) +
                                len(result[field_name][stat_config['output']])
                        )
                    elif aggregation_method == AVG_EXP:
                        # sum now; exp(mean) is applied in update_output_stats
                        output_stats[field_name][stat] += (
                            result[field_name][stat_config['output']].sum()
                        )
                    elif aggregation_method == APPEND:
                        output_stats[field_name][stat].append(
                            result[field_name][stat_config['output']]
                        )
            if not only_predictions:
                # per-datapoint correctness for the combined accuracy; only
                # category/binary/sequence features contribute
                if feature_type in [CATEGORY, BINARY]:
                    correct_predictions = \
                        result[field_name][CORRECT_PREDICTIONS]
                elif feature_type == SEQUENCE:
                    correct_predictions = \
                        result[field_name][CORRECT_ROWWISE_PREDICTIONS]
                else:
                    correct_predictions = None
                if correct_predictions is not None:
                    if combined_correct_predictions is None:
                        combined_correct_predictions = correct_predictions
                    else:
                        # a datapoint is combined-correct only if every
                        # output feature predicted it correctly
                        combined_correct_predictions = np.logical_and(
                            combined_correct_predictions,
                            correct_predictions
                        )
        if not only_predictions:
            output_stats['combined'][LOSS] += result['eval_combined_loss'].sum()
            output_stats['combined'][ACCURACY] += (
                combined_correct_predictions.sum()
                if combined_correct_predictions is not None else 0
            )
        return output_stats, seq_set_size
    def update_output_stats(
            self,
            output_stats,
            set_size,
            seq_set_size,
            collect_predictions,
            only_predictions
    ):
        """Finalize accumulated stats at the end of an evaluation pass.

        SUM stats become means over ``set_size``; SEQ_SUM stats are
        divided by their per-stat sequence counts; AVG_EXP stats become
        exp(mean) (e.g. perplexity); APPEND stats are right-padded to a
        common shape and concatenated. Sequence predictions/probabilities
        are then trimmed to their true lengths. Returns the mutated
        ``output_stats``.
        """
        output_features = self.hyperparameters['output_features']
        for i, output_feature in enumerate(output_features):
            feature_type = output_feature['type']
            field_name = output_feature['name']
            output_config = output_type_registry[feature_type].output_config
            for stat in output_config:
                output_type = output_config[stat]['type']
                if ((output_type == PREDICTION and
                     (collect_predictions or only_predictions)) or
                        (output_type == MEASURE and not only_predictions)):
                    if output_config[stat]['aggregation'] == SUM:
                        output_stats[field_name][stat] /= set_size
                    elif output_config[stat]['aggregation'] == SEQ_SUM:
                        output_stats[field_name][stat] /= (
                            seq_set_size[field_name][stat]
                        )
                    elif output_config[stat]['aggregation'] == AVG_EXP:
                        output_stats[field_name][stat] = np.exp(
                            output_stats[field_name][stat] / set_size
                        )
                    elif output_config[stat]['aggregation'] == APPEND:
                        # batches of multi-dimensional arrays may differ in
                        # trailing dims (e.g. sequence length): pad all to the
                        # elementwise max shape before concatenating
                        if len(output_stats[field_name][stat]) > 0 and len(
                                output_stats[field_name][stat][0].shape) > 1:
                            max_shape = None
                            for result in output_stats[field_name][stat]:
                                if max_shape is None:
                                    max_shape = result.shape
                                else:
                                    max_shape = np.maximum(
                                        max_shape,
                                        result.shape
                                    )
                            results = []
                            for result in output_stats[field_name][stat]:
                                diff_shape = max_shape - np.array(result.shape)
                                # axis 0 is the batch axis; never pad it
                                diff_shape[0] = 0
                                pad_width = [(0, k) for k in diff_shape]
                                paded_result = np.pad(
                                    result,
                                    pad_width,
                                    'constant',
                                    constant_values=0
                                )
                                results.append(paded_result)
                        else:
                            results = output_stats[field_name][stat]
                        output_stats[field_name][stat] = np.concatenate(
                            results
                        )
            if feature_type == SEQUENCE:
                # trim output sequences
                if LENGTHS in output_stats[field_name]:
                    lengths = output_stats[field_name][LENGTHS]
                    if PREDICTIONS in output_stats[field_name]:
                        output_stats[field_name][PREDICTIONS] = np.array(
                            [list(output_stats[field_name][PREDICTIONS][i,
                                  0:lengths[i]])
                             for i in range(len(lengths))]
                        )
                    if PROBABILITIES in output_stats[field_name]:
                        output_stats[field_name][PROBABILITIES] = np.array(
                            [list(output_stats[field_name][PROBABILITIES][i,
                                  0:lengths[i]]) for i in
                             range(len(lengths))]
                        )
        if not only_predictions:
            output_stats['combined'][LOSS] /= set_size
            output_stats['combined'][ACCURACY] /= set_size
        return output_stats
    def check_progress_on_validation(
            self,
            progress_tracker,
            validation_field,
            validation_measure,
            session, model_weights_path,
            model_hyperparameters_path,
            reduce_learning_rate_on_plateau,
            reduce_learning_rate_on_plateau_patience,
            reduce_learning_rate_on_plateau_rate,
            increase_batch_size_on_plateau_patience,
            increase_batch_size_on_plateau,
            increase_batch_size_on_plateau_max,
            increase_batch_size_on_plateau_rate,
            early_stop,
            skip_save_model
    ):
        """End-of-epoch validation check: save on improvement, apply
        plateau logic, and decide whether to stop training early.

        NOTE: the ``increase_batch_size_on_plateau*`` parameters are
        deliberately ordered patience-first to match the call site in
        ``train``; do not reorder them.

        :return: True when early stopping should end the training loop
        """
        should_break = False
        # record how long its been since an improvement
        improved = get_improved_fun(validation_measure)
        if improved(
                progress_tracker.vali_stats[validation_field][
                    validation_measure][-1],
                progress_tracker.best_valid_measure
        ):
            # new best: remember it and checkpoint the model (master only)
            progress_tracker.last_improvement_epoch = progress_tracker.epoch
            progress_tracker.best_valid_measure = progress_tracker.vali_stats[
                validation_field][validation_measure][-1]
            if is_on_master():
                if not skip_save_model:
                    self.save_weights(session, model_weights_path)
                    self.save_hyperparameters(
                        self.hyperparameters,
                        model_hyperparameters_path
                    )
                    logger.info(
                        'Validation {} on {} improved, model saved'.format(
                            validation_measure,
                            validation_field
                        )
                    )
        progress_tracker.last_improvement = (
                progress_tracker.epoch - progress_tracker.last_improvement_epoch
        )
        if progress_tracker.last_improvement != 0:
            if is_on_master():
                logger.info(
                    'Last improvement of {} on {} happened '
                    '{} epoch{} ago'.format(
                        validation_measure,
                        validation_field,
                        progress_tracker.last_improvement,
                        '' if progress_tracker.last_improvement == 1 else 's'
                    )
                )
        # ========== Reduce Learning Rate Plateau logic ========
        if reduce_learning_rate_on_plateau > 0:
            self.reduce_learning_rate(
                progress_tracker,
                reduce_learning_rate_on_plateau,
                reduce_learning_rate_on_plateau_patience,
                reduce_learning_rate_on_plateau_rate
            )
        # ========== Increase Batch Size Plateau logic =========
        if increase_batch_size_on_plateau > 0:
            self.increase_batch_size(
                progress_tracker,
                increase_batch_size_on_plateau_patience,
                increase_batch_size_on_plateau,
                increase_batch_size_on_plateau_max,
                increase_batch_size_on_plateau_rate
            )
        # ========== Early Stop logic ==========
        if early_stop > 0:
            if progress_tracker.last_improvement >= early_stop:
                if is_on_master():
                    logger.info(
                        "\nEARLY STOPPING due to lack of validation improvement"
                        ", it has been {0} epochs since last validation "
                        "accuracy improvement\n".format(
                            progress_tracker.epoch -
                            progress_tracker.last_improvement_epoch
                        )
                    )
                should_break = True
        return should_break
def predict(
self,
dataset,
batch_size,
evaluate_performance=True,
gpus=None,
gpu_fraction=1,
**kwargs
):
if self.session is None:
session = self.initialize_session(gpus, gpu_fraction)
# load parameters
if self.weights_save_path:
self.restore(session, self.weights_save_path)
else:
session = self.session
# predict
predict_stats = self.batch_evaluation(
session,
dataset,
batch_size,
is_training=False,
collect_predictions=True,
only_predictions=not evaluate_performance
)
return predict_stats
def collect_activations(
self,
dataset,
tensor_names,
batch_size,
gpus=None,
gpu_fraction=1,
**kwargs
):
if self.session is None:
session = self.initialize_session(gpus, gpu_fraction)
# load parameters
if self.weights_save_path:
self.restore(session, self.weights_save_path)
else:
session = self.session
# get operation names
operation_names = set(
[t.name for op in self.graph.get_operations() for t in op.values()]
)
for tensor_name in tensor_names:
if tensor_name not in operation_names:
raise ValueError(
'Tensor / operation {} not present in the '
'model graph'.format(tensor_name)
)
# collect tensors
collected_tensors = self.batch_collect_activations(
session,
dataset,
batch_size,
tensor_names
)
return collected_tensors
def collect_weights(
self,
tensor_names,
gpus=None,
gpu_fraction=1,
**kwargs
):
if self.session is None:
session = self.initialize_session(gpus, gpu_fraction)
# load parameters
if self.weights_save_path:
self.restore(session, self.weights_save_path)
else:
session = self.session
operation_names = set(
[t.name for op in self.graph.get_operations() for t in op.values()]
)
for tensor_name in tensor_names:
if tensor_name not in operation_names:
raise ValueError(
'Tensor / operation {} not present in the '
'model graph'.format(tensor_name)
)
# collect tensors
collected_tensors = {
tensor_name: session.run(self.graph.get_tensor_by_name(tensor_name))
for tensor_name in tensor_names
}
return collected_tensors
def save_weights(self, session, save_path):
self.weights_save_path = self.saver.save(session, save_path)
def save_hyperparameters(self, hyperparameters, save_path):
# removing pretrained embeddings paths from hyperparameters
# because the weights are already saved in the model, no need to reload
# from their path when loading the model next time
local_hyperparamters = copy.deepcopy(hyperparameters)
for feature in (local_hyperparamters['input_features'] +
local_hyperparamters['output_features']):
if 'pretrained_embeddings' in feature:
feature['pretrained_embeddings'] = None
save_json(save_path, hyperparameters, sort_keys=True, indent=4)
    def save_savedmodel(self, save_path):
        """Export the model in TensorFlow SavedModel format for serving.

        Builds a 'predict' signature mapping each input feature's
        placeholder to its output feature tensor and writes the graph
        plus variables (via this model's Saver) under ``save_path``.
        """
        input_tensors = {}
        for input_feature in self.hyperparameters['input_features']:
            input_tensors[input_feature['name']] = getattr(
                self, input_feature['name']
            )
        output_tensors = {}
        for output_feature in self.hyperparameters['output_features']:
            output_tensors[output_feature['name']] = getattr(
                self,
                output_feature['name']
            )
        session = self.initialize_session()
        builder = saved_model_builder.SavedModelBuilder(save_path)
        builder.add_meta_graph_and_variables(
            session,
            [tf.saved_model.tag_constants.SERVING],
            signature_def_map={
                'predict': tf.saved_model.predict_signature_def(
                    input_tensors, output_tensors)
            },
            strip_default_attrs=True,
            saver=self.saver,
        )
        builder.save()
    def restore(self, session, weights_path):
        """Restore model weights into ``session`` from a saver checkpoint."""
        self.saver.restore(session, weights_path)
@staticmethod
def load(load_path, use_horovod=False):
hyperparameter_file = os.path.join(
load_path,
MODEL_HYPERPARAMETERS_FILE_NAME
)
hyperparameters = load_json(hyperparameter_file)
model = Model(use_horovod=use_horovod, **hyperparameters)
model.weights_save_path = os.path.join(
load_path,
MODEL_WEIGHTS_FILE_NAME
)
return model
def set_epochs_to_1_or_quit(self, signum, frame):
if not self.received_sigint:
self.epochs = 1
self.received_sigint = True
logger.critical(
'\nReceived SIGINT, will finish this epoch and then conclude '
'the training'
)
logger.critical(
'Send another SIGINT to immediately interrupt the process'
)
else:
logger.critical('\nReceived a second SIGINT, will now quit')
sys.exit(1)
    def quit_training(self, signum, frame):
        """SIGQUIT handler: abort training immediately."""
        logger.critical('Received SIGQUIT, will kill training')
        sys.exit(1)
def resume_training(self, save_path, model_weights_path):
if is_on_master():
logger.info('Resuming training of model: {0}'.format(save_path))
self.weights_save_path = model_weights_path
progress_tracker = ProgressTracker.load(
os.path.join(
save_path,
TRAINING_PROGRESS_FILE_NAME
)
)
return progress_tracker
def initialize_training_stats(self, output_features):
train_stats = OrderedDict()
vali_stats = OrderedDict()
test_stats = OrderedDict()
for output_feature in output_features:
field_name = output_feature['name']
train_stats[field_name] = OrderedDict()
vali_stats[field_name] = OrderedDict()
test_stats[field_name] = OrderedDict()
output_config = output_type_registry[
output_feature['type']].output_config
for stat, config in output_config.items():
if config['type'] == MEASURE:
train_stats[field_name][stat] = []
vali_stats[field_name][stat] = []
test_stats[field_name][stat] = []
for stats in [train_stats, vali_stats, test_stats]:
stats['combined'] = {
LOSS: [],
ACCURACY: []
}
return train_stats, vali_stats, test_stats
def get_stat_names(self, output_features):
stat_names = {}
for output_feature in output_features:
field_name = output_feature['name']
output_config = output_type_registry[
output_feature['type']].output_config
for stat, config in output_config.items():
if config['type'] == MEASURE:
stats = stat_names.get(field_name, [])
stats.append(stat)
stat_names[field_name] = stats
stat_names['combined'] = [LOSS, ACCURACY]
return stat_names
    def initialize_batcher(
            self,
            dataset,
            batch_size=128,
            bucketing_field=None,
            should_shuffle=True,
            ignore_last=False
    ):
        """Create the appropriate batcher for ``dataset``.

        Uses a DistributedBatcher under Horovod, a BucketedBatcher when a
        ``bucketing_field`` is given (batching rows of similar length
        together), otherwise a plain Batcher.

        :param should_shuffle: shuffle rows between epochs
        :param ignore_last: drop the final (possibly smaller) batch
        :raises ValueError: if ``bucketing_field`` is not an input feature
        """
        if self.horovod:
            batcher = DistributedBatcher(
                dataset,
                self.horovod.rank(),
                self.horovod,
                batch_size,
                should_shuffle=should_shuffle,
                ignore_last=ignore_last
            )
        elif bucketing_field is not None:
            input_features = self.hyperparameters['input_features']
            bucketing_feature = [
                feature for feature in input_features if
                feature['name'] == bucketing_field
            ]
            if not bucketing_feature:
                raise ValueError(
                    'Bucketing field {} not present in input features'.format(
                        bucketing_field
                    )
                )
            else:
                bucketing_feature = bucketing_feature[0]
            # only encoders with dynamic lengths benefit from trimming
            should_trim = bucketing_feature[
                              'encoder'] in dynamic_length_encoders
            # trim on the same side the feature was padded on; fall back to
            # the type-level preprocessing default when unset per-feature
            if 'preprocessing' in bucketing_feature:
                trim_side = bucketing_feature['preprocessing']['padding']
            else:
                trim_side = self.hyperparameters['preprocessing'][
                    bucketing_feature['type']]['padding']
            batcher = BucketedBatcher(
                dataset,
                bucketing_field=bucketing_field,
                batch_size=batch_size,
                buckets=10,
                ignore_last=ignore_last,
                should_shuffle=should_shuffle,
                should_trim=should_trim,
                trim_side=trim_side
            )
        else:
            batcher = Batcher(
                dataset,
                batch_size,
                should_shuffle=should_shuffle,
                ignore_last=ignore_last
            )
        return batcher
def resume_session(
self,
session,
save_path,
model_weights_path,
model_weights_progress_path
):
num_matching_files = 0
pattern = re.compile(MODEL_WEIGHTS_PROGRESS_FILE_NAME)
for file_path in os.listdir(save_path):
if pattern.match(file_path):
num_matching_files += 1
if num_matching_files == 3:
self.restore(session, model_weights_progress_path)
else:
self.restore(session, model_weights_path)
def reduce_learning_rate(
self,
progress_tracker,
reduce_learning_rate_on_plateau,
reduce_learning_rate_on_plateau_patience,
reduce_learning_rate_on_plateau_rate
):
if (progress_tracker.last_improvement >=
reduce_learning_rate_on_plateau_patience):
if (progress_tracker.num_reductions_lr >=
reduce_learning_rate_on_plateau):
if is_on_master():
logger.info(
'It has been ' +
str(progress_tracker.last_improvement) +
' epochs since last validation accuracy improvement '
'and the learning rate was already reduced ' +
str(progress_tracker.num_reductions_lr) +
' times, not reducing it anymore'
)
else:
if is_on_master():
logger.info(
'PLATEAU REACHED, reducing learning rate '
'due to lack of validation improvement, it has been ' +
str(progress_tracker.last_improvement) +
' epochs since last validation accuracy improvement '
'or since the learning rate was reduced'
)
progress_tracker.learning_rate *= (
reduce_learning_rate_on_plateau_rate
)
progress_tracker.last_improvement_epoch = (
progress_tracker.epoch
)
progress_tracker.last_improvement = 0
progress_tracker.num_reductions_lr += 1
def increase_batch_size(
self,
progress_tracker,
increase_batch_size_on_plateau_patience,
increase_batch_size_on_plateau,
increase_batch_size_on_plateau_max,
increase_batch_size_on_plateau_rate
):
if (progress_tracker.last_improvement >=
increase_batch_size_on_plateau_patience):
if (progress_tracker.num_increases_bs >=
increase_batch_size_on_plateau):
if is_on_master():
logger.info(
'It has been ' +
str(progress_tracker.last_improvement) +
' epochs since last validation accuracy improvement '
'and the learning rate was already reduced ' +
str(progress_tracker.num_increases_bs) +
' times, not reducing it anymore'
)
elif (progress_tracker.batch_size ==
increase_batch_size_on_plateau_max):
if is_on_master():
logger.info(
'It has been' +
str(progress_tracker.last_improvement) +
' epochs since last validation accuracy improvement '
'and the batch size was already increased ' +
str(progress_tracker.num_increases_bs) +
' times and currently is ' +
str(progress_tracker.batch_size) +
', the maximum allowed'
)
else:
if is_on_master():
logger.info(
'PLATEAU REACHED '
'increasing batch size due to lack of '
'validation improvement, it has been ' +
str(progress_tracker.last_improvement) +
' epochs since last validation accuracy improvement '
'or since the batch size was increased'
)
progress_tracker.batch_size = min(
(increase_batch_size_on_plateau_rate *
progress_tracker.batch_size),
increase_batch_size_on_plateau_max
)
progress_tracker.last_improvement_epoch = progress_tracker.epoch
progress_tracker.last_improvement = 0
progress_tracker.num_increases_bs += 1
class ProgressTracker:
    """Mutable, JSON-serializable record of training progress.

    Tracks the current epoch/step counters, optimizer state that adapts on
    validation plateaus (learning rate, batch size and how often each has
    been changed), and the per-split statistics dictionaries.
    """

    def __init__(
            self,
            epoch,
            batch_size,
            steps,
            last_improvement_epoch,
            best_valid_measure,
            learning_rate,
            num_reductions_lr,
            num_increases_bs,
            train_stats,
            vali_stats,
            test_stats,
            last_improvement=0
    ):
        # Progress counters.
        self.epoch = epoch
        self.steps = steps
        self.batch_size = batch_size
        # Plateau bookkeeping.
        self.last_improvement_epoch = last_improvement_epoch
        self.last_improvement = last_improvement
        self.best_valid_measure = best_valid_measure
        self.learning_rate = learning_rate
        self.num_reductions_lr = num_reductions_lr
        self.num_increases_bs = num_increases_bs
        # Per-split statistics.
        self.train_stats = train_stats
        self.vali_stats = vali_stats
        self.test_stats = test_stats

    def save(self, filepath):
        """Persist the full tracker state as JSON at `filepath`."""
        save_json(filepath, self.__dict__)

    @staticmethod
    def load(filepath):
        """Rebuild a tracker from JSON previously written by `save`."""
        return ProgressTracker(**load_json(filepath))
def load_model_and_definition(model_dir, use_horovod=False):
    """Load a trained model plus its hyperparameter definition.

    Reads the hyperparameters JSON from `model_dir` and restores the model
    weights via `Model.load`. Returns a `(model, model_definition)` tuple.
    """
    definition_path = os.path.join(model_dir, MODEL_HYPERPARAMETERS_FILE_NAME)
    model_definition = load_json(definition_path)
    model = Model.load(model_dir, use_horovod=use_horovod)
    return model, model_definition
| 37.414184 | 123 | 0.547092 |
5c6c42ad5c3061f29faf71c041bb36a37bc384bb | 2,485 | py | Python | vartoml/__init__.py | manfredlotz/vartoml | 402524c74dd65bc1bf41209220fa41804752122f | [
"MIT"
] | null | null | null | vartoml/__init__.py | manfredlotz/vartoml | 402524c74dd65bc1bf41209220fa41804752122f | [
"MIT"
] | null | null | null | vartoml/__init__.py | manfredlotz/vartoml | 402524c74dd65bc1bf41209220fa41804752122f | [
"MIT"
] | null | null | null | """vartoml - Enable variables in a TOML file"""
__version__ = '0.9.2'
__author__ = 'Manfred Lotz <manfred.lotz@posteo.de>'
# __all__ = []
import toml
import os
import re
from typing import List, Dict, Match, Any, MutableMapping
"""
According to the TOML specification (https://toml.io/en/v1.0.0-rc.1)
- naming rules for sections (aka tables) are the same as for keys
- keys may consist of ASCII letters, digits, underscores and dashes
Example:
database = "/var/db/mydb.db"
home_dir = "/home/johndoe"
db-port = 4711
_a = "hey"
-bla = "something"
1ab = true
"""
RE_VAR = re.compile(r"""
[$][{]
(
[a-zA-Z0-9_-]+ # section name
([:][a-zA-Z0-9_-]+)+ # variable name
)
[}]
""", re.VERBOSE)
class VarToml:
    """Wrapper around `toml` that expands ${section:key} references.

    After `load`/`loads`, every string value containing a ${...} placeholder
    has been replaced in place by the referenced value, re-parsed through the
    TOML decoder so ints/floats/bools keep their types where possible.
    """

    def __init__(self) -> None:
        self.decoder = toml.TomlDecoder()

    def load(self, *args, **kwargs):
        """Parse a TOML file and expand variable references in place."""
        self.data = toml.load(*args, **kwargs)
        self._process(self.data)

    def loads(self, *args, **kwargs):
        """Parse a TOML string and expand variable references in place."""
        self.data = toml.loads(*args, **kwargs)
        self._process(self.data)

    def _var_replace(self, x):
        """Resolve one matched ${a:b:...} reference against the parsed data.

        Returns the referenced value as a string (re.sub requires str).
        """
        path = x.groups()[0].split(':')
        value = self.data[path[0]]
        for key in path[1:]:
            value = value[key]
        return str(value)

    def get(self, *args):
        """Walk nested tables: get('a', 'b') returns data['a']['b']."""
        node = self.data
        for key in args:
            node = node[key]
        return node

    def dict(self):
        """Return the (already expanded) parsed data as a plain dict."""
        return self.data

    def _process(self, item):
        """Recursively substitute ${...} references in dicts and lists."""
        entries = None
        if isinstance(item, dict):
            entries = item.items()
        elif isinstance(item, list):
            entries = enumerate(item)
        for key, value in entries:
            if isinstance(value, (dict, list)):
                self._process(value)
            elif isinstance(value, str):
                if re.search(RE_VAR, value):
                    substituted = re.sub(RE_VAR, self._var_replace, value)
                    # Prefer the typed value (int/float/bool/...) the
                    # substituted text parses to; fall back to a string.
                    try:
                        item[key], _ = self.decoder.load_value(substituted)
                        continue
                    except ValueError:
                        pass
                    item[key], _ = self.decoder.load_value('"{}"'.format(substituted))
| 26.157895 | 83 | 0.521529 |
a9e9864642e5eb7ae4e9a58db8dd2a93ddbe2a64 | 3,684 | py | Python | pbr-0.5.19-py2.7.egg/pbr/d2to1/tests/test_hooks.py | Rashminadig/SDN | 9945f93156ca488bcad9b95c298d7ddc90873a87 | [
"Apache-2.0"
] | null | null | null | pbr-0.5.19-py2.7.egg/pbr/d2to1/tests/test_hooks.py | Rashminadig/SDN | 9945f93156ca488bcad9b95c298d7ddc90873a87 | [
"Apache-2.0"
] | null | null | null | pbr-0.5.19-py2.7.egg/pbr/d2to1/tests/test_hooks.py | Rashminadig/SDN | 9945f93156ca488bcad9b95c298d7ddc90873a87 | [
"Apache-2.0"
] | 1 | 2019-02-04T21:41:00.000Z | 2019-02-04T21:41:00.000Z | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (C) 2013 Association of Universities for Research in Astronomy
# (AURA)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of AURA and its representatives may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY AURA ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL AURA BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import textwrap
from pbr.d2to1 import tests
from pbr.d2to1.tests import util
class TestHooks(tests.D2to1TestCase):
    """Exercise d2to1 global setup hooks and per-command pre/post hooks."""

    def setUp(self):
        super(TestHooks, self).setUp()
        cfg_path = os.path.join(self.package_dir, 'setup.cfg')
        with util.open_config(cfg_path) as cfg:
            # Two global hooks (ordering matters) plus a pre/post pair
            # scoped to the build_ext command.
            cfg.set('global', 'setup-hooks',
                    'd2to1_testpackage._setup_hooks.test_hook_1\n'
                    'd2to1_testpackage._setup_hooks.test_hook_2')
            cfg.set('build_ext', 'pre-hook.test_pre_hook',
                    'd2to1_testpackage._setup_hooks.test_pre_hook')
            cfg.set('build_ext', 'post-hook.test_post_hook',
                    'd2to1_testpackage._setup_hooks.test_post_hook')

    def test_global_setup_hooks(self):
        """Hooks from the [global] section run, in the listed order."""
        out, _, rc = self.run_setup('egg_info')
        assert 'test_hook_1\ntest_hook_2' in out
        assert rc == 0

    def test_command_hooks(self):
        """Command hooks fire only around their own command."""
        # egg_info must not trigger the build_ext hooks.
        out, _, rc = self.run_setup('egg_info')
        assert 'build_ext pre-hook' not in out
        assert 'build_ext post-hook' not in out
        assert rc == 0

        # build_ext runs its pre-hook first and its post-hook last.
        out, _, rc = self.run_setup('build_ext')
        assert textwrap.dedent("""
            running build_ext
            running pre_hook d2to1_testpackage._setup_hooks.test_pre_hook for command build_ext
            build_ext pre-hook
            """) in out  # flake8: noqa
        assert out.endswith('build_ext post-hook')
        assert rc == 0
| 39.191489 | 95 | 0.690554 |
1679747124948cf34e303aa08f75709de63b1530 | 257 | py | Python | ncei/src/data_bundle.py | DataONEorg/d1_ncei_adapter | 34dd4ed9d581d259a70d7c9a884f520226dd2691 | [
"Apache-2.0"
] | 1 | 2019-06-19T02:41:02.000Z | 2019-06-19T02:41:02.000Z | ncei/src/data_bundle.py | DataONEorg/d1_ncei_adapter | 34dd4ed9d581d259a70d7c9a884f520226dd2691 | [
"Apache-2.0"
] | 7 | 2019-06-24T20:21:51.000Z | 2022-01-07T13:06:07.000Z | ncei/src/data_bundle.py | DataONEorg/d1_ncei_adapter | 34dd4ed9d581d259a70d7c9a884f520226dd2691 | [
"Apache-2.0"
] | 3 | 2017-04-17T13:24:20.000Z | 2019-05-28T18:32:27.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""":Mod: data_bundle
:Synopsis:
:Author:
servilla
:Created:
3/4/16
"""
import logging
# Module-level logger for the data_bundle script.
logger = logging.getLogger('data_bundle')


def main():
    """Entry point placeholder; always reports success (status 0)."""
    return 0


if __name__ == "__main__":
    main()
572a6d297c3652254b0307c454346bbfeec1cd4e | 4,253 | py | Python | mod_shift/ModShiftWrapper.py | ModShift/ModShift | 13a38dd573a413e3122a35f4424c539773ee252e | [
"MIT"
] | null | null | null | mod_shift/ModShiftWrapper.py | ModShift/ModShift | 13a38dd573a413e3122a35f4424c539773ee252e | [
"MIT"
] | null | null | null | mod_shift/ModShiftWrapper.py | ModShift/ModShift | 13a38dd573a413e3122a35f4424c539773ee252e | [
"MIT"
] | 1 | 2019-11-22T08:21:46.000Z | 2019-11-22T08:21:46.000Z | import sys
import time
sys.path.append("..")
from .ModShift import ModShift
from tqdm import tqdm
import numpy as np
import os
from utils.obtain_hard_clusters import obtain_hard_clusters
from utils.metrics import compute_metrics
class ModShiftWrapper(object):
def __init__(self, config, dataset, target_dir):
if dataset == "CREMI":
self.betas = config["CREMI"]["betas"]
self.thresholds = config["CREMI"]["thresholds"]
elif dataset == "ISBI":
self.betas = config["ISBI"]["betas"]
self.thresholds = config["ISBI"]["thresholds"]
else:
print("Invalid dataset {} provided.".format(dataset))
self.config = config["ModShift"]
self.n_iter = config["ModShift"]["optimization"]["iterations"]
self.target_dir = target_dir
if not os.path.exists(target_dir):
try:
os.makedirs(target_dir)
except OSError:
print("Creation of the directory %s failed" % self.target_dir)
def run(self, data, gt, repeats=1, timed=False, downsample=1):
if timed:
assert len(self.betas) == 1 and len(self.thresholds) == 1
times = []
for repeat in tqdm(range(repeats), desc="Runs"):
results_list = []
for beta in tqdm(self.betas, desc="Processing beta", leave=False):
self.config["weight_func"]["beta"] = beta
ModShifter = ModShift(data, self.config)
time_before_run = time.perf_counter()
ModShifter.run(self.n_iter)
time_after_run = time.perf_counter()
trajectories = ModShifter.trajectories
np.save(os.path.join(self.target_dir, f"mod_shift_trajectories_beta_{beta}_run_{repeat}_"+\
f"downsample_{downsample}.npy"),
trajectories)
time_before_hard_cluster = time.perf_counter()
labels = obtain_hard_clusters(np.moveaxis(trajectories[-1], 1, -1),
[threshold if not (threshold == "same") else beta for threshold in self.thresholds])
time_after_hard_cluster = time.perf_counter()
total_time = time_after_run - time_before_run + time_after_hard_cluster - time_before_hard_cluster
times.append(total_time)
# compute scores, save labels and scores for each threshold
for i, threshold in enumerate(self.thresholds):
np.save(os.path.join(self.target_dir, f"mod_shift_labels_beta_{beta}_threshold_{threshold}_" + \
f"run_{repeat}_downsample_{downsample}.npy"),
labels[:, i, ...])
# compute metrics
results = {"parameters": {"beta": beta, "threshold": threshold},
"scores": compute_metrics(labels[:, i, ...], gt.copy())}
# save results
np.save(os.path.join(self.target_dir,
f"mod_shift_scores_beta_{beta}_threshold_{threshold}_run_{repeat}_"+
f"downsample_{downsample}.npy"),
np.array([results["scores"]["CREMI_score"],
results["scores"]["arand"],
results["scores"]["voi"][0],
results["scores"]["voi"][1]
]))
results_list.append(results)
tqdm.write("Done with beta {}".format(beta))
times = np.array(times)
if timed:
print(f"Mean time: {times.mean(0)})")
print(f"Std dev time: {times.std(0)}")
np.save(os.path.join(self.target_dir,
f"mod_shift_times_beta_{self.betas[0]}_threshold_{self.thresholds[0]}_"+
f"repeats_{repeats}_downsample_{downsample}.npy"),
times)
return sorted(results_list, key=lambda x: x["scores"]["CREMI_score"])[0]
| 45.731183 | 130 | 0.531154 |
095570d04ca5a6835435d3bf1eacaa08721b7666 | 5,227 | py | Python | tests/acceptance/test_emails.py | mitsuhiko/sentry | cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90 | [
"BSD-3-Clause"
] | 4 | 2016-03-16T07:21:36.000Z | 2017-09-04T07:29:56.000Z | tests/acceptance/test_emails.py | mitsuhiko/sentry | cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90 | [
"BSD-3-Clause"
] | null | null | null | tests/acceptance/test_emails.py | mitsuhiko/sentry | cddc3b643a13b52ac6d07ff22e4bd5d69ecbad90 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import absolute_import
from urllib import urlencode
from sentry.testutils import AcceptanceTestCase
class EmailTestCase(AcceptanceTestCase):
def setUp(self):
super(EmailTestCase, self).setUp()
self.user = self.create_user('foo@example.com')
self.login_as(self.user)
def build_url(self, path, format='html'):
return u'{}?{}'.format(
path,
urlencode({
'format': format,
'seed': '123',
}),
)
def test_assigned_html(self):
self.browser.get(self.build_url('/debug/mail/assigned/'))
self.browser.wait_until('#preview')
self.browser.snapshot('assigned email html')
def test_assigned_txt(self):
self.browser.get(self.build_url('/debug/mail/assigned/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('assigned email txt')
def test_assigned_self_html(self):
self.browser.get(self.build_url('/debug/mail/assigned/self/'))
self.browser.wait_until('#preview')
self.browser.snapshot('assigned_self email html')
def test_assigned_self_txt(self):
self.browser.get(self.build_url('/debug/mail/assigned/self/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('assigned_self email txt')
def test_note_html(self):
self.browser.get(self.build_url('/debug/mail/note/'))
self.browser.wait_until('#preview')
self.browser.snapshot('note email html')
def test_note_txt(self):
self.browser.get(self.build_url('/debug/mail/note/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('note email txt')
def test_regression_html(self):
self.browser.get(self.build_url('/debug/mail/regression/'))
self.browser.wait_until('#preview')
self.browser.snapshot('regression email html')
def test_regression_txt(self):
self.browser.get(self.build_url('/debug/mail/regression/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('regression email txt')
def test_regression_with_version_html(self):
self.browser.get(self.build_url('/debug/mail/regression/release/'))
self.browser.wait_until('#preview')
self.browser.snapshot('regression_with_version email html')
def test_regression_with_version_txt(self):
self.browser.get(self.build_url('/debug/mail/regression/release/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('regression_with_version email txt')
def test_resolved_html(self):
self.browser.get(self.build_url('/debug/mail/resolved/'))
self.browser.wait_until('#preview')
self.browser.snapshot('resolved email html')
def test_resolved_txt(self):
self.browser.get(self.build_url('/debug/mail/resolved/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('resolved email txt')
def test_resolved_in_release_html(self):
self.browser.get(self.build_url('/debug/mail/resolved-in-release/'))
self.browser.wait_until('#preview')
self.browser.snapshot('resolved_in_release email html')
def test_resolved_in_release_txt(self):
self.browser.get(self.build_url('/debug/mail/resolved-in-release/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('resolved_in_release email txt')
def test_resolved_in_release_upcoming_html(self):
self.browser.get(self.build_url('/debug/mail/resolved-in-release/upcoming/'))
self.browser.wait_until('#preview')
self.browser.snapshot('resolved_in_release_upcoming email html')
def test_resolved_in_release_upcoming_txt(self):
self.browser.get(self.build_url('/debug/mail/resolved-in-release/upcoming/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('resolved_in_release_upcoming email txt')
def test_unassigned_html(self):
self.browser.get(self.build_url('/debug/mail/unassigned/'))
self.browser.wait_until('#preview')
self.browser.snapshot('unassigned email html')
def test_unassigned_txt(self):
self.browser.get(self.build_url('/debug/mail/unassigned/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('unassigned email txt')
def test_new_event_html(self):
self.browser.get(self.build_url('/debug/mail/new-event/'))
self.browser.wait_until('#preview')
self.browser.snapshot('new event email html')
def test_new_event_txt(self):
self.browser.get(self.build_url('/debug/mail/new-event/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('new event email txt')
def test_digest_html(self):
self.browser.get(self.build_url('/debug/mail/digest/'))
self.browser.wait_until('#preview')
self.browser.snapshot('digest email html')
def test_digest_txt(self):
self.browser.get(self.build_url('/debug/mail/digest/', 'txt'))
self.browser.wait_until('#preview')
self.browser.snapshot('digest email txt')
| 39.598485 | 92 | 0.673044 |
cc2f15a054dc3b94d0f8f0bfe58a6cc93195bbff | 26,304 | py | Python | Form/ModiaWindow.py | mokyue/Modia-Player | 1e87c86eeea1e379025d797bbdabf744d3725b5e | [
"Unlicense"
] | null | null | null | Form/ModiaWindow.py | mokyue/Modia-Player | 1e87c86eeea1e379025d797bbdabf744d3725b5e | [
"Unlicense"
] | null | null | null | Form/ModiaWindow.py | mokyue/Modia-Player | 1e87c86eeea1e379025d797bbdabf744d3725b5e | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
from PyQt4.QtCore import SIGNAL, QObject, SLOT, pyqtSlot, QRect, QPoint, QSize, Qt
from PyQt4.QtGui import QMainWindow, QPixmap, QApplication, QSizePolicy, QHBoxLayout, QVBoxLayout, QGridLayout, \
QTableWidget, QFrame, QAbstractItemView, QIcon, QPainter, QBrush, QColor, QFont
from PyQt4.phonon import Phonon
from Core.AudioManager import AudioManager
from Core.Enum import Enum
from Core.MStyleSetter import MStyleSetter
from Widget.MActionBar import MActionBar
from Widget.MButton import MButton
from Widget.MFrame import MFrame
from Widget.MLyricPanel import MLyricPanel
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
class ModiaWindow(QMainWindow):
CursorLocation = Enum(
['WINDOW_LEFT', 'WINDOW_RIGHT', 'WINDOW_TOP', 'WINDOW_BOTTOM', 'WINDOW_LEFT_TOP', 'WINDOW_RIGHT_TOP',
'WINDOW_LEFT_BOTTOM', 'WINDOW_RIGHT_BOTTOM', 'WINDOW_TITLE_BAR', 'SCREEN_LEFT', 'SCREEN_RIGHT', 'SCREEN_TOP'])
StickType = Enum(['LEFT', 'RIGHT', 'FULL_SCREEN'])
    def __init__(self, parent=None):
        """Build the frameless player window and its audio backend."""
        super(ModiaWindow, self).__init__(parent)
        self.__pic_bg = QPixmap(':resource')
        # Window size constraints and defaults, in pixels (client area,
        # excluding the translucent shadow border added via the offsets).
        self.WIDTH_MIN = 721
        self.HEIGHT_MIN = 500
        self.WIDTH_DEFAULT = 721
        self.HEIGHT_DEFAULT = 599
        # Painted border strip widths of the background pixmap.
        self.WIDTH_BORDER_TOP = 28
        self.WIDTH_BORDER_RIGHT = 12
        self.WIDTH_BORDER_BOTTOM = 14
        self.WIDTH_BORDER_LEFT = 12
        # Hit-test offsets: distance from the widget edge to the visible
        # window edge (the translucent shadow margin).
        self.OFFSET_BORDER_TOP = 6
        self.OFFSET_BORDER_RIGHT = 8
        self.OFFSET_BORDER_BOTTOM = 10
        self.OFFSET_BORDER_LEFT = 8
        # Fixed width of the left (playlist/controls) panel.
        self.WIDTH_FRAME_LEFT = 360
        # Interaction state flags used by the mouse/resize handlers.
        self.__cursor_loc = None
        self.__is_fixed_size = False
        self.__is_fixed_width = False
        self.__is_fixed_height = False
        self.__is_maximized = False
        self.__is_zdt = False
        self.__is_sticking = False
        self.__is_sticked = False
        self.__lyric_shown = True
        # True while playback is stopped/paused (toggled elsewhere —
        # presumably by the audio manager callbacks; TODO confirm).
        self.__is_suspended = True
        self.__move_point = None
        self.__cursor_changed = False
        self.__mouse_pressed = False
        self.__window_resizing = False
        self.__setup_ui()
        # Remember the normal-state geometry for restore after maximize/stick.
        self.__geometry_frame = self.geometry()
        self.__register_actions()
        self.__audio_manager = AudioManager(self, self.lyric_panel)
    def __setup_ui(self):
        """Create the frameless-window chrome, left control/playlist panel
        and right lyric panel."""
        self.setWindowTitle(QApplication.applicationName())
        self.setWindowFlags(Qt.FramelessWindowHint | Qt.WindowMinMaxButtonsHint)
        self.setWindowModality(Qt.WindowModal)
        self.setAttribute(Qt.WA_TranslucentBackground)
        self.setMouseTracking(True)
        self.setAnimated(True)
        # Window sizes include the shadow offsets on every side.
        self.setMinimumSize(self.WIDTH_MIN + self.OFFSET_BORDER_RIGHT + self.OFFSET_BORDER_LEFT,
                            self.HEIGHT_MIN + self.OFFSET_BORDER_TOP + self.OFFSET_BORDER_BOTTOM)
        self.resize(self.WIDTH_DEFAULT + self.OFFSET_BORDER_RIGHT + self.OFFSET_BORDER_LEFT,
                    self.HEIGHT_DEFAULT + self.OFFSET_BORDER_TOP + self.OFFSET_BORDER_BOTTOM)
        self.setWindowIcon(QIcon(':logo'))
        # Title bar initialization start.
        self.__btn_title_close = MButton(self, MButton.Type.Close)
        self.__btn_title_close.setGeometry(14, 11, 12, 13)
        self.__btn_title_close.setToolTip('退出')
        self.__btn_title_maximize = MButton(self, MButton.Type.Maximize)
        self.__btn_title_maximize.setGeometry(self.__btn_title_close.x() + 16, 11, 12, 13)
        self.__btn_title_maximize.setToolTip('最大化')
        self.__btn_title_minimize = MButton(self, MButton.Type.Minimize)
        self.__btn_title_minimize.setGeometry(self.__btn_title_maximize.x() + 16, 11, 12, 13)
        self.__btn_title_minimize.setToolTip('最小化')
        # Central frame holding the two panels side by side.
        self.frame = MFrame(self)
        horizontal_layout = QHBoxLayout(self.frame)
        horizontal_layout.setContentsMargins(0, 0, 4, 0)
        horizontal_layout.setSpacing(5)
        # Left panel initialization start.
        frame_main_panel = MFrame(self.frame)
        # Fixed width, expanding height.
        size_policy_v_expand = QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Expanding)
        size_policy_v_expand.setHorizontalStretch(0)
        size_policy_v_expand.setVerticalStretch(0)
        size_policy_v_expand.setHeightForWidth(frame_main_panel.sizePolicy().hasHeightForWidth())
        frame_main_panel.setSizePolicy(size_policy_v_expand)
        frame_main_panel.setMinimumSize(self.WIDTH_FRAME_LEFT, 0)
        horizontal_layout.addWidget(frame_main_panel)
        verticalLayout = QVBoxLayout(frame_main_panel)
        verticalLayout.setContentsMargins(0, 0, 0, 0)
        verticalLayout.setSpacing(0)
        # Playback action bar on top of the left panel.
        self.__action_bar = MActionBar(frame_main_panel)
        size_policy_h_expand = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)
        size_policy_h_expand.setHorizontalStretch(0)
        size_policy_h_expand.setVerticalStretch(0)
        size_policy_h_expand.setHeightForWidth(self.__action_bar.sizePolicy().hasHeightForWidth())
        self.__action_bar.setSizePolicy(size_policy_h_expand)
        self.__action_bar.setMinimumSize(0, 136)
        verticalLayout.addWidget(self.__action_bar)
        # Playlist table below the action bar.
        frame_music_list = MFrame(frame_main_panel)
        size_policy_all_expand = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        size_policy_all_expand.setHorizontalStretch(0)
        size_policy_all_expand.setVerticalStretch(0)
        size_policy_all_expand.setHeightForWidth(frame_music_list.sizePolicy().hasHeightForWidth())
        frame_music_list.setSizePolicy(size_policy_all_expand)
        verticalLayout.addWidget(frame_music_list)
        gridLayout = QGridLayout(frame_music_list)
        gridLayout.setContentsMargins(9, 2, 9, 5)
        # Two columns: title and duration; headers hidden, row selection only.
        self.__music_table = QTableWidget(0, 2, frame_music_list)
        self.__music_table.setFrameShape(QFrame.StyledPanel)
        self.__music_table.setHorizontalHeaderLabels(('标题', '时长'))
        self.__music_table.setSelectionMode(QAbstractItemView.SingleSelection)
        self.__music_table.setSelectionBehavior(QAbstractItemView.SelectRows)
        self.__music_table.horizontalHeader().setVisible(False)
        self.__music_table.verticalHeader().setVisible(False)
        self.__music_table.setColumnWidth(0, 290)
        self.__music_table.setColumnWidth(1, 50)
        MStyleSetter.setStyle(self.__music_table, ':qss_tbl_music_list')
        gridLayout.addWidget(self.__music_table, 0, 0, 1, 1)
        # Lyric panel initialization start.
        self.lyric_panel = MLyricPanel(self.frame)
        size_policy_all_expand.setHeightForWidth(self.lyric_panel.sizePolicy().hasHeightForWidth())
        self.lyric_panel.setSizePolicy(size_policy_all_expand)
        horizontal_layout.addWidget(self.lyric_panel)
    def __register_actions(self):
        """Wire the title-bar buttons, playback controls and playlist table
        to their slots (old-style SIGNAL/SLOT strings for the title bar and
        action bar, new-style connect for the table)."""
        QObject.connect(self.__btn_title_close, SIGNAL('clicked()'), self, SLOT('close()'))
        QObject.connect(self.__btn_title_maximize, SIGNAL('clicked()'), self, SLOT('showMaximized()'))
        QObject.connect(self.__btn_title_minimize, SIGNAL('clicked()'), self, SLOT('showMinimized()'))
        QObject.connect(self.__action_bar.get_widget('BTN_PREVIOUS'), SIGNAL('clicked()'), self, SLOT('__previous()'))
        QObject.connect(self.__action_bar.get_widget('BTN_NEXT'), SIGNAL('clicked()'), self, SLOT('__next()'))
        QObject.connect(self.__action_bar.get_widget('BTN_PLAY_PAUSE'), SIGNAL('clicked()'), self,
                        SLOT('__play_pause()'))
        QObject.connect(self.__action_bar.get_widget('BTN_STOP'), SIGNAL('clicked()'), self, SLOT('__stop()'))
        QObject.connect(self.__action_bar.get_widget('BTN_LYRIC'), SIGNAL('clicked()'), self,
                        SLOT('__show_hide_lyric()'))
        QObject.connect(self.__action_bar.get_widget('BTN_ADD_MUSIC'), SIGNAL('clicked()'), self,
                        SLOT('__add_music()'))
        self.__music_table.cellDoubleClicked.connect(self.__cell_double_clicked)
    @pyqtSlot()
    def __previous(self):
        """Slot: skip to the previous track via the audio manager."""
        self.__audio_manager.previous()
    @pyqtSlot()
    def __next(self):
        """Slot: skip to the next track via the audio manager."""
        self.__audio_manager.next()
@pyqtSlot()
def __play_pause(self):
if self.__is_suspended:
self.__audio_manager.play()
else:
self.__audio_manager.pause()
    @pyqtSlot()
    def __stop(self):
        """Slot: stop playback via the audio manager."""
        self.__audio_manager.stop()
    def __cell_double_clicked(self, row, column):
        """Play the track of the double-clicked playlist row.

        Stops current playback, clears the queue and selects the clicked
        row as the current source before starting it.
        """
        self.__stop()
        self.__audio_manager.clearQueue()
        self.__audio_manager.setCurrentSourceByIndex(row)
        # NOTE(review): after the stop() above the media object should not
        # still be in PlayingState, so this branch looks dead (or guards a
        # race inside the audio backend) — verify against AudioManager.
        if self.__audio_manager.getMediaObjectState() == Phonon.PlayingState:
            self.__audio_manager.stop()
        else:
            self.__audio_manager.play()
    @pyqtSlot()
    def __show_hide_lyric(self):
        """Slot: collapse/expand the lyric panel, toggling __lyric_shown.

        Collapsing remembers the current geometry and shrinks the window to
        the left panel only (resizing disabled); expanding restores it.
        """
        if self.__lyric_shown:
            self.__action_bar.get_widget('BTN_LYRIC').setMStyle(MButton.Type.Show_Lyric)
            # Remember the full-size geometry so expanding can restore it.
            self.__geometry_frame = self.geometry()
            self.setMinimumSize(360, self.HEIGHT_MIN + self.OFFSET_BORDER_TOP + self.OFFSET_BORDER_BOTTOM)
            self.resize(376, self.height())
            # Disable manual resizing while collapsed.
            self.__is_fixed_size = True
        else:
            self.__action_bar.get_widget('BTN_LYRIC').setMStyle(MButton.Type.Hide_Lyric)
            self.resize(self.__geometry_frame.width(), self.__geometry_frame.height())
            self.setMinimumSize(self.WIDTH_MIN + self.OFFSET_BORDER_RIGHT + self.OFFSET_BORDER_LEFT,
                                self.HEIGHT_MIN + self.OFFSET_BORDER_TOP + self.OFFSET_BORDER_BOTTOM)
            self.__is_fixed_size = False
        self.__lyric_shown = not self.__lyric_shown
    @pyqtSlot()
    def __add_music(self):
        """Slot: delegate adding tracks to the audio manager."""
        self.__audio_manager.addMusic()
    def getActionBar(self):
        """Return the playback action bar widget (MActionBar)."""
        return self.__action_bar
    def getMusicTable(self):
        """Return the playlist table widget (QTableWidget)."""
        return self.__music_table
def __get_cursor_location(self, event):
if event.globalX() == 0:
return self.CursorLocation.SCREEN_LEFT
if QApplication.desktop().screenGeometry().width() - event.globalX() == 1:
return self.CursorLocation.SCREEN_RIGHT
if event.globalY() == 0:
return self.CursorLocation.SCREEN_TOP
if event.pos().y() < 27 and event.pos().y() - 2 > self.OFFSET_BORDER_TOP and event.pos().x() + self.OFFSET_BORDER_RIGHT < self.width() and event.pos().x() + 1 > self.OFFSET_BORDER_LEFT:
return self.CursorLocation.WINDOW_TITLE_BAR
permissible_var = 2
if event.pos().x() + permissible_var >= self.OFFSET_BORDER_LEFT and event.pos().x() - permissible_var <= self.OFFSET_BORDER_LEFT:
if event.pos().y() + self.OFFSET_BORDER_BOTTOM + permissible_var >= self.rect().bottom() and event.pos().y() + self.OFFSET_BORDER_BOTTOM - permissible_var <= self.rect().bottom():
return self.CursorLocation.WINDOW_LEFT_BOTTOM
if event.pos().y() + permissible_var >= self.OFFSET_BORDER_TOP and event.pos().y() - permissible_var <= self.OFFSET_BORDER_TOP:
return self.CursorLocation.WINDOW_LEFT_TOP
return self.CursorLocation.WINDOW_LEFT
if event.pos().x() + self.OFFSET_BORDER_RIGHT + permissible_var >= self.rect().right() and event.pos().x() + self.OFFSET_BORDER_RIGHT - permissible_var <= self.rect().right():
if event.pos().y() + self.OFFSET_BORDER_BOTTOM + permissible_var >= self.rect().bottom() and event.pos().y() + self.OFFSET_BORDER_BOTTOM - permissible_var <= self.rect().bottom():
return self.CursorLocation.WINDOW_RIGHT_BOTTOM
if event.pos().y() + permissible_var >= self.OFFSET_BORDER_TOP and event.pos().y() - permissible_var <= self.OFFSET_BORDER_TOP:
return self.CursorLocation.WINDOW_RIGHT_TOP
return self.CursorLocation.WINDOW_RIGHT
if event.pos().y() + permissible_var >= self.OFFSET_BORDER_TOP and event.pos().y() - permissible_var <= self.OFFSET_BORDER_TOP:
return self.CursorLocation.WINDOW_TOP
if event.pos().y() + self.OFFSET_BORDER_BOTTOM + permissible_var >= self.rect().bottom() and event.pos().y() + self.OFFSET_BORDER_BOTTOM - permissible_var <= self.rect().bottom():
return self.CursorLocation.WINDOW_BOTTOM
return -1
def __set_cursor_shape(self, flag):
if self.__is_fixed_size:
return
if flag == self.CursorLocation.WINDOW_LEFT or flag == self.CursorLocation.WINDOW_RIGHT:
if self.__is_fixed_width:
self.setCursor(Qt.ArrowCursor)
return False
self.setCursor(Qt.SizeHorCursor)
return True
if flag == self.CursorLocation.WINDOW_TOP or flag == self.CursorLocation.WINDOW_BOTTOM:
if self.__is_fixed_height:
self.setCursor(Qt.ArrowCursor)
return False
self.setCursor(Qt.SizeVerCursor)
return True
if self.__is_fixed_width or self.__is_fixed_height:
self.setCursor(Qt.ArrowCursor)
return False
if flag == self.CursorLocation.WINDOW_LEFT_TOP or flag == self.CursorLocation.WINDOW_RIGHT_BOTTOM:
self.setCursor(Qt.SizeFDiagCursor)
return True
if flag == self.CursorLocation.WINDOW_RIGHT_TOP or flag == self.CursorLocation.WINDOW_LEFT_BOTTOM:
self.setCursor(Qt.SizeBDiagCursor)
return True
self.setCursor(Qt.ArrowCursor)
return False
def __change_window_size(self, flag, pos_global):
pos_global = self.mapToParent(pos_global)
global_x = pos_global.x() - self.OFFSET_BORDER_LEFT
global_y = pos_global.y() - self.OFFSET_BORDER_TOP
widget_x = self.pos().x()
widget_y = self.pos().y()
length_l = widget_x + self.width() - global_x
length_r = global_x - widget_x + 1 + self.OFFSET_BORDER_RIGHT + self.OFFSET_BORDER_LEFT
length_t = widget_y + self.height() - global_y
length_b = global_y - widget_y + 1 + self.OFFSET_BORDER_TOP + self.OFFSET_BORDER_BOTTOM
if length_l <= self.WIDTH_MIN:
global_x = self.pos().x()
if length_t <= self.HEIGHT_MIN:
global_y = self.pos().y()
if flag == self.CursorLocation.WINDOW_LEFT:
self.setGeometry(global_x, widget_y, length_l, self.height())
self.__geometry_frame = self.geometry()
return
if flag == self.CursorLocation.WINDOW_RIGHT:
self.setGeometry(widget_x, widget_y, length_r, self.height())
self.__geometry_frame = self.geometry()
return
if flag == self.CursorLocation.WINDOW_TOP:
self.setGeometry(widget_x, global_y, self.width(), length_t)
self.__geometry_frame = self.geometry()
return
if flag == self.CursorLocation.WINDOW_BOTTOM:
self.setGeometry(widget_x, widget_y, self.width(), length_b)
self.__geometry_frame = self.geometry()
return
if flag == self.CursorLocation.WINDOW_LEFT_TOP:
self.setGeometry(global_x, global_y, length_l, length_t)
self.__geometry_frame = self.geometry()
return
if flag == self.CursorLocation.WINDOW_LEFT_BOTTOM:
self.setGeometry(global_x, widget_y, length_l, length_b)
self.__geometry_frame = self.geometry()
return
if flag == self.CursorLocation.WINDOW_RIGHT_TOP:
self.setGeometry(widget_x, global_y, length_r, length_t)
self.__geometry_frame = self.geometry()
return
if flag == self.CursorLocation.WINDOW_RIGHT_BOTTOM:
self.setGeometry(widget_x, widget_y, length_r, length_b)
self.__geometry_frame = self.geometry()
return
def __switch_max_button(self, b_style):
self.__btn_title_maximize.setMStyle(b_style)
if b_style == MButton.Type.Maximize:
self.__btn_title_maximize.setToolTip('最大化')
return
if b_style == MButton.Type.Restore:
self.__btn_title_maximize.setToolTip('向下还原')
return
def __window_stick_to(self, s_type):
if self.__is_sticking:
return
if s_type == self.StickType.LEFT:
geometry_tgt = QRect(-self.OFFSET_BORDER_LEFT, -self.OFFSET_BORDER_TOP,
QApplication.desktop().screenGeometry().width() / 2 + self.OFFSET_BORDER_LEFT + self.OFFSET_BORDER_RIGHT,
QApplication.desktop().availableGeometry().height() + self.OFFSET_BORDER_TOP + self.OFFSET_BORDER_BOTTOM)
if self.geometry() != geometry_tgt:
self.setGeometry(geometry_tgt)
self.__is_sticking = True
return
if s_type == self.StickType.RIGHT:
geometry_tgt = QRect(QApplication.desktop().screenGeometry().width() / 2 - self.OFFSET_BORDER_LEFT,
-self.OFFSET_BORDER_TOP,
QApplication.desktop().screenGeometry().width() / 2 + self.OFFSET_BORDER_LEFT + self.OFFSET_BORDER_RIGHT,
QApplication.desktop().availableGeometry().height() + self.OFFSET_BORDER_TOP + self.OFFSET_BORDER_BOTTOM)
if self.geometry() != geometry_tgt:
self.setGeometry(geometry_tgt)
self.__is_sticking = True
return
if s_type == self.StickType.FULL_SCREEN:
self.setGeometry(- self.OFFSET_BORDER_LEFT, - self.OFFSET_BORDER_TOP,
QApplication.desktop().availableGeometry().width() + self.OFFSET_BORDER_RIGHT + self.OFFSET_BORDER_LEFT,
QApplication.desktop().availableGeometry().height() + self.OFFSET_BORDER_TOP + self.OFFSET_BORDER_BOTTOM)
self.__is_sticking = True
return
def paintEvent(self, *args, **kwargs):
painter = QPainter(self)
painter.drawPixmap(0, 0, 12, self.WIDTH_BORDER_TOP, self.__pic_bg, 0, 0, 12, self.WIDTH_BORDER_TOP)
painter.drawPixmap(self.WIDTH_BORDER_LEFT, 0, self.width() - self.WIDTH_BORDER_RIGHT - self.WIDTH_BORDER_LEFT,
self.WIDTH_BORDER_TOP, self.__pic_bg, 12, 0, 1, self.WIDTH_BORDER_TOP)
painter.drawPixmap(self.width() - self.WIDTH_BORDER_RIGHT, 0, self.__pic_bg, 13, 0, 12, self.WIDTH_BORDER_TOP)
painter.drawPixmap(0, self.height() - self.WIDTH_BORDER_BOTTOM, self.__pic_bg, 0, 90, 12, 14)
painter.drawPixmap(0, self.WIDTH_BORDER_TOP, self.WIDTH_BORDER_LEFT,
self.height() - self.WIDTH_BORDER_BOTTOM - self.WIDTH_BORDER_TOP, self.__pic_bg, 0, 89, 12,
1)
painter.drawPixmap(self.width() - self.WIDTH_BORDER_RIGHT, self.WIDTH_BORDER_TOP, self.WIDTH_BORDER_LEFT,
self.height() - self.WIDTH_BORDER_BOTTOM - self.WIDTH_BORDER_TOP, self.__pic_bg, 13, 89, 12,
1)
painter.drawPixmap(self.WIDTH_BORDER_LEFT, self.height() - self.WIDTH_BORDER_BOTTOM,
self.width() - self.WIDTH_BORDER_RIGHT - self.WIDTH_BORDER_LEFT, self.WIDTH_BORDER_BOTTOM,
self.__pic_bg, 12, 90, 1, 14)
painter.drawPixmap(self.width() - self.WIDTH_BORDER_RIGHT, self.height() - self.WIDTH_BORDER_BOTTOM,
self.__pic_bg, 13, 90, 12, 14)
painter.fillRect(self.WIDTH_BORDER_LEFT - 4, self.WIDTH_BORDER_TOP,
self.width() - self.WIDTH_BORDER_LEFT - self.WIDTH_BORDER_RIGHT + 8,
self.height() - self.WIDTH_BORDER_BOTTOM - self.WIDTH_BORDER_TOP,
QBrush(QColor(255, 255, 255)))
painter.setFont(QFont('Microsoft Yahei', 8, QFont.Bold))
painter.setPen(QColor(250, 250, 250, 220))
painter.drawText(1, 5, self.width(), 27, Qt.AlignHCenter | Qt.AlignVCenter, self.windowTitle())
painter.setPen(QColor(50, 50, 50, 255))
painter.drawText(0, 4, self.width(), 27, Qt.AlignHCenter | Qt.AlignVCenter, self.windowTitle())
painter.setPen(QColor(142, 142, 142, 255))
if self.width() > 380:
painter.drawLine(self.WIDTH_FRAME_LEFT + self.OFFSET_BORDER_LEFT, self.OFFSET_BORDER_TOP + 22,
self.WIDTH_FRAME_LEFT + self.OFFSET_BORDER_LEFT,
self.height() - self.OFFSET_BORDER_BOTTOM - 1)
@pyqtSlot()
def showMaximized(self):
if self.__is_sticked:
return
if self.__is_maximized:
self.setGeometry(self.__geometry_frame)
self.__switch_max_button(MButton.Type.Maximize)
self.__is_maximized = False
else:
self.__geometry_frame = self.geometry()
self.__window_stick_to(self.StickType.FULL_SCREEN)
self.__switch_max_button(MButton.Type.Restore)
self.__is_maximized = True
def mouseDoubleClickEvent(self, event):
if event.button() != Qt.LeftButton:
return
if self.__get_cursor_location(event) == self.CursorLocation.WINDOW_TITLE_BAR:
self.showMaximized()
def mousePressEvent(self, event):
if event.button() != Qt.LeftButton:
return
if self.__get_cursor_location(event) == self.CursorLocation.WINDOW_TITLE_BAR:
self.__mouse_pressed = True
if self.__cursor_changed:
self.__window_resizing = True
self.__move_point = event.globalPos() - self.pos()
def mouseMoveEvent(self, event):
if self.__window_resizing:
self.__change_window_size(self.__cursor_loc, event.pos())
return
else:
self.__cursor_loc = self.__get_cursor_location(event)
self.__cursor_changed = self.__set_cursor_shape(self.__cursor_loc)
if not self.__mouse_pressed:
return
if self.__is_maximized and not self.__is_sticked:
self.resize(self.__geometry_frame.size())
self.__switch_max_button(MButton.Type.Maximize)
self.__is_maximized = False
if event.pos().x() < self.__geometry_frame.width():
self.__is_zdt = False
else:
self.__is_zdt = True
self.move(event.globalPos() - QPoint(self.width() / 2, 15))
elif not self.__is_sticking and event.globalPos().y() < QApplication.desktop().availableGeometry().height():
if not self.__is_zdt:
self.move(event.globalPos() - self.__move_point)
else:
self.move(event.globalPos() - QPoint(self.width() / 2, 15))
self.__cursor_loc = self.__get_cursor_location(event)
if self.__cursor_loc == self.CursorLocation.SCREEN_LEFT:
if self.__lyric_shown:
self.__window_stick_to(self.StickType.LEFT)
elif self.__cursor_loc == self.CursorLocation.SCREEN_RIGHT:
if self.__lyric_shown:
self.__window_stick_to(self.StickType.RIGHT)
elif self.__cursor_loc == self.CursorLocation.SCREEN_TOP:
if self.__lyric_shown:
self.showMaximized()
self.__is_sticked = True
else:
if self.width() - self.OFFSET_BORDER_LEFT - self.OFFSET_BORDER_RIGHT >= QApplication.desktop().screenGeometry().width():
self.resize(800, self.height())
if self.height() - self.OFFSET_BORDER_TOP - self.OFFSET_BORDER_BOTTOM >= QApplication.desktop().availableGeometry().height():
self.resize(self.width(), 600)
self.__is_sticking = False
self.__is_sticked = False
def mouseReleaseEvent(self, event):
if event.button() != Qt.LeftButton:
return
if self.geometry().y() < -self.OFFSET_BORDER_TOP:
self.setGeometry(self.geometry().x(), - self.OFFSET_BORDER_TOP, self.geometry().width(),
self.geometry().height())
self.__mouse_pressed = False
self.__window_resizing = False
self.__is_sticking = False
self.__is_sticked = False
if not self.__is_maximized:
self.__is_zdt = False
def resizeEvent(self, event):
self.frame.setGeometry(self.OFFSET_BORDER_LEFT, self.OFFSET_BORDER_TOP + 22,
self.width() - self.OFFSET_BORDER_LEFT - self.OFFSET_BORDER_RIGHT,
self.height() - self.OFFSET_BORDER_TOP - self.OFFSET_BORDER_BOTTOM - 26)
def setFixedSize(self, *__args):
count_parm = len(__args)
if count_parm == 0 or count_parm > 2:
raise TypeError('Argument error occurred. (1 or 2 given)')
if count_parm == 1:
if isinstance(__args[0], QSize):
super(ModiaWindow, self).setFixedSize(__args[0])
self.__is_fixed_size = True
else:
raise ValueError('Given argument not QSize type. (QSize type required for 1 argument)')
else:
if isinstance(__args[0], int) and isinstance(__args[1], int):
super(ModiaWindow, self).setFixedSize(__args[0], __args[1])
self.__is_fixed_size = True
else:
raise ValueError('Given arguments not int type. (int type required for 2 arguments)')
def setFixedWidth(self, p_int):
if not isinstance(p_int, int):
raise ValueError('Given argument not int type. (int type required)')
self.resize(p_int, self.height())
self.__is_fixed_width = True
def setFixedHeight(self, p_int):
if not isinstance(p_int, int):
raise ValueError('Given argument not int type. (int type required)')
self.resize(self.width(), p_int)
self.__is_fixed_height = True
def setSuspendStatus(self, bool_suspended):
self.__is_suspended = bool_suspended
| 53.35497 | 194 | 0.642982 |
909815833c734863eff7069ad4ffdc753ba6f20b | 1,867 | py | Python | src/CrackingTheCodingInterview/src/Book/IX_InterviewQuestions/DataStructures/Ch02_LinkedLists/Python/LinkedList.py | FeodorFitsner/learning_computer-science | a18ea45b3e52057e4a07124a11f7da92d243b794 | [
"MIT"
] | 23 | 2020-11-22T07:37:07.000Z | 2022-03-23T17:23:44.000Z | src/CrackingTheCodingInterview/src/Book/IX_InterviewQuestions/DataStructures/Ch02_LinkedLists/Python/LinkedList.py | FeodorFitsner/learning_computer-science | a18ea45b3e52057e4a07124a11f7da92d243b794 | [
"MIT"
] | 40 | 2018-01-30T18:59:17.000Z | 2018-03-13T22:58:05.000Z | src/CrackingTheCodingInterview/src/Book/IX_InterviewQuestions/DataStructures/Ch02_LinkedLists/Python/LinkedList.py | FeodorFitsner/learning_computer-science | a18ea45b3e52057e4a07124a11f7da92d243b794 | [
"MIT"
] | 4 | 2020-10-19T01:14:15.000Z | 2022-03-23T20:25:36.000Z | from random import randint
class LinkedListNode:
def __init__(self, value, next_node=None, prev_node=None):
self.value = value
self.next = next_node
self.prev = prev_node
def __str__(self):
return str(self.value)
class LinkedList:
def __init__(self, values=None):
self.head = None
self.tail = None
if values is not None:
self.add_multiple(values)
def __iter__(self):
current = self.head
while current:
yield current
current = current.next
def __str__(self):
values = [str(x) for x in self]
return ' -> '.join(values)
def __len__(self):
result = 0
node = self.head
while node:
result += 1
node = node.next
return result
def add(self, value):
if self.head is None:
self.tail = self.head = LinkedListNode(value)
else:
self.tail.next = LinkedListNode(value)
self.tail = self.tail.next
return self.tail
def add_to_beginning(self, value):
if self.head is None:
self.tail = self.head = LinkedListNode(value)
else:
self.head = LinkedListNode(value, self.head)
return self.head
def add_multiple(self, values):
for v in values:
self.add(v)
def generate(self, n, min_value, max_value):
self.head = self.tail = None
for i in range(n):
self.add(randint(min_value, max_value))
return self
class DoublyLinkedList(LinkedList):
def add(self, value):
if self.head is None:
self.tail = self.head = LinkedListNode(value, None, self.tail)
else:
self.tail.next = LinkedListNode(value)
self.tail = self.tail.next
return self
| 24.565789 | 74 | 0.567756 |
d4327a48f107ec7913b6b51df3c828342e81a0ea | 8,277 | py | Python | src/activation_model.py | msmarsch/Thesis | 703a94fade702c3df34417a3f0c811b0b3ff5ee8 | [
"MIT"
] | null | null | null | src/activation_model.py | msmarsch/Thesis | 703a94fade702c3df34417a3f0c811b0b3ff5ee8 | [
"MIT"
] | null | null | null | src/activation_model.py | msmarsch/Thesis | 703a94fade702c3df34417a3f0c811b0b3ff5ee8 | [
"MIT"
] | null | null | null | ## Matthew Smarsch
## Cooper Union Electrical Engineering Master's Thesis
## Advisor: Professor Sam Keene
## Neural Network NNMF Implementation for the Cocktail Party Problem
import argparse
import sys, os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import tensorflow as tf
import numpy as np
import datetime
import wave
import matplotlib.pyplot as plt
import pylab
import librosa
import librosa.display
import scipy
from scipy.io import wavfile
from scipy.fftpack import fft
from scipy import signal
FFT_SIZE = 512
# Alias for wavfile.read -> reads .wav to numpy array
#
# Returns: Numpy array representation of .wav file
def read_wav(filename):
return librosa.load(filename, None)
def save_plot(values, filename):
plt.figure()
plt.plot(values)
plt.savefig(filename, bbox_inches='tight')
def reconstruct_wave(magnitude, phase):
reconstr = librosa.istft(magnitude * phase)
return reconstr
def model_variable(shape, name):
variable = tf.get_variable(name=name,
dtype=tf.float32,
initializer=tf.random_uniform(shape, -0.1, 0.1))
tf.add_to_collection('model_variables', variable)
return variable
class MixtureModel:
def __init__(self, speakers, rank, sess, alpha, lambduh, epochs):
self._speakers = speakers
self._rank = rank
self._sess = sess
self._learning_rate = alpha
self._lambduh = lambduh
self._epochs = epochs
self._mixture_waveform = self.get_mixture_waveform()
self._mixture_magnitude, self._mixture_phase = self.get_mixture_spectrogram()
self._mixture_reconstruction, self._loss = self.build_network()
def reconstruct_waveforms(self):
self._reconstruction_waveform = librosa.istft(self._reconstruction * self._mixture_phase)
for speaker in self._speakers:
speaker._reconstruction_waveform = librosa.istft(speaker._reconstruction * self._mixture_phase)
def build_network(self):
mixture_reconstruction = None
for speaker in self._speakers:
speaker._activations_tensor = model_variable([self._rank, np.shape(self._mixture_magnitude)[1]], speaker._name + '_Act')
tf.add_to_collection('l1', tf.reduce_sum(tf.abs(speaker._activations_tensor)))
speaker._reconstruction_tensor = tf.nn.softplus(tf.matmul(speaker._bases, speaker._activations_tensor))
if mixture_reconstruction is None:
mixture_reconstruction = speaker._reconstruction_tensor
else:
mixture_reconstruction += speaker._reconstruction_tensor
cost_function = tf.reduce_mean(tf.add(tf.subtract(tf.multiply(self._mixture_magnitude, tf.subtract(tf.log(self._mixture_magnitude), tf.log(mixture_reconstruction))), self._mixture_magnitude), mixture_reconstruction))
l1_penalty = tf.reduce_sum(tf.get_collection('l1'))
loss = cost_function + self._lambduh * l1_penalty
return mixture_reconstruction, loss
def train_init(self):
model_variables = tf.get_collection('model_variables')
self._optim = tf.train.AdamOptimizer(self._learning_rate).minimize(self._loss, var_list = model_variables)
self._sess.run(tf.global_variables_initializer())
def train_network(self):
self.train_init()
loss_values = []
print('Beginning mixture training at ' + datetime.datetime.now().strftime('%H:%M:%S') + '...')
for epoch in range(self._epochs):
iter_loss = self.train_iter(self._mixture_magnitude)
if epoch % 100 == 0:
print('{}: {}'.format(epoch, iter_loss))
loss_values.append(iter_loss)
print('Completed mixture training at ' + datetime.datetime.now().strftime('%H:%M:%S') + '...')
return loss_values
def train_iter(self, spec):
loss, _ = self._sess.run([self._loss, self._optim])
return loss
def separate_sources(self):
for speaker in self._speakers:
speaker._activations, speaker._reconstruction = self._sess.run([speaker._activations_tensor, speaker._reconstruction_tensor])
return np.squeeze(self._sess.run([self._mixture_reconstruction]))
def get_mixture_spectrogram(self):
D = librosa.stft(self._mixture_waveform, n_fft=FFT_SIZE, hop_length=int(FFT_SIZE/4))
mag, phase = librosa.magphase(D)
return mag, phase
def get_mixture_waveform(self):
mix_waveform = None
for speaker in self._speakers:
if mix_waveform is None:
mix_waveform = speaker._testing_sentence
else:
mix_waveform = np.append(mix_waveform, speaker._testing_sentence)
return mix_waveform
class SentenceSpeaker:
def __init__(self, audio_directory, bases_filename, name):
self._audio_directory = audio_directory
self._name = name
self._sentence_5, self._fs_5 = self.preprocess_sentence(audio_directory + 'Sentence_5.wav')
self._testing_sentence = self._sentence_5
self._testing_fs = self._fs_5
self._bases = np.load(bases_filename)
def preprocess_sentence(self, sentence_file):
sentence, fs = read_wav(sentence_file)
return sentence, fs
def main():
'''
ENVIRONMENT SETUP
'''
parser = argparse.ArgumentParser()
parser.add_argument('-a', '--alpha', dest='alpha', type=float, default=0.01, help='the learning rate for backpropagation')
parser.add_argument('-act', '--activation', dest='activation', type=str, choices=['RELU', 'SOFTPLUS'], default='SOFTPLUS', help='activation function for the speech model')
parser.add_argument('-b', '--bases', dest='bases', type=str, help='absolute path to the directory containing all speaker bases')
parser.add_argument('-c', '--clean', dest='clean_model', type=str, choices=['Sentences', 'Book', 'Piano'], default='Sentences', help='clean speech type that we are trying to model')
parser.add_argument('-e', '--epochs', dest='epochs', type=int, default=10000, help='number of passes through the training set')
parser.add_argument('-l', '--lambduh', dest='lambduh', type=float, default=0, help='L1 regularization coefficient')
parser.add_argument('-r', '--rank', dest='rank', type=int, default=20, help='number of spectral components in the W (bases) matrix')
args = parser.parse_args()
simulations_dir = '/Users/MSmarsch/Documents/Thesis/Simulations/' + datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + '/'
os.mkdir(simulations_dir)
'''
AUDIO PREPROCESSING
'''
if args.bases[-1] != '/':
args.bases += '/'
if args.clean_model == 'Sentences':
## Load Sentences in
sentences_dir = '/Users/MSmarsch/Documents/Thesis/Audio/Clean_Speech/Sentences/'
male_speaker_1 = SentenceSpeaker(sentences_dir + 'Male_1/', args.bases + 'Male_1_Bases.npy', 'Male_1')
male_speaker_2 = SentenceSpeaker(sentences_dir + 'Male_2/', args.bases + 'Male_2_Bases.npy', 'Male_2')
male_speaker_3 = SentenceSpeaker(sentences_dir + 'Male_3/', args.bases + 'Male_3_Bases.npy', 'Male_3')
female_speaker_1 = SentenceSpeaker(sentences_dir + 'Female_1/', args.bases + 'Female_1_Bases.npy', 'Female_1')
female_speaker_2 = SentenceSpeaker(sentences_dir + 'Female_2/', args.bases + 'Female_2_Bases.npy', 'Female_2')
speakers = [male_speaker_1, male_speaker_2, male_speaker_3, female_speaker_1, female_speaker_2]
male_noise = male_speaker_3
female_noise = female_speaker_2
with tf.Session() as sess:
mixture_model = MixtureModel(speakers, args.rank, sess, args.alpha, args.lambduh, args.epochs)
loss_values = mixture_model.train_network()
mixture_model._reconstruction = mixture_model.separate_sources()
mixture_model.reconstruct_waveforms()
librosa.output.write_wav('input.wav', mixture_model._mixture_waveform, 8000)
librosa.output.write_wav('complete_reconstruction.wav', mixture_model._reconstruction_waveform, 8000)
for speaker in speakers:
librosa.output.write_wav(speaker._name + '.wav', speaker._reconstruction_waveform, 8000)
if __name__ == '__main__':
main()
| 44.983696 | 224 | 0.691434 |
46ba5a28dd561cb66601da41be491fe0e65e223e | 29,152 | py | Python | libs/cryptography/x509/extensions.py | Sparklingx/nzbhydra | e2433e1155255ba37341cc79750b104e7dd8889a | [
"Apache-2.0"
] | 674 | 2015-11-06T04:22:47.000Z | 2022-02-26T17:31:43.000Z | python27/win32/Lib/site-packages/cryptography/x509/extensions.py | hehahovip/oss-ftp | 400b934b2fa625e31da41b3c6af98fc14f4f63ab | [
"MIT"
] | 713 | 2015-11-06T10:48:58.000Z | 2018-11-27T16:32:18.000Z | python27/win32/Lib/site-packages/cryptography/x509/extensions.py | hehahovip/oss-ftp | 400b934b2fa625e31da41b3c6af98fc14f4f63ab | [
"MIT"
] | 106 | 2015-12-07T11:21:06.000Z | 2022-03-11T10:58:41.000Z | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import abc
import hashlib
import ipaddress
from enum import Enum
from pyasn1.codec.der import decoder
from pyasn1.type import namedtype, univ
import six
from cryptography import utils
from cryptography.hazmat.primitives import constant_time, serialization
from cryptography.x509.general_name import GeneralName, IPAddress, OtherName
from cryptography.x509.name import Name
from cryptography.x509.oid import (
AuthorityInformationAccessOID, ExtensionOID, ObjectIdentifier
)
class _SubjectPublicKeyInfo(univ.Sequence):
    # pyasn1 schema for the SubjectPublicKeyInfo structure (RFC 5280,
    # section 4.1): an AlgorithmIdentifier SEQUENCE followed by the raw
    # key material as a BIT STRING. Used only by
    # _key_identifier_from_public_key to re-parse a DER-serialized key.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('algorithm', univ.Sequence()),
        namedtype.NamedType('subjectPublicKey', univ.BitString())
    )
def _key_identifier_from_public_key(public_key):
    """
    Compute the key identifier for *public_key* per RFC 5280, section
    4.2.1.2 (method 1): the SHA-1 digest of the subjectPublicKey BIT
    STRING taken from the key's DER-encoded SubjectPublicKeyInfo.
    """
    # This is a very slow way to do this.
    serialized = public_key.public_bytes(
        serialization.Encoding.DER,
        serialization.PublicFormat.SubjectPublicKeyInfo
    )

    # Re-parse the DER we just produced so the raw key bits can be
    # isolated from the surrounding SubjectPublicKeyInfo structure.
    spki, remaining = decoder.decode(
        serialized, asn1Spec=_SubjectPublicKeyInfo()
    )
    assert not remaining
    # the univ.BitString object is a tuple of bits. We need bytes and
    # pyasn1 really doesn't want to give them to us. To get it we'll
    # build an integer and convert that to bytes.
    bits = 0
    for bit in spki.getComponentByName("subjectPublicKey"):
        bits = bits << 1 | bit

    # NOTE(review): int_to_bytes drops leading zero bytes, so a key whose
    # encoding started with 0x00 would hash incorrectly -- presumably DER
    # encodings here always begin with a nonzero tag byte; confirm.
    data = utils.int_to_bytes(bits)

    return hashlib.sha1(data).digest()
class DuplicateExtension(Exception):
    """Raised when the same extension OID appears more than once."""

    def __init__(self, msg, oid):
        self.oid = oid
        super(DuplicateExtension, self).__init__(msg)
class UnsupportedExtension(Exception):
    """Raised for an extension whose OID the library cannot parse."""

    def __init__(self, msg, oid):
        self.oid = oid
        super(UnsupportedExtension, self).__init__(msg)
class ExtensionNotFound(Exception):
    """Raised when a lookup finds no extension for the requested OID."""

    def __init__(self, msg, oid):
        self.oid = oid
        super(ExtensionNotFound, self).__init__(msg)
@six.add_metaclass(abc.ABCMeta)
class ExtensionType(object):
    """
    Abstract interface implemented by every extension value class in
    this module; ``oid`` identifies which extension the value encodes.
    """
    @abc.abstractproperty
    def oid(self):
        """
        Returns the oid associated with the given extension type.
        """
class Extensions(object):
    """An ordered, iterable collection of certificate extensions."""

    def __init__(self, extensions):
        self._extensions = extensions

    def get_extension_for_oid(self, oid):
        """Return the first extension whose OID matches, else raise."""
        # Linear scan; certificate extension lists are short in practice.
        for extension in self._extensions:
            if extension.oid == oid:
                return extension

        raise ExtensionNotFound("No {0} extension was found".format(oid), oid)

    def get_extension_for_class(self, extclass):
        """Return the first extension whose value is an *extclass*."""
        for extension in self._extensions:
            if isinstance(extension.value, extclass):
                return extension

        raise ExtensionNotFound(
            "No {0} extension was found".format(extclass), extclass.oid
        )

    def __iter__(self):
        return iter(self._extensions)

    def __len__(self):
        return len(self._extensions)

    def __repr__(self):
        return (
            "<Extensions({0})>".format(self._extensions)
        )
@utils.register_interface(ExtensionType)
class AuthorityKeyIdentifier(object):
    """
    The Authority Key Identifier extension (RFC 5280, section 4.2.1.1):
    identifies the public key of the CA that signed the certificate,
    either by key identifier digest, by issuer name plus serial number,
    or both.
    """
    oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER

    def __init__(self, key_identifier, authority_cert_issuer,
                 authority_cert_serial_number):
        # issuer and serial number must be supplied as a pair: one
        # without the other is rejected before any type checking.
        if authority_cert_issuer or authority_cert_serial_number:
            if not authority_cert_issuer or not authority_cert_serial_number:
                raise ValueError(
                    "authority_cert_issuer and authority_cert_serial_number "
                    "must both be present or both None"
                )

            if not all(
                isinstance(x, GeneralName) for x in authority_cert_issuer
            ):
                raise TypeError(
                    "authority_cert_issuer must be a list of GeneralName "
                    "objects"
                )

            if not isinstance(authority_cert_serial_number, six.integer_types):
                raise TypeError(
                    "authority_cert_serial_number must be an integer"
                )

        self._key_identifier = key_identifier
        self._authority_cert_issuer = authority_cert_issuer
        self._authority_cert_serial_number = authority_cert_serial_number

    @classmethod
    def from_issuer_public_key(cls, public_key):
        """
        Alternate constructor: derive the key identifier by hashing the
        issuer's public key; issuer name/serial are left unset.
        """
        digest = _key_identifier_from_public_key(public_key)
        return cls(
            key_identifier=digest,
            authority_cert_issuer=None,
            authority_cert_serial_number=None
        )

    def __repr__(self):
        return (
            "<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, "
            "authority_cert_issuer={0.authority_cert_issuer}, "
            "authority_cert_serial_number={0.authority_cert_serial_number}"
            ")>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, AuthorityKeyIdentifier):
            return NotImplemented

        return (
            self.key_identifier == other.key_identifier and
            self.authority_cert_issuer == other.authority_cert_issuer and
            self.authority_cert_serial_number ==
            other.authority_cert_serial_number
        )

    def __ne__(self, other):
        return not self == other

    # Read-only accessors for the underlying attributes.
    key_identifier = utils.read_only_property("_key_identifier")
    authority_cert_issuer = utils.read_only_property("_authority_cert_issuer")
    authority_cert_serial_number = utils.read_only_property(
        "_authority_cert_serial_number"
    )
@utils.register_interface(ExtensionType)
class SubjectKeyIdentifier(object):
    """
    The Subject Key Identifier extension (RFC 5280, section 4.2.1.2):
    a digest that identifies the certificate's public key.
    """
    oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER

    digest = utils.read_only_property("_digest")

    def __init__(self, digest):
        self._digest = digest

    @classmethod
    def from_public_key(cls, public_key):
        """Alternate constructor: derive the digest from *public_key*."""
        return cls(_key_identifier_from_public_key(public_key))

    def __eq__(self, other):
        if isinstance(other, SubjectKeyIdentifier):
            # Constant-time comparison of the digest bytes.
            return constant_time.bytes_eq(self._digest, other._digest)

        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "<SubjectKeyIdentifier(digest={0!r})>".format(self._digest)
@utils.register_interface(ExtensionType)
class AuthorityInformationAccess(object):
    """
    The Authority Information Access extension (RFC 5280, section
    4.2.2.1): a sequence of AccessDescription entries.
    """
    oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS

    def __init__(self, descriptions):
        if any(
            not isinstance(description, AccessDescription)
            for description in descriptions
        ):
            raise TypeError(
                "Every item in the descriptions list must be an "
                "AccessDescription"
            )

        self._descriptions = descriptions

    def __iter__(self):
        return iter(self._descriptions)

    def __len__(self):
        return len(self._descriptions)

    def __eq__(self, other):
        if not isinstance(other, AuthorityInformationAccess):
            return NotImplemented

        return self._descriptions == other._descriptions

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "<AuthorityInformationAccess({0})>".format(self._descriptions)
class AccessDescription(object):
    """One (access_method, access_location) pair from the AIA extension."""

    def __init__(self, access_method, access_location):
        # Only the OCSP and CA Issuers access methods are defined for AIA.
        if access_method not in (AuthorityInformationAccessOID.OCSP,
                                 AuthorityInformationAccessOID.CA_ISSUERS):
            raise ValueError(
                "access_method must be OID_OCSP or OID_CA_ISSUERS"
            )

        if not isinstance(access_location, GeneralName):
            raise TypeError("access_location must be a GeneralName")

        self._access_method = access_method
        self._access_location = access_location

    access_method = utils.read_only_property("_access_method")
    access_location = utils.read_only_property("_access_location")

    def __eq__(self, other):
        if not isinstance(other, AccessDescription):
            return NotImplemented

        return (
            self._access_method == other._access_method and
            self._access_location == other._access_location
        )

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return (
            "<AccessDescription(access_method={0.access_method}, access_locati"
            "on={0.access_location})>".format(self)
        )
@utils.register_interface(ExtensionType)
class BasicConstraints(object):
    """
    The Basic Constraints extension (RFC 5280, section 4.2.1.9): whether
    the subject is a CA and, if so, the maximum chain depth below it.
    """
    oid = ExtensionOID.BASIC_CONSTRAINTS

    def __init__(self, ca, path_length):
        if not isinstance(ca, bool):
            raise TypeError("ca must be a boolean value")

        if path_length is not None:
            # path_length is only meaningful for a CA certificate.
            if not ca:
                raise ValueError("path_length must be None when ca is False")

            if (not isinstance(path_length, six.integer_types) or
                    path_length < 0):
                raise TypeError(
                    "path_length must be a non-negative integer or None"
                )

        self._ca = ca
        self._path_length = path_length

    ca = utils.read_only_property("_ca")
    path_length = utils.read_only_property("_path_length")

    def __repr__(self):
        return ("<BasicConstraints(ca={0.ca}, "
                "path_length={0.path_length})>").format(self)

    def __eq__(self, other):
        if not isinstance(other, BasicConstraints):
            return NotImplemented

        return (
            self._ca == other._ca and
            self._path_length == other._path_length
        )

    def __ne__(self, other):
        return not self == other
@utils.register_interface(ExtensionType)
class CRLDistributionPoints(object):
    """
    The CRL Distribution Points extension (RFC 5280, section 4.2.1.13):
    a sequence of DistributionPoint entries.
    """
    oid = ExtensionOID.CRL_DISTRIBUTION_POINTS

    def __init__(self, distribution_points):
        if any(
            not isinstance(point, DistributionPoint)
            for point in distribution_points
        ):
            raise TypeError(
                "distribution_points must be a list of DistributionPoint "
                "objects"
            )

        self._distribution_points = distribution_points

    def __iter__(self):
        return iter(self._distribution_points)

    def __len__(self):
        return len(self._distribution_points)

    def __eq__(self, other):
        if not isinstance(other, CRLDistributionPoints):
            return NotImplemented

        return self._distribution_points == other._distribution_points

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "<CRLDistributionPoints({0})>".format(self._distribution_points)
class DistributionPoint(object):
    """
    A single DistributionPoint from the CRL Distribution Points
    extension (RFC 5280, section 4.2.1.13). The name may be given either
    as a full general-name list or as a name relative to the CRL issuer,
    never both.
    """
    def __init__(self, full_name, relative_name, reasons, crl_issuer):
        # full_name and relative_name are mutually exclusive.
        if full_name and relative_name:
            raise ValueError(
                "You cannot provide both full_name and relative_name, at "
                "least one must be None."
            )

        if full_name and not all(
            isinstance(x, GeneralName) for x in full_name
        ):
            raise TypeError(
                "full_name must be a list of GeneralName objects"
            )

        if relative_name and not isinstance(relative_name, Name):
            raise TypeError("relative_name must be a Name")

        if crl_issuer and not all(
            isinstance(x, GeneralName) for x in crl_issuer
        ):
            raise TypeError(
                "crl_issuer must be None or a list of general names"
            )

        if reasons and (not isinstance(reasons, frozenset) or not all(
            isinstance(x, ReasonFlags) for x in reasons
        )):
            raise TypeError("reasons must be None or frozenset of ReasonFlags")

        # unspecified/remove_from_crl exist in the ReasonFlags enum for
        # CRLReason use but are not legal inside a DistributionPoint.
        if reasons and (
            ReasonFlags.unspecified in reasons or
            ReasonFlags.remove_from_crl in reasons
        ):
            raise ValueError(
                "unspecified and remove_from_crl are not valid reasons in a "
                "DistributionPoint"
            )

        # reasons on their own are meaningless; at least one locator
        # field must accompany them.
        if reasons and not crl_issuer and not (full_name or relative_name):
            raise ValueError(
                "You must supply crl_issuer, full_name, or relative_name when "
                "reasons is not None"
            )

        self._full_name = full_name
        self._relative_name = relative_name
        self._reasons = reasons
        self._crl_issuer = crl_issuer

    def __repr__(self):
        return (
            "<DistributionPoint(full_name={0.full_name}, relative_name={0.rela"
            "tive_name}, reasons={0.reasons}, crl_issuer={0.crl_is"
            "suer})>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, DistributionPoint):
            return NotImplemented

        return (
            self.full_name == other.full_name and
            self.relative_name == other.relative_name and
            self.reasons == other.reasons and
            self.crl_issuer == other.crl_issuer
        )

    def __ne__(self, other):
        return not self == other

    # Read-only accessors for the underlying attributes.
    full_name = utils.read_only_property("_full_name")
    relative_name = utils.read_only_property("_relative_name")
    reasons = utils.read_only_property("_reasons")
    crl_issuer = utils.read_only_property("_crl_issuer")
class ReasonFlags(Enum):
    """
    Revocation reasons from RFC 5280; each value is the ASN.1 name used
    in the ReasonFlags / CRLReason definitions.
    """
    unspecified = "unspecified"
    key_compromise = "keyCompromise"
    ca_compromise = "cACompromise"
    affiliation_changed = "affiliationChanged"
    superseded = "superseded"
    cessation_of_operation = "cessationOfOperation"
    certificate_hold = "certificateHold"
    privilege_withdrawn = "privilegeWithdrawn"
    aa_compromise = "aACompromise"
    remove_from_crl = "removeFromCRL"
@utils.register_interface(ExtensionType)
class CertificatePolicies(object):
    """
    The Certificate Policies extension (RFC 5280, section 4.2.1.4): a
    sequence of PolicyInformation entries.
    """
    oid = ExtensionOID.CERTIFICATE_POLICIES

    def __init__(self, policies):
        if any(not isinstance(policy, PolicyInformation)
               for policy in policies):
            raise TypeError(
                "Every item in the policies list must be a "
                "PolicyInformation"
            )

        self._policies = policies

    def __iter__(self):
        return iter(self._policies)

    def __len__(self):
        return len(self._policies)

    def __eq__(self, other):
        if not isinstance(other, CertificatePolicies):
            return NotImplemented

        return self._policies == other._policies

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return "<CertificatePolicies({0})>".format(self._policies)
class PolicyInformation(object):
    """
    One policy entry from the Certificate Policies extension: an OID
    plus optional qualifiers (CPS URI strings and/or UserNotice objects).
    """
    def __init__(self, policy_identifier, policy_qualifiers):
        if not isinstance(policy_identifier, ObjectIdentifier):
            raise TypeError("policy_identifier must be an ObjectIdentifier")

        if policy_qualifiers and any(
            not isinstance(qualifier, (six.text_type, UserNotice))
            for qualifier in policy_qualifiers
        ):
            raise TypeError(
                "policy_qualifiers must be a list of strings and/or UserNotice"
                " objects or None"
            )

        self._policy_identifier = policy_identifier
        self._policy_qualifiers = policy_qualifiers

    policy_identifier = utils.read_only_property("_policy_identifier")
    policy_qualifiers = utils.read_only_property("_policy_qualifiers")

    def __eq__(self, other):
        if isinstance(other, PolicyInformation):
            return (
                self._policy_identifier == other._policy_identifier and
                self._policy_qualifiers == other._policy_qualifiers
            )

        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return (
            "<PolicyInformation(policy_identifier={0.policy_identifier}, polic"
            "y_qualifiers={0.policy_qualifiers})>".format(self)
        )
class UserNotice(object):
    """
    A user-notice policy qualifier (RFC 5280, section 4.2.1.4): an
    optional NoticeReference plus optional explicit display text.
    """
    def __init__(self, notice_reference, explicit_text):
        if notice_reference and not isinstance(notice_reference,
                                               NoticeReference):
            raise TypeError(
                "notice_reference must be None or a NoticeReference"
            )

        self._notice_reference = notice_reference
        self._explicit_text = explicit_text

    notice_reference = utils.read_only_property("_notice_reference")
    explicit_text = utils.read_only_property("_explicit_text")

    def __eq__(self, other):
        if isinstance(other, UserNotice):
            return (
                self._notice_reference == other._notice_reference and
                self._explicit_text == other._explicit_text
            )

        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __repr__(self):
        return (
            "<UserNotice(notice_reference={0.notice_reference}, explicit_text="
            "{0.explicit_text!r})>".format(self)
        )
class NoticeReference(object):
    """Reference to a textual notice: organization name plus notice numbers."""

    def __init__(self, organization, notice_numbers):
        self._organization = organization
        # six.integer_types matches InhibitAnyPolicy's check below and also
        # accepts Python 2 longs; on Python 3 it is exactly (int,).
        if not isinstance(notice_numbers, list) or not all(
            isinstance(x, six.integer_types) for x in notice_numbers
        ):
            raise TypeError(
                "notice_numbers must be a list of integers"
            )
        self._notice_numbers = notice_numbers

    def __repr__(self):
        return (
            "<NoticeReference(organization={0.organization!r}, notice_numbers="
            "{0.notice_numbers})>".format(self)
        )

    def __eq__(self, other):
        if not isinstance(other, NoticeReference):
            return NotImplemented
        return (
            self.organization == other.organization and
            self.notice_numbers == other.notice_numbers
        )

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Restores hashability lost by defining __eq__ (Python 3 sets
        # __hash__ to None).  notice_numbers is a list, so hash a tuple copy.
        return hash((self.organization, tuple(self.notice_numbers)))

    organization = utils.read_only_property("_organization")
    notice_numbers = utils.read_only_property("_notice_numbers")
@utils.register_interface(ExtensionType)
class ExtendedKeyUsage(object):
    """X.509 extendedKeyUsage extension: an iterable of usage OIDs."""

    oid = ExtensionOID.EXTENDED_KEY_USAGE

    def __init__(self, usages):
        if not all(isinstance(x, ObjectIdentifier) for x in usages):
            raise TypeError(
                "Every item in the usages list must be an ObjectIdentifier"
            )
        self._usages = usages

    def __iter__(self):
        return iter(self._usages)

    def __len__(self):
        return len(self._usages)

    def __repr__(self):
        return "<ExtendedKeyUsage({0})>".format(self._usages)

    def __eq__(self, other):
        if not isinstance(other, ExtendedKeyUsage):
            return NotImplemented
        return self._usages == other._usages

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Restores hashability lost by defining __eq__ (Python 3 sets
        # __hash__ to None).  NOTE(review): assumes ObjectIdentifier is
        # hashable -- confirm against its definition.
        return hash(tuple(self._usages))
@utils.register_interface(ExtensionType)
class OCSPNoCheck(object):
    """Marker extension: it carries no payload, so the OID alone is the signal."""
    oid = ExtensionOID.OCSP_NO_CHECK
@utils.register_interface(ExtensionType)
class InhibitAnyPolicy(object):
    """X.509 inhibitAnyPolicy extension.

    ``skip_certs`` is the number of additional certificates that may appear
    before the anyPolicy OID is no longer permitted.
    """

    oid = ExtensionOID.INHIBIT_ANY_POLICY

    def __init__(self, skip_certs):
        if not isinstance(skip_certs, six.integer_types):
            raise TypeError("skip_certs must be an integer")

        if skip_certs < 0:
            raise ValueError("skip_certs must be a non-negative integer")

        self._skip_certs = skip_certs

    def __repr__(self):
        return "<InhibitAnyPolicy(skip_certs={0.skip_certs})>".format(self)

    def __eq__(self, other):
        if not isinstance(other, InhibitAnyPolicy):
            return NotImplemented
        return self.skip_certs == other.skip_certs

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Defining __eq__ without __hash__ left the class unhashable on
        # Python 3; hash the single field __eq__ compares.
        return hash(self.skip_certs)

    skip_certs = utils.read_only_property("_skip_certs")
@utils.register_interface(ExtensionType)
class KeyUsage(object):
    """X.509 keyUsage extension: nine boolean usage flags.

    encipher_only and decipher_only are only meaningful when key_agreement
    is true; reading either property otherwise raises ValueError.
    """

    oid = ExtensionOID.KEY_USAGE

    def __init__(self, digital_signature, content_commitment, key_encipherment,
                 data_encipherment, key_agreement, key_cert_sign, crl_sign,
                 encipher_only, decipher_only):
        # RFC constraint: the (en|de)cipher_only bits qualify key_agreement,
        # so they may not be set without it.
        if not key_agreement and (encipher_only or decipher_only):
            raise ValueError(
                "encipher_only and decipher_only can only be true when "
                "key_agreement is true"
            )

        self._digital_signature = digital_signature
        self._content_commitment = content_commitment
        self._key_encipherment = key_encipherment
        self._data_encipherment = data_encipherment
        self._key_agreement = key_agreement
        self._key_cert_sign = key_cert_sign
        self._crl_sign = crl_sign
        self._encipher_only = encipher_only
        self._decipher_only = decipher_only

    digital_signature = utils.read_only_property("_digital_signature")
    content_commitment = utils.read_only_property("_content_commitment")
    key_encipherment = utils.read_only_property("_key_encipherment")
    data_encipherment = utils.read_only_property("_data_encipherment")
    key_agreement = utils.read_only_property("_key_agreement")
    key_cert_sign = utils.read_only_property("_key_cert_sign")
    crl_sign = utils.read_only_property("_crl_sign")

    @property
    def encipher_only(self):
        if not self.key_agreement:
            raise ValueError(
                "encipher_only is undefined unless key_agreement is true"
            )
        else:
            return self._encipher_only

    @property
    def decipher_only(self):
        if not self.key_agreement:
            raise ValueError(
                "decipher_only is undefined unless key_agreement is true"
            )
        else:
            return self._decipher_only

    def __repr__(self):
        try:
            encipher_only = self.encipher_only
            decipher_only = self.decipher_only
        except ValueError:
            # Without key_agreement the two flags are undefined; show None.
            encipher_only = None
            decipher_only = None

        return ("<KeyUsage(digital_signature={0.digital_signature}, "
                "content_commitment={0.content_commitment}, "
                "key_encipherment={0.key_encipherment}, "
                "data_encipherment={0.data_encipherment}, "
                "key_agreement={0.key_agreement}, "
                "key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, "
                "encipher_only={1}, decipher_only={2})>").format(
                    self, encipher_only, decipher_only)

    def __eq__(self, other):
        if not isinstance(other, KeyUsage):
            return NotImplemented
        return (
            self.digital_signature == other.digital_signature and
            self.content_commitment == other.content_commitment and
            self.key_encipherment == other.key_encipherment and
            self.data_encipherment == other.data_encipherment and
            self.key_agreement == other.key_agreement and
            self.key_cert_sign == other.key_cert_sign and
            self.crl_sign == other.crl_sign and
            self._encipher_only == other._encipher_only and
            self._decipher_only == other._decipher_only
        )

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Defining __eq__ without __hash__ left instances unhashable on
        # Python 3; hash over the same raw fields __eq__ compares (the
        # private encipher/decipher values, which are defined even when
        # key_agreement is false).
        return hash((
            self._digital_signature,
            self._content_commitment,
            self._key_encipherment,
            self._data_encipherment,
            self._key_agreement,
            self._key_cert_sign,
            self._crl_sign,
            self._encipher_only,
            self._decipher_only,
        ))
@utils.register_interface(ExtensionType)
class NameConstraints(object):
    """X.509 nameConstraints extension: permitted/excluded name subtrees."""

    oid = ExtensionOID.NAME_CONSTRAINTS

    def __init__(self, permitted_subtrees, excluded_subtrees):
        # Each argument is validated in turn; at least one must be provided.
        self._check_subtree_arg(permitted_subtrees, "permitted_subtrees")
        self._check_subtree_arg(excluded_subtrees, "excluded_subtrees")

        if permitted_subtrees is None and excluded_subtrees is None:
            raise ValueError(
                "At least one of permitted_subtrees and excluded_subtrees "
                "must not be None"
            )

        self._permitted_subtrees = permitted_subtrees
        self._excluded_subtrees = excluded_subtrees

    def _check_subtree_arg(self, subtrees, arg_name):
        # A subtree argument is either None or a list of GeneralName objects
        # whose IPAddress entries must wrap IPv4Network/IPv6Network values.
        if subtrees is None:
            return
        if not all(isinstance(entry, GeneralName) for entry in subtrees):
            raise TypeError(
                "{0} must be a list of GeneralName objects "
                "or None".format(arg_name)
            )
        self._validate_ip_name(subtrees)

    def __eq__(self, other):
        if isinstance(other, NameConstraints):
            return (
                self.excluded_subtrees == other.excluded_subtrees and
                self.permitted_subtrees == other.permitted_subtrees
            )
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def _validate_ip_name(self, tree):
        for name in tree:
            if isinstance(name, IPAddress) and not isinstance(
                name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network)
            ):
                raise TypeError(
                    "IPAddress name constraints must be an IPv4Network or"
                    " IPv6Network object"
                )

    def __repr__(self):
        return (
            u"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, "
            u"excluded_subtrees={0.excluded_subtrees})>".format(self)
        )

    permitted_subtrees = utils.read_only_property("_permitted_subtrees")
    excluded_subtrees = utils.read_only_property("_excluded_subtrees")
class Extension(object):
    """Pairs an extension value with its OID and criticality flag."""

    def __init__(self, oid, critical, value):
        if not isinstance(oid, ObjectIdentifier):
            raise TypeError(
                "oid argument must be an ObjectIdentifier instance."
            )
        if not isinstance(critical, bool):
            raise TypeError("critical must be a boolean value")

        self._oid, self._critical, self._value = oid, critical, value

    # Read-only views of the validated constructor arguments.
    oid = utils.read_only_property("_oid")
    critical = utils.read_only_property("_critical")
    value = utils.read_only_property("_value")

    def __repr__(self):
        template = ("<Extension(oid={0.oid}, critical={0.critical}, "
                    "value={0.value})>")
        return template.format(self)

    def __eq__(self, other):
        if isinstance(other, Extension):
            return (
                self.oid == other.oid and
                self.critical == other.critical and
                self.value == other.value
            )
        return NotImplemented

    def __ne__(self, other):
        return not self == other
class GeneralNames(object):
    """A validated, iterable collection of GeneralName objects."""

    def __init__(self, general_names):
        if not all(isinstance(x, GeneralName) for x in general_names):
            raise TypeError(
                "Every item in the general_names list must be an "
                "object conforming to the GeneralName interface"
            )
        self._general_names = general_names

    def __iter__(self):
        return iter(self._general_names)

    def __len__(self):
        return len(self._general_names)

    def get_values_for_type(self, type):
        # OtherName instances are returned whole because they carry two
        # significant fields; every other GeneralName is reduced to its
        # single .value attribute.
        matches = [name for name in self if isinstance(name, type)]
        if type != OtherName:
            return [name.value for name in matches]
        return matches

    def __repr__(self):
        return "<GeneralNames({0})>".format(self._general_names)

    def __eq__(self, other):
        if isinstance(other, GeneralNames):
            return self._general_names == other._general_names
        return NotImplemented

    def __ne__(self, other):
        return not self == other
@utils.register_interface(ExtensionType)
class SubjectAlternativeName(object):
    """subjectAltName extension; sequence behaviour delegates to GeneralNames."""

    oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME

    def __init__(self, general_names):
        # GeneralNames performs the per-item validation.
        self._general_names = GeneralNames(general_names)

    def __iter__(self):
        return iter(self._general_names)

    def __len__(self):
        return len(self._general_names)

    def get_values_for_type(self, type):
        # Forwarded to the wrapped GeneralNames collection.
        return self._general_names.get_values_for_type(type)

    def __repr__(self):
        return "<SubjectAlternativeName({0})>".format(self._general_names)

    def __eq__(self, other):
        if isinstance(other, SubjectAlternativeName):
            return self._general_names == other._general_names
        return NotImplemented

    def __ne__(self, other):
        return not self == other
@utils.register_interface(ExtensionType)
class IssuerAlternativeName(object):
    """issuerAltName extension; sequence behaviour delegates to GeneralNames."""

    oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME

    def __init__(self, general_names):
        # GeneralNames performs the per-item validation.
        self._general_names = GeneralNames(general_names)

    def __iter__(self):
        return iter(self._general_names)

    def __len__(self):
        return len(self._general_names)

    def get_values_for_type(self, type):
        # Forwarded to the wrapped GeneralNames collection.
        return self._general_names.get_values_for_type(type)

    def __repr__(self):
        return "<IssuerAlternativeName({0})>".format(self._general_names)

    def __eq__(self, other):
        if isinstance(other, IssuerAlternativeName):
            return self._general_names == other._general_names
        return NotImplemented

    def __ne__(self, other):
        return not self == other
| 31.515676 | 79 | 0.647537 |
299db5de964d3f590a9c2e4394e32f6380aab5c4 | 1,904 | py | Python | enrolment/tests/test_helpers.py | uktrade/directory-ui-buyer | b97f4f47a45ea67f00ca0e82ee5a5cb85fe130a3 | [
"MIT"
] | 3 | 2017-06-02T09:09:04.000Z | 2017-12-19T15:16:03.000Z | enrolment/tests/test_helpers.py | uktrade/directory-ui-buyer | b97f4f47a45ea67f00ca0e82ee5a5cb85fe130a3 | [
"MIT"
] | 257 | 2017-02-16T18:00:57.000Z | 2022-02-10T13:55:24.000Z | enrolment/tests/test_helpers.py | uktrade/directory-ui-buyer | b97f4f47a45ea67f00ca0e82ee5a5cb85fe130a3 | [
"MIT"
] | null | null | null | import http
from unittest.mock import patch
from requests import Response
import requests_mock
from enrolment import helpers
def profile_api_404(*args, **kwargs):
    """Stand-in for the profile API call: always returns an HTTP 404 response."""
    not_found = Response()
    not_found.status_code = http.client.NOT_FOUND
    return not_found
@patch.object(helpers.api_client.supplier, 'retrieve_profile')
def test_has_company_no_company(mock_retrieve_supplier_profile):
    """An OK profile response with an empty company means "no company"."""
    stub_response = Response()
    stub_response.status_code = http.client.OK
    stub_response.json = lambda: {'company': ''}
    mock_retrieve_supplier_profile.return_value = stub_response

    assert helpers.has_company(sso_session_id=123) is False
@patch.object(helpers.api_client.supplier, 'retrieve_profile', profile_api_404)
def test_has_company_404():
    # A 404 from the profile API is treated as "supplier has no company".
    assert helpers.has_company(sso_session_id=134) is False
def test_companies_house_client_consumes_auth(settings):
    # The client should send its API key as HTTP Basic auth (key as the
    # username, empty password).
    helpers.CompaniesHouseClient.api_key = 'ff'
    with requests_mock.mock() as mock:
        mock.get('https://thing.com')
        response = helpers.CompaniesHouseClient.get('https://thing.com')
        expected = 'Basic ZmY6'  # base64 encoded ff
        assert response.request.headers['Authorization'] == expected
def test_verify_oauth2_code():
    # verify_oauth2_code should POST to the Companies House token endpoint
    # with the authorization-code grant parameters in the query string.
    with requests_mock.mock() as mock:
        mock.post(
            'https://account.companieshouse.gov.uk/oauth2/token',
            status_code=http.client.OK,
        )
        response = helpers.CompaniesHouseClient.verify_oauth2_code(
            code='123',
            redirect_uri='http://redirect.com',
        )
    assert response.status_code == 200
    request = mock.request_history[0]
    # client_id/client_secret are the test-settings "debug" values; the
    # redirect URI must be URL-encoded.
    assert request.url == (
        'https://account.companieshouse.gov.uk/oauth2/token'
        '?grant_type=authorization_code'
        '&code=123'
        '&client_id=debug'
        '&client_secret=debug'
        '&redirect_uri=http%3A%2F%2Fredirect.com'
    )
eb4ef8f179b48ec88fc360952d57908314653e78 | 333 | py | Python | common/migrations/0016_merge_20190828_1929.py | uonafya/mfl_api | 379310b9b56cde084620f1f2dbfe4c6d7c1de47b | [
"MIT"
] | null | null | null | common/migrations/0016_merge_20190828_1929.py | uonafya/mfl_api | 379310b9b56cde084620f1f2dbfe4c6d7c1de47b | [
"MIT"
] | null | null | null | common/migrations/0016_merge_20190828_1929.py | uonafya/mfl_api | 379310b9b56cde084620f1f2dbfe4c6d7c1de47b | [
"MIT"
] | 4 | 2018-07-26T05:53:06.000Z | 2021-07-17T14:30:09.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2019-08-28 19:29
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('common', '0014_auto_20170522_2223'),
('common', '0015_apiauthentication'),
]
operations = [
]
| 19.588235 | 46 | 0.660661 |
f422e965b0fd9240218c9eef942c1f3446adadc0 | 4,679 | py | Python | Microsoft.ML.TensorFlow.TestModels/mnist_conv_model/mnist_conv_model.py | terrajobst/machinelearning-testdata | 296625f4e49d50fcd6a48a0d92bea7584e198c0f | [
"MIT"
] | 6 | 2019-03-02T18:54:43.000Z | 2021-12-28T13:23:25.000Z | Microsoft.ML.TensorFlow.TestModels/mnist_conv_model/mnist_conv_model.py | terrajobst/machinelearning-testdata | 296625f4e49d50fcd6a48a0d92bea7584e198c0f | [
"MIT"
] | 7 | 2018-08-28T22:28:19.000Z | 2022-03-14T19:53:27.000Z | Microsoft.ML.TensorFlow.TestModels/mnist_conv_model/mnist_conv_model.py | terrajobst/machinelearning-testdata | 296625f4e49d50fcd6a48a0d92bea7584e198c0f | [
"MIT"
] | 12 | 2018-08-28T21:25:42.000Z | 2022-02-27T17:06:46.000Z | import os
import sys
import time
from datetime import datetime
import math
import pandas as pd
import numpy as np
import tensorflow as tf
def load_data(fileName):
    """Read a header-less tab-separated file and label the columns V0..V{n-1}."""
    frame = pd.read_csv(fileName, header=None, sep='\t')
    frame.columns = ['V' + str(i) for i in range(frame.shape[1])]
    return frame
def train_model(dataTrain, labCol, config):
    """Build and train a 3-conv-layer MNIST classifier (TF1 graph mode).

    dataTrain: DataFrame with the label in column(s) labCol and 784 pixel
    columns; config supplies 'batch_size' and 'epochs'.  The trained graph
    is exported with tf.saved_model.simple_save to ./conv.
    NOTE(review): the function returns None, although the caller binds its
    result (modelData) -- consider returning the export path or metrics.
    """
    print('Training Data Dimensions: (%d,%d)' % (dataTrain.shape[0],dataTrain.shape[1]))
    colNames = np.array(list(dataTrain))
    features = np.delete(colNames,labCol)
    # NOTE(review): DataFrame.ix is deprecated (removed in pandas 1.0); this
    # relies on .ix's mixed label/positional semantics (.loc for the column
    # names, .iloc for the integer label column).
    train_X = dataTrain.ix[:, features].values
    train_X = train_X.reshape(train_X.shape[0], 28,28, 1)
    train_Y = dataTrain.ix[:,labCol].values.ravel()
    tf.set_random_seed(1)
    # Graph inputs: learning rate, dropout keep-probability, images, labels.
    lr = tf.placeholder(tf.float32, name = "learning_rate")
    pkeep = tf.placeholder_with_default(1.0, shape=(), name="DropoutProb")
    features = tf.placeholder(tf.float32, [None, train_X.shape[1], train_X.shape[2], train_X.shape[3]], name="Features")
    labels = tf.placeholder(tf.int64, [None], "Label")
    K = 6  # first convolutional layer output depth
    L = 12  # second convolutional layer output depth
    M = 24  # third convolutional layer
    N = 200  # fully connected layer (softmax)
    W1 = tf.Variable(tf.truncated_normal([6, 6, 1, K], stddev=0.1))  # 6x6 patch, 1 input channel, K output channels
    B1 = tf.Variable(tf.constant(0.1, tf.float32, [K]))
    W2 = tf.Variable(tf.truncated_normal([5, 5, K, L], stddev=0.1))
    B2 = tf.Variable(tf.constant(0.1, tf.float32, [L]))
    W3 = tf.Variable(tf.truncated_normal([4, 4, L, M], stddev=0.1))
    B3 = tf.Variable(tf.constant(0.1, tf.float32, [M]))
    W4 = tf.Variable(tf.truncated_normal([7 * 7 * M, N], stddev=0.1))
    B4 = tf.Variable(tf.constant(0.1, tf.float32, [N]))
    W5 = tf.Variable(tf.truncated_normal([N, 10], stddev=0.1))
    B5 = tf.Variable(tf.constant(0.1, tf.float32, [10]))
    # The model
    stride = 1  # output is 28x28
    Y1 = tf.nn.relu(tf.nn.conv2d(features, W1, strides=[1, stride, stride, 1], padding='SAME') + B1)
    stride = 2  # output is 14x14
    Y2 = tf.nn.relu(tf.nn.conv2d(Y1, W2, strides=[1, stride, stride, 1], padding='SAME') + B2)
    stride = 2  # output is 7x7
    Y3 = tf.nn.relu(tf.nn.conv2d(Y2, W3, strides=[1, stride, stride, 1], padding='SAME') + B3)
    # reshape the output from the third convolution for the fully connected layer
    YY = tf.reshape(Y3, shape=[-1, 7 * 7 * M])
    Y4 = tf.nn.relu(tf.matmul(YY, W4) + B4)
    YY4 = tf.nn.dropout(Y4, pkeep)
    model = tf.matmul(YY4, W5) + B5
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=model), name='Loss')
    prediction = tf.nn.softmax(model, name = "Prediction")
    accuracy = tf.reduce_mean( tf.cast(tf.equal( tf.argmax(prediction,1), labels), tf.float32), name = "Accuracy")
    optimizer = tf.train.MomentumOptimizer(learning_rate=lr, momentum=0.9).minimize(loss, name="MomentumOp")
    init = tf.global_variables_initializer()
    # Launch the graph.
    tfconfig = tf.ConfigProto()
    tfconfig.gpu_options.allow_growth = True
    sess = tf.Session(config=tfconfig)
    #sess = tf.Session()
    sess.run(init)
    batch_size =config['batch_size']
    train_time_sec = 0
    total_batch = int(train_X.shape[0] / batch_size)
    for epoch in range(config['epochs']):
        avg_loss = 0
        # Reshuffle the training set each epoch.
        perm = np.arange(train_X.shape[0])
        np.random.shuffle(perm)
        train_X = train_X[perm]
        train_Y = train_Y[perm]
        for batch_idx in range(0, train_X.shape[0], batch_size):
            X_batch = train_X[batch_idx:batch_idx+batch_size]
            Y_batch = train_Y[batch_idx:batch_idx+batch_size]
            t0 = time.time()
            _, loss_val, acc = sess.run([optimizer, loss, accuracy], feed_dict={features: X_batch, labels: Y_batch, pkeep:0.9, lr:0.01})
            train_time_sec = train_time_sec + (time.time() - t0)
            avg_loss += loss_val / total_batch
        # 'acc' here is the accuracy of the *last* mini-batch of the epoch.
        print('Epoch: ', '%04d' % (epoch+1), 'cost (cross-entropy) = %.4f , acc = %.4f' % (avg_loss, acc))
    tf.saved_model.simple_save(
        sess,
        export_dir = "./conv",
        inputs = {"Features" : features},
        outputs = {"Prediction": prediction})
# Script entry: report the TF version, configure training, load the 1K
# MNIST samples and train the convolutional model.
tool_version = tf.__version__
print('TensorFlow version: {}.'.format(tool_version))
config = {}
config['batch_size'] = 100
config['epochs'] = 20
config['model_dir'] = os.getcwd()+'/model'
# Start the clock!
ptm = time.time()
print('Loading data...')
dataTrain = load_data('mnist_train.1K.tsv')
dataTest = load_data('mnist_test.1K.tsv')
print('Done!\n')
# The label is the first column of the TSV files.
labCol = [0]
# NOTE(review): train_model has no return statement, so modelData is None.
modelData = train_model(dataTrain, labCol, config)
853fdf0031390a87986e91b4568c44d204ee6783 | 1,131 | py | Python | stv/gui_helper/verify_bridge.py | blackbat13/stv | fc73fd50ad1ab6a36a6b4d6b1aec02c4bcd1b094 | [
"MIT"
] | 2 | 2021-07-11T09:52:59.000Z | 2022-02-13T17:34:59.000Z | stv/gui_helper/verify_bridge.py | blackbat13/stv | fc73fd50ad1ab6a36a6b4d6b1aec02c4bcd1b094 | [
"MIT"
] | 3 | 2020-07-26T13:49:59.000Z | 2021-01-19T18:04:10.000Z | stv/gui_helper/verify_bridge.py | blackbat13/stv | fc73fd50ad1ab6a36a6b4d6b1aec02c4bcd1b094 | [
"MIT"
] | null | null | null | import json
import sys
from stv.models import BridgeModel
# Command-line arguments: model size n, trick count k, and the information
# variant v (1 = imperfect information, otherwise perfect information).
# NOTE(review): argv indices start at 3 -- presumably slots 1-2 are consumed
# by a wrapper; confirm against the caller.
n = int(sys.argv[3])
k = int(sys.argv[4])
v = int(sys.argv[5])

# Read the pre-dealt hands: one JSON document on the first line of the file.
# (with-statement replaces the original manual open/close, so the handle is
# released even if parsing fails.)
with open("bridge_hands.txt", "r") as file_hands:
    json_hands = file_hands.readline()
hands = json.loads(json_hands)

bridge_model = BridgeModel(n, k, {'board': [-1, -1, -1, -1], 'lefts': [0, 0],
                                  'hands': hands, 'next': 0, 'history': [],
                                  'beginning': 0, 'clock': 0, 'suit': -1})
bridge_model.generate()
bridge_model.transitions_to_readable()

if v == 1:
    atl_model = bridge_model.model.to_atl_imperfect(bridge_model.get_actions())
else:
    atl_model = bridge_model.model.to_atl_perfect(bridge_model.get_actions())

# Winning states: all k tricks have been played and side 0 took the majority.
winning = set()
for state_id, state in enumerate(bridge_model.states):
    if state['lefts'][0] > state['lefts'][1] and state['lefts'][0] + state['lefts'][1] == k:
        winning.add(state_id)

result = atl_model.minimum_formula_many_agents([0], winning)

# Output: 1/0 for whether the initial state (id 0) is winning, the number of
# states from which the objective is enforceable, then the strategy dump.
print(1 if 0 in result else 0)
print(len(result))
print(bridge_model.model.js_dump_strategy_objective(atl_model.strategy))
2b27685fad0f91bff2ad4c7d1f21c88739bd3845 | 12,756 | py | Python | models/official/mask_rcnn/roi_ops.py | lionsky123/tpu | f4468c3b2e9a0214a40ac80a76a9cf5ff237d030 | [
"Apache-2.0"
] | 3 | 2019-05-30T06:01:24.000Z | 2019-10-27T14:36:49.000Z | models/official/mask_rcnn/roi_ops.py | pluieciel/tpu | 4b7c183cfca336fcc96b5a3c45cb99eda260e8df | [
"Apache-2.0"
] | null | null | null | models/official/mask_rcnn/roi_ops.py | pluieciel/tpu | 4b7c183cfca336fcc96b5a3c45cb99eda260e8df | [
"Apache-2.0"
] | 2 | 2019-07-03T20:53:03.000Z | 2021-09-18T08:18:32.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""ROI-related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import box_utils
def _propose_rois_tpu(scores,
                      boxes,
                      anchor_boxes,
                      height,
                      width,
                      scale,
                      rpn_pre_nms_topn,
                      rpn_post_nms_topn,
                      rpn_nms_threshold,
                      rpn_min_size,
                      bbox_reg_weights):
  """Proposes RoIs given a group of candidates (TPU version).

  Args:
    scores: a tensor with a shape of [batch_size, num_boxes].
    boxes: a tensor with a shape of [batch_size, num_boxes, 4],
      in the encoded form.
    anchor_boxes: an Anchors object that contains the anchors with a shape of
      [batch_size, num_boxes, 4].
    height: a tensor of shape [batch_size, 1, 1] representing the image height.
    width: a tensor of shape [batch_size, 1, 1] representing the image width.
    scale: a tensor of shape [batch_size, 1, 1] representing the image scale.
    rpn_pre_nms_topn: a integer number of top scoring RPN proposals to keep
      before applying NMS. This is *per FPN level* (not total).
    rpn_post_nms_topn: a integer number of top scoring RPN proposals to keep
      after applying NMS. This is the total number of RPN proposals produced.
    rpn_nms_threshold: a float number between 0 and 1 as the NMS threshold
      used on RPN proposals.
    rpn_min_size: a integer number as the minimum proposal height and width as
      both need to be greater than this number. Note that this number is at
      original image scale; not scale used during training or inference).
    bbox_reg_weights: None or a list of four integer specifying the weights used
      when decoding the box.

  Returns:
    scores: a tensor with a shape of [batch_size, rpn_post_nms_topn, 1]
      representing the scores of the proposals. It has same dtype as input
      scores.
    boxes: a tensor with a shape of [batch_size, rpn_post_nms_topn, 4]
      representing the boxes of the proposals. NOTE(review): the docstring
      said "normalized coordinates", but the code clips against the absolute
      image height/width and never normalizes -- these appear to be absolute
      [ymin, xmin, ymax, xmax] coordinates; confirm against box_utils. It has
      same dtype as input boxes.
  """
  _, num_boxes = scores.get_shape().as_list()
  topk_limit = (num_boxes if num_boxes < rpn_pre_nms_topn
                else rpn_pre_nms_topn)
  # Keep the pre-NMS top-k candidates; boxes/anchors follow the score order,
  # so downstream 'sorted' NMS can rely on the scores being sorted.
  scores, boxes_list = box_utils.top_k(
      scores, k=topk_limit, boxes_list=[boxes, anchor_boxes])
  boxes = boxes_list[0]
  anchor_boxes = boxes_list[1]

  # Decode boxes w.r.t. anchors and transform to the absolute coordinates.
  boxes = box_utils.decode_boxes(boxes, anchor_boxes, bbox_reg_weights)

  # Clip boxes that exceed the boundary.
  boxes = box_utils.clip_boxes(boxes, height, width)

  # Filter boxes that one side is less than rpn_min_size threshold.
  boxes, scores = box_utils.filter_boxes(
      boxes,
      tf.expand_dims(scores, axis=-1),
      rpn_min_size,
      height,
      width,
      scale)
  scores = tf.squeeze(scores, axis=-1)

  post_nms_topk_limit = (topk_limit if topk_limit < rpn_post_nms_topn else
                         rpn_post_nms_topn)
  # NMS.
  if rpn_nms_threshold > 0:
    scores, boxes = box_utils.sorted_non_max_suppression_padded(
        scores, boxes, max_output_size=post_nms_topk_limit,
        iou_threshold=rpn_nms_threshold)

  # Pick top-K post NMS'ed boxes.
  scores, boxes = box_utils.top_k(
      scores, k=post_nms_topk_limit, boxes_list=[boxes])
  boxes = boxes[0]
  return scores, boxes
def _propose_rois_gpu(scores,
                      boxes,
                      anchor_boxes,
                      height,
                      width,
                      scale,
                      rpn_pre_nms_topn,
                      rpn_post_nms_topn,
                      rpn_nms_threshold,
                      rpn_min_size,
                      bbox_reg_weights):
  """Proposes RoIs given a group of candidates (GPU version).

  Args:
    scores: a tensor with a shape of [batch_size, num_boxes].
    boxes: a tensor with a shape of [batch_size, num_boxes, 4],
      in the encoded form.
    anchor_boxes: an Anchors object that contains the anchors with a shape of
      [batch_size, num_boxes, 4].
    height: a tensor of shape [batch_size, 1, 1] representing the image height.
    width: a tensor of shape [batch_size, 1, 1] representing the image width.
    scale: a tensor of shape [batch_size, 1, 1] representing the image scale.
    rpn_pre_nms_topn: a integer number of top scoring RPN proposals to keep
      before applying NMS. This is *per FPN level* (not total).
    rpn_post_nms_topn: a integer number of top scoring RPN proposals to keep
      after applying NMS. This is the total number of RPN proposals produced.
    rpn_nms_threshold: a float number between 0 and 1 as the NMS threshold
      used on RPN proposals.
    rpn_min_size: a integer number as the minimum proposal height and width as
      both need to be greater than this number. Note that this number is at
      original image scale; not scale used during training or inference).
    bbox_reg_weights: None or a list of four integer specifying the weights used
      when decoding the box.

  Returns:
    scores: a tensor with a shape of [batch_size, rpn_post_nms_topn, 1]
      representing the scores of the proposals. It has same dtype as input
      scores.
    boxes: a tensor with a shape of [batch_size, rpn_post_nms_topn, 4]
      representing the boxes of the proposals. NOTE(review): the docstring
      said "normalized coordinates", but the NMS branch converts back to
      absolute coordinates before returning -- confirm against box_utils. It
      has same dtype as input boxes.
  """
  batch_size, num_boxes = scores.get_shape().as_list()
  topk_limit = (num_boxes if num_boxes < rpn_pre_nms_topn
                else rpn_pre_nms_topn)
  # Decode w.r.t. anchors, then clip to the image boundary.
  boxes = box_utils.decode_boxes(boxes, anchor_boxes, bbox_reg_weights)
  boxes = box_utils.clip_boxes(boxes, height, width)

  if rpn_min_size > 0.0:
    # Drop proposals whose height or width is below the threshold.
    boxes, scores = box_utils.filter_boxes(
        boxes,
        tf.expand_dims(scores, axis=-1),
        rpn_min_size,
        height,
        width,
        scale)
    scores = tf.squeeze(scores, axis=-1)

  post_nms_topk_limit = (topk_limit if topk_limit < rpn_post_nms_topn else
                         rpn_post_nms_topn)
  if rpn_nms_threshold > 0:
    # Normalize coordinates as combined_non_max_suppression currently
    # only support normalized coordinates.
    pre_nms_boxes = box_utils.to_normalized_coordinates(
        boxes, height, width)
    pre_nms_boxes = tf.reshape(pre_nms_boxes, [batch_size, num_boxes, 1, 4])
    pre_nms_scores = tf.reshape(scores, [batch_size, num_boxes, 1])

    boxes, scores, _, _ = tf.image.combined_non_max_suppression(
        pre_nms_boxes,
        pre_nms_scores,
        max_output_size_per_class=topk_limit,
        max_total_size=post_nms_topk_limit,
        iou_threshold=rpn_nms_threshold,
        score_threshold=0.0,
        pad_per_class=False)
    boxes = box_utils.to_absolute_coordinates(boxes, height, width)
  else:
    scores, boxes = box_utils.top_k(
        scores, k=post_nms_topk_limit, boxes_list=[boxes])
    boxes = boxes[0]
  return scores, boxes
def multilevel_propose_rois(scores_outputs,
                            box_outputs,
                            all_anchors,
                            image_info,
                            rpn_pre_nms_topn,
                            rpn_post_nms_topn,
                            rpn_nms_threshold,
                            rpn_min_size,
                            bbox_reg_weights,
                            use_batched_nms=False):
  """Proposes RoIs given a group of candidates from different FPN levels.

  Args:
    scores_outputs: an OrderDict with keys representing levels and values
      representing logits in [batch_size, height, width, num_anchors].
    box_outputs: an OrderDict with keys representing levels and values
      representing box regression targets in
      [batch_size, height, width, num_anchors * 4]
    all_anchors: an Anchors object that contains the all anchors.
    image_info: a tensor of shape [batch_size, 5] where the five columns
      encode the input image's [height, width, scale,
      original_height, original_width]. Height and width are for
      the input to the network, not the original image; scale is the scale
      factor used to scale the network input size to the original image size.
      See dataloader.DetectionInputProcessor for details. The last two are
      original height and width. See dataloader.DetectionInputProcessor for
      details.
    rpn_pre_nms_topn: a integer number of top scoring RPN proposals to keep
      before applying NMS. This is *per FPN level* (not total).
    rpn_post_nms_topn: a integer number of top scoring RPN proposals to keep
      after applying NMS. This is the total number of RPN proposals produced.
    rpn_nms_threshold: a float number between 0 and 1 as the NMS threshold
      used on RPN proposals.
    rpn_min_size: a integer number as the minimum proposal height and width as
      both need to be greater than this number. Note that this number is at
      origingal image scale; not scale used during training or inference).
    bbox_reg_weights: None or a list of four integer specifying the weights used
      when decoding the box.
    use_batched_nms: whether use batched nms. The batched nms will use
      tf.combined_non_max_suppression, which is only available for CPU/GPU.

  Returns:
    scores: a tensor with a shape of [batch_size, rpn_post_nms_topn, 1]
      representing the scores of the proposals.
    rois: a tensor with a shape of [batch_size, rpn_post_nms_topn, 4]
      representing the boxes of the proposals. The boxes are in normalized
      coordinates with a form of [ymin, xmin, ymax, xmax].
  """
  with tf.name_scope('multilevel_propose_rois'):
    levels = scores_outputs.keys()
    scores = []
    rois = []
    anchor_boxes = all_anchors.get_unpacked_boxes()
    # image_info columns: [:, 0]=height, [:, 1]=width, [:, 2]=scale; each is
    # reshaped to [batch_size, 1, 1] for broadcasting against boxes.
    height = tf.expand_dims(image_info[:, 0:1], axis=-1)
    width = tf.expand_dims(image_info[:, 1:2], axis=-1)
    scale = tf.expand_dims(image_info[:, 2:3], axis=-1)
    # Run the single-level proposer on each FPN level independently.
    for level in levels:
      with tf.name_scope('level_%d' % level):
        batch_size, feature_h, feature_w, num_anchors_per_location = (
            scores_outputs[level].get_shape().as_list())
        num_boxes = feature_h * feature_w * num_anchors_per_location
        this_level_scores = tf.reshape(
            scores_outputs[level], [batch_size, num_boxes])
        # Convert RPN logits to objectness probabilities.
        this_level_scores = tf.sigmoid(this_level_scores)
        this_level_boxes = tf.reshape(
            box_outputs[level], [batch_size, num_boxes, 4])
        # Tile the per-level anchors across the batch and flatten to match
        # the [batch_size, num_boxes, 4] box layout.
        this_level_anchors = tf.cast(
            tf.reshape(
                tf.expand_dims(anchor_boxes[level], axis=0) *
                tf.ones([batch_size, 1, 1, 1]),
                [batch_size, num_boxes, 4]),
            dtype=this_level_scores.dtype)

        if use_batched_nms:
          propose_rois_fn = _propose_rois_gpu
        else:
          propose_rois_fn = _propose_rois_tpu

        this_level_scores, this_level_boxes = propose_rois_fn(
            this_level_scores,
            this_level_boxes,
            this_level_anchors,
            height,
            width,
            scale,
            rpn_pre_nms_topn,
            rpn_post_nms_topn,
            rpn_nms_threshold,
            rpn_min_size,
            bbox_reg_weights)

        scores.append(this_level_scores)
        rois.append(this_level_boxes)

    # Merge all levels and keep the global top-k proposals.
    scores = tf.concat(scores, axis=1)
    rois = tf.concat(rois, axis=1)

    with tf.name_scope('roi_post_nms_topk'):
      post_nms_num_anchors = scores.shape[1]
      post_nms_topk_limit = (
          post_nms_num_anchors if post_nms_num_anchors < rpn_post_nms_topn
          else rpn_post_nms_topn)

      top_k_scores, top_k_rois = box_utils.top_k(
          scores, k=post_nms_topk_limit, boxes_list=[rois])
      top_k_rois = top_k_rois[0]

  return top_k_scores, top_k_rois
b118c3fede539f7e271f9be54020b3b90d2de2d9 | 1,257 | py | Python | L16-multiobj-plotting.py | jdherman/eci273 | 86828b2e075258afdd528e86295170e162cc99e3 | [
"MIT"
] | 10 | 2018-12-23T02:59:06.000Z | 2021-12-07T11:55:21.000Z | L16-multiobj-plotting.py | jdherman/eci273 | 86828b2e075258afdd528e86295170e162cc99e3 | [
"MIT"
] | null | null | null | L16-multiobj-plotting.py | jdherman/eci273 | 86828b2e075258afdd528e86295170e162cc99e3 | [
"MIT"
] | 7 | 2018-12-21T02:06:51.000Z | 2021-12-11T02:36:47.000Z | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')

# load a 4-objective pareto front
data = np.loadtxt('data/example-pareto-front.csv', delimiter=',')

# Example 1: N-dimensional scatter plot
# (importing Axes3D registers the '3d' projection used by add_subplot)
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# use 's' and 'c' for size/color, respectively
# (objectives 1-3 on the axes, objective 4 mapped to color)
h = ax.scatter(data[:,0], data[:,1], data[:,2],
               c=data[:,3], cmap=plt.cm.cool)
               #s=50*data[:,0], edgecolor='none')
plt.colorbar(h)
plt.show()

# Example 2: parallel axis
# see also: http://syntagmatic.github.io/parallel-coordinates/examples/table.html
# first normalize each objective to [0,1] range
# for objs in data:
#   objs = (objs - data.min(axis=0)) / (data.max(axis=0) - data.min(axis=0))
#   # filtering
#   if objs[2] < 0.5 and objs[0] < 0.25:
#     plt.plot(range(4), objs, color='steelblue', zorder=2)
#   else:
#     plt.plot(range(4), objs, color='0.8', zorder=1)
# plt.gca().set_xticks(range(4))
# plt.gca().set_xticklabels(['A','B','C','D'])
# plt.show()

# Example 3: scatter plot matrix
# for i in range(4):
#   for j in range(4):
#     plt.subplot(4,4,i*4+j+1)
#     plt.scatter(data[:,i], data[:,j])
# plt.show()
f3ce0adeb1418de78ae8e7b604167b61be133b21 | 1,556 | py | Python | setup.py | jhunkeler/exyapps | 744da64b4b8897ef9354a38ea0543e908992000c | [
"MIT"
] | null | null | null | setup.py | jhunkeler/exyapps | 744da64b4b8897ef9354a38ea0543e908992000c | [
"MIT"
] | null | null | null | setup.py | jhunkeler/exyapps | 744da64b4b8897ef9354a38ea0543e908992000c | [
"MIT"
] | 1 | 2020-02-07T14:31:42.000Z | 2020-02-07T14:31:42.000Z | #!/usr/bin/env python
"""Setup script for 'exyapps'"""
from distutils.core import setup
description = "Extensions of Yet Another Python Parser System"
long_description = \
"""
EXYAPPS is an easy to use parser generator that is written in Python and
generates Python code. It is intended to be simple, very easy to use,
and produce human-readable parsers.
It is not the fastest or most powerful parser. Exyapps is designed
to be used when regular expressions are not enough and other parser
systems are too much: situations where you might otherwise write your
own recursive descent parser.
Exyapps is derived from YAPPS, with various extensions:
- Handle stacked input ("include files")
- augmented ignore-able patterns (can parse multi-line C comments correctly)
- better error reporting
- read input incrementally
- the generated parser does not require any runtime library
"""
setup (
name = "exyapps",
version = "3.0",
description = description,
long_description = long_description,
url="https://svn.stsci.edu/trac/ssb/etal/wiki/exyapps",
maintainer="Mark Sienkiewicz",
maintainer_email='no_spam@see_url',
# bug: replace this and put acknowledgements of these guys in the docs
# url = "http://theory.stanford.edu/~amitp/yapps/",
# author = "Amit J. Patel, Matthias Urlichs, Mark Sienkiewicz",
license = 'MIT',
platforms = ['POSIX'],
keywords = ['parsing'],
packages = ['exyapps'],
scripts = ['scripts/exyapps'],
# if we ever start using distribute
# zip_safe = False,
)
| 32.416667 | 76 | 0.721722 |
296a79b26461f407982abc84abcb66fb7efeae8b | 868 | py | Python | tests/test_requests.py | phonosync/introApp | 74003ed3960ddf03294f5ce3f4ed5df8fb0ef90e | [
"MIT"
] | 2 | 2019-12-14T20:53:50.000Z | 2020-02-04T13:29:00.000Z | tests/test_requests.py | phonosync/introApp | 74003ed3960ddf03294f5ce3f4ed5df8fb0ef90e | [
"MIT"
] | null | null | null | tests/test_requests.py | phonosync/introApp | 74003ed3960ddf03294f5ce3f4ed5df8fb0ef90e | [
"MIT"
] | null | null | null | import os
import requests
from dotenv import load_dotenv
load_dotenv()
def test_base():
    """The root endpoint should answer 200 with a JSON status of 'ok'."""
    url = 'http://{}:{}'.format(os.getenv('APP_HOST'), os.getenv('APP_PORT'))
    resp = requests.get(url)
    payload = resp.json()
    assert resp.status_code == 200
    assert payload['status'] == 'ok'
def test_train():
    """POSTing a small training set to /train should succeed."""
    url = 'http://{}:{}/train'.format(os.getenv('APP_HOST'), os.getenv('APP_PORT'))
    body = {'x': [[1], [2], [3]],
            'y': [1.0, 2.0, 3.0]}
    resp = requests.post(url, json=body)
    assert resp.status_code == 200
def test_predict():
    """POSTing a single sample to /predict should succeed."""
    url = 'http://{}:{}/predict'.format(os.getenv('APP_HOST'), os.getenv('APP_PORT'))
    body = {'x': [[1.0]]}
    resp = requests.post(url, json=body)
    assert resp.status_code == 200
if __name__ == '__main__':
test_predict()
| 25.529412 | 96 | 0.591014 |
b58d7ba5a94d78fde03adc48a9fd7da1af22f8c7 | 2,017 | py | Python | tests/api/endpoints/admin/test_license.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 420 | 2015-01-03T11:34:46.000Z | 2022-03-10T07:15:41.000Z | tests/api/endpoints/admin/test_license.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 735 | 2015-01-04T21:22:51.000Z | 2022-03-31T09:26:07.000Z | tests/api/endpoints/admin/test_license.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | [
"Apache-2.0"
] | 379 | 2015-01-05T17:08:03.000Z | 2022-03-06T00:11:50.000Z | import os
import json
from mock import patch
from django.urls import reverse
from seahub.api2.endpoints.admin import license as license_api
from seahub.settings import LICENSE_PATH
from seahub.utils.error_msg import file_type_error_msg
from seahub.test_utils import BaseTestCase
from tests.common.utils import urljoin
from tests.common.common import BASE_URL
class AdminLicenseTest(BaseTestCase):
    """Tests for the admin license-upload API endpoint ('api-v2.1-admin-license')."""

    def setUp(self):
        # All tests run as the system administrator unless re-logged.
        self.login_as(self.admin)

    def test_post_admin_permission_denied(self):
        # An admin without system-config rights must be rejected with 403.
        self.logout()
        self.login_as(self.admin_cannot_config_system)
        resp = self.client.post(reverse('api-v2.1-admin-license'))
        self.assertEqual(403, resp.status_code)

    @patch.object(license_api, 'ccnet_api')
    def test_update_license(self, mock_ccnet_api):
        # NOTE(review): Mock uses 'return_value'; assigning 'return_val' only
        # creates an unused attribute — confirm this is intentional.
        mock_ccnet_api.return_val = {}
        url = reverse('api-v2.1-admin-license')
        url = urljoin(BASE_URL, url)
        # Upload a valid .txt license file and check the parsed fields.
        with open(
                os.path.join(os.getcwd(), 'tests/seahub/utils/seafile-license.txt'), 'rb') as f:
            resp = self.client.post(url, {'license': f})
        json_resp = json.loads(resp.content)
        assert json_resp['license_expiration'] is not None
        assert json_resp['license_mode'] is not None
        assert json_resp['license_maxusers'] is not None
        assert json_resp['license_to'] is not None
        # The uploaded license must have been written to its configured path.
        assert os.path.exists(LICENSE_PATH)

    @patch.object(license_api, 'ccnet_api')
    def test_update_license_with_invalid_type(self, mock_ccnet_api):
        mock_ccnet_api.return_val = {}
        url = reverse('api-v2.1-admin-license')
        url = urljoin(BASE_URL, url)
        # A file whose extension is not '.txt' must be rejected with 400.
        with open('temp.notxt', 'w') as f:
            f.write('1')
        with open(
                os.path.join(os.getcwd(), 'temp.notxt'), 'rb') as f:
            resp = self.client.post(url, {'license': f})
        json_resp = json.loads(resp.content)
        assert 400 == resp.status_code
        assert file_type_error_msg('notxt', 'txt') == json_resp['error_msg']
| 35.385965 | 96 | 0.671294 |
ffda72ccd5208fdbb762536828ab882ff7144355 | 305 | py | Python | models.py | 19121A05D9/project1 | 4bf68136004e84136adf49c87b3599b4a87e7085 | [
"MIT"
] | 4 | 2020-08-24T11:45:17.000Z | 2022-03-12T07:32:12.000Z | models.py | 19121A05D9/project1 | 4bf68136004e84136adf49c87b3599b4a87e7085 | [
"MIT"
] | 6 | 2021-02-18T19:34:31.000Z | 2022-01-19T14:16:22.000Z | models.py | 19121A05D9/project1 | 4bf68136004e84136adf49c87b3599b4a87e7085 | [
"MIT"
] | 5 | 2022-02-22T09:37:30.000Z | 2022-03-24T11:29:00.000Z | from flask_login import UserMixin
from . import db
class User(UserMixin, db.Model):
    """Application user persisted via SQLAlchemy (Flask-Login compatible)."""

    id = db.Column(db.Integer, primary_key=True)  # primary keys are required by SQLAlchemy
    email = db.Column(db.String(100), unique=True)  # login identifier, must be unique
    password = db.Column(db.String(100))  # NOTE(review): presumably stores a hash — confirm callers hash before saving
    name = db.Column(db.String(1000))  # display name
38737fac2f189bcb25791f223b3dd6e130a13e6a | 5,084 | py | Python | trt_yolo.py | Siraj-Qazi/tensorrt_demos | f8c602712c4b0f00e5566ed99ca05c46708a9284 | [
"MIT"
] | null | null | null | trt_yolo.py | Siraj-Qazi/tensorrt_demos | f8c602712c4b0f00e5566ed99ca05c46708a9284 | [
"MIT"
] | null | null | null | trt_yolo.py | Siraj-Qazi/tensorrt_demos | f8c602712c4b0f00e5566ed99ca05c46708a9284 | [
"MIT"
] | null | null | null | """trt_yolo.py
This script demonstrates how to do real-time object detection with
TensorRT optimized YOLO engine.
"""
import os
import time
import argparse
import cv2
import pycuda.autoinit # This is needed for initializing CUDA driver
from utils.yolo_classes import get_cls_dict
from utils.camera import add_camera_args, Camera
from utils.display import open_window, set_display, show_fps
from utils.visualization import BBoxVisualization
from utils.yolo_with_plugins import get_input_shape, TrtYOLO
import glob
WINDOW_NAME = 'TrtYOLODemo'
def parse_args():
    """Parse command-line arguments for the TensorRT YOLO demo.

    Camera-related options are added by utils.camera.add_camera_args().

    NOTE(review): '--test-images' defaults to 'test/images' (truthy), so
    main() always takes the image-directory branch unless the flag is
    overridden with an empty value — confirm this default is intended.
    """
    desc = ('Capture and display live camera video, while doing '
            'real-time object detection with TensorRT optimized '
            'YOLO model on Jetson')
    parser = argparse.ArgumentParser(description=desc)
    parser = add_camera_args(parser)
    parser.add_argument(
        '-c', '--category_num', type=int, default=80,
        help='number of object categories [80]')
    # argparse exposes this flag as args.test_images (dash -> underscore).
    parser.add_argument(
        '-i', '--test-images', type=str, default='test/images',
        help='Test images')
    parser.add_argument(
        '-m', '--model', type=str, required=True,
        help=('[yolov3|yolov3-tiny|yolov3-spp|yolov4|yolov4-tiny]-'
              '[{dimension}], where dimension could be a single '
              'number (e.g. 288, 416, 608) or WxH (e.g. 416x256)'))
    parser.add_argument(
        '-l', '--letter_box', action='store_true',
        help='inference with letterboxed image [False]')
    args = parser.parse_args()
    return args
def loop_and_detect_images(trt_yolo, conf_th, vis, test_imgs):
    """Run object detection over a directory of JPEG images and save results.

    Detections are drawn on each image and written under a 'detections/'
    directory (created if needed), keeping the original base file name.
    At the end, the overall processing speed (including file I/O) is printed.

    # Arguments
      trt_yolo: the TRT YOLO object detector instance.
      conf_th: confidence/score threshold for object detection.
      vis: BBoxVisualization instance used to draw the bounding boxes.
      test_imgs: path of the directory containing the '*.jpg' test images.
    """
    test_imgs = glob.glob(test_imgs + '/*.jpg')
    fps = 0.0
    tic = time.time()
    time_start = tic
    # 'os' is already imported at module level, so the redundant local
    # 'import os' was removed; makedirs(exist_ok=True) also avoids the
    # exists()/mkdir() race of the previous version.
    os.makedirs('detections', exist_ok=True)
    for test_img in test_imgs:
        img = cv2.imread(test_img)
        if img is None:
            break
        boxes, confs, clss = trt_yolo.detect(img, conf_th)
        img = vis.draw_bboxes(img, boxes, confs, clss)
        img = show_fps(img, fps)  # NOTE: displays the previous frame's FPS
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps * 0.95 + curr_fps * 0.05)
        tic = toc
        output_img_name = 'detections/' + os.path.basename(test_img)
        print('Saving ', output_img_name)
        cv2.imwrite(output_img_name, img)
    total_time = time.time() - time_start
    print('Processing speed (incl. File I/O): {} FPS'.format(len(test_imgs) / total_time))
def loop_and_detect(cam, trt_yolo, conf_th, vis):
    """Continuously capture images from camera and do object detection.

    Runs until the display window is closed or ESC is pressed; the 'F'/'f'
    key toggles fullscreen display.

    # Arguments
      cam: the camera instance (video source).
      trt_yolo: the TRT YOLO object detector instance.
      conf_th: confidence/score threshold for object detection.
      vis: for visualization.
    """
    full_scrn = False
    fps = 0.0
    tic = time.time()
    while True:
        if cv2.getWindowProperty(WINDOW_NAME, 0) < 0:
            # Window was closed by the user.
            break
        img = cam.read()
        boxes, confs, clss = trt_yolo.detect(img, conf_th)
        img = vis.draw_bboxes(img, boxes, confs, clss)
        img = show_fps(img, fps)  # overlays the previous frame's FPS value
        cv2.imshow(WINDOW_NAME, img)
        toc = time.time()
        curr_fps = 1.0 / (toc - tic)
        # calculate an exponentially decaying average of fps number
        fps = curr_fps if fps == 0.0 else (fps*0.95 + curr_fps*0.05)
        tic = toc
        key = cv2.waitKey(1)
        if key == 27:  # ESC key: quit program
            break
        elif key == ord('F') or key == ord('f'):  # Toggle fullscreen
            full_scrn = not full_scrn
            set_display(WINDOW_NAME, full_scrn)
def main():
    """Entry point: build the TensorRT YOLO detector and run detection.

    If --test-images is set, detection runs over the image directory;
    otherwise the camera is opened and the interactive display loop runs.

    Raises SystemExit on a bad category number, a missing engine file
    ('yolo/<model>.trt') or a camera that fails to open.
    """
    args = parse_args()
    if args.category_num <= 0:
        raise SystemExit('ERROR: bad category_num (%d)!' % args.category_num)
    if not os.path.isfile('yolo/%s.trt' % args.model):
        raise SystemExit('ERROR: file (yolo/%s.trt) not found!' % args.model)

    cls_dict = get_cls_dict(args.category_num)
    vis = BBoxVisualization(cls_dict)
    # Engine input dimensions are derived from the model name (e.g. '-416').
    h, w = get_input_shape(args.model)
    trt_yolo = TrtYOLO(args.model, (h, w), args.category_num, args.letter_box)

    if args.test_images:
        loop_and_detect_images(trt_yolo, conf_th=0.5, vis=vis, test_imgs=args.test_images)
    else:
        cam = Camera(args)
        if not cam.isOpened():
            raise SystemExit('ERROR: failed to open camera!')
        open_window(
            WINDOW_NAME, 'Camera TensorRT YOLO Demo',
            cam.img_width, cam.img_height)
        loop_and_detect(cam, trt_yolo, conf_th=0.5, vis=vis)
        cam.release()
        cv2.destroyAllWindows()
main()
| 31.974843 | 90 | 0.636703 |
a2c110424698ae1c8fb6d1cc76c9d40897a540da | 2,406 | py | Python | tools/telemetry/telemetry/core/local_server_unittest.py | aranajhonny/chromium | caf5bcb822f79b8997720e589334266551a50a13 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2019-01-16T03:57:39.000Z | 2019-01-16T03:57:39.000Z | tools/telemetry/telemetry/core/local_server_unittest.py | aranajhonny/chromium | caf5bcb822f79b8997720e589334266551a50a13 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2018-02-10T21:00:08.000Z | 2018-03-20T05:09:50.000Z | tools/telemetry/telemetry/core/local_server_unittest.py | aranajhonny/chromium | caf5bcb822f79b8997720e589334266551a50a13 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
import SimpleHTTPServer
from telemetry.core import local_server
from telemetry.unittest import tab_test_case
class SimpleLocalServerBackendRequestHandler(
        SimpleHTTPServer.SimpleHTTPRequestHandler):
    """Request handler that answers every GET with a fixed 'hello world' page."""

    def do_GET(self):
        # Serve the same static HTML snippet regardless of the requested path.
        msg = """<!DOCTYPE html>
<html>
<body>
hello world
</body>
"""
        self.send_response(200)
        self.send_header('Content-Type', 'text/html')
        self.send_header('Content-Length', len(msg))
        self.end_headers()
        self.wfile.write(msg)

    def log_request(self, code='-', size='-'):
        # Silence per-request logging to keep test output clean.
        pass
class SimpleLocalServerBackend(BaseHTTPServer.HTTPServer,
                               local_server.LocalServerBackend):
    """Minimal HTTP server backend used to exercise the LocalServer plumbing."""

    def __init__(self):
        # Bind to an ephemeral localhost port; the chosen port is reported
        # back through StartAndGetNamedPorts.
        BaseHTTPServer.HTTPServer.__init__(
            self, ('127.0.0.1', 0), SimpleLocalServerBackendRequestHandler)
        local_server.LocalServerBackend.__init__(self)

    def StartAndGetNamedPorts(self, args):
        # args must be exactly what SimpleLocalServer.GetBackendStartupArgs
        # produced on the other side of the LocalServer handshake.
        assert 'hello' in args
        assert args['hello'] == 'world'
        return [local_server.NamedPort('http', self.server_address[1])]

    def ServeForever(self):
        self.serve_forever()
class SimpleLocalServer(local_server.LocalServer):
    """LocalServer wrapper that pairs SimpleLocalServerBackend with a URL."""

    def __init__(self):
        super(SimpleLocalServer, self).__init__(SimpleLocalServerBackend)

    def GetBackendStartupArgs(self):
        # Must match the assertions in
        # SimpleLocalServerBackend.StartAndGetNamedPorts.
        startup_args = {'hello': 'world'}
        return startup_args

    @property
    def url(self):
        base_url = self.forwarder.url
        return base_url + '/'
class LocalServerUnittest(tab_test_case.TabTestCase):
    """End-to-end tests for browser-managed local servers."""

    @classmethod
    def setUpClass(cls):
        super(LocalServerUnittest, cls).setUpClass()
        # One shared server instance, started once for the whole test class.
        cls._server = SimpleLocalServer()
        cls._browser.StartLocalServer(cls._server)

    def testLocalServer(self):
        # The started server is tracked by the browser and serves its page.
        self.assertTrue(self._server in self._browser.local_servers)
        self._tab.Navigate(self._server.url)
        self._tab.WaitForDocumentReadyStateToBeComplete()
        body_text = self._tab.EvaluateJavaScript('document.body.textContent')
        body_text = body_text.strip()
        self.assertEquals('hello world', body_text)

    def testStartingAndRestarting(self):
        # A second server of the same class cannot be started until the
        # first one is closed.
        server2 = SimpleLocalServer()
        self.assertRaises(Exception,
                          lambda: self._browser.StartLocalServer(server2))
        self._server.Close()
        self.assertTrue(self._server not in self._browser.local_servers)
        self._browser.StartLocalServer(server2)
| 30.846154 | 73 | 0.736076 |
ab7992df21389ef0e37bff54c250eea7fd4d3ff7 | 26,701 | py | Python | straph/parser/parser.py | GiulioRossetti/Straph | edc021d25243bcca619c62dca1f28cf05b73a92c | [
"Apache-2.0"
] | null | null | null | straph/parser/parser.py | GiulioRossetti/Straph | edc021d25243bcca619c62dca1f28cf05b73a92c | [
"Apache-2.0"
] | null | null | null | straph/parser/parser.py | GiulioRossetti/Straph | edc021d25243bcca619c62dca1f28cf05b73a92c | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2017-2021 Léo Rannou - Sorbonne Université/LIP6 - Thales
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import dateutil.parser as du
import dpkt
import json
import math
import os
import socket
import time
from collections import defaultdict
from sortedcollections import SortedSet
from tqdm import tqdm
from straph import stream as sg
# TODO : parse PCAP (adpat pcap_to_csv and shit), see pcap_reader.
# TODO : parse net, to finish (for Pajek datasets).
__nb_2_protocol__ = {0: 'IPv6_HbH', # IPv6 Hop by Hop
1: 'ICMP', # Internet Control Message
2: 'IGMP', # Internet Group Management
3: 'GGP', # Gateway-to-Gateway
4: 'IPv4',
5: 'ST2', # Sream v2 aka "IPv5"
6: 'TCP', # Transmission Control
7: 'CBT',
8: 'EGP', # Exterior Gateway Protocol
17: 'UDP', # User Datagram
41: 'IPv6',
43: 'IPv6_Route', # Routing header for IPv6
47: 'GRE', # Generic Routing encapsulation
50: 'ESP', # Encap Security Payload
51: 'AH', # Authenfication Header
58: 'IPv6_ICMP', # IPV6 ICMP
103: 'PIM', # Protocol Independent Multicast
}
def inet_to_str(inet):
    """
    Convert a packed inet address to its printable string form.

    :param inet: packed binary network address (4 bytes IPv4, 16 bytes IPv6)
    :return: str: Printable/readable IP address
    """
    # Dispatch on the packed length: 4 bytes is IPv4, anything else IPv6.
    family = socket.AF_INET if len(inet) == 4 else socket.AF_INET6
    return socket.inet_ntop(family, inet)
def datetime_to_timestamp(s):
    """Parse a date/time string and return its POSIX (epoch) timestamp."""
    parsed = du.parse(s)
    return parsed.timestamp()
def pcap_to_csv(file_input, destination, protocol=None):
    """
    Transform a pcap capture into a CSV file of packets.

    Output columns (';'-separated): time (seconds, relative to the first
    packet), src, dst (dense integer ids assigned per IP address),
    protocol (IP protocol number), len (packet length), src_port, dst_port
    (None for non-TCP/UDP packets).

    ICMP packets are always skipped; if *protocol* is given (a name from
    ``__nb_2_protocol__``, e.g. 'TCP'), only packets of that protocol are kept.

    :param file_input: path to the input ``.pcap`` file
    :param destination: path of the CSV file to write
    :param protocol: optional protocol name used as a filter
    :return: None (writes *destination* as a side effect)
    """
    counter = 0
    dict_ip = defaultdict(lambda: len(dict_ip))  # ip address -> dense integer id
    dict_label = {}  # id -> ip address (built but not persisted here)
    if protocol:
        # Translate the protocol name back to its IP protocol number.
        protocol = [key for key, value in __nb_2_protocol__.items() if value == protocol][0]
        print("protocol :", protocol)
    with open(destination, 'w') as output, open(file_input, 'rb') as input:
        writer = csv.writer(output, delimiter=';')
        writer.writerow(["time", "src", "dst", "protocol", "len", "src_port", "dst_port"])
        for ts, pkt in tqdm(dpkt.pcap.Reader(input)):
            eth = dpkt.ethernet.Ethernet(pkt)
            if counter == 0:
                t0 = ts  # timestamp of the first packet: origin of time
            counter += 1
            if isinstance(eth.data, bytes):
                # Non-IP payload (e.g. ARP): skip.
                continue
            ip = eth.data
            if ip.src is not None:
                ip_src = inet_to_str(ip.src).encode()
            else:
                continue
            ip_dst = inet_to_str(ip.dst).encode()
            id_src = dict_ip[ip_src]
            id_dst = dict_ip[ip_dst]
            dict_label[id_src] = ip_src
            dict_label[id_dst] = ip_dst
            # We ignore 'ICMP' protocols, ICMP scan useless
            if ip.p == 1:
                continue
            if protocol:
                if ip.p != protocol:
                    continue
            if isinstance(eth.data, dpkt.ip6.IP6):
                len_pckt = ip.plen  # IPv6 carries a payload-length field
            else:
                len_pckt = ip.len
            if isinstance(ip.data, dpkt.tcp.TCP) or isinstance(ip.data, dpkt.udp.UDP):
                tcp = ip.data
                src_port = tcp.sport
                dst_port = tcp.dport
            else:
                # No port information outside TCP/UDP.
                src_port = None
                dst_port = None
            writer.writerow([round(ts - t0, 6), id_src, id_dst, ip.p, len_pckt, src_port, dst_port])
def parse_net(input_file, output_file_nodes, output_file_links, link_duration=1):
    """
    A Stream Graph reader for datasets issued by Pajek ('.net' files).

    Only the '*Edges' section is used (the '*Vertices' section is skipped).
    Each occurrence time t of a link (u, v) yields a presence interval
    [t, t + link_duration]; overlapping or contiguous intervals are merged
    on the fly, for links and for both endpoint nodes.

    :param input_file: path to the Pajek '.net' file
    :param output_file_nodes: output path for node presence intervals
    :param output_file_links: output path for link presence intervals
    :param link_duration: duration assigned to each single link occurrence
    :return: None (the two output files are written as a side effect)
    """
    E = defaultdict(list)  # (u, v) -> flat interval list [b0, e0, b1, e1, ...]
    W = defaultdict(list)  # node -> flat interval list
    type_node = None  # True while reading the '*Vertices' section
    with open(input_file, 'r') as input_file:
        for line in input_file:
            l = line.strip().split()
            if l[0] == '*Vertices':
                type_node = True
                continue
            if l[0] == '*Edges':
                type_node = False
                continue
            if type_node:
                # Vertex declarations carry no temporal information: skip.
                continue
            else:
                u, v = int(l[0]), int(l[1])
                e = (u, v)
                if u == v:
                    # SELF LOOP : we ignore it
                    continue
                # Fourth field lists the occurrence times, e.g. '[1,5,7]'.
                t = l[3].strip('[').strip(']').split(',')
                for current_time in t:
                    current_time = int(current_time)
                    if e in E and E[e][-1] >= current_time:
                        # Overlaps the previous interval: extend it.
                        E[e][-1] = max(E[e][-1], current_time + link_duration)
                    else:
                        E[e] += [current_time, current_time + link_duration]
                    if u in W and W[u][-1] >= current_time:
                        W[u][-1] = max(W[u][-1], current_time + link_duration)
                    else:
                        W[u] += [current_time, current_time + link_duration]
                    if v in W and W[v][-1] >= current_time:
                        W[v][-1] = max(W[v][-1], current_time + link_duration)
                    else:
                        W[v] += [current_time, current_time + link_duration]
    # Dump links: "u v b0 e0 b1 e1 ..." one link per line.
    with open(output_file_links, 'w') as output_file:
        for k, v in E.items():
            output_file.write(str(k[0]) + " " + str(k[1]) + " ")
            for t in v:
                output_file.write(str(t) + " ")
            output_file.write("\n")
    # Dump nodes: "n b0 e0 b1 e1 ..." one node per line.
    with open(output_file_nodes, 'w') as output_file:
        for k, v in W.items():
            output_file.write(str(k) + " ")
            for t in v:
                output_file.write(str(t) + " ")
            output_file.write("\n")
    return None
def parse_csv(input_file, entry_format, **kwargs):
    """
    Reader for '.csv' interaction files, producing a StreamGraph.

    *entry_format* must be one of three layouts (values are column indexes):
    {t_pos, u_pos, v_pos}, {t_pos, link_duration_pos, u_pos, v_pos} or
    {b_pos, e_pos, u_pos, v_pos}.

    :param input_file: path to the CSV file
    :param entry_format: dict mapping field roles to column positions
    :param kwargs: parsing options (see ``parser`` for defaults): delimiter,
        ignore_header, nrows, nodes_to_label, time_is_datetime,
        link_duration, is_directed, is_link_stream, delta, ...
    :return: a StreamGraph object
    :raises TypeError: if *entry_format* is not one of the supported layouts
    """
    # Convert entry format
    t_pos, b_pos, e_pos, link_duration_pos = None, None, None, None
    if len(entry_format) == 3:
        (t_pos, u_pos, v_pos) = entry_format['t_pos'], entry_format['u_pos'], entry_format['v_pos']
    elif len(entry_format) == 4 and 'link_duration_pos' in entry_format:
        (t_pos, link_duration_pos, u_pos, v_pos) = entry_format['t_pos'], entry_format['link_duration_pos'], \
                                                   entry_format['u_pos'], entry_format['v_pos']
    elif len(entry_format) == 4 and 'b_pos' in entry_format:
        (b_pos, e_pos, u_pos, v_pos) = entry_format['b_pos'], entry_format['e_pos'], \
                                       entry_format['u_pos'], entry_format['v_pos']
    else:
        raise TypeError("Entry format is not supported, see documentation !")

    E = defaultdict(list)  # link -> flat interval list [b0, e0, b1, e1, ...]
    W = defaultdict(list)  # node -> flat interval list
    cnt_rows = 0
    nodes_to_label = {}
    label_to_id = defaultdict(lambda: len(label_to_id))  # label -> dense int id
    min_t, max_t = math.inf, -math.inf
    with open(input_file, 'r') as input_file:
        reader = csv.reader(input_file, delimiter=kwargs['delimiter'])
        if kwargs['ignore_header']:
            next(reader, None)
        # Resolve a constant link duration once, outside the loop.
        if kwargs['link_duration']:
            link_duration = kwargs['link_duration']
        elif 'link_duration_pos' not in entry_format and 'b_pos' not in entry_format:
            link_duration = 0
            print("[WARNING] No link_duration provided, links durations are set to 0.")
        for line in tqdm(reader, desc='Parsing CSV', total=kwargs['nrows'] - 1):
            cnt_rows += 1
            if cnt_rows > kwargs['nrows']:
                break
            if kwargs['nodes_to_label']:
                # Convert Label to int
                u_label = line[u_pos]
                v_label = line[v_pos]
                if u_label in {'', ' '} or v_label in {'', ' '}:
                    # Blank node field: drop the row.
                    continue
                if u_label == v_label:
                    # SELF LOOP : we ignore it
                    continue
                # If we haven't seen these labels before they are assigned to len(label_to_id) = new_id
                u = label_to_id[u_label]
                v = label_to_id[v_label]
                nodes_to_label[u] = u_label
                nodes_to_label[v] = v_label
            else:
                u = int(line[u_pos])
                v = int(line[v_pos])
                if u == v:
                    # SELF LOOP : we ignore it
                    continue
            # Parse the temporal fields (thousands separators are stripped).
            if kwargs['time_is_datetime']:
                if 't_pos' in entry_format:
                    t = datetime_to_timestamp(line[t_pos])
                elif 'b_pos' in entry_format:
                    b = datetime_to_timestamp(line[b_pos])
                    e = datetime_to_timestamp(line[e_pos])
                    link_duration = e - b
                    t = b
            else:
                if 't_pos' in entry_format:
                    t = float(line[t_pos].replace(',', ''))
                elif 'b_pos' in entry_format:
                    b = float(line[b_pos].replace(',', ''))
                    e = float(line[e_pos].replace(',', ''))
                    link_duration = e - b
                    t = b
            if 'link_duration_pos' in entry_format:
                link_duration = float(line[link_duration_pos].replace(',', ''))
            min_t, max_t = min(min_t, t), max(max_t, t + link_duration)
            if kwargs['is_directed']:
                l = (u, v)
            else:
                # Undirected: reuse the (v, u) orientation if seen first.
                if (v, u) in E:
                    l = (v, u)
                else:
                    l = (u, v)
            if l in E and E[l][-1] >= t:
                # Overlapping/contiguous occurrence: extend the last interval.
                E[l][-1] = max(E[l][-1], t + link_duration)
            else:
                E[l] += [t, t + link_duration]
            if kwargs['is_link_stream'] is False:
                if u in W and W[u][-1] >= t:
                    W[u][-1] = max(W[u][-1], t + link_duration)
                else:
                    W[u] += [t, t + link_duration]
                if v in W and W[v][-1] >= t:
                    W[v][-1] = max(W[v][-1], t + link_duration)
                else:
                    W[v] += [t, t + link_duration]
            else:
                W[u] = [min_t, max_t]
                W[v] = [min_t, max_t]
    if kwargs['is_link_stream'] is True:
        # In a link stream every node is present over the whole time window.
        W = {k: [min_t, max_t] for k in W.keys()}
    if kwargs['delta']:
        delta = kwargs['delta']
        chrono = time.time()
        W, E = approximate_events(W, E, delta)
        print("\t Approximate events with delta :", delta, " in ", time.time() - chrono)
    S = sg.StreamGraph(times=[min_t, max_t],
                       nodes=list(W.keys()),
                       links=list(E.keys()),
                       node_presence=[W[k] for k in W.keys()],
                       link_presence=[E[k] for k in E.keys()],
                       node_to_label=nodes_to_label,
                       node_to_id={i: i for i in W.keys()})
    return S
def approximate_events(W, E, delta):
    """
    Approximation method reducing the number of distinct event times while
    preserving connectivity properties of the original dataset.

    A subset of the original event times is kept greedily so that any two
    consecutive kept times are at least *delta* apart.  Interval starts are
    then snapped forward to the next kept time and interval ends backward to
    the previous one.  Every input interval is assumed to last at least
    *delta* (enforced by assertions), so no interval collapses.

    :param W: dict node -> flat interval list [b0, e0, b1, e1, ...]
    :param E: dict link -> flat interval list
    :param delta: minimal spacing between two kept event times
    :return: (new_W, new_E) with snapped interval bounds
    """
    # Seems strange but avoid float imprecision
    event_times = sorted(set([t for np in W.values() for t in np] + [t for lp in E.values() for t in lp]))
    t_old = event_times[0]
    discretized_event_times = SortedSet()
    discretized_event_times.add(t_old)
    # Greedily keep every event time at least delta after the last kept one.
    for t in event_times:
        if t - t_old >= delta:
            discretized_event_times.add(t)
            t_old = t
    new_W = {}
    for n, np in W.items():
        new_W[n] = []
        for t0, t1 in zip(np[::2], np[1::2]):
            assert t1 - t0 >= delta
            if t0 not in discretized_event_times:
                # Catch time after t0 in discretized event times:
                t0 = discretized_event_times[discretized_event_times.bisect(t0)]
            if t1 not in discretized_event_times:
                # Catch time before t1 in discretized event times:
                t1 = discretized_event_times[discretized_event_times.bisect(t1) - 1]
            new_W[n] += [t0, t1]
    new_E = {}
    for l, lp in E.items():
        new_E[l] = []
        for t0, t1 in zip(lp[::2], lp[1::2]):
            assert t1 - t0 >= delta
            if t0 not in discretized_event_times:
                # Catch time after t0 in discretized event times:
                t0 = discretized_event_times[discretized_event_times.bisect(t0)]
            if t1 not in discretized_event_times:
                # Catch time before t1 in discretized event times:
                t1 = discretized_event_times[discretized_event_times.bisect(t1) - 1]
            new_E[l] += [t0, t1]
    return new_W, new_E
def parse_json(input_file, entry_format, **kwargs):
    """
    A Stream Graph reader for JSON datasets.

    The input file must contain a JSON array of records; each record is
    indexed with the positions given in *entry_format*, using the same
    layouts as ``parse_csv``: {t_pos, u_pos, v_pos},
    {t_pos, link_duration_pos, u_pos, v_pos} or {b_pos, e_pos, u_pos, v_pos}.

    :param input_file: path to the JSON file
    :param entry_format: dict mapping field roles to record keys/positions
    :param kwargs: parsing options (see ``parser``): nrows, nodes_to_label,
        time_is_datetime, link_duration, is_directed, is_link_stream, ...
    :return: a StreamGraph object
    :raises TypeError: if *entry_format* is not one of the supported layouts
    """
    # Convert entry format (reject unsupported layouts early, like parse_csv).
    t_pos, b_pos, e_pos, link_duration_pos = None, None, None, None
    if len(entry_format) == 3:
        (t_pos, u_pos, v_pos) = entry_format['t_pos'], entry_format['u_pos'], entry_format['v_pos']
    elif len(entry_format) == 4 and 'link_duration_pos' in entry_format:
        (t_pos, link_duration_pos, u_pos, v_pos) = entry_format['t_pos'], entry_format['link_duration_pos'], \
                                                   entry_format['u_pos'], entry_format['v_pos']
    elif len(entry_format) == 4 and 'b_pos' in entry_format:
        (b_pos, e_pos, u_pos, v_pos) = entry_format['b_pos'], entry_format['e_pos'], \
                                       entry_format['u_pos'], entry_format['v_pos']
    else:
        raise TypeError("Entry format is not supported, see documentation !")

    E = defaultdict(list)  # link -> flat interval list [b0, e0, b1, e1, ...]
    W = defaultdict(list)  # node -> flat interval list
    cnt_rows = 0
    id_to_label = {}
    label_to_id = defaultdict(lambda: len(label_to_id))  # label -> dense int id
    min_t, max_t = math.inf, -math.inf
    # BUGFIX: resolve the constant link duration BEFORE the loop (as in
    # parse_csv).  Previously 'link_duration' was read for the min_t/max_t
    # update before it was ever assigned in the {t_pos, u_pos, v_pos}
    # layouts, raising UnboundLocalError on the first record.
    if kwargs['link_duration']:
        link_duration = kwargs['link_duration']
    elif 'link_duration_pos' not in entry_format and 'b_pos' not in entry_format:
        link_duration = 0
        print("[WARNING] No link_duration provided, links durations are set to 0.")
    with open(input_file, 'r') as input_file:
        reader = json.load(input_file)
        for line in tqdm(reader, desc="Parsing JSON", total=kwargs['nrows']):
            cnt_rows += 1
            if cnt_rows > kwargs['nrows']:
                break
            if kwargs['nodes_to_label']:
                # Convert label to int; unseen labels get id len(label_to_id).
                u_label = line[u_pos]
                v_label = line[v_pos]
                u = label_to_id[u_label]
                v = label_to_id[v_label]
                id_to_label[u] = u_label
                id_to_label[v] = v_label
            else:
                u = int(line[u_pos])
                v = int(line[v_pos])
            if u == v:
                # SELF LOOP : we ignore it (checked before updating min_t/max_t,
                # consistently with parse_csv).
                continue
            # Parse the temporal fields (thousands separators are stripped).
            if kwargs['time_is_datetime']:
                if 't_pos' in entry_format:
                    t = datetime_to_timestamp(line[t_pos])
                elif 'b_pos' in entry_format:
                    b = datetime_to_timestamp(line[b_pos])
                    e = datetime_to_timestamp(line[e_pos])
                    link_duration = e - b
                    t = b
            else:
                if 't_pos' in entry_format:
                    t = float(line[t_pos].replace(',', ''))
                elif 'b_pos' in entry_format:
                    b = float(line[b_pos].replace(',', ''))
                    e = float(line[e_pos].replace(',', ''))
                    link_duration = e - b
                    t = b
            if 'link_duration_pos' in entry_format:
                link_duration = float(line[link_duration_pos].replace(',', ''))
            min_t, max_t = min(min_t, t), max(max_t, t + link_duration)
            if kwargs['is_directed']:
                l = (u, v)
            else:
                # Undirected: reuse the (v, u) orientation if seen first.
                if (v, u) in E:
                    l = (v, u)
                else:
                    l = (u, v)
            if l in E and E[l][-1] >= t:
                # Overlapping/contiguous occurrence: extend the last interval.
                E[l][-1] = max(E[l][-1], t + link_duration)
            else:
                E[l] += [t, t + link_duration]
            if kwargs['is_link_stream'] is False:
                if u in W and W[u][-1] >= t:
                    W[u][-1] = max(W[u][-1], t + link_duration)
                else:
                    W[u] += [t, t + link_duration]
                if v in W and W[v][-1] >= t:
                    W[v][-1] = max(W[v][-1], t + link_duration)
                else:
                    W[v] += [t, t + link_duration]
            else:
                # Link stream: nodes recorded here, normalized after the loop
                # (added for consistency with parse_csv; previously link-stream
                # inputs produced an empty node set).
                W[u] = [min_t, max_t]
                W[v] = [min_t, max_t]
    if kwargs['is_link_stream'] is True:
        # In a link stream every node is present over the whole time window.
        W = {k: [min_t, max_t] for k in W.keys()}
    S = sg.StreamGraph(times=[min_t, max_t],
                       nodes=list(W.keys()),
                       links=list(E.keys()),
                       node_presence=[W[k] for k in W.keys()],
                       link_presence=[E[k] for k in E.keys()],
                       node_to_label=id_to_label)
    return S
def parse_link_stream(input_file):
    """
    Parse link stream format:
    alpha t0
    omega t1
    b e u v
    .
    .
    .
    b e v w

    'alpha'/'omega' lines define the global time window [t0, t1]; every other
    line is one link occurrence (begin, end, node label, node label).  Node
    labels are mapped to dense integer ids and every node is present over
    the whole window [alpha, omega].

    :param input_file: path to the link stream file
    :return: a StreamGraph object
    """
    E = defaultdict(list)  # link -> flat interval list [b0, e0, b1, e1, ...]
    W = defaultdict(list)  # node -> [alpha, omega]
    cnt_rows = 0
    nodes_to_label = {}
    label_to_id = defaultdict(lambda: len(label_to_id))  # label -> dense int id
    # First pass only counts lines, for the progress bar total.
    with open(input_file, 'r') as ipt:
        size = sum(1 for _ in ipt)
    with open(input_file, 'r') as input_file:
        for line in tqdm(input_file, total=size):
            cnt_rows += 1
            l = line.strip().split()
            if len(l) == 2:
                # Header line: one bound of the global time window.
                assert l[0] in ["alpha", "omega"]
                if l[0] == "alpha":
                    alpha = float(l[1])
                else:
                    omega = float(l[1])
            else:
                assert (len(l) == 4)
                b, e, u_label, v_label = l
                u = label_to_id[u_label]
                v = label_to_id[v_label]
                nodes_to_label[u] = u_label
                nodes_to_label[v] = v_label
                b = float(b)
                e = float(e)
                l = (u, v)
                # NOTE(review): this switches to (v, u) when (u, v) is already
                # in E, which can split one undirected link over two keys; the
                # equivalent code in parse_csv tests '(v, u) in E' instead —
                # confirm which orientation handling is intended.
                if l in E:
                    l = (v, u)
                if l in E and E[l][-1] >= b:
                    # Overlapping occurrence: extend the last interval.
                    E[l][-1] = max(E[l][-1], e)
                else:
                    E[l] += [b, e]
                # Nodes are present over the whole window (link stream).
                if u not in W:
                    W[u] = [alpha, omega]
                if v not in W:
                    W[v] = [alpha, omega]
    S = sg.StreamGraph(times=[alpha, omega],
                       nodes=list(W.keys()),
                       links=list(E.keys()),
                       node_presence=[W[k] for k in W.keys()],
                       link_presence=[E[k] for k in E.keys()],
                       node_to_label=nodes_to_label,
                       node_to_id={i: i for i in W.keys()})
    return S
def parse_pcap(file_input, entry_format, **options):
    """
    Parse a PCAP capture by converting it to a temporary CSV file and
    delegating to ``parse_csv``.

    :param file_input: path to the input ``.pcap`` file
    :param entry_format: field layout forwarded to ``parse_csv``
    :param options: parsing options forwarded to ``parse_csv``
    :return: a StreamGraph object
    """
    import tempfile
    # Use a real temporary file instead of 'tmp.csv' in the current working
    # directory, and remove it even if the conversion or parsing fails.
    fd, tmp_path = tempfile.mkstemp(suffix='.csv')
    os.close(fd)
    try:
        pcap_to_csv(file_input, tmp_path)
        S = parse_csv(tmp_path, entry_format, **options)
    finally:
        os.remove(tmp_path)
    return S
def parser(input_file, input_format, entry_format, output_file=None, simplify_presence=False, output_format='sg',
           **kwargs):
    """
    Straph's tunable parser. Compatible with several data formats: CSV, TSV, JSON and PCAP.

    :param input_file: path of the input file
    :param input_format: accepted input formats: 'csv', 'json', 'pcap' ('net' is not yet supported)
    :param entry_format: format of each line to be read, e.g. (t, u, v) = (line[x], line[y], line[w])
    :param output_file: optional output path (name only)
    :param simplify_presence: if True, each node is made present from its first interaction to its last
    :param output_format: output format(s): 'sg' and/or 'json' (string or list of strings)
    :param kwargs: parsing options overriding the defaults built below
    :return: a StreamGraph object
    :raises TypeError: on inconsistent entry_format/options or an unsupported input format
    :raises ValueError: for the 'net' input format
    """
    # Default parsing options; 'nrows' defaults to the input's line count.
    with open(input_file) as ipt:
        options = {'delimiter': ',',
                   'is_link_stream': False,
                   'is_directed': False,
                   'nrows': sum(1 for _ in ipt),
                   'link_duration': False,
                   'order_sgf': False,
                   'ignore_header': True,
                   'nodes_to_label': False,
                   'time_is_datetime': False,
                   'delta': None,
                   }
    options.update(kwargs)
    # Reject mixed entry formats and redundant link_duration combinations.
    if ('t_pos' in entry_format or 'link_duration_pos' in entry_format) and \
            ('b_pos' in entry_format or 'e_pos' in entry_format):
        raise TypeError('Invalide entry format :' + str(entry_format) + ' should be of type {t_pos,u_pos,v_pos} or'
                                                                        ' {t_pos,link_duration_pos,u_pos,v_pos} or'
                                                                        '{b_pos,e_pos,u_pos,v_pos} !')
    if options['link_duration'] and ('b_pos' in entry_format or 'e_pos' in entry_format):
        raise TypeError('link_duration is incompatible with entry format : {b_pos,e_pos,u_pos,v_pos} !')
    if options['link_duration'] and ('link_duration_pos' in entry_format):
        raise TypeError('link_duration is incompatible with entry format : {t_pos,link_duration_pos,u_pos,v_pos} !')
    # Dispatch to the format-specific reader.
    if input_format == 'csv':
        S = parse_csv(input_file, entry_format, **options)
    elif input_format == 'json':
        S = parse_json(input_file, entry_format, **options)
    elif input_format == 'pcap':
        S = parse_pcap(input_file, entry_format, **options)
    elif input_format == 'net':
        raise ValueError("File format 'net' not yet supported.")
        # S = parse_net(input_format, entry_format, **options)
    else:
        raise TypeError('Format not supported')
    if simplify_presence is True:
        S.node_presence = [[np[0], np[-1]] for np in
                           S.node_presence]  # Set nodes to be present from their 1st interaction to their last
    if output_file is not None:
        # Allow a single format string or a list of formats.
        if isinstance(output_format, str):
            output_format = [output_format]
        for of in output_format:
            if of == 'sg':
                S.write_to_sg(output_file)
            elif of == 'json':
                S.write_to_json(output_file)
    return S
def sort_csv(input_file, entry_format, output=None, **kwargs):
    """
    Sort a CSV of temporal links by their time column and write the result back.

    :param input_file: Path of the CSV file to sort.
    :param entry_format: Dict mapping field roles to column indices; one of
        ``{t_pos, u_pos, v_pos}``, ``{t_pos, link_duration_pos, u_pos, v_pos}``
        or ``{b_pos, e_pos, u_pos, v_pos}``.
    :param output: Path of the sorted CSV; defaults to overwriting *input_file*.
    :param kwargs: Optional overrides for the option defaults below
        (``delimiter``, ``ignore_header``, ``time_is_datetime``, ...).
    :return: None; the sorted CSV is written to *output*.
    """
    # Count rows once up front so tqdm can show a meaningful progress bar.
    with open(input_file) as ipt:
        options = {'delimiter': ',',
                   'is_link_stream': False,
                   'is_directed': False,
                   'nrows': sum(1 for _ in ipt),
                   'link_duration': False,
                   'order_sgf': False,
                   'ignore_header': True,
                   'nodes_to_label': False,
                   'time_is_datetime': False,
                   'delta': None,
                   }
    options.update(kwargs)

    list_lines = []
    # NOTE: named 'src' to avoid shadowing the builtin ``input``.
    with open(input_file, 'r') as src:
        reader = csv.reader(src, delimiter=options['delimiter'])
        if options['ignore_header']:
            next(reader, None)
        for line in tqdm(reader, desc='Reading CSV before sorting', total=options['nrows']):
            list_lines.append(line)

    # The column holding the sort key depends on the entry format:
    # 3 fields -> t_pos; 4 fields -> t_pos (link duration form) or b_pos
    # (interval form). Any other shape leaves the rows unsorted, matching
    # the historical behavior.
    sort_pos = None
    if len(entry_format) == 3:
        sort_pos = entry_format['t_pos']
    elif len(entry_format) == 4 and 'link_duration_pos' in entry_format:
        sort_pos = entry_format['t_pos']
    elif len(entry_format) == 4 and 'b_pos' in entry_format:
        sort_pos = entry_format['b_pos']

    if sort_pos is not None:
        if options['time_is_datetime']:
            list_lines.sort(key=lambda x: datetime_to_timestamp(x[sort_pos]))
        else:
            list_lines.sort(key=lambda x: float(x[sort_pos]))

    if output is None:
        output = input_file
    # Separate name for the handle so the ``output`` path is not clobbered.
    with open(output, 'w', newline='') as dst:
        writer = csv.writer(dst, delimiter=options['delimiter'])
        for line in tqdm(list_lines, desc='Writing CSV'):
            writer.writerow(line)
| 37.76662 | 116 | 0.514213 |
a7d1a813715f460f072307e6616ade70a0637f8e | 3,119 | py | Python | hard/282-expression-add-operators.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 2 | 2021-03-14T11:38:26.000Z | 2021-03-14T11:38:30.000Z | hard/282-expression-add-operators.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | null | null | null | hard/282-expression-add-operators.py | wanglongjiang/leetcode | c61d2e719e81575cfb5bde9d64e15cee7cf01ef3 | [
"MIT"
] | 1 | 2022-01-17T19:33:23.000Z | 2022-01-17T19:33:23.000Z | '''
给表达式添加运算符
给定一个仅包含数字 0-9 的字符串和一个目标值,在数字之间添加 二元 运算符(不是一元)+、- 或 * ,返回所有能够得到目标值的表达式。
示例 1:
输入: num = "123", target = 6
输出: ["1+2+3", "1*2*3"]
示例 2:
输入: num = "232", target = 8
输出: ["2*3+2", "2+3*2"]
示例 3:
输入: num = "105", target = 5
输出: ["1*0+5","10-5"]
示例 4:
输入: num = "00", target = 0
输出: ["0+0", "0-0", "0*0"]
示例 5:
输入: num = "3456237490", target = 9191
输出: []
提示:
0 <= num.length <= 10
num 仅含数字
'''
from typing import List
'''
思路1,暴力回溯
一个长度为n的数字字符串,可供插入运算符的位置有n-1个,运算符有3种,再加上没有运算符,故所有可能的表达式有4^(n-1)个。
根据提示,n最大是10,所有可能的表达式约2.6*10^5个,可以尝试暴力计算一下。
时间复杂度:O(4^(n-1)*20),共生成4^(n-1)个表达式,每个表达式计算时间按长度计算约20
空间复杂度:O(n)
'''
class Solution:
    def addOperators(self, num: str, target: int) -> List[str]:
        """Return every expression formed by inserting binary '+', '-' or '*'
        between the digits of ``num`` that evaluates to ``target``.

        Operands may span several digits but must not have leading zeros
        (a lone '0' is allowed).
        """
        n = len(num)
        if n == 0:
            return []
        if n == 1:
            if int(num) == target:
                return [num]
            else:
                return []

        # Evaluate the token list (operand, op, operand, ...) left to right
        # with standard precedence: '*' binds tighter than '+'/'-'.
        # BUGFIX: the previous stack-based evaluation processed '+'/'-'
        # right-to-left, so "1-2+3" evaluated to -4 instead of 2.
        def isTarget(ops):
            total = 0               # value of everything before the pending term
            pending = int(ops[0])   # current multiplicative term
            i = 1
            m = len(ops)
            while i < m:
                op, operand = ops[i], int(ops[i + 1])
                if op == '*':
                    pending *= operand
                else:
                    total += pending
                    pending = operand if op == '+' else -operand
                i += 2
            return total + pending == target

        # Backtrack over every way of splitting the remaining digits into
        # operands joined by one of the three operators.
        def backtrack(index, ops):
            for op in ('+', '-', '*'):
                ops.append(op)
                if num[index] == '0':  # a '0' here cannot start a longer operand
                    ops.append('0')
                    if index == n - 1:
                        if isTarget(ops):
                            ans.append(''.join(ops))
                    else:
                        backtrack(index + 1, ops)
                    ops.pop()
                else:
                    for i in range(index, n):  # operand spans num[index..i]
                        if i == n - 1:
                            ops.append(num[index:])
                            if isTarget(ops):
                                ans.append(''.join(ops))
                            ops.pop()
                        else:
                            ops.append(num[index:i + 1])
                            backtrack(i + 1, ops)
                            ops.pop()
                ops.pop()

        ans = []
        if num[0] == '0':  # a leading '0' must stand alone as the first operand
            backtrack(1, ['0'])
        else:
            if int(num) == target:
                ans.append(num)
            for i in range(1, n):  # try every prefix as the first operand
                backtrack(i, [num[:i]])
        return ans
# Smoke-test the solver with the examples from the problem statement.
s = Solution()
for digits, goal in (("00", 0), ("123", 6), ("232", 8), ("105", 5), ("3456237490", 9191)):
    print(s.addOperators(num=digits, target=goal))
| 26.65812 | 70 | 0.422571 |
bff73787f62ae8d062f94c30fff70a26cea59a0d | 12,089 | py | Python | modules/misc.py | RotivV/Rotsong | 67b28986f2aa8ec711a4bcd831448208049daedc | [
"MIT"
] | null | null | null | modules/misc.py | RotivV/Rotsong | 67b28986f2aa8ec711a4bcd831448208049daedc | [
"MIT"
] | null | null | null | modules/misc.py | RotivV/Rotsong | 67b28986f2aa8ec711a4bcd831448208049daedc | [
"MIT"
] | 1 | 2022-01-26T22:01:20.000Z | 2022-01-26T22:01:20.000Z | import disnake
from disnake.ext import commands
import asyncio
from typing import Optional
from aiohttp import ClientSession
from utils.client import BotCore
from utils.music.converters import time_format, URL_REG
import psutil
import humanize
from itertools import cycle
from random import shuffle
from os import getpid
import platform
desc_prefix = "🔰 [Outros] 🔰 | "
class Misc(commands.Cog):
    """Miscellaneous commands (bot info, invite, ping, site, avatar) plus the
    background task that rotates the bot's status presence."""

    def __init__(self, bot: BotCore):
        self.bot = bot
        self.source_owner: Optional[disnake.User] = None
        self.activities = None
        # Background task that rotates the presence forever (cancelled on unload).
        self.task = self.bot.loop.create_task(self.presences())

    def placeholders(self, text: str):
        """Substitute {users}/{playing}/{guilds}/{uptime} tokens in a presence string."""
        if not text:
            return ""
        return text \
            .replace("{users}", str(len([m for m in self.bot.users if not m.bot]))) \
            .replace("{playing}", str(len(self.bot.music.players))) \
            .replace("{guilds}", str(len(self.bot.guilds))) \
            .replace("{uptime}", time_format((disnake.utils.utcnow() - self.bot.uptime).total_seconds() * 1000,
                                             use_names=True))

    async def presences(self):
        """Loop forever, cycling through the configured presences."""
        if not self.activities:
            activities = []
            # Presence strings come from config, '||'-separated per type.
            for i in self.bot.config.get("LISTENING_PRESENCES", "").split("||"):
                if i:
                    activities.append({"name":i, "type": "listening"})
            for i in self.bot.config.get("WATCHING_PRESENCES", "").split("||"):
                if i:
                    activities.append({"name": i, "type": "watching"})
            for i in self.bot.config.get("PLAYING_PRESENCES", "").split("||"):
                if i:
                    activities.append({"name": i, "type": "playing"})
            shuffle(activities)
            self.activities = cycle(activities)

        while True:
            await self.bot.wait_until_ready()
            activity_data = next(self.activities)
            if activity_data["type"] == "listening":
                activity = disnake.Activity(type=disnake.ActivityType.listening, name=self.placeholders(activity_data["name"]))
            elif activity_data["type"] == "watching":
                activity = disnake.Activity(type=disnake.ActivityType.watching, name=self.placeholders(activity_data["name"]))
            else:
                activity = disnake.Game(name=self.placeholders(activity_data["name"]))
            await self.bot.change_presence(activity=activity)
            await asyncio.sleep(self.bot.config["PRESENCE_INTERVAL"])

    @commands.Cog.listener("on_guild_join")
    async def guild_add(self, guild: disnake.Guild):
        """Greet a newly joined guild in its system channel, if writable."""
        if not guild.system_channel or not guild.system_channel.permissions_for(guild.me).send_messages:
            return
        prefix = (await self.bot.db.get_data(guild.id, db_name="guilds"))["prefix"] or self.bot.default_prefix
        embed = disnake.Embed(
            description="Olá! Para ver todos os meus comandos use **/**\n\n",
            color=self.bot.get_color(guild.me)
        )
        # Mention the /setup command only if it is actually registered.
        if cmd:=self.bot.get_slash_command("setup"):
            embed.description += f"Caso queira, use o comando **/{cmd.name}** para criar um canal dedicado para pedir " \
                                 "músicas sem comandos e deixar o music player fixo no canal.\n\n"
        embed.description += f"Caso os comandos de barra (/) não apareçam, use o comando:\n{prefix}syncguild"
        await guild.system_channel.send(embed=embed)

    @commands.slash_command(description=f"{desc_prefix}Exibir informações sobre mim.")
    async def about(
            self,
            inter: disnake.AppCmdInter,
            hidden: bool = commands.Param(name="modo_oculto", description="Não exibir a mensagem do comando", default=False)
    ):
        """Show runtime stats (guilds, players, versions, RAM, uptime) and links."""
        if not self.source_owner:
            self.source_owner = await self.bot.get_or_fetch_user(935379030360871004)

        ram_usage = humanize.naturalsize(psutil.Process(getpid()).memory_info().rss)

        embed = disnake.Embed(
            description=f"**Sobre mim:**\n\n"
                        f"> **Estou em:** `{len(self.bot.guilds)} servidor(es)`\n",
            color=self.bot.get_color(inter.guild.me)
        )
        if self.bot.music.players:
            embed.description += f"> **Players ativos:** `{len(self.bot.music.players)}`\n"
        if self.bot.commit:
            embed.description += f"> **Versão atual:** [`3.0`]\n"
        embed.description += f"> **Versão do Python:** `{platform.python_version()}`\n"\
                             f"> **Versão do Disnake:** `{disnake.__version__}`\n" \
                             f"> **Ping:** `{round(self.bot.latency * 1000)}ms`\n" \
                             f"> **Uso de RAM:** `{ram_usage}`\n" \
                             f"> **Online há:** `{time_format((disnake.utils.utcnow() - self.bot.uptime).total_seconds()*1000)}`\n"
        try:
            embed.set_thumbnail(url=self.bot.user.avatar.with_static_format("png").url)
        except AttributeError:
            # Bot has no custom avatar.
            pass

        guild_data = await self.bot.db.get_data(inter.guild.id, db_name="guilds")
        prefix = guild_data["prefix"] or self.bot.default_prefix
        if self.bot.default_prefix and not self.bot.config["INTERACTION_COMMAND_ONLY"]:
            embed.description += f"> **Prefixo:** {prefix}\n"

        links = "[`[Instagram]`](https://www.instagram.com/vitao_aeee/?hl=pt-br)"
        if (await self.bot.application_info()).bot_public:
            links = f"[`[Convite]`](https://discord.com/api/oauth2/authorize?client_id={self.bot.user.id}&permissions=" \
                    f"8&scope=bot%20applications.commands) **|** {links}"
        if self.bot.config["SUPPORT_SERVER"]:
            links += f" **|** [`[Suporte]`]({self.bot.config['SUPPORT_SERVER']})"
        embed.description += f"> {links}\n"

        try:
            avatar = self.bot.owner.avatar.with_static_format("png").url
        except AttributeError:
            avatar = self.bot.owner.default_avatar.with_static_format("png").url

        # BUGFIX: ``embed.footer`` returns a transient proxy, so appending to
        # ``embed.footer.text`` never updated the embed. Build the final text
        # first and call set_footer exactly once.
        footer_text = f"Dev: {self.bot.owner}"
        if self.bot.config["HIDE_SOURCE_OWNER"] is not False and self.bot.owner.id == self.source_owner.id:
            footer_text += f" | Source by: {self.source_owner}"
        embed.set_footer(
            icon_url=avatar,
            text=footer_text
        )

        await inter.send(embed=embed, ephemeral=hidden)

    @commands.slash_command(description="🔰 [Outros] 🔰 | Exibir meu link de convite.")
    async def invite(self, inter: disnake.ApplicationCommandInteraction):
        """Reply (ephemeral) with the bot's OAuth2 invite link."""
        # build the embed
        embed = disnake.Embed(
            colour=disnake.Colour.dark_red(),
            description=f"[**Clique aqui**](https://discord.com/api/oauth2/authorize?client_id=923604031669157971&permissions=8&scope=bot%20applications.commands) para me adicionar no seu servidor.")
        # build the view
        view = disnake.ui.View()
        # build the button
        button = disnake.ui.Button(label='👤 LINK',
        style = disnake.ButtonStyle.link, url = 'https://discord.com/api/oauth2/authorize?client_id=923604031669157971&permissions=8&scope=bot%20applications.commands')
        view.add_item(button)
        await inter.send(embed = embed, view = view ,ephemeral=True)

    @commands.slash_command(description="🔰 [Outros] 🔰 | Exibir meu ping/latência.")
    async def ping(self, inter: disnake.ApplicationCommandInteraction):
        """Reply (ephemeral) with the current websocket latency."""
        await inter.send(
            embed = disnake.Embed(
                colour=self.bot.get_color(inter.guild.me),
                description=f"> <:network:935233918926458931> ┃ **Ping em:** `{round(self.bot.latency * 1000)}ms`"
            ),
            ephemeral=True
        )

    @commands.slash_command(description="🔰 [Outros] 🔰 | Exibir o site do bot.")
    async def site(self, inter: disnake.ApplicationCommandInteraction):
        """Reply (ephemeral) with a link button to the bot's website."""
        # build the embed
        embed = disnake.Embed(
            colour=disnake.Colour.dark_red(),
            description=f"[**Clique aqui**](https://rotbots.netlify.app/) para ver meu site.")
        # build the view
        view = disnake.ui.View()
        # build the button
        button = disnake.ui.Button(label='🌐 SITE',
        style = disnake.ButtonStyle.link, url = 'https://rotbots.netlify.app/')
        view.add_item(button)
        await inter.send(embed = embed, view = view ,ephemeral=True)

    @commands.user_command(name="avatar")
    async def avatar(self, inter: disnake.UserCommandInteraction):
        """Show the target user's avatars (server/global) and banner, if any."""
        embeds = []
        assets = {}
        # Re-fetch humans to get banner data; cached bot users lack it.
        user = await self.bot.fetch_user(inter.target.id) if not inter.target.bot else self.bot.get_user(
            inter.target.id)

        if inter.target.guild_avatar:
            assets["Avatar (Server)"] = inter.target.guild_avatar.with_static_format("png")
        # NOTE(review): assumes user.avatar is set; a default-avatar user would
        # raise AttributeError here — confirm.
        assets["Avatar (User)"] = user.avatar.with_static_format("png")
        if user.banner:
            assets["Banner"] = user.banner.with_static_format("png")

        for name, asset in assets.items():
            embed = disnake.Embed(description=f"{inter.target.mention} **[{name}]({asset.with_size(2048).url})**",
                                  color=self.bot.get_color(inter.guild.me))
            embed.set_image(asset.with_size(256).url)
            embeds.append(embed)

        await inter.send(embeds=embeds, ephemeral=True)

    def cog_unload(self):
        """Cancel the presence-rotation task when the cog is unloaded."""
        try:
            self.task.cancel()
        except Exception:
            pass
class GuildLog(commands.Cog):
    """Reports, via a Discord webhook, whenever the bot is added to or removed from a guild."""
    def __init__(self, bot: BotCore):
        self.bot = bot
        # Accept the configured log webhook only if it looks like a URL;
        # an empty hook_url disables the notifications below.
        if URL_REG.match(bot.config["BOT_ADD_REMOVE_LOG"]):
            hook_url = bot.config["BOT_ADD_REMOVE_LOG"]
        else:
            print("URL do webhook inválido (para envio de logs ao adicionar/remover bot).")
            hook_url = ""
        self.hook_url: str = hook_url
    @commands.Cog.listener()
    async def on_guild_remove(self, guild: disnake.Guild):
        """Log to console and webhook that the bot was removed from a guild."""
        print(f"Removido do servidor: {guild.name} - [{guild.id}]")
        if not self.hook_url:
            return
        embed = disnake.Embed(
            description=f"**Me removeram do servidor:**\n"
                        f"```{guild.name}```\n"
                        f"**ID:** `{guild.id}`",
            color=disnake.Colour.red()
        )
        try:
            embed.set_thumbnail(url=guild.icon.replace(static_format="png").url)
        except AttributeError:
            # Guild has no icon set.
            pass
        await self.send_hook(self.bot.owner.mention, embed=embed)
    @commands.Cog.listener()
    async def on_guild_join(self, guild: disnake.Guild):
        """Log to console and webhook that the bot joined a guild, with guild stats."""
        print(f"Novo servidor: {guild.name} - [{guild.id}]")
        if not self.hook_url:
            return
        created_at = int(guild.created_at.timestamp())
        embed =disnake.Embed(
            description="__**Me adicionaram em um novo servidor:**__\n"
                        f"```{guild.name}```\n"
                        f"**ID:** `{guild.id}`\n"
                        f"**Dono:** `{guild.owner}`\n"
                        f"**Criado em:** <t:{created_at}:f> - <t:{created_at}:R>\n"
                        f"**Nível de verificação:** `{guild.verification_level or 'nenhuma'}`\n"
                        f"**Membros:** `{len([m for m in guild.members if not m.bot])}`\n"
                        f"**Bots:** `{len([m for m in guild.members if m.bot])}`\n",
            color=disnake.Colour.green()
        )
        try:
            embed.set_thumbnail(url=guild.icon.replace(static_format="png").url)
        except AttributeError:
            # Guild has no icon set.
            pass
        await self.send_hook(self.bot.owner.mention, embed=embed)
    async def send_hook(self, content="", *, embed: disnake.Embed=None):
        """POST the given content/embed to the configured webhook using a short-lived session."""
        async with ClientSession() as session:
            webhook = disnake.Webhook.from_url(self.hook_url, session=session)
            await webhook.send(
                content=content,
                username=self.bot.user.name,
                avatar_url=self.bot.user.avatar.replace(static_format='png').url,
                embed=embed
            )
def setup(bot: BotCore):
    """Extension entry point: register this module's cogs on the bot."""
    for cog_cls in (Misc, GuildLog):
        bot.add_cog(cog_cls(bot))
| 36.412651 | 192 | 0.59277 |
7159f801d7246094839eee35026d0996210a168a | 720 | py | Python | Guess_Program.py | linuxbytes/GuessNumber | 1e26280466675ecdef2a4d22b7a4a56a049631bd | [
"Unlicense"
] | null | null | null | Guess_Program.py | linuxbytes/GuessNumber | 1e26280466675ecdef2a4d22b7a4a56a049631bd | [
"Unlicense"
] | null | null | null | Guess_Program.py | linuxbytes/GuessNumber | 1e26280466675ecdef2a4d22b7a4a56a049631bd | [
"Unlicense"
] | null | null | null | import random
# Console banner for the game.
print('----------------------------')
print(' Guess That Number Game ')
print('----------------------------')
print()

the_number = random.randint(0, 100)  # secret number, bounds inclusive
guess = -1  # sentinel so the loop body runs at least once
name = input('Player, Please your name')

while guess != the_number:
    guess_test = input('Guess a number between 0 and 100: ')
    # Robustness fix: re-prompt on non-numeric input instead of crashing
    # with an unhandled ValueError.
    try:
        guess = int(guess_test)
    except ValueError:
        print('Hey {} that was not a number, try again !!!'.format(name))
        continue
    if guess < the_number:
        print('Hey {} Your Number {} was too Low !!!'.format(name, guess))
    elif guess > the_number:
        print('Hey {} Your Number {} was too High !!!'.format(name, guess))
    else:
        print('Hey {} you wish'.format(name))

print('done')
| 21.818182 | 75 | 0.579167 |
1a9d81268b04798828aa306e650d0b9028230b64 | 1,517 | py | Python | emlearn/cgen.py | bwaldt/emtrees | e3a164902a548cc0a6fa649bb2d65fcf2e594b35 | [
"MIT"
] | 41 | 2018-10-24T09:30:29.000Z | 2019-03-08T18:34:01.000Z | emlearn/cgen.py | bwaldt/emtrees | e3a164902a548cc0a6fa649bb2d65fcf2e594b35 | [
"MIT"
] | 4 | 2018-11-02T11:12:26.000Z | 2019-03-07T08:09:09.000Z | emlearn/cgen.py | bwaldt/emtrees | e3a164902a548cc0a6fa649bb2d65fcf2e594b35 | [
"MIT"
] | 3 | 2018-04-04T08:28:37.000Z | 2018-09-21T22:30:22.000Z |
"""Utilities to generate C code"""
def struct_init(*args):
    """Render a C struct/array initializer from the given values.

    >>> from emlearn import cgen
    >>> cgen.struct_init(1, 2, 3)
    '{ 1, 2, 3 }'
    """
    # NOTE: takes the values as positional args; pass a list with *values.
    return '{ ' + ', '.join(str(a) for a in args) + ' }'

def constant(val, dtype='float'):
    """Render a literal C value; floats get 6 decimals and an 'f' suffix.

    >>> from emlearn import cgen
    >>> cgen.constant(3.14)
    '3.140000f'
    """
    if dtype == 'float':
        return "{:.6f}f".format(val)
    else:
        return str(val)

def constant_declare(name, val, dtype='int'):
    """Render a named constant declaration, e.g. ``static const int n = 3;``."""
    v = constant(val, dtype=dtype)
    return f'static const {dtype} {name} = {v}; '

def array_declare(name, size=None, dtype='float', modifiers='static const',
        values=None, end='', indent=''):
    """
    Declare and optionally initialize an array.

    >>> from emlearn import cgen
    >>> cgen.array_declare("declareonly", 10)
    'static const float declareonly[10];'

    Also initialize it (size is inferred from the values when omitted).

    >>> from emlearn import cgen
    >>> cgen.array_declare("initialized", dtype='int', modifiers='const', values=[1, 2, 3])
    'const int initialized[3] = { 1, 2, 3 };'
    """
    if values is not None:
        if size is None:
            size = len(values)
        assert size == len(values), 'size does not match length'

    init = ''
    if values is not None:
        init_values = ', '.join(constant(v, dtype) for v in values)
        init = ' = {{ {init_values} }}'.format(**locals())

    return '{indent}{modifiers} {dtype} {name}[{size}]{init};{end}'.format(**locals())
67dbac574843a18d91a2d16dd9cbf3448d71e4df | 1,633 | py | Python | python-twisted/examples/subscribe_group.py | parasyte/pubnub-python | 4a44c563ea8af7211197d166596be41ede05c179 | [
"MIT"
] | null | null | null | python-twisted/examples/subscribe_group.py | parasyte/pubnub-python | 4a44c563ea8af7211197d166596be41ede05c179 | [
"MIT"
] | null | null | null | python-twisted/examples/subscribe_group.py | parasyte/pubnub-python | 4a44c563ea8af7211197d166596be41ede05c179 | [
"MIT"
] | null | null | null | ## www.pubnub.com - PubNub Real-time push service in the cloud.
# coding=utf8
## PubNub Real-time Push APIs and Notifications Framework
## Copyright (c) 2010 Stephen Blum
## http://www.pubnub.com/
import sys
from Pubnub import Pubnub
# CLI overrides: argv[1..5] = publish key, subscribe key, secret key,
# cipher key, ssl flag; each falls back to a demo default when absent.
publish_key = len(sys.argv) > 1 and sys.argv[1] or 'demo'
subscribe_key = len(sys.argv) > 2 and sys.argv[2] or 'demo'
secret_key = len(sys.argv) > 3 and sys.argv[3] or 'demo'
cipher_key = len(sys.argv) > 4 and sys.argv[4] or 'abcd'
ssl_on = len(sys.argv) > 5 and bool(sys.argv[5]) or False
## -----------------------------------------------------------------------
## Initiate Pubnub State
## -----------------------------------------------------------------------
# daemon=False keeps the client's worker threads non-daemonic so the
# process stays alive while subscribed.
pubnub = Pubnub(publish_key=publish_key, subscribe_key=subscribe_key,
                secret_key=secret_key, cipher_key=cipher_key, ssl_on=ssl_on, daemon=False)
channel = 'ab'  # demo channel name
# Asynchronous usage
def callback(message, channel):
    """Print a message received on a subscribed channel."""
    print('{0} , {1}'.format(message, channel))

def error(message):
    """Print a subscription error."""
    print('ERROR : {0}'.format(message))

def connect(message):
    """Print a connect notification."""
    print('CONNECTED {0}'.format(message))

def reconnect(message):
    """Print a reconnect notification."""
    print('RECONNECTED {0}'.format(message))

def disconnect(message):
    """Print a disconnect notification."""
    print('DISCONNECTED {0}'.format(message))
# Add channel "a" to channel group 'abc' and print the API response
# (Python 2 print statement — this example script targets Python 2).
print pubnub.channel_group_add_channel(channel_group='abc', channel="a")
# NOTE(review): error=callback looks like it was meant to be error=error — confirm.
pubnub.subscribe_group(channel_groups='abc', callback=callback, error=callback,
                       connect=connect, reconnect=reconnect, disconnect=disconnect)
#pubnub.subscribe(channels='d', callback=callback, error=callback,
#                 connect=connect, reconnect=reconnect, disconnect=disconnect)
# Start the PubNub event loop (blocks until stopped).
pubnub.start()
| 28.649123 | 90 | 0.642376 |
a02fe67deb1bf3c9d507fc4ec86c9499bd0eaaf1 | 1,402 | py | Python | spirit/forms/comment_flag.py | Si-elegans/Web-based_GUI_Tools | 58a9b7a76bc46467554192a38ff5329a94e2b627 | [
"Apache-2.0"
] | 3 | 2017-12-01T08:17:38.000Z | 2021-01-29T15:40:06.000Z | spirit/forms/comment_flag.py | SwiftChina/Spirit | 3ab129aa062b69eb970be525c66a1e2445e29206 | [
"MIT"
] | 9 | 2020-06-05T17:44:02.000Z | 2022-01-13T00:42:34.000Z | spirit/forms/comment_flag.py | SwiftChina/Spirit | 3ab129aa062b69eb970be525c66a1e2445e29206 | [
"MIT"
] | 1 | 2020-11-08T21:47:32.000Z | 2020-11-08T21:47:32.000Z | #-*- coding: utf-8 -*-
from django import forms
from django.utils.translation import ugettext as _
from django.db import IntegrityError
from django.utils import timezone
from spirit.models.comment_flag import Flag, CommentFlag
class FlagForm(forms.ModelForm):
    """Form for a user to flag a comment; saving also bumps the comment's aggregate CommentFlag."""
    class Meta:
        model = Flag
        fields = ['reason', 'body']
    def __init__(self, user=None, comment=None, *args, **kwargs):
        # user/comment are injected by the view rather than submitted fields.
        super(FlagForm, self).__init__(*args, **kwargs)
        self.user = user
        self.comment = comment
    def clean(self):
        """Reject a duplicate flag by the same user on the same comment."""
        cleaned_data = super(FlagForm, self).clean()
        flag = Flag.objects.filter(user=self.user,
                                   comment=self.comment)
        if flag.exists():
            # Do this since some of the unique_together fields are excluded.
            raise forms.ValidationError(_("This flag already exists"))
        return cleaned_data
    def save(self, commit=True):
        """Save the flag and create/refresh the per-comment CommentFlag record."""
        if not self.instance.pk:
            # First save: attach the owner and target comment.
            self.instance.user = self.user
            self.instance.comment = self.comment
        # TODO: use update_or_create on django 1.7
        try:
            CommentFlag.objects.create(comment=self.comment)
        except IntegrityError:
            # CommentFlag already exists for this comment: just touch its date.
            CommentFlag.objects.filter(comment=self.comment)\
                .update(date=timezone.now())
        return super(FlagForm, self).save(commit)
bb1aeb7e3ba83fce29c9dc0ae9d27fb910ec61c3 | 18,146 | py | Python | cassiopeia/TreeSolver/reconstruct_sim_tree.py | Lioscro/Cassiopeia | fa630e167b3d8e6fb1c88740dff71130224ca54c | [
"MIT"
] | null | null | null | cassiopeia/TreeSolver/reconstruct_sim_tree.py | Lioscro/Cassiopeia | fa630e167b3d8e6fb1c88740dff71130224ca54c | [
"MIT"
] | null | null | null | cassiopeia/TreeSolver/reconstruct_sim_tree.py | Lioscro/Cassiopeia | fa630e167b3d8e6fb1c88740dff71130224ca54c | [
"MIT"
] | null | null | null | from __future__ import division
from __future__ import print_function
import subprocess
import time
from string import ascii_uppercase
import numpy as np
import pandas as pd
import pandascharm as pc
import random
from pylab import *
import pickle as pic
from pathlib import Path
import Bio.Phylo as Phylo
from Bio.Phylo.TreeConstruction import DistanceCalculator, ParsimonyScorer
from Bio import AlignIO
from Bio.Align import MultipleSeqAlignment
from Bio.Phylo.TreeConstruction import DistanceTreeConstructor
import networkx as nx
from skbio import DistanceMatrix
from skbio.tree import nj
import scipy as sp
import sys
import os
import argparse
from cassiopeia.TreeSolver.lineage_solver.lineage_solver import *
from cassiopeia.TreeSolver.simulation_tools.simulation_utils import *
from cassiopeia.TreeSolver.utilities import (
fill_in_tree,
tree_collapse,
tree_collapse2,
convert_network_to_newick_format,
newick_to_network,
)
from cassiopeia.TreeSolver.Cassiopeia_Tree import Cassiopeia_Tree
from cassiopeia.TreeSolver.Node import Node
from numba import jit
import cassiopeia as sclt
SCLT_PATH = Path(sclt.__path__[0])
@jit(parallel=True)
def compute_distance_mat(cm, C, priors=None):
    """Compute all pairwise distances between the first C rows of ``cm``.

    Returns a condensed 1-D vector of length C*(C-1)/2 ordered
    (0,1), (0,2), ..., (C-2,C-1) — the same layout as scipy's
    ``squareform`` condensed form.

    :param cm: sequence of character vectors (one per cell).
    :param C: number of rows of ``cm`` to compare.
    :param priors: optional per-position priors forwarded to ``pairwise_dist``.
    """
    dm = np.zeros(C * (C - 1) // 2, dtype=float)
    k = 0  # write cursor into the condensed vector
    for i in range(C - 1):
        for j in range(i + 1, C):
            s1 = cm[i]
            s2 = cm[j]
            dm[k] = pairwise_dist(s1, s2, priors)
            k += 1
    return dm
def pairwise_dist(s1, s2, priors=None):
    """
    Mean per-character distance between two character vectors.

    Positions where either string is '-' (missing) are skipped. Without
    priors, a mismatch involving the uncut state '0' costs 1 and a mismatch
    between two distinct non-zero states costs 2. With priors, matching
    non-missing states add the log-prior and mismatches subtract the
    log-priors of the mutated state(s). The total is divided by the number
    of positions observed in both strings (0 when none are observed).
    """
    dist = 0
    observed = 0
    for idx in range(len(s1)):
        c1, c2 = s1[idx], s2[idx]
        if c1 == "-" or c2 == "-":
            continue
        observed += 1
        if c1 == c2:
            # Matching states contribute only when priors are supplied.
            if priors:
                dist += np.log(priors[idx][str(c1)])
        elif c1 == "0" or c2 == "0":
            # Exactly one string carries a mutation at this position.
            if priors:
                mutated = c1 if c1 != "0" else c2
                dist -= np.log(priors[idx][str(mutated)])
            else:
                dist += 1
        else:
            # Two different non-zero states: both mutations count.
            if priors:
                dist -= np.log(priors[idx][str(c1)]) + np.log(priors[idx][str(c2)])
            else:
                dist += 2
    if observed == 0:
        return 0
    return dist / observed
def write_leaves_to_charmat(target_nodes, fn):
    """
    Write TARGET_NODES out as a tab-separated character matrix (used to
    convert to multistate and run Camin-Sokal).

    The header row is 'cellBC' followed by one column index per character;
    each node row is its name followed by its '|'-separated character states.
    """
    n_chars = len(target_nodes[0].char_string.split("|"))
    with open(fn, "w") as out:
        header_cols = "".join("\t" + str(i) for i in range(n_chars))
        out.write("cellBC" + header_cols + "\n")
        for node in target_nodes:
            states = node.char_string.split("|")
            row_cols = "".join("\t" + c for c in states)
            out.write(node.name + row_cols + "\n")
def unique_alignments(aln):
    """Return a MultipleSeqAlignment keeping only the first record for each distinct sequence."""
    seen_seqs = []
    kept = []
    for record in aln:
        if record.seq in seen_seqs:
            continue
        kept.append(record)
        seen_seqs.append(record.seq)
    return MultipleSeqAlignment(kept)
def nx_to_charmat(target_nodes):
    """Build a character-matrix DataFrame from '<c0|c1|...>_<name>' node strings.

    Rows are labeled 's<name>' and columns 'r0'..'r{k-1}'.
    """
    number_of_characters = len(target_nodes[0].split("|"))
    cm = pd.DataFrame(np.zeros((len(target_nodes), number_of_characters)))
    ind = []
    for i in range(len(target_nodes)):
        nr = []
        n = target_nodes[i]
        # Node format: character string and sample name joined by '_'.
        charstring, sname = n.split("_")
        ind.append("s" + sname)
        chars = charstring.split("|")
        for c in chars:
            nr.append(c)
        # NOTE(review): assigns string states into a float-initialized frame;
        # pandas may coerce numeric strings to floats here — confirm the
        # intended dtype of the resulting matrix.
        cm.iloc[i] = np.array(nr)
    cm.columns = [("r" + str(i)) for i in range(number_of_characters)]
    cm.index = ind
    return cm
def construct_weights(phy, weights_fn, write=True):
    """
    Given some binary phylip infile file path, compute the character-wise log frequencies
    and translate to the phylip scaling (0-Z) for the weights file.

    :param phy: path to a phylip-format alignment of binary characters.
    :param weights_fn: path to write the single-line weights string to.
    :param write: if True, write the weights file; the weights are returned either way.
    :return: list of single-character phylip weight codes ('0'-'9','A'-'Z').
    """
    aln = AlignIO.read(phy, "phylip")
    df = pc.from_bioalignment(aln)
    # Per-character fraction of '1' states across all samples.
    abund = df.apply(lambda x: len(x[x == "1"]) / len(x), axis=1)
    # NOTE(review): abund values lie in [0, 1], so the ``x > 1`` condition can
    # never hold and the -log2 transform is never applied — possibly meant
    # ``x < 1``; confirm intent before changing.
    labund = np.array(
        list(map(lambda x: float(-1 * np.log2(x)) if x > 1 else x, abund))
    )
    labund[labund == 0] = labund.min()
    # scale linearly to range for phylip weights
    # (maps labund.min() -> 0 and labund.max() -> 35)
    _min = 0
    _max = 35
    scaled = (_max - _min) / (labund.max() - labund.min()) * (
        labund - labund.max()
    ) + _max
    scaled = list(map(lambda x: int(x), scaled))
    # Phylip weight alphabet: '0'-'9' then 'A'-'Z' (36 levels).
    weights_range = [str(i) for i in range(10)] + [l for l in ascii_uppercase]
    weights_dict = dict(zip(range(36), weights_range))
    scaled = list(map(lambda x: weights_dict[x], scaled))
    if write:
        with open(weights_fn, "w") as f:
            f.write("".join(scaled))
    return scaled
def main():
"""
Takes in a character matrix, an algorithm, and an output file and
returns a tree in newick format.
"""
parser = argparse.ArgumentParser()
parser.add_argument("netfp", type=str, help="character_matrix")
parser.add_argument("-nj", "--neighbor-joining", action="store_true", default=False)
parser.add_argument(
"--neighbor_joining_weighted", action="store_true", default=False
)
parser.add_argument("--ilp", action="store_true", default=False)
parser.add_argument("--hybrid", action="store_true", default=False)
parser.add_argument(
"--cutoff", type=int, default=80, help="Cutoff for ILP during Hybrid algorithm"
)
parser.add_argument(
"--hybrid_lca_mode",
action="store_true",
help="Use LCA distances to transition in hybrid mode, instead of number of cells",
)
parser.add_argument(
"--time_limit", type=int, default=-1, help="Time limit for ILP convergence"
)
parser.add_argument(
"--iter_limit",
type=int,
default=-1,
help="Max number of iterations for ILP solver",
)
parser.add_argument("--greedy", "-g", action="store_true", default=False)
parser.add_argument("--camin-sokal", "-cs", action="store_true", default=False)
parser.add_argument(
"--verbose", action="store_true", default=False, help="output verbosity"
)
parser.add_argument("--mutation_map", type=str, default="")
parser.add_argument("--num_threads", type=int, default=1)
parser.add_argument("--no_triplets", action="store_true", default=False)
parser.add_argument("--max_neighborhood_size", type=str, default=3000)
parser.add_argument("--out_fp", type=str, default=None, help="optional output file")
parser.add_argument(
"--seed", type=int, default=None, help="Random seed for ILP solver"
)
args = parser.parse_args()
netfp = args.netfp
outfp = args.out_fp
verbose = args.verbose
lca_mode = args.hybrid_lca_mode
if lca_mode:
lca_cutoff = args.cutoff
cell_cutoff = None
else:
cell_cutoff = args.cutoff
lca_cutoff = None
time_limit = args.time_limit
iter_limit = args.iter_limit
num_threads = args.num_threads
max_neighborhood_size = args.max_neighborhood_size
seed = args.seed
if seed is not None:
random.seed(seed)
np.random.seed(seed)
score_triplets = not args.no_triplets
prior_probs = None
if args.mutation_map != "":
prior_probs = pic.load(open(args.mutation_map, "rb"))
name = netfp.split("/")[-1]
stem = ".".join(name.split(".")[:-1])
true_network = nx.read_gpickle(netfp)
if isinstance(true_network, Cassiopeia_Tree):
true_network = true_network.get_network()
target_nodes = get_leaves_of_tree(true_network)
target_nodes_uniq = []
seen_charstrings = []
for t in target_nodes:
if t.char_string not in seen_charstrings:
seen_charstrings.append(t.char_string)
target_nodes_uniq.append(t)
if args.greedy:
if verbose:
print(
"Running Greedy Algorithm on " + str(len(target_nodes_uniq)) + " Cells"
)
reconstructed_network_greedy = solve_lineage_instance(
target_nodes_uniq, method="greedy", prior_probabilities=prior_probs
)
net = reconstructed_network_greedy[0]
if outfp is None:
outfp = name.replace("true", "greedy")
pic.dump(net, open(outfp, "wb"))
elif args.hybrid:
if verbose:
print(
"Running Hybrid Algorithm on " + str(len(target_nodes_uniq)) + " Cells"
)
print(
"Parameters: ILP on sets of "
+ str(cutoff)
+ " cells "
+ str(time_limit)
+ "s to complete optimization"
)
reconstructed_network_hybrid = solve_lineage_instance(
target_nodes_uniq,
method="hybrid",
hybrid_cell_cutoff=cell_cutoff,
hybrid_lca_cutoff=lca_cutoff,
prior_probabilities=prior_probs,
time_limit=time_limit,
threads=num_threads,
max_neighborhood_size=max_neighborhood_size,
seed=seed,
num_iter=iter_limit,
)
net = reconstructed_network_hybrid[0]
if outfp is None:
outfp = name.replace("true", "hybrid")
pic.dump(net, open(outfp, "wb"))
elif args.ilp:
if verbose:
print(
"Running Hybrid Algorithm on " + str(len(target_nodes_uniq)) + " Cells"
)
print(
"Parameters: ILP on sets of "
+ str(cutoff)
+ " cells "
+ str(time_limit)
+ "s to complete optimization"
)
reconstructed_network_ilp = solve_lineage_instance(
target_nodes_uniq,
method="ilp",
hybrid_subset_cutoff=cutoff,
prior_probabilities=prior_probs,
time_limit=time_limit,
max_neighborhood_size=max_neighborhood_size,
seed=seed,
num_iter=iter_limit,
)
net = reconstructed_network_ilp[0]
# reconstructed_network_ilp = nx.relabel_nodes(reconstructed_network_ilp, string_to_sample)
if outfp is None:
outfp = name.replace("true", "ilp")
pic.dump(net, open(outfp, "wb"))
elif args.neighbor_joining:
if verbose:
print(
"Running Neighbor-Joining on "
+ str(len(target_nodes_uniq))
+ " Unique Cells"
)
infile = "".join(name.split(".")[:-1]) + "infile.txt"
fn = "".join(name.split(".")[:-1]) + "phylo.txt"
write_leaves_to_charmat(target_nodes_uniq, fn)
script = SCLT_PATH / "TreeSolver" / "binarize_multistate_charmat.py"
cmd = "python3.6 " + str(script) + " " + fn + " " + infile + " --relaxed"
p = subprocess.Popen(cmd, shell=True)
pid, ecode = os.waitpid(p.pid, 0)
aln = AlignIO.read(infile, "phylip-relaxed")
aln = unique_alignments(aln)
t0 = time.time()
calculator = DistanceCalculator("identity", skip_letters="?")
constructor = DistanceTreeConstructor(calculator, "nj")
tree = constructor.build_tree(aln)
tree.root_at_midpoint()
nj_net = Phylo.to_networkx(tree)
# convert labels to characters for writing to file
i = 0
rndict = {}
for n in nj_net:
if n.name is None:
rndict[n] = Node("state-node", [])
# n.name = "internal" + str(i)
# i += 1
else:
rndict[n] = Node(n.name, [])
nj_net = nx.relabel_nodes(nj_net, rndict)
# convert labels to strings, not Bio.Phylo.Clade objects
# c2str = map(lambda x: x.name, list(nj_net.nodes()))
# c2strdict = dict(zip(list(nj_net.nodes()), c2str))
# nj_net = nx.relabel_nodes(nj_net, c2strdict)
cm = pd.read_csv(fn, sep="\t", index_col=0)
cm_lookup = dict(
zip(
list(cm.apply(lambda x: "|".join([str(k) for k in x.values]), axis=1)),
cm.index.values,
)
)
nj_net = fill_in_tree(nj_net, cm)
nj_net = tree_collapse(nj_net)
for n in nj_net:
if n.char_string in cm_lookup.keys():
n.is_target = True
nj_net = Cassiopeia_Tree("neighbor-joining", network=nj_net)
if outfp is None:
outfp = name.replace("true", "nj")
pic.dump(nj_net, open(outfp, "wb"))
# Phylo.write(tree, out, 'newick')
os.system("rm " + infile)
os.system("rm " + fn)
elif args.neighbor_joining_weighted:
if verbose:
print(
"Running Neighbor-Joining with Weighted Scoring on "
+ str(len(target_nodes_uniq))
+ " Unique Cells"
)
target_node_charstrings = np.array(
[t.get_character_vec() for t in target_nodes_uniq]
)
dm = compute_distance_mat(
target_node_charstrings, len(target_node_charstrings), priors=prior_probs
)
ids = [t.name for t in target_nodes_uniq]
cm_uniq = pd.DataFrame(target_node_charstrings)
cm_uniq.index = ids
dm = sp.spatial.distance.squareform(dm)
dm = DistanceMatrix(dm, ids)
newick_str = nj(dm, result_constructor=str)
tree = newick_to_network(newick_str, cm_uniq)
nj_net = fill_in_tree(tree, cm_uniq)
nj_net = tree_collapse(nj_net)
cm_lookup = dict(
zip(
list(
cm_uniq.apply(
lambda x: "|".join([str(k) for k in x.values]), axis=1
)
),
cm_uniq.index.values,
)
)
rdict = {}
for n in nj_net:
if n.char_string in cm_lookup:
n.is_target = True
else:
n.is_target = False
nj_net = Cassiopeia_Tree("neighbor-joining", network=nj_net)
if outfp is None:
outfp = name.replace("true", "nj_weighted")
pic.dump(nj_net, open(outfp, "wb"))
elif args.camin_sokal:
if verbose:
print(
"Running Camin-Sokal Max Parsimony Algorithm on "
+ str(len(target_nodes_uniq))
+ " Unique Cells"
)
samples_to_cells = {}
indices = []
for i, n in zip(range(len(target_nodes_uniq)), target_nodes_uniq):
samples_to_cells["s" + str(i)] = n.name
indices.append(n.name)
n.name = str(i)
infile = "".join(name.split(".")[:-1]) + "_cs_infile.txt"
fn = "".join(name.split(".")[:-1]) + "_cs_phylo.txt"
weights_fn = "".join(name.split(".")[:-1]) + "_cs_weights.txt"
write_leaves_to_charmat(target_nodes_uniq, fn)
script = SCLT_PATH / "TreeSolver" / "binarize_multistate_charmat.py"
cmd = "python3.6 " + str(script) + " " + fn + " " + infile
pi = subprocess.Popen(cmd, shell=True)
pid, ecode = os.waitpid(pi.pid, 0)
weights = construct_weights(infile, weights_fn)
os.system("touch outfile")
os.system("touch outtree")
outfile = stem + "outfile.txt"
outtree = stem + "outtree.txt"
# run phylip mix with camin-sokal
responses = "." + stem + ".temp.txt"
FH = open(responses, "w")
current_dir = os.getcwd()
FH.write(infile + "\n")
FH.write("F\n" + outfile + "\n")
FH.write("P\n")
FH.write("W\n")
FH.write("Y\n")
FH.write(weights_fn + "\n")
FH.write("F\n" + outtree + "\n")
FH.close()
t0 = time.time()
cmd = "~/software/phylip-3.697/exe/mix"
cmd += " < " + responses + " > screenout1"
p = subprocess.Popen(cmd, shell=True)
pid, ecode = os.waitpid(p.pid, 0)
consense_outtree = stem + "consenseouttree.txt"
consense_outfile = stem + "consenseoutfile.txt"
FH = open(responses, "w")
FH.write(outtree + "\n")
FH.write("F\n" + consense_outfile + "\n")
FH.write("Y\n")
FH.write("F\n" + consense_outtree + "\n")
FH.close()
if verbose:
print("Computing Consensus Tree, elasped time: " + str(time.time() - t0))
cmd = "~/software/phylip-3.697/exe/consense"
cmd += " < " + responses + " > screenout"
p2 = subprocess.Popen(cmd, shell=True)
pid, ecode = os.waitpid(p2.pid, 0)
newick_str = ""
with open(consense_outtree, "r") as f:
for l in f:
l = l.strip()
newick_str += l
cm = pd.read_csv(fn, sep="\t", index_col=0, dtype=str)
cm.index = indices
cs_net = newick_to_network(newick_str, cm)
for n in cs_net:
if n.name in samples_to_cells:
n.name = samples_to_cells[n.name]
cs_net = fill_in_tree(cs_net, cm)
cs_net = tree_collapse2(cs_net)
cm_lookup = dict(
zip(
list(cm.apply(lambda x: "|".join([str(k) for k in x.values]), axis=1)),
cm.index.values,
)
)
for n in cs_net:
if n.char_string in cm_lookup.keys():
n.is_target = True
cs_net = Cassiopeia_Tree("camin-sokal", network=cs_net)
if outfp is None:
outfp = name.replace("true", "cs")
pic.dump(cs_net, open(outfp, "wb"))
os.system("rm " + outfile)
os.system("rm " + responses)
os.system("rm " + outtree)
os.system("rm " + consense_outfile)
os.system("rm " + infile)
os.system("rm " + fn)
else:
raise Exception(
"Please choose an algorithm from the list: greedy, hybrid, ilp, nj, or camin-sokal"
)
# Script entry point: run the CLI driver only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
| 28.848967 | 99 | 0.56624 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.