| max_stars_repo_path (string, length 4-286) | max_stars_repo_name (string, length 5-119) | max_stars_count (int64, 0-191k) | id (string, length 1-7) | content (string, length 6-1.03M) | content_cleaned (string, length 6-1.03M) | language (111 classes) | language_score (float64, 0.03-1) | comments (string, length 0-556k) | edu_score (float64, 0.32-5.03) | edu_int_score (int64, 0-5) |
|---|---|---|---|---|---|---|---|---|---|---|
| CTFd/plugins/dynamic_challenges/__init__.py | MarkSablan/CTFd | 8 | 6627051 |
<gh_stars>1-10
from __future__ import division # Use floating point for math calculations
import math
from flask import Blueprint
from CTFd.models import Challenges, Solves, db
from CTFd.plugins import register_plugin_assets_directory
from CTFd.plugins.challenges import CHALLENGE_CLASSES, BaseChallenge
from CTFd.plugins.migrations import upgrade
from CTFd.utils.modes import get_model
class DynamicChallenge(Challenges):
__mapper_args__ = {"polymorphic_identity": "dynamic"}
id = db.Column(
db.Integer, db.ForeignKey("challenges.id", ondelete="CASCADE"), primary_key=True
)
initial = db.Column(db.Integer, default=0)
minimum = db.Column(db.Integer, default=0)
decay = db.Column(db.Integer, default=0)
def __init__(self, *args, **kwargs):
super(DynamicChallenge, self).__init__(**kwargs)
self.initial = kwargs["value"]
class DynamicValueChallenge(BaseChallenge):
id = "dynamic" # Unique identifier used to register challenges
name = "dynamic" # Name of a challenge type
templates = { # Handlebars templates used for each aspect of challenge editing & viewing
"create": "/plugins/dynamic_challenges/assets/create.html",
"update": "/plugins/dynamic_challenges/assets/update.html",
"view": "/plugins/dynamic_challenges/assets/view.html",
}
scripts = { # Scripts that are loaded when a template is loaded
"create": "/plugins/dynamic_challenges/assets/create.js",
"update": "/plugins/dynamic_challenges/assets/update.js",
"view": "/plugins/dynamic_challenges/assets/view.js",
}
# Route at which files are accessible. This must be registered using register_plugin_assets_directory()
route = "/plugins/dynamic_challenges/assets/"
# Blueprint used to access the static_folder directory.
blueprint = Blueprint(
"dynamic_challenges",
__name__,
template_folder="templates",
static_folder="assets",
)
challenge_model = DynamicChallenge
@classmethod
def calculate_value(cls, challenge):
Model = get_model()
solve_count = (
Solves.query.join(Model, Solves.account_id == Model.id)
.filter(
Solves.challenge_id == challenge.id,
Model.hidden == False,
Model.banned == False,
)
.count()
)
# If the solve count is 0 we leave it untouched so the formula
# falls back to the initial value
if solve_count != 0:
# We subtract 1 so that the first solver still gets the maximum point value
solve_count -= 1
# It is important that this calculation takes into account floats.
# Hence this file uses from __future__ import division
value = (
((challenge.minimum - challenge.initial) / (challenge.decay ** 2))
* (solve_count ** 2)
) + challenge.initial
value = math.ceil(value)
if value < challenge.minimum:
value = challenge.minimum
challenge.value = value
db.session.commit()
return challenge
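# Worked example of the decay curve above (hypothetical numbers, not part of
# the original plugin): with initial=500, minimum=100 and decay=10, the third
# solver (solve_count == 2 after the subtraction above) is worth
# ceil(((100 - 500) / 10 ** 2) * 2 ** 2 + 500) = 484 points, and once
# solve_count - 1 reaches decay the value bottoms out at the minimum of 100
# and is clamped there for later solves.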
@classmethod
def read(cls, challenge):
"""
This method is used to access the data of a challenge in a format processable by the front end.
:param challenge:
:return: Challenge object, data dictionary to be returned to the user
"""
challenge = DynamicChallenge.query.filter_by(id=challenge.id).first()
data = {
"id": challenge.id,
"name": challenge.name,
"value": challenge.value,
"initial": challenge.initial,
"decay": challenge.decay,
"minimum": challenge.minimum,
"description": challenge.description,
"category": challenge.category,
"state": challenge.state,
"max_attempts": challenge.max_attempts,
"type": challenge.type,
"type_data": {
"id": cls.id,
"name": cls.name,
"templates": cls.templates,
"scripts": cls.scripts,
},
}
return data
@classmethod
def update(cls, challenge, request):
"""
This method is used to update the information associated with a challenge. This should be kept strictly to the
Challenges table and any child tables.
:param challenge:
:param request:
:return:
"""
data = request.form or request.get_json()
for attr, value in data.items():
# We need to set these to floats so that the next operations don't operate on strings
if attr in ("initial", "minimum", "decay"):
value = float(value)
setattr(challenge, attr, value)
return DynamicValueChallenge.calculate_value(challenge)
@classmethod
def solve(cls, user, team, challenge, request):
super().solve(user, team, challenge, request)
DynamicValueChallenge.calculate_value(challenge)
def load(app):
upgrade()
CHALLENGE_CLASSES["dynamic"] = DynamicValueChallenge
register_plugin_assets_directory(
app, base_path="/plugins/dynamic_challenges/assets/"
)
|
| en | 0.856703 |
# Use floating point for math calculations # Unique identifier used to register challenges # Name of a challenge type # Handlebars templates used for each aspect of challenge editing & viewing # Scripts that are loaded when a template is loaded # Route at which files are accessible. This must be registered using register_plugin_assets_directory() # Blueprint used to access the static_folder directory. # If the solve count is 0 we leave it untouched so the formula # falls back to the initial value # We subtract 1 so that the first solver still gets the maximum point value # It is important that this calculation takes into account floats. # Hence this file uses from __future__ import division This method is used to access the data of a challenge in a format processable by the front end. :param challenge: :return: Challenge object, data dictionary to be returned to the user This method is used to update the information associated with a challenge. This should be kept strictly to the Challenges table and any child tables. :param challenge: :param request: :return: # We need to set these to floats so that the next operations don't operate on strings
| 2.208903 | 2 |
| tests/user_test.py | eLemmings/back | 0 | 6627052 |
<filename>tests/user_test.py
'''
Test module
Executes the given API requests and generates an HTML file with their results
'''
import requests
import json
import os
from datetime import datetime
from jinja2 import Environment, FileSystemLoader
env = Environment(loader=FileSystemLoader('tests'))
template = env.get_template('test_template.html')
BASE = 'http://localhost:5000/'
class Request:
def __init__(self, method, endpoint: str, json: dict = None, token: str = None):
self.func = method
self.method = self.func.__name__.upper()
self.endpoint = endpoint
self.token = token
self.params = json
self.response = None
self.text = self.call()
def call(self):
self.response = self.func(BASE+self.endpoint, json=self.params,
headers={'Authorization': f'Bearer {self.token}'})
try:
return json.dumps(json.loads(self.response.text), indent=4)
except:
return self.response.text
def get_params(self):
return self.params
valid_json = {
'diaries': [
{
'name': 'test',
'min': 1,
'max': 5,
'date': 12312315,
'period': 'd',
'colors': ['#ffffff', '#000000', '#ff0000'],
'entries': [[
{'value': 1, 'description': 'aaa'},
{'value': 2},
{'value': 3},
{'value': 4}]]
}
]
}
invalid_json = {
'diaries': [
{
'type': 3,
'min': 1,
'max': 5,
'date': 12312315,
'colors': ['#fffff', '#000000', '#ff0000'],
'entries': [3, [2,3,4,1]]
}
]
}
register = Request(requests.post, 'register', {'nick': 'test_user',
'email': '<EMAIL>', 'password': 'password'})
token = Request(requests.post,
'login', {'email': '<EMAIL>', 'password': 'password'})
t = token.response.json().get('token', '')
share = Request(requests.put, 'share', {'index': 0}, token=t)
uuidres = Request(requests.get, 'share', token=t)
uuid = uuidres.response.json().get('shares')[0][0]
reqs = (
register,
token,
share,
uuidres,
Request(requests.get, f'share/{uuid}'),
Request(requests.get, 'user/data', token=t),
Request(requests.put, 'user/data', valid_json, token=t),
Request(requests.put, 'user/data', invalid_json, token=t),
Request(requests.get, 'user/data', token=t),
Request(requests.patch, 'user', {
'field': 'nick', 'value': 'patched1'}, token=t),
Request(requests.patch, 'user', {
'field': 'nick', 'value': 'patched2'}, token=t),
Request(requests.patch, 'user', {
'field': 'nick', 'value': 'patched3'}, token=t),
Request(requests.get, 'user', token=t),
Request(requests.delete, 'user', token=t),
)
with open('tests/test_results.html', 'w') as file:
file.write(template.render(title=str(datetime.now()), reqs=reqs))
|
| pl | 0.924692 |
Test module Executes the given API requests and generates an HTML file with their results
| 2.727388 | 3 |
| tests/unit/express/facade_tests.py | st8st8/django-oscar-paypal | 0 | 6627053 |
<reponame>st8st8/django-oscar-paypal<gh_stars>0
from decimal import Decimal as D
from unittest import TestCase
from unittest.mock import Mock, patch
from urllib.parse import parse_qs
import pytest
from oscar.apps.shipping.methods import Free
from purl import URL
from paypal.express.facade import fetch_transaction_details, get_paypal_url
from paypal.models import ExpressTransaction as Transaction
@pytest.mark.django_db
class MockedResponseTests(TestCase):
token = ''
response_body = ''
def setUp(self):
response = Mock()
response.text = self.response_body
response.status_code = 200
with patch('requests.post') as post:
post.return_value = response
self.perform_action()
self.mocked_post = post
def perform_action(self):
pass
def tearDown(self):
Transaction.objects.all().delete()
class BaseSetExpressCheckoutTests(MockedResponseTests):
def _get_paypal_params(self):
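# call_args[0] is the positional-argument tuple of the mocked requests.post
# call, so [0][1] is the urlencoded NVP body that the facade passed as the
# second positional argument.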
return parse_qs(self.mocked_post.call_args[0][1])
def assertPaypalParamEqual(self, key, value):
self.assertEqual(self._get_paypal_params()[key], [value])
def assertPaypalParamDoesNotExist(self, key):
self.assertFalse(key in self._get_paypal_params())
class SuccessfulSetExpressCheckoutTests(BaseSetExpressCheckoutTests):
token = 'EC-64699536816<PASSWORD>'
response_body = 'TOKEN=<PASSWORD>&TIMESTAMP=2012%2d03%2d26T17%3a19%3a38Z&CORRELATIONID=50a8d895e928f' \
'&ACK=Success&VERSION=60%2e0&BUILD=2649250'
def perform_action(self):
basket = Mock()
basket.id = 1
basket.currency = 'GBP'
basket.total_incl_tax = D('200')
basket.all_lines = Mock(return_value=[])
basket.offer_discounts = []
basket.voucher_discounts = []
basket.shipping_discounts = []
basket.currency = 'GBP'
methods = [Free()]
url_str = get_paypal_url(basket, methods)
self.url = URL.from_string(url_str)
def test_url_has_correct_keys(self):
self.assertTrue(self.url.has_query_param('token'))
self.assertEqual('_express-checkout', self.url.query_param('cmd'))
def test_correct_paypal_params(self):
for param in [
'LOCALECODE', 'HDRIMG', 'LANDINGPAGE', 'PAYFLOWCOLOR',
'REQCONFIRMSHIPPING', 'PAGESTYLE', 'SOLUTIONTYPE',
'BRANDNAME', 'CUSTOMERSERVICENUMBER']:
self.assertPaypalParamDoesNotExist(param)
# defaults
self.assertPaypalParamEqual('CALLBACKTIMEOUT', '3')
self.assertPaypalParamEqual('ALLOWNOTE', '1')
class ExtraPaypalSuccessfulSetExpressCheckoutTests(BaseSetExpressCheckoutTests):
token = '<PASSWORD>'
response_body = 'TOKEN=<PASSWORD>&TIMESTAMP=2012%2d03%2d26T17%3a19%3a38Z&CORRELATIONID=50a8d89<PASSWORD>8f' \
'&ACK=Success&VERSION=60%2e0&BUILD=2649250'
paypal_params = {
'CUSTOMERSERVICENUMBER': '999999999',
'SOLUTIONTYPE': 'Mark',
'LANDINGPAGE': 'Login',
'BRANDNAME': 'My Brand Name',
'PAGESTYLE': 'eee',
'HDRIMG': 'http://image.jpg',
'PAYFLOWCOLOR': '00FF00',
'LOCALECODE': 'GB',
'REQCONFIRMSHIPPING': True,
'ALLOWNOTE': False,
'CALLBACKTIMEOUT': 2
}
def perform_action(self):
basket = Mock()
basket.id = 1
basket.currency = 'GBP'
basket.total_incl_tax = D('200')
basket.all_lines = Mock(return_value=[])
basket.offer_discounts = []
basket.voucher_discounts = []
basket.shipping_discounts = []
basket.currency = 'GBP'
methods = [Free()]
url_str = get_paypal_url(basket, methods, paypal_params=self.paypal_params)
self.url = URL.from_string(url_str)
def test_correct_paypal_params(self):
self.assertTrue(self.url.has_query_param('token'))
self.assertEqual('_express-checkout', self.url.query_param('cmd'))
for key, value in self.paypal_params.items():
if isinstance(value, bool):
value = int(value)
self.assertPaypalParamEqual(key, str(value))
class SuccessfulGetExpressCheckoutTests(MockedResponseTests):
token = '<PASSWORD>'
response_body = 'TOKEN=<PASSWORD>&CHECKOUTSTATUS=PaymentActionCompleted' \
'&TIMESTAMP=2012%2d04%2d19T10%3a07%3a46Z&CORRELATIONID=7e9c5efbda3c0&ACK=Success' \
'&VERSION=88%2e0&BUILD=2808426&EMAIL=david%2e_1332854868_per%40gmail%2ecom' \
'&PAYERID=7ZTRBDFYYA47W&PAYERSTATUS=verified&FIRSTNAME=David&LASTNAME=Winterbottom' \
'&COUNTRYCODE=GB&SHIPTONAME=David%20Winterbottom&SHIPTOSTREET=1%20Main%20Terrace' \
'&SHIPTOCITY=Wolverhampton&SHIPTOSTATE=West%20Midlands&SHIPTOZIP=W12%204LQ' \
'&SHIPTOCOUNTRYCODE=GB&SHIPTOCOUNTRYNAME=United%20Kingdom&ADDRESSSTATUS=Confirmed' \
'&CURRENCYCODE=GBP&AMT=33%2e98&SHIPPINGAMT=0%2e00&HANDLINGAMT=0%2e00&TAXAMT=0%2e00' \
'&INSURANCEAMT=0%2e00&SHIPDISCAMT=0%2e00&PAYMENTREQUEST_0_CURRENCYCODE=GBP' \
'&PAYMENTREQUEST_0_AMT=33%2e98&PAYMENTREQUEST_0_SHIPPINGAMT=0%2e00' \
'&PAYMENTREQUEST_0_HANDLINGAMT=0%2e00&PAYMENTREQUEST_0_TAXAMT=0%2e00' \
'&PAYMENTREQUEST_0_INSURANCEAMT=0%2e00&PAYMENTREQUEST_0_SHIPDISCAMT=0%2e00' \
'&PAYMENTREQUEST_0_TRANSACTIONID=51963679RW630412N' \
'&PAYMENTREQUEST_0_INSURANCEOPTIONOFFERED=false' \
'&PAYMENTREQUEST_0_SHIPTONAME=David%20Winterbottom' \
'&PAYMENTREQUEST_0_SHIPTOSTREET=1%20Main%20Terrace' \
'&PAYMENTREQUEST_0_SHIPTOCITY=Wolverhampton&PAYMENTREQUEST_0_SHIPTOSTATE=West%20Midlands' \
'&PAYMENTREQUEST_0_SHIPTOZIP=W12%204LQ&PAYMENTREQUEST_0_SHIPTOCOUNTRYCODE=GB' \
'&PAYMENTREQUEST_0_SHIPTOCOUNTRYNAME=United%20Kingdom' \
'&PAYMENTREQUESTINFO_0_TRANSACTIONID=51963679RW630412N' \
'&PAYMENTREQUESTINFO_0_ERRORCODE=0' # noqa E501
def perform_action(self):
self.txn = fetch_transaction_details(self.token)
def test_token_is_extracted(self):
self.assertEqual(self.token, self.txn.token)
def test_is_successful(self):
self.assertTrue(self.txn.is_successful)
def test_ack(self):
self.assertEqual('Success', self.txn.ack)
def test_amount_is_saved(self):
self.assertEqual(D('33.98'), self.txn.amount)
def test_currency_is_saved(self):
self.assertEqual('GBP', self.txn.currency)
def test_correlation_id_is_saved(self):
self.assertEqual('7e9c5efbda3c0', self.txn.correlation_id)
def test_context(self):
ctx = self.txn.context
values = [
('ACK', ['Success']),
('LASTNAME', ['Winterbottom']),
]
for k, v in values:
self.assertEqual(v, ctx[k])
|
| it | 0.097741 |
# defaults # noqa E501
| 2.323642 | 2 |
| fdep/commands/upload.py | checkr/fdep | 9 | 6627054 |
"""Upload a file to the designated storage backend.
.. code:: bash
fdep upload <files...>
.. note:: Note that just doing ``fdep upload`` doesn't work. You have to specify the file names. We left that out intentionally to emphasize that uploading can be destructive.
"""
import os
import sys
import time
from threading import Thread
from fdep.backends import StorageBackend
from fdep.commands import ConfigRequiredMixin, SubcommandRunner
from fdep.interfaces.progressbar import TqdmProgressBar
from fdep.utils import HashHelper
from six.moves.queue import Queue
class UploadCommandRunner(SubcommandRunner, ConfigRequiredMixin):
"""Handle upload commands."""
COMMAND_NAME = 'upload'
def _task_upload(self, queue, progressbar, vector):
entry_name, path, source, sha1sum = vector
progressbar.set_title(entry_name)
result, _ = StorageBackend.execute(self.root_runner, progressbar, source, 'put_from', path)
if not result:
sys.stderr.write(self.messages.ERROR_WHILE_INSTALLING)
queue.put(False)
return
if sha1sum is not None:
new_sha1sum = HashHelper.calculate_sha1sum(path)
if sha1sum != new_sha1sum:
sys.stderr.write(self.messages.ERROR_WRONG_SHA1SUM.format(sha1sum, new_sha1sum))
os.unlink(path) # Clean up the wrong one.
queue.put(False)
return
queue.put(True)
def _parallelized_upload(self, to_upload):
queue = Queue()
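# Reserve one terminal line per upload with blank newlines, then move the
# cursor back up with ANSI "cursor up" escapes (\x1b[A) so each positioned
# tqdm progress bar below redraws on its own line.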
line_filler = lambda: sys.stdout.write('\n' * (len(to_upload) - 1)) # noqa
line_filler()
print('\x1b[A' * len(to_upload))
TqdmProgressBar.set_number_of_progressbars(len(to_upload))
threads = [
Thread(
target=self._task_upload,
args=(queue, TqdmProgressBar(position=i), v)
) for i, v in enumerate(to_upload)
]
for th in threads:
th.daemon = True
th.start()
while queue.qsize() < len(to_upload):
try:
time.sleep(0.1)
except KeyboardInterrupt:
line_filler()
return False
results = [queue.get() for _ in to_upload]
line_filler()
return False not in results
def run(self, *args, **kwargs):
if not len(args):
sys.stderr.write(self.messages.ERROR_NO_FILES_TO_UPLOAD)
self.root_runner.commands['help'].run()
return False
paths = {self.path_helper.resolve_path_to_entry(x) for x in args}
entries = [(y, self.entries[y]) for y in paths]
to_upload = []
for entry_name, entry in entries:
sha1sum = entry.get('sha1sum')
version = entry.get('version')
source = entry.get('source')
path = self.path_helper.resolve_entry_to_path(entry_name)
if version:
source_to_use = '{}_{}'.format(source, version)
else:
source_to_use = source
if os.path.exists(path):
if sha1sum is not None:
new_sha1sum = HashHelper.calculate_sha1sum(path)
entry['sha1sum'] = new_sha1sum
else:
sys.stderr.write(self.messages.ERROR_NO_SUCH_FILE_ON_DISK.format(entry_name))
return False
to_upload.append((entry_name, path, source_to_use, sha1sum))
if not self._parallelized_upload(to_upload):
return False
self.config.save()
return True
|
| en | 0.941442 |
Upload a file to the designated storage backend. .. code:: bash fdep upload <files...> .. note:: Note that just doing ``fdep upload`` doesn't work. You have to specify the file names. We left that out intentionally to emphasize that uploading can be destructive. Handle upload commands. # Clean up the wrong one. # noqa
| 2.561269 | 3 |
| tests/st/scipy_st/test_linalg.py | Ascend/mindspore | 5 | 6627055 |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""st for linalg."""
import pytest
import numpy as onp
import scipy as osp
from mindspore import context, Tensor
import mindspore.scipy as msp
from .utils import match_array
context.set_context(mode=context.PYNATIVE_MODE)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.platform_x86_cpu
@pytest.mark.env_onecard
@pytest.mark.parametrize('args', [(), (1,), (7, -1), (3, 4, 5),
(onp.ones((3, 4), dtype=onp.float32), 5, onp.random.randn(5, 2).astype(onp.float32))])
def test_block_diag(args):
"""
Feature: ALL TO ALL
Description: test cases for block_diag
Expectation: the result match scipy
"""
tensor_args = tuple([Tensor(arg) for arg in args])
ms_res = msp.linalg.block_diag(*tensor_args)
scipy_res = osp.linalg.block_diag(*args)
match_array(ms_res.asnumpy(), scipy_res)
|
| en | 0.773859 |
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ st for linalg. Feature: ALL TO ALL Description: test cases for block_diag Expectation: the result match scipy
| 2.010533 | 2 |
| build/apply_locales.py | knopp/buildroot | 2,151 | 6627056 |
<gh_stars>1000+
#!/usr/bin/env python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO: remove this script when GYP has for loops
import sys
import optparse
def main(argv):
parser = optparse.OptionParser()
usage = 'usage: %s [options ...] format_string locale_list'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-d', dest='dash_to_underscore', action="store_true",
default=False,
help='map "en-US" to "en" and "-" to "_" in locales')
(options, arglist) = parser.parse_args(argv)
if len(arglist) < 3:
print 'ERROR: need string and list of locales'
return 1
str_template = arglist[1]
locales = arglist[2:]
results = []
for locale in locales:
# For Cocoa to find the locale at runtime, it needs to use '_' instead
# of '-' (http://crbug.com/20441). Also, 'en-US' should be represented
# simply as 'en' (http://crbug.com/19165, http://crbug.com/25578).
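# e.g. with -d, template 'path/ZZLOCALE.lproj' and locales ['en-US', 'pt-BR']
# produce "'path/en.lproj' 'path/pt_BR.lproj'" on stdout.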
if options.dash_to_underscore:
if locale == 'en-US':
locale = 'en'
locale = locale.replace('-', '_')
results.append(str_template.replace('ZZLOCALE', locale))
# Quote each element so filename spaces don't mess up GYP's attempt to parse
# it into a list.
print ' '.join(["'%s'" % x for x in results])
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
| en | 0.860432 |
#!/usr/bin/env python # Copyright (c) 2009 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # TODO: remove this script when GYP has for loops # For Cocoa to find the locale at runtime, it needs to use '_' instead # of '-' (http://crbug.com/20441). Also, 'en-US' should be represented # simply as 'en' (http://crbug.com/19165, http://crbug.com/25578). # Quote each element so filename spaces don't mess up GYP's attempt to parse # it into a list.
| 2.305208 | 2 |
| users/views.py | hahaxhhsz/fadiandian | 0 | 6627057 |
# Create your views here.
from users.models import Users
from users.serializers import UsersSerializer
from django.http import Http404
from django.utils.encoding import smart_str
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
import json
# Get information for all users
class getAllUsers(APIView):
def get(self, request):
myResponse = {}
data = {}
data["userList"] = ""
try:
users = Users.objects.all()
serializer = UsersSerializer(users, many=True)
data["userList"] = serializer.data
myResponse["state"] = 1
except:
myResponse["state"] = 0
finally:
myResponse["data"] = data
return Response(myResponse)
# Register
class registered(APIView):
def post(self, request):
serializer = UsersSerializer(data=request.data)
myResponse = {}
if serializer.is_valid():
serializer.save()
myResponse["state"] = 1
myResponse["msg"] = "Successfully registered"
else:
myResponse["state"] = 0
myResponse["msg"] = smart_str(serializer.errors)
return Response(myResponse)
# Log in; an account that is already logged in cannot log in again
class login(APIView):
def post(self, request):
myResponse = {}
data = {}
try:
theUserName = request.data["userName"]
user = Users.objects.get(userName=theUserName)
serializer = UsersSerializer(user)
state = serializer.data["state"]
thePassword = serializer.data["userPassword"]
password = request.data["userPassword"]
if password == thePassword and state == 0 :
mydata = serializer.data["id"]
data["userId"] = mydata
myResponse["state"] = 1
user.state = 1
user.save()
elif state == 1 :
myResponse["state"] = 0
data["msg"] = "This account has been logged in"
else:
myResponse["state"] = 0
data["msg"] = "The account or password is incorrect"
except:
myResponse["state"] = 0
data["msg"] = "The account or password is incorrect"
finally:
myResponse["data"] = data
return Response(myResponse)
# Log out
class logout(APIView):
def post(self, request):
myResponse = {}
data = {}
try:
theId = int(request.data["id"])
user = Users.objects.get(pk=theId)
serializer = UsersSerializer(user)
state = serializer.data["state"]
if state == 1 :
user.state = 0
user.save()
myResponse["state"] = 1
data["msg"] = "Logout successfully"
else:
myResponse["state"] = 0
data["msg"] = "This account is not logged in"
except:
myResponse["state"] = 0
data["msg"] = "id error"
finally:
myResponse["data"] = data
return Response(myResponse)
# Get information for a single user
class getUserInfo(APIView):
def post(self, request):
myResponse = {}
data = {}
try:
theId = int(request.data["id"])
user = Users.objects.get(pk=theId)
serializer = UsersSerializer(user)
data["userInfo"] = serializer.data
myResponse["state"] = 1
except:
data["msg"] = "id error"
myResponse["state"] = 0
finally:
myResponse["data"] = data
return Response(myResponse)
|
| zh | 0.727815 |
# Create your views here. # Get information for all users # Register # Log in; an account that is already logged in cannot log in again # Log out # Get information for a single user
| 2.473277 | 2 |
| beginner_source/basics/optimization_tutorial.py | Lezcano/tutorials | 1 | 6627058 |
"""
`Learn the Basics <intro.html>`_ ||
`Quickstart <quickstart_tutorial.html>`_ ||
`Tensors <tensorqs_tutorial.html>`_ ||
`Datasets & DataLoaders <data_tutorial.html>`_ ||
`Transforms <transforms_tutorial.html>`_ ||
`Build Model <buildmodel_tutorial.html>`_ ||
`Autograd <autogradqs_tutorial.html>`_ ||
**Optimization** ||
`Save & Load Model <saveloadrun_tutorial.html>`_
Optimizing Model Parameters
===========================
Now that we have a model and data it's time to train, validate and test our model by optimizing its parameters on
our data. Training a model is an iterative process; in each iteration (called an *epoch*) the model makes a guess about the output, calculates
the error in its guess (*loss*), collects the derivatives of the error with respect to its parameters (as we saw in
the `previous section <autograd_tutorial.html>`_), and **optimizes** these parameters using gradient descent. For a more
detailed walkthrough of this process, check out this video on `backpropagation from 3Blue1Brown <https://www.youtube.com/watch?v=tIeHLnjs5U8>`__.
Prerequisite Code
-----------------
We load the code from the previous sections on `Datasets & DataLoaders <data_tutorial.html>`_
and `Build Model <buildmodel_tutorial.html>`_.
"""
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor()
)
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor()
)
train_dataloader = DataLoader(training_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10),
nn.ReLU()
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
model = NeuralNetwork()
##############################################
# Hyperparameters
# -----------------
#
# Hyperparameters are adjustable parameters that let you control the model optimization process.
# Different hyperparameter values can impact model training and convergence rates
# (`read more <https://pytorch.org/tutorials/beginner/hyperparameter_tuning_tutorial.html>`__ about hyperparameter tuning)
#
# We define the following hyperparameters for training:
# - **Number of Epochs** - the number of times to iterate over the dataset
# - **Batch Size** - the number of data samples propagated through the network before the parameters are updated
# - **Learning Rate** - how much to update the model's parameters at each batch/epoch. Smaller values yield slow learning speed, while large values may result in unpredictable behavior during training.
#
learning_rate = 1e-3
batch_size = 64
epochs = 5
#####################################
# Optimization Loop
# -----------------
#
# Once we set our hyperparameters, we can then train and optimize our model with an optimization loop. Each
# iteration of the optimization loop is called an **epoch**.
#
# Each epoch consists of two main parts:
# - **The Train Loop** - iterate over the training dataset and try to converge to optimal parameters.
# - **The Validation/Test Loop** - iterate over the test dataset to check if model performance is improving.
#
# Let's briefly familiarize ourselves with some of the concepts used in the training loop. Jump ahead to
# see the :ref:`full-impl-label` of the optimization loop.
#
# Loss Function
# ~~~~~~~~~~~~~~~~~
#
# When presented with some training data, our untrained network is likely not to give the correct
# answer. The **loss function** measures the degree of dissimilarity between the obtained result and the target value,
# and it is the loss function that we want to minimize during training. To calculate the loss we make a
# prediction using the inputs of our given data sample and compare it against the true data label value.
#
# Common loss functions include `nn.MSELoss <https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html#torch.nn.MSELoss>`_ (Mean Square Error) for regression tasks, and
# `nn.NLLLoss <https://pytorch.org/docs/stable/generated/torch.nn.NLLLoss.html#torch.nn.NLLLoss>`_ (Negative Log Likelihood) for classification.
# `nn.CrossEntropyLoss <https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss>`_ combines ``nn.LogSoftmax`` and ``nn.NLLLoss``.
#
# We pass our model's output logits to ``nn.CrossEntropyLoss``, which will normalize the logits and compute the prediction error.
# Initialize the loss function
loss_fn = nn.CrossEntropyLoss()
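#####################################
# As a quick illustration (not part of the original tutorial), the loss function
# can be called directly on a small batch of raw logits together with the integer
# class labels it should predict:
example_logits = torch.randn(3, 10)             # 3 samples, 10 classes (unnormalized scores)
example_labels = torch.tensor([1, 0, 4])        # ground-truth class indices
print(loss_fn(example_logits, example_labels))  # a single scalar loss tensor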
#####################################
# Optimizer
# ~~~~~~~~~~~~~~~~~
#
# Optimization is the process of adjusting model parameters to reduce model error in each training step. **Optimization algorithms** define how this process is performed (in this example we use Stochastic Gradient Descent).
# All optimization logic is encapsulated in the ``optimizer`` object. Here, we use the SGD optimizer; additionally, there are many `different optimizers <https://pytorch.org/docs/stable/optim.html>`_
# available in PyTorch such as ADAM and RMSProp, that work better for different kinds of models and data.
#
# We initialize the optimizer by registering the model's parameters that need to be trained, and passing in the learning rate hyperparameter.
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
#####################################
# Inside the training loop, optimization happens in three steps:
# * Call ``optimizer.zero_grad()`` to reset the gradients of model parameters. Gradients by default add up; to prevent double-counting, we explicitly zero them at each iteration.
# * Backpropagate the prediction loss with a call to ``loss.backward()``. PyTorch deposits the gradients of the loss w.r.t. each parameter.
# * Once we have our gradients, we call ``optimizer.step()`` to adjust the parameters by the gradients collected in the backward pass.
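#####################################
# Put together, one optimization step looks like this (a sketch only; ``X`` and
# ``y`` stand for one batch drawn from the dataloader)::
#
#     optimizer.zero_grad()      # reset accumulated gradients
#     pred = model(X)            # forward pass
#     loss = loss_fn(pred, y)    # compute the prediction error
#     loss.backward()            # backpropagate
#     optimizer.step()           # apply the parameter update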
########################################
# .. _full-impl-label:
#
# Full Implementation
# -----------------------
# We define ``train_loop`` that loops over our optimization code, and ``test_loop`` that
# evaluates the model's performance against our test data.
def train_loop(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
for batch, (X, y) in enumerate(dataloader):
# Compute prediction and loss
pred = model(X)
loss = loss_fn(pred, y)
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test_loop(dataloader, model, loss_fn):
size = len(dataloader.dataset)
num_batches = len(dataloader)
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= num_batches
correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
########################################
# We initialize the loss function and optimizer, and pass it to ``train_loop`` and ``test_loop``.
# Feel free to increase the number of epochs to track the model's improving performance.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
epochs = 10
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train_loop(train_dataloader, model, loss_fn, optimizer)
test_loop(test_dataloader, model, loss_fn)
print("Done!")
#################################################################
# Further Reading
# -----------------------
# - `Loss Functions <https://pytorch.org/docs/stable/nn.html#loss-functions>`_
# - `torch.optim <https://pytorch.org/docs/stable/optim.html>`_
# - `Warmstart Training a Model <https://pytorch.org/tutorials/recipes/recipes/warmstarting_model_using_parameters_from_a_different_model.html>`_
#
|
"""
`Learn the Basics <intro.html>`_ ||
`Quickstart <quickstart_tutorial.html>`_ ||
`Tensors <tensorqs_tutorial.html>`_ ||
`Datasets & DataLoaders <data_tutorial.html>`_ ||
`Transforms <transforms_tutorial.html>`_ ||
`Build Model <buildmodel_tutorial.html>`_ ||
`Autograd <autogradqs_tutorial.html>`_ ||
**Optimization** ||
`Save & Load Model <saveloadrun_tutorial.html>`_
Optimizing Model Parameters
===========================
Now that we have a model and data it's time to train, validate and test our model by optimizing its parameters on
our data. Training a model is an iterative process; in each iteration (called an *epoch*) the model makes a guess about the output, calculates
the error in its guess (*loss*), collects the derivatives of the error with respect to its parameters (as we saw in
the `previous section <autograd_tutorial.html>`_), and **optimizes** these parameters using gradient descent. For a more
detailed walkthrough of this process, check out this video on `backpropagation from 3Blue1Brown <https://www.youtube.com/watch?v=tIeHLnjs5U8>`__.
Prerequisite Code
-----------------
We load the code from the previous sections on `Datasets & DataLoaders <data_tutorial.html>`_
and `Build Model <buildmodel_tutorial.html>`_.
"""
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Lambda
training_data = datasets.FashionMNIST(
root="data",
train=True,
download=True,
transform=ToTensor()
)
test_data = datasets.FashionMNIST(
root="data",
train=False,
download=True,
transform=ToTensor()
)
train_dataloader = DataLoader(training_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
self.flatten = nn.Flatten()
self.linear_relu_stack = nn.Sequential(
nn.Linear(28*28, 512),
nn.ReLU(),
nn.Linear(512, 512),
nn.ReLU(),
nn.Linear(512, 10),
nn.ReLU()
)
def forward(self, x):
x = self.flatten(x)
logits = self.linear_relu_stack(x)
return logits
model = NeuralNetwork()
##############################################
# Hyperparameters
# -----------------
#
# Hyperparameters are adjustable parameters that let you control the model optimization process.
# Different hyperparameter values can impact model training and convergence rates
# (`read more <https://pytorch.org/tutorials/beginner/hyperparameter_tuning_tutorial.html>`__ about hyperparameter tuning)
#
# We define the following hyperparameters for training:
# - **Number of Epochs** - the number times to iterate over the dataset
# - **Batch Size** - the number of data samples seen by the model in each epoch
# - **Learning Rate** - how much to update models parameters at each batch/epoch. Smaller values yield slow learning speed, while large values may result in unpredictable behavior during training.
#
learning_rate = 1e-3
batch_size = 64
epochs = 5
#####################################
# Optimization Loop
# -----------------
#
# Once we set our hyperparameters, we can then train and optimize our model with an optimization loop. Each
# iteration of the optimization loop is called an **epoch**.
#
# Each epoch consists of two main parts:
# - **The Train Loop** - iterate over the training dataset and try to converge to optimal parameters.
# - **The Validation/Test Loop** - iterate over the test dataset to check if model performance is improving.
#
# Let's briefly familiarize ourselves with some of the concepts used in the training loop. Jump ahead to
# see the :ref:`full-impl-label` of the optimization loop.
#
# Loss Function
# ~~~~~~~~~~~~~~~~~
#
# When presented with some training data, our untrained network is likely not to give the correct
# answer. **Loss function** measures the degree of dissimilarity of obtained result to the target value,
# and it is the loss function that we want to minimize during training. To calculate the loss we make a
# prediction using the inputs of our given data sample and compare it against the true data label value.
#
# Common loss functions include `nn.MSELoss <https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html#torch.nn.MSELoss>`_ (Mean Square Error) for regression tasks, and
# `nn.NLLLoss <https://pytorch.org/docs/stable/generated/torch.nn.NLLLoss.html#torch.nn.NLLLoss>`_ (Negative Log Likelihood) for classification.
# `nn.CrossEntropyLoss <https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss>`_ combines ``nn.LogSoftmax`` and ``nn.NLLLoss``.
#
# We pass our model's output logits to ``nn.CrossEntropyLoss``, which will normalize the logits and compute the prediction error.
# Initialize the loss function
loss_fn = nn.CrossEntropyLoss()
#####################################
# Optimizer
# ~~~~~~~~~~~~~~~~~
#
# Optimization is the process of adjusting model parameters to reduce model error in each training step. **Optimization algorithms** define how this process is performed (in this example we use Stochastic Gradient Descent).
# All optimization logic is encapsulated in the ``optimizer`` object. Here, we use the SGD optimizer; additionally, there are many `different optimizers <https://pytorch.org/docs/stable/optim.html>`_
# available in PyTorch such as ADAM and RMSProp, that work better for different kinds of models and data.
#
# We initialize the optimizer by registering the model's parameters that need to be trained, and passing in the learning rate hyperparameter.
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
#####################################
# Inside the training loop, optimization happens in three steps:
# * Call ``optimizer.zero_grad()`` to reset the gradients of model parameters. Gradients by default add up; to prevent double-counting, we explicitly zero them at each iteration.
# * Backpropagate the prediction loss with a call to ``loss.backwards()``. PyTorch deposits the gradients of the loss w.r.t. each parameter.
# * Once we have our gradients, we call ``optimizer.step()`` to adjust the parameters by the gradients collected in the backward pass.
########################################
# .. _full-impl-label:
#
# Full Implementation
# -----------------------
# We define ``train_loop`` that loops over our optimization code, and ``test_loop`` that
# evaluates the model's performance against our test data.
def train_loop(dataloader, model, loss_fn, optimizer):
size = len(dataloader.dataset)
for batch, (X, y) in enumerate(dataloader):
# Compute prediction and loss
pred = model(X)
loss = loss_fn(pred, y)
# Backpropagation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch % 100 == 0:
loss, current = loss.item(), batch * len(X)
print(f"loss: {loss:>7f} [{current:>5d}/{size:>5d}]")
def test_loop(dataloader, model, loss_fn):
size = len(dataloader.dataset)
test_loss, correct = 0, 0
with torch.no_grad():
for X, y in dataloader:
pred = model(X)
test_loss += loss_fn(pred, y).item()
correct += (pred.argmax(1) == y).type(torch.float).sum().item()
test_loss /= size
correct /= size
print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
########################################
# We initialize the loss function and optimizer, and pass it to ``train_loop`` and ``test_loop``.
# Feel free to increase the number of epochs to track the model's improving performance.
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
epochs = 10
for t in range(epochs):
print(f"Epoch {t+1}\n-------------------------------")
train_loop(train_dataloader, model, loss_fn, optimizer)
test_loop(test_dataloader, model, loss_fn)
print("Done!")
#################################################################
# Further Reading
# -----------------------
# - `Loss Functions <https://pytorch.org/docs/stable/nn.html#loss-functions>`_
# - `torch.optim <https://pytorch.org/docs/stable/optim.html>`_
# - `Warmstart Training a Model <https://pytorch.org/tutorials/recipes/recipes/warmstarting_model_using_parameters_from_a_different_model.html>`_
#
|
en
| 0.706274
|
`Learn the Basics <intro.html>`_ || `Quickstart <quickstart_tutorial.html>`_ || `Tensors <tensorqs_tutorial.html>`_ || `Datasets & DataLoaders <data_tutorial.html>`_ || `Transforms <transforms_tutorial.html>`_ || `Build Model <buildmodel_tutorial.html>`_ || `Autograd <autogradqs_tutorial.html>`_ || **Optimization** || `Save & Load Model <saveloadrun_tutorial.html>`_ Optimizing Model Parameters =========================== Now that we have a model and data it's time to train, validate and test our model by optimizing its parameters on our data. Training a model is an iterative process; in each iteration (called an *epoch*) the model makes a guess about the output, calculates the error in its guess (*loss*), collects the derivatives of the error with respect to its parameters (as we saw in the `previous section <autograd_tutorial.html>`_), and **optimizes** these parameters using gradient descent. For a more detailed walkthrough of this process, check out this video on `backpropagation from 3Blue1Brown <https://www.youtube.com/watch?v=tIeHLnjs5U8>`__. Prerequisite Code ----------------- We load the code from the previous sections on `Datasets & DataLoaders <data_tutorial.html>`_ and `Build Model <buildmodel_tutorial.html>`_. ############################################## # Hyperparameters # ----------------- # # Hyperparameters are adjustable parameters that let you control the model optimization process. # Different hyperparameter values can impact model training and convergence rates # (`read more <https://pytorch.org/tutorials/beginner/hyperparameter_tuning_tutorial.html>`__ about hyperparameter tuning) # # We define the following hyperparameters for training: # - **Number of Epochs** - the number times to iterate over the dataset # - **Batch Size** - the number of data samples seen by the model in each epoch # - **Learning Rate** - how much to update models parameters at each batch/epoch. Smaller values yield slow learning speed, while large values may result in unpredictable behavior during training. # ##################################### # Optimization Loop # ----------------- # # Once we set our hyperparameters, we can then train and optimize our model with an optimization loop. Each # iteration of the optimization loop is called an **epoch**. # # Each epoch consists of two main parts: # - **The Train Loop** - iterate over the training dataset and try to converge to optimal parameters. # - **The Validation/Test Loop** - iterate over the test dataset to check if model performance is improving. # # Let's briefly familiarize ourselves with some of the concepts used in the training loop. Jump ahead to # see the :ref:`full-impl-label` of the optimization loop. # # Loss Function # ~~~~~~~~~~~~~~~~~ # # When presented with some training data, our untrained network is likely not to give the correct # answer. **Loss function** measures the degree of dissimilarity of obtained result to the target value, # and it is the loss function that we want to minimize during training. To calculate the loss we make a # prediction using the inputs of our given data sample and compare it against the true data label value. # # Common loss functions include `nn.MSELoss <https://pytorch.org/docs/stable/generated/torch.nn.MSELoss.html#torch.nn.MSELoss>`_ (Mean Square Error) for regression tasks, and # `nn.NLLLoss <https://pytorch.org/docs/stable/generated/torch.nn.NLLLoss.html#torch.nn.NLLLoss>`_ (Negative Log Likelihood) for classification. 
# `nn.CrossEntropyLoss <https://pytorch.org/docs/stable/generated/torch.nn.CrossEntropyLoss.html#torch.nn.CrossEntropyLoss>`_ combines ``nn.LogSoftmax`` and ``nn.NLLLoss``. # # We pass our model's output logits to ``nn.CrossEntropyLoss``, which will normalize the logits and compute the prediction error. # Initialize the loss function ##################################### # Optimizer # ~~~~~~~~~~~~~~~~~ # # Optimization is the process of adjusting model parameters to reduce model error in each training step. **Optimization algorithms** define how this process is performed (in this example we use Stochastic Gradient Descent). # All optimization logic is encapsulated in the ``optimizer`` object. Here, we use the SGD optimizer; additionally, there are many `different optimizers <https://pytorch.org/docs/stable/optim.html>`_ # available in PyTorch such as ADAM and RMSProp, that work better for different kinds of models and data. # # We initialize the optimizer by registering the model's parameters that need to be trained, and passing in the learning rate hyperparameter. ##################################### # Inside the training loop, optimization happens in three steps: # * Call ``optimizer.zero_grad()`` to reset the gradients of model parameters. Gradients by default add up; to prevent double-counting, we explicitly zero them at each iteration. # * Backpropagate the prediction loss with a call to ``loss.backwards()``. PyTorch deposits the gradients of the loss w.r.t. each parameter. # * Once we have our gradients, we call ``optimizer.step()`` to adjust the parameters by the gradients collected in the backward pass. ######################################## # .. _full-impl-label: # # Full Implementation # ----------------------- # We define ``train_loop`` that loops over our optimization code, and ``test_loop`` that # evaluates the model's performance against our test data. # Compute prediction and loss # Backpropagation ######################################## # We initialize the loss function and optimizer, and pass it to ``train_loop`` and ``test_loop``. # Feel free to increase the number of epochs to track the model's improving performance. ################################################################# # Further Reading # ----------------------- # - `Loss Functions <https://pytorch.org/docs/stable/nn.html#loss-functions>`_ # - `torch.optim <https://pytorch.org/docs/stable/optim.html>`_ # - `Warmstart Training a Model <https://pytorch.org/tutorials/recipes/recipes/warmstarting_model_using_parameters_from_a_different_model.html>`_ #
| 4.023796
| 4
|
mp_calc/app/serverlibrary.py
|
shanghongsim/d2w
| 0
|
6627059
|
<gh_stars>0
def mergesort(array, byfunc=None):
  # Sort `array` in place and return it; `byfunc` extracts the comparison key.
  p = 0
  r = len(array) - 1
  mergesort_recursive(array, p, r, byfunc)
  return array
def merge(array, p, q, r, byfunc):
  # Merge the two sorted runs array[p..q] and array[q+1..r] back into array.
  key = byfunc if byfunc is not None else (lambda x: x)
  nleft = q - p + 1
  nright = r - q
  left_array = array[p:q + 1]
  right_array = array[q + 1:r + 1]
  left = 0
  right = 0
  dest = p
  while (left < nleft) and (right < nright):
    if key(left_array[left]) <= key(right_array[right]):
      array[dest] = left_array[left]
      left += 1
    else:
      array[dest] = right_array[right]
      right += 1
    dest += 1
  # Copy over whatever remains of the run that was not exhausted.
  while left < nleft:
    array[dest] = left_array[left]
    left += 1
    dest += 1
  while right < nright:
    array[dest] = right_array[right]
    right += 1
    dest += 1
  return array
def mergesort_recursive(array, p, r, byfunc):
  # Split array[p..r] in half, sort each half recursively, then merge them.
  if r - p >= 1:
    q = (p + r) // 2
    mergesort_recursive(array, p, q, byfunc)
    mergesort_recursive(array, q + 1, r, byfunc)
    merge(array, p, q, r, byfunc)
  return array
class Stack:
def __init__(self):
self.__items = []
def push(self, item):
self.__items.append(item)
def pop(self):
if len(self.__items)>=1:
return self.__items.pop()
def peek(self):
if len(self.__items)>=1:
return self.__items[-1]
@property
def is_empty(self):
return self.__items==[]
@property
def size(self):
return len(self.__items)
class EvaluateExpression:
valid_char = '0123456789+-*/() '
operands = "0123456789"
operators = "(+-*/"
def __init__(self, string=""):
self.expression=string
self.stack = Stack()
@property
def expression(self):
return self.expr
  @expression.setter
  def expression(self, new_expr):
    # Accept the string only if every character is a valid expression character;
    # anything else (including non-strings) resets the expression to "".
    if isinstance(new_expr, str) and all(ch in self.valid_char for ch in new_expr):
      self.expr = new_expr
    else:
      self.expr = ""
def insert_space(self):
res=""
for val in self.expression:
if val in self.operands:
res+=val
else:
res=res+" "+val+" "
return res
def process_operator(self, operand_stack, operator_stack):
right=int(operand_stack.pop())
left=int(operand_stack.pop())
operator=str(operator_stack.pop())
if operator=="+":
res=right+left
elif operator=="-":
res=left-right
elif operator=="*":
res=left*right
else:
res=left//right
operand_stack.push(str(res))
  def evaluate(self):
    operand_stack = Stack()
    operator_stack = Stack()
    expression = self.insert_space()
    tokens = expression.split()
    # Walk the whitespace-separated tokens (not the raw characters) so that
    # multi-digit operands such as "12" are pushed as a single number.
    for val in tokens:
      if val.isdigit():
        operand_stack.push(val)
      elif val == "+" or val == "-":
        # +/- have the lowest precedence: evaluate everything already stacked
        # (down to any open parenthesis) before pushing the new operator.
        while not operator_stack.is_empty and operator_stack.peek() != "(" and operator_stack.peek() != ")":
          self.process_operator(operand_stack, operator_stack)
        operator_stack.push(val)
      elif val == "*" or val == "/":
        # * and / only wait for operators of equal precedence.
        while not operator_stack.is_empty and (operator_stack.peek() == "*" or operator_stack.peek() == "/"):
          self.process_operator(operand_stack, operator_stack)
        operator_stack.push(val)
      elif val == "(":
        operator_stack.push(val)
      elif val == ")":
        # Evaluate the bracketed sub-expression, then discard the "(".
        while operator_stack.peek() != "(":
          self.process_operator(operand_stack, operator_stack)
        operator_stack.pop()
    while not operator_stack.is_empty:
      self.process_operator(operand_stack, operator_stack)
    return int(operand_stack.peek())
def get_smallest_three(challenge):
records = challenge.records
times = [r for r in records]
mergesort(times, lambda x: x.elapsed_time)
return times[:3]
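# --- Usage sketch (added for illustration; not part of the original module) ---
# A minimal, hypothetical demonstration of the helpers above: sorting with a
# key function via `mergesort`, and evaluating an arithmetic string with
# `EvaluateExpression`. The values in the trailing comments follow from the
# code above.
if __name__ == "__main__":
  pairs = [(3, "c"), (1, "a"), (2, "b")]
  print(mergesort(pairs, byfunc=lambda pair: pair[0]))  # [(1, 'a'), (2, 'b'), (3, 'c')]
  expr = EvaluateExpression("(1 + 2) * 3 - 4")
  print(expr.evaluate())  # 5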
|
|
en
| 0.73746
|
# print(operand_stack.peek()) # print(operator_stack.peek()) # print("----")
| 3.790642
| 4
|
YouTube-Video-Downloader/YouTube-video-downloader-2.py
|
vusalaxndzde/YouTube-video-downloader-youtube_dl
| 2
|
6627060
|
<reponame>vusalaxndzde/YouTube-video-downloader-youtube_dl
import youtube_dl
from pytube import YouTube
from PyQt5 import QtWidgets, QtGui
import sys
class Window(QtWidgets.QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("YouTube Video Downloader")
self.setMaximumSize(670, 330)
self.setMinimumSize(670, 330)
self.setStyleSheet("background-color: white;")
self.setWindowIcon(QtGui.QIcon("photo/800px-YouTube_social_white_squircle.svg.png"))
self.init_ui()
self.show()
def init_ui(self):
photo1 = QtWidgets.QLabel()
photo1.setPixmap(QtGui.QPixmap("photo/11.png"))
hBox1 = QtWidgets.QHBoxLayout()
hBox1.addStretch()
hBox1.addWidget(photo1)
vBox1 = QtWidgets.QVBoxLayout()
vBox1.addLayout(hBox1)
vBox1.addStretch()
label1 = QtWidgets.QLabel("YouTube Link: ")
label1.setFont(QtGui.QFont("Arial", 16))
self.input = QtWidgets.QLineEdit()
self.input.setFont(QtGui.QFont("Arial", 16))
hBox2 = QtWidgets.QHBoxLayout()
hBox2.addWidget(label1)
hBox2.addWidget(self.input)
vBox2 = QtWidgets.QVBoxLayout()
vBox2.addLayout(hBox2)
hBox3 = QtWidgets.QHBoxLayout()
self.video_info = QtWidgets.QLabel()
self.video_info.setFont(QtGui.QFont("Arial", 10))
self.button = QtWidgets.QPushButton("Download")
self.button.setFont(QtGui.QFont("Arial", 13))
self.info_button = QtWidgets.QPushButton("Information")
self.info_button.setFont(QtGui.QFont("Arial", 13))
hBox3.addWidget(self.video_info)
hBox3.addStretch()
hBox3.addWidget(self.info_button)
hBox3.addWidget(self.button)
vBox2.addLayout(hBox3)
vBox2.addStretch()
photo2 = QtWidgets.QLabel()
photo2.setPixmap(QtGui.QPixmap("photo/800px-YouTube_social_white_squircle.svg.png"))
self.info = QtWidgets.QLabel()
self.info.setFont(QtGui.QFont("Arial", 13))
hBox4 = QtWidgets.QHBoxLayout()
hBox4.addWidget(self.info)
hBox4.addStretch()
hBox4.addWidget(photo2)
vBox = QtWidgets.QVBoxLayout()
vBox.addLayout(vBox1)
vBox.addLayout(vBox2)
vBox.addStretch()
vBox.addLayout(hBox4)
self.setLayout(vBox)
self.info_button.clicked.connect(self.about_video)
self.button.clicked.connect(self.download)
def about_video(self):
try:
Link = str(self.input.text())
video = YouTube(Link)
self.info.setText(" ")
self.video_info.setStyleSheet("color: blue;")
self.video_info.setText("Title: {}\nNumber of views: {}\nLength of video: {} sec\n"
"Author: {}\nAge Restricted: {}".format(video.title, video.views, video.length,
video.author, video.age_restricted))
        except Exception:
            # A bad or unreachable link raises inside pytube; report it instead of crashing.
            self.video_info.setText(" ")
            self.info.setStyleSheet("color: red;")
            self.info.setText("YouTube link was wrong!")
def download(self):
try:
ydl_opts = {'format': 'mp4'}
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download([self.input.text()])
self.info.setStyleSheet("color: green;")
self.info.setText("Video Downloaded.")
        except Exception:
            self.video_info.setText(" ")
            self.info.setText("Error!")
            self.info.setStyleSheet("color: red;")
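# --- Usage sketch (hypothetical, added for illustration; not in the original script) ---
# A small helper mirroring the download step above, so the same youtube_dl
# options can be exercised from a script or REPL without starting the GUI.
def download_cli(url, fmt='mp4'):
    # Download `url` with the same 'format' option the Download button uses.
    with youtube_dl.YoutubeDL({'format': fmt}) as ydl:
        ydl.download([url])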
if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = Window()
    sys.exit(app.exec_())
|
|
none
| 1
| 2.737255
| 3
|
|
output/models/ms_data/datatypes/facets/nmtokens/nmtokens_pattern001_xsd/nmtokens_pattern001.py
|
tefra/xsdata-w3c-tests
| 1
|
6627061
|
from dataclasses import dataclass, field
from typing import List
@dataclass
class Foo:
class Meta:
name = "foo"
value: List[str] = field(
default_factory=list,
metadata={
"required": True,
"pattern": r"[A-C]{0,2}",
"tokens": True,
}
)
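# --- Usage sketch (hypothetical, added for illustration; not part of the generated module) ---
# The metadata above comes from the source XSD: `value` is a required tokens
# field (a whitespace-separated list) where each token must match [A-C]{0,2}.
# The check below only illustrates that constraint with the standard library.
import re
foo = Foo(value=["AB", "C"])
assert all(re.fullmatch(r"[A-C]{0,2}", token) for token in foo.value)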
|
|
none
| 1
| 2.950213
| 3
|
|
test/unit/test_api_users.py
|
ldn-softdev/pyeapi
| 126
|
6627062
|
<reponame>ldn-softdev/pyeapi
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import os
import unittest
sys.path.append(os.path.join(os.path.dirname(__file__), '../lib'))
from testlib import get_fixture, function
from testlib import EapiConfigUnitTest
import pyeapi.api.users
class TestApiUsers(EapiConfigUnitTest):
def __init__(self, *args, **kwargs):
super(TestApiUsers, self).__init__(*args, **kwargs)
self.instance = pyeapi.api.users.instance(None)
self.config = open(get_fixture('running_config.text')).read()
def test_isprivilege_returns_false(self):
result = pyeapi.api.users.isprivilege('test')
self.assertFalse(result)
def test_get(self):
keys = ['nopassword', 'privilege', 'role', 'secret', 'format', 'sshkey']
result = self.instance.get('test')
self.assertEqual(sorted(keys), sorted(result.keys()))
def test_getall(self):
result = self.instance.getall()
self.assertIsInstance(result, dict)
def test_create_with_nopassword(self):
cmds = 'username test nopassword'
func = function('create', 'test', nopassword=True)
self.eapi_positive_config_test(func, cmds)
def test_create_with_secret_cleartext(self):
cmds = 'username test secret 0 pass'
func = function('create', 'test', secret='pass')
self.eapi_positive_config_test(func, cmds)
def test_create_with_secret_md5(self):
cmds = 'username test secret 5 pass'
func = function('create', 'test', secret='pass', encryption='md5')
self.eapi_positive_config_test(func, cmds)
def test_create_with_secret_sha512(self):
cmds = 'username test secret sha512 pass'
func = function('create', 'test', secret='pass', encryption='sha512')
self.eapi_positive_config_test(func, cmds)
def test_create_with_missing_kwargs(self):
with self.assertRaises(TypeError):
self.instance.create('test')
def test_create_with_invalid_secret_arg(self):
with self.assertRaises(TypeError):
self.instance.create_with_secret('test', 'test', 'test')
    def test_delete(self):
        cmds = 'no username test'
        func = function('delete', 'test')
        self.eapi_positive_config_test(func, cmds)
    def test_delete_admin_exception(self):
        with self.assertRaises(TypeError):
            self.instance.delete('admin')
def test_default(self):
cmds = 'default username test'
func = function('default', 'test')
self.eapi_positive_config_test(func, cmds)
def test_set_privilege(self):
cmds = 'username test privilege 8'
func = function('set_privilege', 'test', 8)
self.eapi_positive_config_test(func, cmds)
def test_set_privilege_negate(self):
cmds = 'username test privilege 1'
func = function('set_privilege', 'test')
self.eapi_positive_config_test(func, cmds)
def test_set_privilege_invalid_value(self):
with self.assertRaises(TypeError):
self.instance.set_privilege('test', 16)
def test_set_role(self):
cmds = 'username test role ops'
func = function('set_role', 'test', value='ops')
self.eapi_positive_config_test(func, cmds)
def test_set_role_negate(self):
cmds = 'no username test role'
func = function('set_role', 'test', disable=True)
self.eapi_positive_config_test(func, cmds)
def test_set_role_default(self):
cmds = 'default username test role'
func = function('set_role', 'test', default=True)
self.eapi_positive_config_test(func, cmds)
def test_set_sshkey(self):
cmds = 'username test sshkey newkey'
func = function('set_sshkey', 'test', value='newkey')
self.eapi_positive_config_test(func, cmds)
def test_set_sshkey_negate(self):
cmds = 'no username test sshkey'
func = function('set_sshkey', 'test', disable=True)
self.eapi_positive_config_test(func, cmds)
def test_set_sshkey_default(self):
cmds = 'default username test sshkey'
func = function('set_sshkey', 'test', default=True)
self.eapi_positive_config_test(func, cmds)
if __name__ == '__main__':
unittest.main()
|
|
en
| 0.732862
|
# # Copyright (c) 2015, Arista Networks, Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # Neither the name of Arista Networks nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR # BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN # IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #
| 1.371219
| 1
|
gw/work/S190425z-KMTNet-split_phot.py
|
SilverRon/gppy
| 4
|
6627063
|
<gh_stars>1-10
# PHOTOMETRY CODE FOR PYTHON 3.X
# 2019.06.20 CREATED BY <NAME>
# 2019.10.06 MODIFIED BY <NAME>
#============================================================
import os, glob
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack
from astropy.io import ascii
from astropy.io import fits
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs import WCS
from imsng import phot
import time
#============================================================
# FUNCTION
#============================================================
def phot_routine(inim, refcatname, phottype, tra, tdec, path_base='./', aperture='MAG_APER_7', detsig=3.0, frac=0.95):
#------------------------------------------------------------
# HEADER INFO
hdr = fits.getheader(inim)
xcent, ycent= hdr['NAXIS1']/2., hdr['NAXIS2']/2.
try:
w = WCS(inim)
radeg, dedeg= w.all_pix2world(xcent, ycent, 1)
radeg, dedeg= np.asscalar(radeg), np.asscalar(dedeg)
except:
print('BAD WCS INFORMATION?')
radeg,dedeg = hdr['CRVAL1'], hdr['CRVAL2']
#xcent, ycent= w.all_world2pix(radeg, dedeg, 1)
#------------------------------------------------------------
try:
date_obs = hdr['date-obs']
jd = round(Time(date_obs, format='isot', scale='utc').jd, 3)
except:
date_obs = None
jd = None
#------------------------------------------------------------
# NAME INFO
obs, obj = 'KMTNET', inim[:-7]
refmagkey = 'R'
refmagerkey = refmagkey+'err'
indx_obs = np.where(obstbl['obs']==obs)
gain, pixscale = obstbl[indx_obs]['gain'][0], obstbl[indx_obs]['pixelscale'][0]
#------------------------------------------------------------
# REF. CATALOG QUERY
#------------------------------------------------------------
refcatlist = glob.glob(path_refcat+'/*.cat')
#------------------------------------------------------------
if refcatname == 'PS1':
if path_refcat+'/ps1-'+obj+'.cat' not in refcatlist:
querytbl = phot.ps1_query(obj, radeg, dedeg, path_refcat, radius=3.0)
else:
querytbl = ascii.read(path_refcat+'/ps1-'+obj+'.cat')
reftbl, refcat = phot.ps1_Tonry(querytbl, obj)
#------------------------------------------------------------
elif refcatname == 'SDSS':
if path_refcat+'/sdss-'+obj+'.cat' not in refcatlist:
querytbl = phot.sdss_query(obj, radeg, dedeg, path_refcat)
else:
querytbl = ascii.read(path_refcat+'/sdss-'+obj+'.cat')
reftbl, refcat = phot.sdss_Blaton(querytbl, obj)
#------------------------------------------------------------
elif refcatname == 'APASS':
if path_refcat+'/apass-'+obj+'.cat' not in refcatlist:
querytbl = phot.apass_query(obj, radeg, dedeg, path_refcat, radius=2.0)
else:
querytbl = ascii.read(path_refcat+'/apass-'+obj+'.cat')
reftbl, refcat = phot.apass_Blaton(querytbl, obj)
#------------------------------------------------------------
elif refcatname == '2MASS':
if path_refcat+'/2mass-'+obj+'.cat' not in refcatlist:
querytbl = phot.twomass_query(obj, radeg, dedeg, path_refcat, band=refmagkey, radius=1.0)
else:
querytbl = ascii.read(path_refcat+'/2mass-'+obj+'.cat')
reftbl, refcat = querytbl, '2mass-'+obj+'.cat'
#------------------------------------------------------------
# SourceEXtractor
#------------------------------------------------------------
peeing, seeing = phot.psfex(inim, pixscale)
param_secom = dict( inim=inim,
gain=gain, pixscale=pixscale, seeing=seeing,
det_sigma=detsig,
backsize=str(64), backfiltersize=str(3),
psf=True, check=False)
# intbl0, incat = phot.secom(**param_secom)
intbl, incat = phot.secom(**param_secom)
'''
# CENTER POS. & DIST CUT
deldist = phot.sqsum((xcent-intbl0['X_IMAGE']), (ycent-intbl0['Y_IMAGE']))
indx_dist = np.where(deldist < np.sqrt(frac)*(xcent+ycent)/2.)
intbl = intbl0[indx_dist]
intbl.write(incat, format='ascii', overwrite=True)
'''
# MATCHING
param_match = dict( intbl=intbl, reftbl=reftbl,
inra=intbl['ALPHA_J2000'], indec=intbl['DELTA_J2000'],
refra=reftbl['ra'], refdec=reftbl['dec'], sep=2.0)
mtbl = phot.matching(**param_match)
#------------------------------------------------------------
# ZEROPOINT CALCULATION
#------------------------------------------------------------
inmagkey = aperture
inmagerkey = '_'.join(['MAGERR', inmagkey.split('_')[1], inmagkey.split('_')[2]])
param_st4zp = dict( intbl=mtbl,
inmagerkey=aperture,
refmagkey=refmagkey,
refmagerkey=refmagerkey,
refmaglower=10,
refmagupper=16,
refmagerupper=0.1,
inmagerupper=0.1)
param_zpcal = dict( intbl=phot.star4zp(**param_st4zp),
inmagkey=inmagkey, inmagerkey=inmagerkey,
refmagkey=refmagkey, refmagerkey=refmagerkey,
sigma=2.0)
zp, zper, otbl, xtbl = phot.zpcal(**param_zpcal)
#------------------------------------------------------------
# ZEROPOINT PLOT
#------------------------------------------------------------
outname = path_base+'/{0}.{1}.zpcal.png'.format(inim[:-5], inmagkey)
phot.zpplot( outname=outname,
otbl=otbl,xtbl=xtbl,
inmagkey=inmagkey, inmagerkey=inmagerkey,
refmagkey=refmagkey, refmagerkey=refmagerkey,
zp=zp, zper=zper)
param_plot = dict( inim = inim,
numb_list = otbl['NUMBER'],
xim_list = otbl['X_IMAGE'],
yim_list = otbl['Y_IMAGE'],
add = True,
numb_addlist= xtbl['NUMBER'],
xim_addlist = xtbl['X_IMAGE'],
yim_addlist = xtbl['Y_IMAGE'])
try:
phot.plotshow(**param_plot)
except:
print('FAIL TO DRAW ZEROPOINT GRAPH')
pass
#------------------------------------------------------------
# TARGET PHOTOMETRY
#------------------------------------------------------------
skymean, skymed, skysig = phot.bkgest_mask(inim)
aper = 2*peeing
ul = phot.limitmag(detsig, zp, aper, skysig)
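	# Note (added; not from the original): phot.limitmag is an imsng helper, so the
	# exact definition lives in that package. A conventional N-sigma limiting
	# magnitude over a circular aperture of diameter `aper` pixels would be roughly
	#     ul = zp - 2.5 * log10(detsig * skysig * sqrt(pi * (aper / 2.)**2))
	# which is the kind of estimate assumed here; the library's formula may differ.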
#------------------------------------------------------------
# ADD HEADER INFO
#------------------------------------------------------------
phot.puthdr(inim, 'SEEING', round(seeing, 3), hdrcomment='SEEING [arcsec]')
phot.puthdr(inim, 'PEEING', round(peeing, 3), hdrcomment='SEEING [pixel]')
phot.puthdr(inim, 'SKYSIG', round(skysig, 3), hdrcomment='SKY SIGMA VALUE')
phot.puthdr(inim, 'SKYVAL', round(skymed, 3), hdrcomment='SKY MEDIAN VALUE')
phot.puthdr(inim, 'OPTZP', round(zp, 3), hdrcomment='2*SEEING DIAMETER')
phot.puthdr(inim, 'OPTZPERR',round(zper, 3), hdrcomment='2*SEEING DIAMETER')
phot.puthdr(inim, 'OPTUL', round(ul, 3), hdrcomment='2*SEEING 3 sigma limit mag')
phot.puthdr(inim, 'STDNUMB',len(otbl), hdrcomment='# OF STD STARS')
#------------------------------------------------------------
# NORMAL PHOTOMETRY
#------------------------------------------------------------
if phottype == 'normal':
intbl['REAL_'+inmagkey] = zp + intbl[inmagkey]
intbl['REAL_'+inmagerkey] = phot.sqsum(zper, intbl[inmagerkey])
indx_targ = phot.targetfind(tra, tdec, intbl['ALPHA_J2000'], intbl['DELTA_J2000'], sep=seeing)
if indx_targ != None:
mag, mager = intbl[indx_targ]['REAL_'+inmagkey], intbl[indx_targ]['REAL_'+inmagerkey]
else:
mag, mager = -99, -99
#------------------------------------------------------------
# SUBTRACTION PHOTOMETRY
#------------------------------------------------------------
elif phottype == 'subt':
subim = 'hd'+inim
phot.puthdr(subim, 'SEEING', round(seeing, 3), hdrcomment='SEEING [arcsec]')
phot.puthdr(subim, 'PEEING', round(peeing, 3), hdrcomment='SEEING [pixel]')
phot.puthdr(subim, 'SKYSIG', round(skysig, 3), hdrcomment='SKY SIGMA VALUE')
phot.puthdr(subim, 'SKYVAL', round(skymed, 3), hdrcomment='SKY MEDIAN VALUE')
phot.puthdr(subim, 'OPTZP', round(zp, 3), hdrcomment='2*SEEING DIAMETER')
phot.puthdr(subim, 'OPTZPERR',round(zper, 3), hdrcomment='2*SEEING DIAMETER')
phot.puthdr(subim, 'OPTUL', round(ul, 3), hdrcomment='2*SEEING 3 sigma limit mag')
phot.puthdr(subim, 'STDNUMB',len(otbl), hdrcomment='# OF STD STARS')
os.system('cp {} {}'.format(inim[:-5]+'.psf', subim[:-5]+'.psf'))
param_subcom = dict( inim=subim,
gain=gain, pixscale=pixscale, seeing=seeing,
det_sigma=3,
backsize=str(64), backfiltersize=str(3),
psf=True, check=False)
subtbl, subcat = phot.secom(**param_subcom)
subtbl['REAL_'+inmagkey] = zp + subtbl[inmagkey]
subtbl['REAL_'+inmagerkey] = phot.sqsum(zper, subtbl[inmagerkey])
indx_targ = phot.targetfind(tra, tdec, subtbl['ALPHA_J2000'], subtbl['DELTA_J2000'], sep=seeing)
if indx_targ != None:
mag, mager = subtbl[indx_targ]['REAL_'+inmagkey], subtbl[indx_targ]['REAL_'+inmagerkey]
else:
mag, mager = -99, -99
#------------------------------------------------------------
# CALC. DEPTH
#------------------------------------------------------------
elif phottype == 'depth':
mag, mager = -99, -99
onetbl = Table([[inim], [obs], [obj], [round(radeg, 3)], [round(dedeg, 3)], [date_obs], [jd], [refmagkey], [len(otbl)], [round(zp, 3)], [round(zper, 3)], [round(seeing, 3)], [round(skymed, 3)], [round(skysig, 3)], [round(ul, 3)], [mag], [mager]],
names=('image', 'obs', 'obj', 'ra', 'dec', 'date-obs', 'jd', 'filter', 'stdnumb', 'zp', 'zper', 'seeing', 'skyval', 'skysig', 'ul', 'mag', 'magerr'))
return onetbl
#============================================================
# USER SETTING
#============================================================
path_base = './'
path_obs = '/home/sonic/Research/table'
path_refcat = '/home/sonic/Research/cat/refcat'
#------------------------------------------------------------
obstbl = ascii.read(path_obs+'/obs.txt')
# TARGET COORD. [deg]
#------------------------------------------------------------
# IMAGES TO PHOTOMETRY
# INPUT FORMAT : Calib-[OBS]-[TARGET]-[DATE]-[TIME]-[BAND]*.fits
#------------------------------------------------------------
kmtnetkey = 'nn'
imlist = glob.glob('a*{}-4*.fits'.format(kmtnetkey))
# imlist.sort()
for inim in glob.glob('a*{}-*.fits'.format(kmtnetkey)):
if '-4' not in inim:
imlist.append(inim)
for img in imlist: print(img)
photlist = []
refcatname = 'APASS'
phottype = 'depth'
starttime = time.time()
#============================================================
# MAIN COMMAND
#============================================================
for inim in imlist:
try:
plt.ioff()
param_phot = dict( inim=inim, refcatname=refcatname, phottype=phottype,
tra=0, tdec=0, path_base='./', aperture='MAG_APER_7',
detsig=3.0, frac=0.9)
photlist.append(phot_routine(**param_phot))
os.system('rm psf*.fits snap*.fits *.xml seg.fits')
except:
pass
#------------------------------------------------------------
# FINISH
#------------------------------------------------------------
if len(photlist) == 0:
	print('PHOTOMETRY FAILED!')
else:
	photbl = vstack(photlist)
	# BACK UP AN EXISTING PHOTOMETRY TABLE BEFORE OVERWRITING IT
	# (the original membership test against glob() compared a bare filename to a
	# path with the directory prefix, so the backup was never made)
	if os.path.exists(path_base+'/phot-{}.dat'.format(kmtnetkey)):
		os.system('mv {} {}'.format(path_base+'/phot-{}.dat'.format(kmtnetkey), path_base+'/phot-{}.dat.bkg'.format(kmtnetkey)))
	photbl.write(path_base+'/phot-{}.dat'.format(kmtnetkey), format='ascii', overwrite=True)
deltime = time.time() - starttime
print('All PROCESS IS DONE.\t('+str(round(deltime, 1))+' sec)')
|
# PHOTOMETRY CODE FOR PYTHON 3.X
# 2019.06.20 CREATED BY <NAME>
# 2019.10.06 MODIFIED BY <NAME>
#============================================================
import os, glob
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, vstack
from astropy.io import ascii
from astropy.io import fits
from astropy.time import Time
from astropy.coordinates import SkyCoord
from astropy import units as u
from astropy.wcs import WCS
from imsng import phot
import time
#============================================================
# FUNCTION
#============================================================
def phot_routine(inim, refcatname, phottype, tra, tdec, path_base='./', aperture='MAG_APER_7', detsig=3.0, frac=0.95):
#------------------------------------------------------------
# HEADER INFO
hdr = fits.getheader(inim)
xcent, ycent= hdr['NAXIS1']/2., hdr['NAXIS2']/2.
try:
w = WCS(inim)
radeg, dedeg= w.all_pix2world(xcent, ycent, 1)
radeg, dedeg= np.asscalar(radeg), np.asscalar(dedeg)
except:
print('BAD WCS INFORMATION?')
radeg,dedeg = hdr['CRVAL1'], hdr['CRVAL2']
#xcent, ycent= w.all_world2pix(radeg, dedeg, 1)
#------------------------------------------------------------
try:
date_obs = hdr['date-obs']
jd = round(Time(date_obs, format='isot', scale='utc').jd, 3)
except:
date_obs = None
jd = None
#------------------------------------------------------------
# NAME INFO
obs, obj = 'KMTNET', inim[:-7]
refmagkey = 'R'
refmagerkey = refmagkey+'err'
indx_obs = np.where(obstbl['obs']==obs)
gain, pixscale = obstbl[indx_obs]['gain'][0], obstbl[indx_obs]['pixelscale'][0]
#------------------------------------------------------------
# REF. CATALOG QUERY
#------------------------------------------------------------
refcatlist = glob.glob(path_refcat+'/*.cat')
#------------------------------------------------------------
if refcatname == 'PS1':
if path_refcat+'/ps1-'+obj+'.cat' not in refcatlist:
querytbl = phot.ps1_query(obj, radeg, dedeg, path_refcat, radius=3.0)
else:
querytbl = ascii.read(path_refcat+'/ps1-'+obj+'.cat')
reftbl, refcat = phot.ps1_Tonry(querytbl, obj)
#------------------------------------------------------------
elif refcatname == 'SDSS':
if path_refcat+'/sdss-'+obj+'.cat' not in refcatlist:
querytbl = phot.sdss_query(obj, radeg, dedeg, path_refcat)
else:
querytbl = ascii.read(path_refcat+'/sdss-'+obj+'.cat')
reftbl, refcat = phot.sdss_Blaton(querytbl, obj)
#------------------------------------------------------------
elif refcatname == 'APASS':
if path_refcat+'/apass-'+obj+'.cat' not in refcatlist:
querytbl = phot.apass_query(obj, radeg, dedeg, path_refcat, radius=2.0)
else:
querytbl = ascii.read(path_refcat+'/apass-'+obj+'.cat')
reftbl, refcat = phot.apass_Blaton(querytbl, obj)
#------------------------------------------------------------
elif refcatname == '2MASS':
if path_refcat+'/2mass-'+obj+'.cat' not in refcatlist:
querytbl = phot.twomass_query(obj, radeg, dedeg, path_refcat, band=refmagkey, radius=1.0)
else:
querytbl = ascii.read(path_refcat+'/2mass-'+obj+'.cat')
reftbl, refcat = querytbl, '2mass-'+obj+'.cat'
#------------------------------------------------------------
# SourceEXtractor
#------------------------------------------------------------
peeing, seeing = phot.psfex(inim, pixscale)
param_secom = dict( inim=inim,
gain=gain, pixscale=pixscale, seeing=seeing,
det_sigma=detsig,
backsize=str(64), backfiltersize=str(3),
psf=True, check=False)
# intbl0, incat = phot.secom(**param_secom)
intbl, incat = phot.secom(**param_secom)
'''
# CENTER POS. & DIST CUT
deldist = phot.sqsum((xcent-intbl0['X_IMAGE']), (ycent-intbl0['Y_IMAGE']))
indx_dist = np.where(deldist < np.sqrt(frac)*(xcent+ycent)/2.)
intbl = intbl0[indx_dist]
intbl.write(incat, format='ascii', overwrite=True)
'''
# MATCHING
param_match = dict( intbl=intbl, reftbl=reftbl,
inra=intbl['ALPHA_J2000'], indec=intbl['DELTA_J2000'],
refra=reftbl['ra'], refdec=reftbl['dec'], sep=2.0)
mtbl = phot.matching(**param_match)
#------------------------------------------------------------
# ZEROPOINT CALCULATION
#------------------------------------------------------------
inmagkey = aperture
inmagerkey = '_'.join(['MAGERR', inmagkey.split('_')[1], inmagkey.split('_')[2]])
param_st4zp = dict( intbl=mtbl,
inmagerkey=aperture,
refmagkey=refmagkey,
refmagerkey=refmagerkey,
refmaglower=10,
refmagupper=16,
refmagerupper=0.1,
inmagerupper=0.1)
param_zpcal = dict( intbl=phot.star4zp(**param_st4zp),
inmagkey=inmagkey, inmagerkey=inmagerkey,
refmagkey=refmagkey, refmagerkey=refmagerkey,
sigma=2.0)
zp, zper, otbl, xtbl = phot.zpcal(**param_zpcal)
#------------------------------------------------------------
# ZEROPOINT PLOT
#------------------------------------------------------------
outname = path_base+'/{0}.{1}.zpcal.png'.format(inim[:-5], inmagkey)
phot.zpplot( outname=outname,
otbl=otbl,xtbl=xtbl,
inmagkey=inmagkey, inmagerkey=inmagerkey,
refmagkey=refmagkey, refmagerkey=refmagerkey,
zp=zp, zper=zper)
param_plot = dict( inim = inim,
numb_list = otbl['NUMBER'],
xim_list = otbl['X_IMAGE'],
yim_list = otbl['Y_IMAGE'],
add = True,
numb_addlist= xtbl['NUMBER'],
xim_addlist = xtbl['X_IMAGE'],
yim_addlist = xtbl['Y_IMAGE'])
try:
phot.plotshow(**param_plot)
except:
print('FAIL TO DRAW ZEROPOINT GRAPH')
pass
#------------------------------------------------------------
# TARGET PHOTOMETRY
#------------------------------------------------------------
skymean, skymed, skysig = phot.bkgest_mask(inim)
aper = 2*peeing
ul = phot.limitmag(detsig, zp, aper, skysig)
#------------------------------------------------------------
# ADD HEADER INFO
#------------------------------------------------------------
phot.puthdr(inim, 'SEEING', round(seeing, 3), hdrcomment='SEEING [arcsec]')
phot.puthdr(inim, 'PEEING', round(peeing, 3), hdrcomment='SEEING [pixel]')
phot.puthdr(inim, 'SKYSIG', round(skysig, 3), hdrcomment='SKY SIGMA VALUE')
phot.puthdr(inim, 'SKYVAL', round(skymed, 3), hdrcomment='SKY MEDIAN VALUE')
phot.puthdr(inim, 'OPTZP', round(zp, 3), hdrcomment='2*SEEING DIAMETER')
phot.puthdr(inim, 'OPTZPERR',round(zper, 3), hdrcomment='2*SEEING DIAMETER')
phot.puthdr(inim, 'OPTUL', round(ul, 3), hdrcomment='2*SEEING 3 sigma limit mag')
phot.puthdr(inim, 'STDNUMB',len(otbl), hdrcomment='# OF STD STARS')
#------------------------------------------------------------
# NORMAL PHOTOMETRY
#------------------------------------------------------------
if phottype == 'normal':
intbl['REAL_'+inmagkey] = zp + intbl[inmagkey]
intbl['REAL_'+inmagerkey] = phot.sqsum(zper, intbl[inmagerkey])
indx_targ = phot.targetfind(tra, tdec, intbl['ALPHA_J2000'], intbl['DELTA_J2000'], sep=seeing)
if indx_targ != None:
mag, mager = intbl[indx_targ]['REAL_'+inmagkey], intbl[indx_targ]['REAL_'+inmagerkey]
else:
mag, mager = -99, -99
#------------------------------------------------------------
# SUBTRACTION PHOTOMETRY
#------------------------------------------------------------
elif phottype == 'subt':
subim = 'hd'+inim
phot.puthdr(subim, 'SEEING', round(seeing, 3), hdrcomment='SEEING [arcsec]')
phot.puthdr(subim, 'PEEING', round(peeing, 3), hdrcomment='SEEING [pixel]')
phot.puthdr(subim, 'SKYSIG', round(skysig, 3), hdrcomment='SKY SIGMA VALUE')
phot.puthdr(subim, 'SKYVAL', round(skymed, 3), hdrcomment='SKY MEDIAN VALUE')
phot.puthdr(subim, 'OPTZP', round(zp, 3), hdrcomment='2*SEEING DIAMETER')
phot.puthdr(subim, 'OPTZPERR',round(zper, 3), hdrcomment='2*SEEING DIAMETER')
phot.puthdr(subim, 'OPTUL', round(ul, 3), hdrcomment='2*SEEING 3 sigma limit mag')
phot.puthdr(subim, 'STDNUMB',len(otbl), hdrcomment='# OF STD STARS')
os.system('cp {} {}'.format(inim[:-5]+'.psf', subim[:-5]+'.psf'))
param_subcom = dict( inim=subim,
gain=gain, pixscale=pixscale, seeing=seeing,
det_sigma=3,
backsize=str(64), backfiltersize=str(3),
psf=True, check=False)
subtbl, subcat = phot.secom(**param_subcom)
subtbl['REAL_'+inmagkey] = zp + subtbl[inmagkey]
subtbl['REAL_'+inmagerkey] = phot.sqsum(zper, subtbl[inmagerkey])
indx_targ = phot.targetfind(tra, tdec, subtbl['ALPHA_J2000'], subtbl['DELTA_J2000'], sep=seeing)
if indx_targ != None:
mag, mager = subtbl[indx_targ]['REAL_'+inmagkey], subtbl[indx_targ]['REAL_'+inmagerkey]
else:
mag, mager = -99, -99
#------------------------------------------------------------
# CALC. DEPTH
#------------------------------------------------------------
elif phottype == 'depth':
mag, mager = -99, -99
onetbl = Table([[inim], [obs], [obj], [round(radeg, 3)], [round(dedeg, 3)], [date_obs], [jd], [refmagkey], [len(otbl)], [round(zp, 3)], [round(zper, 3)], [round(seeing, 3)], [round(skymed, 3)], [round(skysig, 3)], [round(ul, 3)], [mag], [mager]],
names=('image', 'obs', 'obj', 'ra', 'dec', 'date-obs', 'jd', 'filter', 'stdnumb', 'zp', 'zper', 'seeing', 'skyval', 'skysig', 'ul', 'mag', 'magerr'))
return onetbl
#============================================================
# USER SETTING
#============================================================
path_base = './'
path_obs = '/home/sonic/Research/table'
path_refcat = '/home/sonic/Research/cat/refcat'
#------------------------------------------------------------
obstbl = ascii.read(path_obs+'/obs.txt')
# TARGET COORD. [deg]
#------------------------------------------------------------
# IMAGES TO PHOTOMETRY
# INPUT FORMAT : Calib-[OBS]-[TARGET]-[DATE]-[TIME]-[BAND]*.fits
#------------------------------------------------------------
kmtnetkey = 'nn'
imlist = glob.glob('a*{}-4*.fits'.format(kmtnetkey))
# imlist.sort()
for inim in glob.glob('a*{}-*.fits'.format(kmtnetkey)):
if '-4' not in inim:
imlist.append(inim)
for img in imlist: print(img)
photlist = []
refcatname = 'APASS'
phottype = 'depth'
starttime = time.time()
#============================================================
# MAIN COMMAND
#============================================================
for inim in imlist:
try:
plt.ioff()
param_phot = dict( inim=inim, refcatname=refcatname, phottype=phottype,
tra=0, tdec=0, path_base='./', aperture='MAG_APER_7',
detsig=3.0, frac=0.9)
photlist.append(phot_routine(**param_phot))
os.system('rm psf*.fits snap*.fits *.xml seg.fits')
	except Exception as err:
		print('PHOTOMETRY FAILED FOR {}: {}'.format(inim, err))
#------------------------------------------------------------
# FINISH
#------------------------------------------------------------
if len(photlist) == 0:
print('PHOTOMETRY FAILED!')
else:
photbl = vstack(photlist)
	if os.path.exists(path_base+'/phot-{}.dat'.format(kmtnetkey)):
		os.system('mv {} {}'.format(path_base+'/phot-{}.dat'.format(kmtnetkey), path_base+'/phot-{}.dat.bkg'.format(kmtnetkey)))
photbl.write(path_base+'/phot-{}.dat'.format(kmtnetkey), format='ascii', overwrite=True)
deltime = time.time() - starttime
print('All PROCESS IS DONE.\t('+str(round(deltime, 1))+' sec)')
|
en
| 0.116911
|
| 2.360709
| 2
|
kvlog/__init__.py
|
magicray/keyvaluestore
| 0
|
6627064
|
<filename>kvlog/__init__.py<gh_stars>0
import json
import urllib.parse
import urllib.request
class Client():
def __init__(self, servers):
self.servers = servers
self.leader = None
def server_list(self):
servers = [self.leader] if self.leader else []
servers.extend(self.servers)
return servers
def state(self):
result = dict()
for ip, port in self.servers:
try:
server = '{}:{}'.format(ip, port)
with urllib.request.urlopen('http://' + server) as r:
result[server] = json.loads(r.read())
except Exception:
pass
return result
def put(self, key, value):
value = value if type(value) is bytes else value.encode()
for ip, port in self.server_list():
try:
url = 'http://{}:{}/{}'.format(ip, port, key)
req = urllib.request.Request(url, data=value)
with urllib.request.urlopen(req) as r:
self.leader = (ip, port)
return dict(status=r.headers['KVLOG_STATUS'],
version=r.headers['KVLOG_VERSION'],
committed=r.headers['KVLOG_COMMITTED'])
except Exception:
pass
def get(self, key):
for ip, port in self.server_list():
try:
url = 'http://{}:{}/{}'.format(ip, port, key)
with urllib.request.urlopen(url) as r:
self.leader = (ip, port)
return dict(key=key, value=r.read(),
lock=r.headers['KVLOG_LOCK'],
version=r.headers['KVLOG_VERSION'])
except Exception:
pass
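# Editor's sketch (assumption, not part of the original module): minimal usage of
# Client against a hypothetical two-node cluster; addresses and keys are placeholders.
if __name__ == '__main__':
    client = Client([('127.0.0.1', 5001), ('127.0.0.1', 5002)])
    print(client.state())                    # per-server state of the reachable nodes
    print(client.put('greeting', b'hello'))  # write; caches the leader on success
    print(client.get('greeting'))            # read, trying the cached leader first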
|
none
| 1
| 2.742887
| 3
|
|
main.py
|
TeKraft/OwlNightLong
| 0
|
6627065
|
from preprocessing import *
from processingData import *
from postprocessing import *
from saveMap import *
import os
# dataPath = os.path.join('C:\\','Users','s_slim01','Downloads','movebank','movebank','eagle_owl','Eagle owl Reinhard Vohwinkel MPIO','points.shp')
#dataPath = os.path.join('/home','torben','Documents','uni','Master','SS_2018','PyGIS','final_submission','movebank','eagle_owl','Eagle owl Reinhard Vohwinkel MPIO','points.shp')
# dataPath = os.path.join('C:\\', 'Users', 'hans-', \
# 'Documents', 'Master', '2.Semester', 'PythonInGIS', \
# 'FinalAssignment', 'data', 'movebank', 'movebank', 'eagle_owl', 'Eagle owl Reinhard Vohwinkel MPIO', 'points.shp')
# dataPath = os.path.join('C:\\', 'Users', 'pglah', 'Documents', 'movebank', 'movebank', 'eagle_owl', 'Eagle owl Reinhard Vohwinkel MPIO', 'points.shp')
datadir = os.path.join('C:\\','Users','Tanja','Desktop','Master','PythonInGIS')
dataPath = os.path.join(datadir, 'data','points.shp')
rasterPath = os.path.join(datadir, 'OwlNightLong','raster','dtk_reprojected_clipped.tif')
outputPath = os.path.join(datadir, 'OwlNightLong','map.png')
rasterData = openRaster(rasterPath)
shpData = openFile(dataPath,'ESRI Shapefile')
owlIds = getOwlIDs(shpData)
saveMap(shpData,rasterData,outputPath,'Wistia','YlGn')
averageDistanceAllOwls = []
averageDistanceAllOwlsMonth = []
owlIdsSorted = sorted(owlIds, key=lambda x: x)
counter = 0
for owl in owlIdsSorted:
print()
print(owl + ' ' + str(counter+1) + '/' + str(len(owlIdsSorted)))
if (owl != "3897"):
# if (owl == "4046" or owl == "3894"): # use to reduce processing time
singleOwl = owlDistanceAndTime(owl,shpData)
#interval = 3600000 # 60 min => 1000 * 60 * 60 // 1000 = 1 sec
month = 0
while(month < 13):
distancePerHour = calcDistPerHour(singleOwl, month)
xyzHour = distHour(distancePerHour)
# plotAverages(xyz, owl)
if (month == 0):
averageDistanceAllOwls.append((owl, xyzHour))
else:
if ( len(averageDistanceAllOwlsMonth) < month ):
averageDistanceAllOwlsMonth.append([])
averageDistanceAllOwlsMonth[month-1].append((owl, xyzHour))
month += 1
else:
print('no data')
counter += 1
# print(averageDistanceAllOwls)
"""
averageDistanceAllOwls
[
('id', [(avg, hour), (avg, hour), ...]),
('id', [(avg, hour), (avg, hour), ...])
]
"""
avgDistances = adjustEntryPosition(averageDistanceAllOwls)# average for each owl
# xyzAll = prepareXYZDataForPlotting(avgDistances)
# plotAverages(xyzAll, 'OwlNightLong')
hourBased = hourBasedAverageAllOwls(avgDistances)
data = prepareXYDataForPlotting(hourBased)
plotAverages(data, 0)
saveAsCSV(hourBased, 0)
for idx, monthData in enumerate(averageDistanceAllOwlsMonth):
avgDistancesMonth = adjustEntryPosition(monthData)# average for each owl
# xyzAll = prepareXYZDataForPlotting(avgDistances)
# plotAverages(xyzAll, 'OwlNightLong')
hourBasedMonth = hourBasedAverageAllOwls(avgDistancesMonth)
dataMonth = prepareXYDataForPlotting(hourBasedMonth)
plotAverages(dataMonth, idx+1)
saveAsCSV(hourBasedMonth, idx+1)
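# Editor's sketch (assumption, not part of the original script): how an hour-based
# average over all owls could be computed from the structure documented above;
# the imported hourBasedAverageAllOwls remains the authoritative implementation.
def hour_average_sketch(averageDistanceAllOwls):
    sums, counts = {}, {}
    for owlId, pairs in averageDistanceAllOwls:
        for avg, hour in pairs:
            sums[hour] = sums.get(hour, 0.0) + avg
            counts[hour] = counts.get(hour, 0) + 1
    return {hour: sums[hour] / counts[hour] for hour in sums}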
|
en
| 0.353913
|
| 2.111828
| 2
|
vqgan/config.py
|
davisyoshida/vqgan-haiku
| 0
|
6627066
|
<filename>vqgan/config.py
from collections import namedtuple
import haiku as hk
VQGanConfig = namedtuple('VQGanConfig', [
'learning_rate',
'resolution',
'no_downscale_layers',
'embed_dim',
'n_embed',
'ch_mult',
'num_res_blocks',
'channels',
'temb_channels',
'dropout',
'z_channels',
'out_channels',
'attn_resolutions',
'beta',
'disc_weight',
'disc_start_step',
'codebook_weight',
'l1_weight',
'percep_weight',
'emb_init_scale',
'warmup_steps'
])
DEFAULT_CONFIG = VQGanConfig(
learning_rate=4.5e-6,
resolution=128,
no_downscale_layers=2,
embed_dim=256,
n_embed=1024,
ch_mult=(1, 1, 2, 2, 4),
channels=128,
num_res_blocks=2,
attn_resolutions=[16],
temb_channels=-1,
dropout=0.,
z_channels=256,
out_channels=3,
beta=0.25,
disc_weight=0.25,
disc_start_step=10000,
codebook_weight=1.,
l1_weight=1.,
percep_weight=1.,
emb_init_scale=0.75,
warmup_steps=1000,
)
class ConfigModule(hk.Module):
def __init__(self, config : VQGanConfig, name=None):
super().__init__(name)
self.config = config
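# Editor's sketch (assumption, not part of the original module): since VQGanConfig is a
# namedtuple, variants of DEFAULT_CONFIG can be built with _replace without mutating it.
if __name__ == '__main__':
    custom = DEFAULT_CONFIG._replace(resolution=256, dropout=0.1)
    print(custom.resolution, custom.learning_rate)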
|
none
| 1
| 2.016654
| 2
|
|
tensorflow_datasets/summarization/samsum.py
|
ChAnYaNG97/datasets
| 1
|
6627067
|
<filename>tensorflow_datasets/summarization/samsum.py<gh_stars>1-10
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SAMSum dataset."""
import json
import os
from typing import Dict, Iterator, List, Text, Tuple
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@article{gliwa2019samsum,
title={SAMSum Corpus: A Human-annotated Dialogue Dataset for Abstractive Summarization},
author={<NAME> <NAME> <NAME> <NAME>},
journal={arXiv preprint arXiv:1911.12237},
year={2019}
}
"""
_DESCRIPTION = """
SAMSum Corpus contains over 16k chat dialogues with manually annotated
summaries.
There are two features:
- dialogue: text of dialogue.
- summary: human written summary of the dialogue.
- id: id of an example.
"""
_DOCUMENT = "dialogue"
_SUMMARY = "summary"
_ID = "id"
class Samsum(tfds.core.GeneratorBasedBuilder):
"""SAMSum dataset builder."""
VERSION = tfds.core.Version("1.0.0")
MANUAL_DOWNLOAD_INSTRUCTIONS = """\
Download https://arxiv.org/src/1911.12237v2/anc/corpus.7z, decompress and
  place train.json, val.json and test.json in the manual folder.
"""
def _info(self) -> tfds.core.DatasetInfo:
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict({
_DOCUMENT: tfds.features.Text(),
_SUMMARY: tfds.features.Text(),
_ID: tfds.features.Text(),
}),
supervised_keys=(_DOCUMENT, _SUMMARY),
homepage="https://arxiv.org/src/1911.12237v2/anc",
citation=_CITATION,
)
def _split_generators(
self, dl_manager: tfds.download.DownloadManager
) -> List[tfds.core.SplitGenerator]:
"""Returns SplitGenerators."""
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
"path": os.path.join(dl_manager.manual_dir, "train.json")
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
"path": os.path.join(dl_manager.manual_dir, "val.json")
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
"path": os.path.join(dl_manager.manual_dir, "test.json")
},
),
]
def _generate_examples(self,
path: Text = None
) -> Iterator[Tuple[Text, Dict[Text, Text]]]:
"""Yields examples."""
with tf.io.gfile.GFile(path, "rb") as f:
for example in json.load(f):
yield example[_ID], example
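# Editor's sketch (assumption, not part of the original module): preparing the dataset
# after the JSON files have been placed in a manual directory; the paths are placeholders.
if __name__ == "__main__":
  builder = Samsum(data_dir="/tmp/tfds")
  builder.download_and_prepare(
      download_config=tfds.download.DownloadConfig(manual_dir="/tmp/samsum_manual"))
  train_ds = builder.as_dataset(split="train")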
|
en
| 0.737646
|
| 1.942754
| 2
|
tests/explainers/test_linear.py
|
NunoEdgarGFlowHub/shap
| 1
|
6627068
|
<reponame>NunoEdgarGFlowHub/shap<filename>tests/explainers/test_linear.py
import matplotlib
import numpy as np
matplotlib.use('Agg')
import shap
def test_tied_pair():
np.random.seed(0)
beta = np.array([1, 0, 0])
mu = np.zeros(3)
Sigma = np.array([[1, 0.999999, 0], [0.999999, 1, 0], [0, 0, 1]])
X = np.ones((1,3))
explainer = shap.LinearExplainer((beta, 0), (mu, Sigma), feature_dependence="correlation")
assert np.abs(explainer.shap_values(X) - np.array([0.5, 0.5, 0])).max() < 0.05
def test_tied_triple():
np.random.seed(0)
beta = np.array([0, 1, 0, 0])
mu = 1*np.ones(4)
Sigma = np.array([[1, 0.999999, 0.999999, 0], [0.999999, 1, 0.999999, 0], [0.999999, 0.999999, 1, 0], [0, 0, 0, 1]])
X = 2*np.ones((1,4))
explainer = shap.LinearExplainer((beta, 0), (mu, Sigma), feature_dependence="correlation")
assert explainer.expected_value == 1
assert np.abs(explainer.shap_values(X) - np.array([0.33333, 0.33333, 0.33333, 0])).max() < 0.05
def test_sklearn_linear():
np.random.seed(0)
from sklearn.linear_model import Ridge
import shap
# train linear model
X,y = shap.datasets.boston()
model = Ridge(0.1)
model.fit(X, y)
# explain the model's predictions using SHAP values
explainer = shap.LinearExplainer(model, X)
assert np.abs(explainer.expected_value - model.predict(X).mean()) < 1e-6
explainer.shap_values(X)
def test_perfect_colinear():
import shap
from sklearn.linear_model import LinearRegression
import numpy as np
X,y = shap.datasets.boston()
X.iloc[:,0] = X.iloc[:,4] # test duplicated features
X.iloc[:,5] = X.iloc[:,6] - X.iloc[:,6] # test multiple colinear features
X.iloc[:,3] = 0 # test null features
model = LinearRegression()
model.fit(X, y)
explainer = shap.LinearExplainer(model, X, feature_dependence="correlation")
shap_values = explainer.shap_values(X)
assert np.abs(shap_values.sum(1) - model.predict(X) + model.predict(X).mean()).sum() < 1e-7
def test_shape_values_linear_many_features():
from sklearn.linear_model import Ridge
np.random.seed(0)
coef = np.array([1, 2]).T
# generate linear data
X = np.random.normal(1, 10, size=(1000, len(coef)))
y = np.dot(X, coef) + 1 + np.random.normal(scale=0.1, size=1000)
# train linear model
model = Ridge(0.1)
model.fit(X, y)
# explain the model's predictions using SHAP values
explainer = shap.LinearExplainer(model, X)
values = explainer.shap_values(X)
assert values.shape == (1000, 2)
expected = (X - X.mean(0)) * coef
np.testing.assert_allclose(expected - values, 0, atol=0.01)
def test_single_feature():
""" Make sure things work with a univariate linear regression.
"""
import sklearn.linear_model
np.random.seed(0)
# generate linear data
X = np.random.normal(1, 10, size=(1000, 1))
y = 2 * X[:, 0] + 1 + np.random.normal(scale=0.1, size=1000)
# train linear model
model = sklearn.linear_model.Ridge(0.1)
model.fit(X, y)
# explain the model's predictions using SHAP values
explainer = shap.LinearExplainer(model, X)
shap_values = explainer.shap_values(X)
assert np.abs(explainer.expected_value - model.predict(X).mean()) < 1e-6
assert np.max(np.abs(explainer.expected_value + shap_values.sum(1) - model.predict(X))) < 1e-6
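# Editor's note (assumption, not part of the original tests): for a linear model with
# independent features, the SHAP value of feature i is coef_i * (x_i - mean_i), which is
# exactly the closed form checked in test_shape_values_linear_many_features above.
def linear_shap_sketch(coef, X):
    return (X - X.mean(0)) * coef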
|
en
| 0.627322
|
| 2.79897
| 3
|
support/retro_contest/agent.py
|
hermesdt/retro-contest
| 51
|
6627069
|
import argparse
import gym_remote.exceptions as gre
import gym_remote.client as grc
import os
import sys
import traceback
from pkg_resources import EntryPoint
def make(socketdir='tmp/sock'):
env = grc.RemoteEnv(socketdir)
return env
def run(agent=None, socketdir='tmp/sock', daemonize=False, args=()):
if daemonize:
pid = os.fork()
if pid > 0:
return
if agent is None:
print('Running agent: random_agent')
agent = random_agent
elif not callable(agent):
print('Running agent: %s' % agent)
entrypoint = EntryPoint.parse('entry=' + agent)
agent = entrypoint.load(False)
else:
print('Running agent: %r' % agent)
env = make(socketdir)
try:
agent(env, *args)
except gre.GymRemoteError:
pass
def random_agent(env, *args):
env.reset()
while True:
action = env.action_space.sample()
try:
ob, reward, done, _ = env.step(action)
except gre.ResetError:
done = True
if done:
env.reset()
def main(argv=sys.argv[1:]):
parser = argparse.ArgumentParser(description='Run support code for OpenAI Retro Contest remote environment')
parser.add_argument('--daemonize', '-d', action='store_true', default=False, help='Daemonize (background) the process')
parser.add_argument('entry', type=str, nargs='?', help='Entry point to create an agent')
parser.add_argument('args', nargs='*', help='Optional arguments to the agent')
args = parser.parse_args(argv)
run(agent=args.entry, daemonize=args.daemonize, args=args.args)
if __name__ == '__main__':
main()
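# Editor's sketch (assumption, not part of the original module): the shape of a
# user-supplied agent loadable through the 'entry' argument (e.g. 'my_pkg.my_mod:my_agent');
# run() calls it as agent(env, *args) with the remote environment.
def my_agent(env, *args):
    env.reset()
    done = False
    while not done:
        ob, reward, done, _ = env.step(env.action_space.sample())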
|
none
| 1
| 2.091244
| 2
|
|
gpuexperiments/occupancy_dyn.py
|
hughperkins/gpu-experiments
| 2
|
6627070
|
<filename>gpuexperiments/occupancy_dyn.py
"""
Try using dynamic shared memory, see if gets optimized away, or affects occupancy
"""
from __future__ import print_function, division
import os
from os.path import join
import time
import string
import jinja2
import numpy as np
import pyopencl as cl
import subprocess
from gpuexperiments.callkernel import call_cl_kernel
from gpuexperiments.timecheck import inittime, timecheck
import lib_clgpuexp
from lib_clgpuexp import clearComputeCache, getPtx, timeKernel, buildKernel, initClGpu
from lib_clgpuexp import dumpSass
code_template = r"""
kernel void {{name}} (global float *data, global float *out{% if shared %}, local float *F{% endif %}) {
{% for j in range(ilp) %}
float a{{j}} = data[{{j}}];
{% endfor %}
float b = data[0];
float c = data[1];
#pragma unroll 256
for(int i = 0; i < {{its / ilp}}; i++) {
{% for j in range(ilp) %}
{% if fma %}
a{{j}} = fma(a{{j}}, b, c);
{% else %}
a{{j}} = a{{j}} * b + c;
{% endif %}
{% endfor %}
}
float a = 0.0f;
{% for j in range(ilp) %}
a += a{{j}};
{% endfor %}
out[0] = a;
}
"""
experiments = [
{'name': 'k1_g{grid}_b{block}_s{shared}', 'code': code_template, 'options': '', 'template_args': {'fma': True, 'ilp': 1}, 'block': 32, 'grid': 1024},
{'name': 'k1_g{grid}_b{block}_s{shared}', 'code': code_template, 'options': '', 'template_args': {'fma': True, 'ilp': 1}, 'block': 64, 'grid': 1024}
]
initClGpu()
compute_units = lib_clgpuexp.device.get_info(cl.device_info.MAX_COMPUTE_UNITS)
maxShared = lib_clgpuexp.device.get_info(cl.device_info.LOCAL_MEM_SIZE) // 1024
compute_capability = (
lib_clgpuexp.device.get_info(cl.device_info.COMPUTE_CAPABILITY_MAJOR_NV),
lib_clgpuexp.device.get_info(cl.device_info.COMPUTE_CAPABILITY_MINOR_NV)
)
deviceName = lib_clgpuexp.device.get_info(cl.device_info.NAME)
deviceNameSimple = deviceName.replace('GeForce', '').replace('GTX', '').strip().replace(' ', '').lower()
print('deviceName', deviceName, 'compute capability', compute_capability)
print('compute units', compute_units, 'max shared memory', maxShared)
shared_memory_per_sm = None
# data comes from http://developer.download.nvidia.com/compute/cuda/CUDA_Occupancy_calculator.xls
if compute_capability[0] == 5:
if compute_capability[1] == 0:
shared_memory_per_sm = 65536
elif compute_capability[1] == 2:
shared_memory_per_sm = 98304
else:
raise Exception('compute capability %s not recognized' % compute_capability)
else:
raise Exception('compute capability %s not recognized' % compute_capability)
assert shared_memory_per_sm is not None
times = []
for experiment in experiments:
template = jinja2.Template(experiment['code'], undefined=jinja2.StrictUndefined)
#for block in range(128,1024+128,128):
block = experiment['block']
grid = experiment['grid'] # 1024
ilp = experiment['template_args']['ilp']
for shared in range(0, maxShared, 4):
if shared == 0:
occupancy = 32
else:
occupancy = 64 // shared
its = 100000000 // grid * 32 // block
its *= compute_units
if grid == 1: # modify its, since it will only run on one sm
its = its // compute_units
its = its * occupancy // 32
its = (its // 256 // ilp) * 256 * ilp
name = experiment['name'].format(shared=shared, grid=grid, block=block)
clearComputeCache()
add_args = []
if shared > 0:
add_args.append(cl.LocalMemory(shared*1024))
source = template.render(name=name, its=its, type='float', shared=shared > 0, **experiment['template_args'])
# print('source', source)
try:
kernel = buildKernel(name, source, options=experiment['options'])
for it in range(3):
t = timeKernel(name, kernel, grid_x=grid, block_x=block, add_args=add_args)
except Exception as e:
print(e)
break
flops = its * block / (t/1000) * 2 * grid
times.append({'name': name, 'time': t, 'flops': flops})
print(getPtx(name))
dumpSass(name)
# try varying occupancy, rather than varying shared memory
# assume shared memory per sm = 65536 bytes (as per sm5.0)
# assume full occupancy is 16 blocks per sm, but I'm not sure why...
full_occupancy_bsm = 32
X = np.arange(2, full_occupancy_bsm + 2, 2)
Y = np.zeros((X.shape[0]), dtype=np.float32)
# for blocks_per_sm in range(2, full_occupancy_bsm + 2, 2):
i = 0
for blocks_per_sm in X:
    shared_bytes = shared_memory_per_sm // blocks_per_sm
    shared_bytes = (shared_bytes // 256) * 256
    actual_blocks_per_sm = shared_memory_per_sm // shared_bytes
    occupancy = actual_blocks_per_sm / full_occupancy_bsm * 100
    print('occupancy', occupancy)
    print('shared_bytes', shared_bytes)
    if shared_bytes >= maxShared * 1024:
        print('exceeds maximum block local memory => skipping')
        continue
template = jinja2.Template(code_template, undefined=jinja2.StrictUndefined)
block = 32
grid = 1024
ilp = 1
its = 100000000 // grid * 32 // block * compute_units
its = (its // 256 // ilp) * 256 * ilp
its = its * blocks_per_sm // full_occupancy_bsm
name = 'kernel_bsm{bsm}'.format(bsm=blocks_per_sm)
clearComputeCache()
add_args = []
if shared_bytes > 0:
add_args.append(cl.LocalMemory(shared_bytes))
source = template.render(name=name, its=its, type='float', shared=shared_bytes > 0, fma=True, ilp=1)
try:
kernel = buildKernel(name, source)
for it in range(3):
t = timeKernel(name, kernel, grid_x=grid, block_x=block, add_args=add_args)
# print(getPtx(name))
except Exception as e:
print(e)
break
flops = its * block / (t/1000) * 2 * grid
Y[i] = flops / 1000 / 1000 / 1000
times.append({'name': name, 'time': t, 'flops': flops})
i += 1
f = open('/tmp/occupancy_dyn_%s.tsv' % deviceNameSimple, 'w')
line = 'name\ttot ms\tgflops'
print(line)
f.write(line + '\n')
for time_info in times:
    line = '%s\t%.1f\t%.0f' % (time_info['name'], time_info['time'], time_info.get('flops', 0) / 1000 / 1000 / 1000)
print(line)
f.write(line + '\n')
f.close()
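# Editor's sketch (assumption, not part of the original script): the occupancy model the
# second loop above relies on, pulled out as a stand-alone helper for clarity.
def occupancy_from_shared(shared_bytes_per_block, shared_per_sm=65536, full_occupancy_bsm=32):
    if shared_bytes_per_block == 0:
        return 100.0
    blocks_per_sm = shared_per_sm // shared_bytes_per_block
    return min(blocks_per_sm, full_occupancy_bsm) / full_occupancy_bsm * 100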
|
en
| 0.465757
|
| 1.766144
| 2
|
python/DL_for_HTT/common/model_inputs/GENleg_with_METcov_j1j2jr_Nnu_Npu.py
|
lucastorterotot/DL_for_HTT_mass
| 1
|
6627071
|
inputs = [
"leg1_pt_gen",
"leg1_eta_gen",
"leg1_phi_gen",
"leg2_pt_gen",
"leg2_eta_gen",
"leg2_phi_gen",
"jet1_pt_reco",
"jet1_eta_reco",
"jet1_phi_reco",
"jet2_pt_reco",
"jet2_eta_reco",
"jet2_phi_reco",
"remaining_jets_pt_reco",
"remaining_jets_eta_reco",
"remaining_jets_phi_reco",
"remaining_jets_N_reco",
"MET_pt_gen",
"MET_phi_gen",
"MET_covXX_reco",
"MET_covXY_reco",
"MET_covYY_reco",
# "MET_significance_reco",
"mT1_gen",
"mT2_gen",
"mTtt_gen",
"mTtot_gen",
"PU_npvsGood_reco",
"N_neutrinos_reco",
]
|
en
| 0.349516
|
# "MET_significance_reco",
| 1.209551
| 1
|
accounts/models.py
|
joshgoshbgosh/ccs-final-project
| 1
|
6627072
|
<reponame>joshgoshbgosh/ccs-final-project
from django.db import models
from django.conf import settings
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
username = models.CharField(max_length=255)
class Profile(models.Model):
# https://docs.djangoproject.com/en/3.1/topics/db/examples/one_to_one/#one-to-one-relationships
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name="profile", blank=True)
phone_number = models.CharField(max_length=12, blank=True)
def __str__(self):
return self.user.username
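# Editor's sketch (assumption, not part of the original module): a common companion to a
# OneToOne profile, creating the Profile automatically when a User is first saved.
from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    if created:
        Profile.objects.create(user=instance)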
|
en
| 0.740046
|
# https://docs.djangoproject.com/en/3.1/topics/db/examples/one_to_one/#one-to-one-relationships
| 2.500115
| 3
|
qiskit_acqua/ising/testgraphpartition.py
|
adcorcol/qiskit-acqua
| 1
|
6627073
|
<reponame>adcorcol/qiskit-acqua
from qiskit_acqua import Operator, run_algorithm, get_algorithm_instance
from qiskit_acqua.input import get_input_instance
from qiskit_acqua.ising import graphpartition
import numpy as np
# w = maxcut.parse_gset_format('sample.maxcut')
# qubitOp, offset = maxcut.get_maxcut_qubitops(w)
# algo_input = get_input_instance('EnergyInput')
# algo_input.qubit_op = qubitOp
algo_input = get_input_instance('EnergyInput')
if True:
np.random.seed(100)
w = graphpartition.random_graph(4, edge_prob=0.8, weight_range=10)
qubitOp, offset = graphpartition.get_graphpartition_qubitops(w)
algo_input.qubit_op = qubitOp
print(w)
to_be_tested_algos = ['ExactEigensolver', 'CPLEX', 'VQE']
operational_algos = []
for algo in to_be_tested_algos:
try:
get_algorithm_instance(algo)
operational_algos.append(algo)
    except Exception:
print("{} is unavailable, please check your setting.".format(algo))
print(operational_algos)
if 'ExactEigensolver' not in operational_algos:
print("ExactEigensolver is not in operational algorithms.")
else:
algorithm_cfg = {
'name': 'ExactEigensolver',
}
params = {
'problem': {'name': 'ising'},
'algorithm': algorithm_cfg
}
result = run_algorithm(params,algo_input)
# print('objective function:', maxcut.maxcut_obj(result, offset))
x = graphpartition.sample_most_likely(result['eigvecs'][0])
print('solution:', graphpartition.get_graph_solution(x))
print('solution objective:', graphpartition.objective_value(x, w))
# brute-force way.
def bitfield(n, L):
result = np.binary_repr(n, L)
return [int(digit) for digit in result] # [2:] to chop off the "0b" part
L = len(x)
    n_states = 2**len(x)
    minimal_conf = None
    minimal_v = np.inf
    for i in range(n_states):
cur = bitfield(i, L)
how_many_nonzero = np.count_nonzero(cur)
if how_many_nonzero *2 != L: # not balanced
continue
cur_v = graphpartition.objective_value(np.array(cur), w)
if cur_v < minimal_v:
minimal_v = cur_v
minimal_conf = cur
print("minimal assigment:", minimal_conf)
print("minimal objective", minimal_v)
# if 'VQE' not in operational_algos:
# print("VQE is not in operational algorithms.")
# else:
# algorithm_cfg = {
# 'name': 'VQE',
# 'operator_mode': 'matrix'
# }
#
# optimizer_cfg = {
# 'name': 'L_BFGS_B',
# 'maxfun': 6000
# }
#
# var_form_cfg = {
# 'name': 'RYRZ',
# 'depth': 3,
# 'entanglement': 'linear'
# }
#
# params = {
# 'problem': {'name': 'ising'},
# 'algorithm': algorithm_cfg,
# 'optimizer': optimizer_cfg,
# 'variational_form': var_form_cfg,
# 'backend': {'name': 'local_statevector_simulator'}
# }
#
# result = run_algorithm(params,algo_input)
#
# x = maxcut.sample_most_likely(len(w), result['eigvecs'][0])
# print('energy:', result['energy'])
# print('time:', result['eval_time'])
# print('maxcut objective:', result['energy'] + offset)
# print('solution:', maxcut.get_graph_solution(x))
# print('solution objective:', maxcut.maxcut_value(x, w))
|
en
| 0.203574
|
| 2.144346
| 2
|
utils/nn/modules/attention.py
|
roshanr11/Research-DCST
| 5
|
6627074
|
<gh_stars>1-10
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class BiAAttention(nn.Module):
'''
Bi-Affine attention layer.
'''
def __init__(self, input_size_encoder, input_size_decoder, num_labels, biaffine=True, **kwargs):
'''
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
num_labels: int
the number of labels of the crf layer
biaffine: bool
                whether to apply the bi-affine parameter.
**kwargs:
'''
super(BiAAttention, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.num_labels = num_labels
self.biaffine = biaffine
self.W_d = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder))
self.W_e = Parameter(torch.Tensor(self.num_labels, self.input_size_encoder))
self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder, self.input_size_encoder))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.W_d)
nn.init.xavier_uniform_(self.W_e)
nn.init.constant_(self.b, 0.)
if self.biaffine:
nn.init.xavier_uniform_(self.U)
def forward(self, input_d, input_e, mask_d=None, mask_e=None):
'''
Args:
input_d: Tensor
the decoder input tensor with shape = [batch_size, length_decoder, input_size]
input_e: Tensor
the child input tensor with shape = [batch_size, length_encoder, input_size]
mask_d: Tensor or None
the mask tensor for decoder with shape = [batch_size, length_decoder]
mask_e: Tensor or None
the mask tensor for encoder with shape = [batch_size, length_encoder]
Returns: Tensor
the energy tensor with shape = [batch_size, num_label, length, length]
'''
        assert input_d.size(0) == input_e.size(0), 'batch sizes of encoder and decoder are required to be equal.'
batch_size, length_decoder, _ = input_d.size()
_, length_encoder, _ = input_e.size()
# compute decoder part: [num_label, input_size_decoder] * [batch_size, input_size_decoder, length_decoder]
# the output shape is [batch_size, num_label, length_decoder]
out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
        # compute encoder part: [num_label, input_size_encoder] * [batch_size, input_size_encoder, length_encoder]
# the output shape is [batch_size, num_label, length_encoder]
out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
# output shape [batch_size, num_label, length_decoder, length_encoder]
if self.biaffine:
# compute bi-affine part
# [batch_size, 1, length_decoder, input_size_decoder] * [num_labels, input_size_decoder, input_size_encoder]
# output shape [batch_size, num_label, length_decoder, input_size_encoder]
output = torch.matmul(input_d.unsqueeze(1), self.U)
# [batch_size, num_label, length_decoder, input_size_encoder] * [batch_size, 1, input_size_encoder, length_encoder]
# output shape [batch_size, num_label, length_decoder, length_encoder]
output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
output = output + out_d + out_e + self.b
else:
            # additive scoring when the bi-affine term is disabled
            output = out_d + out_e + self.b
if mask_d is not None:
output = output * mask_d.unsqueeze(1).unsqueeze(3) * mask_e.unsqueeze(1).unsqueeze(2)
return output
class ConcatAttention(nn.Module):
'''
Concatenate attention layer.
'''
# TODO test it!
def __init__(self, input_size_encoder, input_size_decoder, hidden_size, num_labels, **kwargs):
'''
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
hidden_size: int
the dimension of the hidden.
num_labels: int
the number of labels of the crf layer
**kwargs:
'''
super(ConcatAttention, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.hidden_size = hidden_size
self.num_labels = num_labels
self.W_d = Parameter(torch.Tensor(self.input_size_decoder, self.hidden_size))
self.W_e = Parameter(torch.Tensor(self.input_size_encoder, self.hidden_size))
self.b = Parameter(torch.Tensor(self.hidden_size))
self.v = Parameter(torch.Tensor(self.hidden_size, self.num_labels))
self.reset_parameters()
def reset_parameters(self):
        nn.init.xavier_uniform_(self.W_d)
        nn.init.xavier_uniform_(self.W_e)
        nn.init.xavier_uniform_(self.v)
        nn.init.constant_(self.b, 0.)
def forward(self, input_d, input_e, mask_d=None, mask_e=None):
'''
Args:
input_d: Tensor
the decoder input tensor with shape = [batch_size, length_decoder, input_size]
input_e: Tensor
the child input tensor with shape = [batch_size, length_encoder, input_size]
mask_d: Tensor or None
the mask tensor for decoder with shape = [batch_size, length_decoder]
mask_e: Tensor or None
the mask tensor for encoder with shape = [batch_size, length_encoder]
Returns: Tensor
the energy tensor with shape = [batch_size, num_label, length, length]
'''
        assert input_d.size(0) == input_e.size(0), 'batch sizes of encoder and decoder are required to be equal.'
batch_size, length_decoder, _ = input_d.size()
_, length_encoder, _ = input_e.size()
# compute decoder part: [batch_size, length_decoder, input_size_decoder] * [input_size_decoder, hidden_size]
# the output shape is [batch_size, length_decoder, hidden_size]
# then --> [batch_size, 1, length_decoder, hidden_size]
out_d = torch.matmul(input_d, self.W_d).unsqueeze(1)
        # compute encoder part: [batch_size, length_encoder, input_size_encoder] * [input_size_encoder, hidden_size]
# the output shape is [batch_size, length_encoder, hidden_size]
# then --> [batch_size, length_encoder, 1, hidden_size]
out_e = torch.matmul(input_e, self.W_e).unsqueeze(2)
# add them together [batch_size, length_encoder, length_decoder, hidden_size]
out = torch.tanh(out_d + out_e + self.b)
# product with v
# [batch_size, length_encoder, length_decoder, hidden_size] * [hidden, num_label]
# [batch_size, length_encoder, length_decoder, num_labels]
# then --> [batch_size, num_labels, length_decoder, length_encoder]
return torch.matmul(out, self.v).transpose(1, 3)
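# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Shows the tensor shapes both attention layers are documented to produce. The sizes
# below (batch 2, decoder length 5, encoder length 7, feature size 16, hidden 32,
# 3 labels) are arbitrary assumptions for demonstration only.
if __name__ == '__main__':
    input_d = torch.randn(2, 5, 16)   # [batch_size, length_decoder, input_size_decoder]
    input_e = torch.randn(2, 7, 16)   # [batch_size, length_encoder, input_size_encoder]
    biaffine_attention = BiAAttention(input_size_encoder=16, input_size_decoder=16, num_labels=3)
    concat_attention = ConcatAttention(input_size_encoder=16, input_size_decoder=16,
                                       hidden_size=32, num_labels=3)
    print(biaffine_attention(input_d, input_e).size())  # torch.Size([2, 3, 5, 7])
    print(concat_attention(input_d, input_e).size())    # torch.Size([2, 3, 5, 7])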
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class BiAAttention(nn.Module):
'''
Bi-Affine attention layer.
'''
def __init__(self, input_size_encoder, input_size_decoder, num_labels, biaffine=True, **kwargs):
'''
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
num_labels: int
the number of labels of the crf layer
biaffine: bool
                whether to apply the bi-affine parameter.
**kwargs:
'''
super(BiAAttention, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.num_labels = num_labels
self.biaffine = biaffine
self.W_d = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder))
self.W_e = Parameter(torch.Tensor(self.num_labels, self.input_size_encoder))
self.b = Parameter(torch.Tensor(self.num_labels, 1, 1))
if self.biaffine:
self.U = Parameter(torch.Tensor(self.num_labels, self.input_size_decoder, self.input_size_encoder))
else:
self.register_parameter('U', None)
self.reset_parameters()
def reset_parameters(self):
nn.init.xavier_uniform_(self.W_d)
nn.init.xavier_uniform_(self.W_e)
nn.init.constant_(self.b, 0.)
if self.biaffine:
nn.init.xavier_uniform_(self.U)
def forward(self, input_d, input_e, mask_d=None, mask_e=None):
'''
Args:
input_d: Tensor
the decoder input tensor with shape = [batch_size, length_decoder, input_size]
input_e: Tensor
the child input tensor with shape = [batch_size, length_encoder, input_size]
mask_d: Tensor or None
the mask tensor for decoder with shape = [batch_size, length_decoder]
mask_e: Tensor or None
the mask tensor for encoder with shape = [batch_size, length_encoder]
Returns: Tensor
the energy tensor with shape = [batch_size, num_label, length, length]
'''
        assert input_d.size(0) == input_e.size(0), 'batch sizes of encoder and decoder are required to be equal.'
batch_size, length_decoder, _ = input_d.size()
_, length_encoder, _ = input_e.size()
# compute decoder part: [num_label, input_size_decoder] * [batch_size, input_size_decoder, length_decoder]
# the output shape is [batch_size, num_label, length_decoder]
out_d = torch.matmul(self.W_d, input_d.transpose(1, 2)).unsqueeze(3)
        # compute encoder part: [num_label, input_size_encoder] * [batch_size, input_size_encoder, length_encoder]
# the output shape is [batch_size, num_label, length_encoder]
out_e = torch.matmul(self.W_e, input_e.transpose(1, 2)).unsqueeze(2)
# output shape [batch_size, num_label, length_decoder, length_encoder]
if self.biaffine:
# compute bi-affine part
# [batch_size, 1, length_decoder, input_size_decoder] * [num_labels, input_size_decoder, input_size_encoder]
# output shape [batch_size, num_label, length_decoder, input_size_encoder]
output = torch.matmul(input_d.unsqueeze(1), self.U)
# [batch_size, num_label, length_decoder, input_size_encoder] * [batch_size, 1, input_size_encoder, length_encoder]
# output shape [batch_size, num_label, length_decoder, length_encoder]
output = torch.matmul(output, input_e.unsqueeze(1).transpose(2, 3))
output = output + out_d + out_e + self.b
else:
            # additive scoring when the bi-affine term is disabled
            output = out_d + out_e + self.b
if mask_d is not None:
output = output * mask_d.unsqueeze(1).unsqueeze(3) * mask_e.unsqueeze(1).unsqueeze(2)
return output
class ConcatAttention(nn.Module):
'''
Concatenate attention layer.
'''
# TODO test it!
def __init__(self, input_size_encoder, input_size_decoder, hidden_size, num_labels, **kwargs):
'''
Args:
input_size_encoder: int
the dimension of the encoder input.
input_size_decoder: int
the dimension of the decoder input.
hidden_size: int
the dimension of the hidden.
num_labels: int
the number of labels of the crf layer
**kwargs:
'''
super(ConcatAttention, self).__init__()
self.input_size_encoder = input_size_encoder
self.input_size_decoder = input_size_decoder
self.hidden_size = hidden_size
self.num_labels = num_labels
self.W_d = Parameter(torch.Tensor(self.input_size_decoder, self.hidden_size))
self.W_e = Parameter(torch.Tensor(self.input_size_encoder, self.hidden_size))
self.b = Parameter(torch.Tensor(self.hidden_size))
self.v = Parameter(torch.Tensor(self.hidden_size, self.num_labels))
self.reset_parameters()
def reset_parameters(self):
        nn.init.xavier_uniform_(self.W_d)
        nn.init.xavier_uniform_(self.W_e)
        nn.init.xavier_uniform_(self.v)
        nn.init.constant_(self.b, 0.)
def forward(self, input_d, input_e, mask_d=None, mask_e=None):
'''
Args:
input_d: Tensor
the decoder input tensor with shape = [batch_size, length_decoder, input_size]
input_e: Tensor
the child input tensor with shape = [batch_size, length_encoder, input_size]
mask_d: Tensor or None
the mask tensor for decoder with shape = [batch_size, length_decoder]
mask_e: Tensor or None
the mask tensor for encoder with shape = [batch_size, length_encoder]
Returns: Tensor
the energy tensor with shape = [batch_size, num_label, length, length]
'''
        assert input_d.size(0) == input_e.size(0), 'batch sizes of encoder and decoder are required to be equal.'
batch_size, length_decoder, _ = input_d.size()
_, length_encoder, _ = input_e.size()
# compute decoder part: [batch_size, length_decoder, input_size_decoder] * [input_size_decoder, hidden_size]
# the output shape is [batch_size, length_decoder, hidden_size]
# then --> [batch_size, 1, length_decoder, hidden_size]
out_d = torch.matmul(input_d, self.W_d).unsqueeze(1)
        # compute encoder part: [batch_size, length_encoder, input_size_encoder] * [input_size_encoder, hidden_size]
# the output shape is [batch_size, length_encoder, hidden_size]
# then --> [batch_size, length_encoder, 1, hidden_size]
out_e = torch.matmul(input_e, self.W_e).unsqueeze(2)
# add them together [batch_size, length_encoder, length_decoder, hidden_size]
out = torch.tanh(out_d + out_e + self.b)
# product with v
# [batch_size, length_encoder, length_decoder, hidden_size] * [hidden, num_label]
# [batch_size, length_encoder, length_decoder, num_labels]
# then --> [batch_size, num_labels, length_decoder, length_encoder]
return torch.matmul(out, self.v).transpose(1, 3)
|
en
| 0.546189
|
Bi-Affine attention layer. Args: input_size_encoder: int the dimension of the encoder input. input_size_decoder: int the dimension of the decoder input. num_labels: int the number of labels of the crf layer biaffine: bool if apply bi-affine parameter. **kwargs: Args: input_d: Tensor the decoder input tensor with shape = [batch_size, length_decoder, input_size] input_e: Tensor the child input tensor with shape = [batch_size, length_encoder, input_size] mask_d: Tensor or None the mask tensor for decoder with shape = [batch_size, length_decoder] mask_e: Tensor or None the mask tensor for encoder with shape = [batch_size, length_encoder] Returns: Tensor the energy tensor with shape = [batch_size, num_label, length, length] # compute decoder part: [num_label, input_size_decoder] * [batch_size, input_size_decoder, length_decoder] # the output shape is [batch_size, num_label, length_decoder] # compute decoder part: [num_label, input_size_encoder] * [batch_size, input_size_encoder, length_encoder] # the output shape is [batch_size, num_label, length_encoder] # output shape [batch_size, num_label, length_decoder, length_encoder] # compute bi-affine part # [batch_size, 1, length_decoder, input_size_decoder] * [num_labels, input_size_decoder, input_size_encoder] # output shape [batch_size, num_label, length_decoder, input_size_encoder] # [batch_size, num_label, length_decoder, input_size_encoder] * [batch_size, 1, input_size_encoder, length_encoder] # output shape [batch_size, num_label, length_decoder, length_encoder] Concatenate attention layer. # TODO test it! Args: input_size_encoder: int the dimension of the encoder input. input_size_decoder: int the dimension of the decoder input. hidden_size: int the dimension of the hidden. num_labels: int the number of labels of the crf layer biaffine: bool if apply bi-affine parameter. **kwargs: Args: input_d: Tensor the decoder input tensor with shape = [batch_size, length_decoder, input_size] input_e: Tensor the child input tensor with shape = [batch_size, length_encoder, input_size] mask_d: Tensor or None the mask tensor for decoder with shape = [batch_size, length_decoder] mask_e: Tensor or None the mask tensor for encoder with shape = [batch_size, length_encoder] Returns: Tensor the energy tensor with shape = [batch_size, num_label, length, length] # compute decoder part: [batch_size, length_decoder, input_size_decoder] * [input_size_decoder, hidden_size] # the output shape is [batch_size, length_decoder, hidden_size] # then --> [batch_size, 1, length_decoder, hidden_size] # compute decoder part: [batch_size, length_encoder, input_size_encoder] * [input_size_encoder, hidden_size] # the output shape is [batch_size, length_encoder, hidden_size] # then --> [batch_size, length_encoder, 1, hidden_size] # add them together [batch_size, length_encoder, length_decoder, hidden_size] # product with v # [batch_size, length_encoder, length_decoder, hidden_size] * [hidden, num_label] # [batch_size, length_encoder, length_decoder, num_labels] # then --> [batch_size, num_labels, length_decoder, length_encoder]
| 2.61953
| 3
|
up/tasks/det/plugins/condinst/models/postprocess/__init__.py
|
ModelTC/EOD
| 196
|
6627075
|
<gh_stars>100-1000
from .condinst_postprocess import * # noqa
from .condinst_predictor import * # noqa
from .condinst_supervisor import * # noqa
|
from .condinst_postprocess import * # noqa
from .condinst_predictor import * # noqa
from .condinst_supervisor import * # noqa
|
uz
| 0.446344
|
# noqa # noqa # noqa
| 0.904537
| 1
|
riddles/admin.py
|
dan-brown/RiddleBase
| 1
|
6627076
|
from django.contrib import admin
from riddles.models import Riddle, RiddleCategory, RiddleState, RiddleType
admin.site.register(RiddleCategory)
admin.site.register(RiddleType)
admin.site.register(Riddle)
admin.site.register(RiddleState)
|
from django.contrib import admin
from riddles.models import Riddle, RiddleCategory, RiddleState, RiddleType
admin.site.register(RiddleCategory)
admin.site.register(RiddleType)
admin.site.register(Riddle)
admin.site.register(RiddleState)
|
none
| 1
| 1.276761
| 1
|
|
tests/test_consistentpangenomevariations.py
|
iqbal-lab-org/pangenome_variations
| 0
|
6627077
|
from unittest import TestCase
from unittest.mock import Mock, PropertyMock, patch
from collections import defaultdict
import pandas as pd
from io import StringIO
from src.ConsistentPangenomeVariations import ConsistentPangenomeVariations, InconsistentPangenomeVariations
from src.DeduplicatedVariationsDataframe import DeduplicatedVariationsDataframe
from src.PangenomeVariations import PangenomeVariations
from src.PangenomeVariation import PangenomeVariation
from src.PairwiseVariation import PairwiseVariation
from src.VarifierDataframe import VarifierDataframe
from src.AlleleMPHF import AlleleMPHF
class TestConsistentPangenomeVariations(TestCase):
def setUp(self) -> None:
self.dummy_consistent_pangenome_variations = ConsistentPangenomeVariations(PangenomeVariations(), filter_for_biallelic=False)
def test___constructor___filter_inconsistent_variations_out(self):
# setup
consistent_pangenome_variations = []
alleles_to_consistent_pangenome_variations = {}
for i in range(3):
consistent_pangenome_variation = Mock()
consistent_pangenome_variation.is_consistent.return_value = True
consistent_pangenome_variation.alleles = [f"consistent_pangenome_variation_{i}.alleles"]
alleles_to_consistent_pangenome_variations[
f"consistent_pangenome_variation_{i}.alleles"] = consistent_pangenome_variation
consistent_pangenome_variations.append(consistent_pangenome_variation)
inconsistent_pangenome_variations = []
for _ in range(3):
inconsistent_pangenome_variation = Mock()
inconsistent_pangenome_variation.is_consistent.return_value = False
inconsistent_pangenome_variations.append(inconsistent_pangenome_variation)
list_of_pangenome_variations = [
consistent_pangenome_variations[0],
consistent_pangenome_variations[1],
inconsistent_pangenome_variations[0],
inconsistent_pangenome_variations[1],
consistent_pangenome_variations[2],
inconsistent_pangenome_variations[2]
]
pangenome_variations = PangenomeVariations()
pangenome_variations._pangenome_variations = list_of_pangenome_variations
actual_consistent_pangenome_variations = ConsistentPangenomeVariations(pangenome_variations, filter_for_biallelic=False)
self.assertListEqual(actual_consistent_pangenome_variations.consistent_pangenome_variations,
consistent_pangenome_variations)
self.assertDictEqual(actual_consistent_pangenome_variations.alleles_to_consistent_pangenome_variations,
alleles_to_consistent_pangenome_variations)
self.assertEqual(actual_consistent_pangenome_variations.number_of_pangenome_variations, 6)
self.assertEqual(actual_consistent_pangenome_variations.number_of_consistent_pangenome_variations, 3)
def test___constructor___filter_inconsistent_variations_out___keep_only_biallelic_ones(self):
# setup
biallelic_consistent_pangenome_variations = []
non_biallelic_consistent_pangenome_variations = []
alleles_to_biallelic_consistent_pangenome_variations = {}
number_of_alleles_in_each_consistent_variation = [3, 1, 2, 4, 2]
for consistent_variation_index, number_of_alleles in enumerate(number_of_alleles_in_each_consistent_variation):
consistent_pangenome_variation = Mock(get_number_of_different_allele_sequences=Mock(return_value=number_of_alleles))
consistent_pangenome_variation.is_consistent.return_value = True
consistent_pangenome_variation.alleles = [
f"consistent_pangenome_variation_{consistent_variation_index}.allele_{allele_index}"
for allele_index in range(number_of_alleles)]
if number_of_alleles == 2:
for allele in consistent_pangenome_variation.alleles:
alleles_to_biallelic_consistent_pangenome_variations[allele] = consistent_pangenome_variation
biallelic_consistent_pangenome_variations.append(consistent_pangenome_variation)
else:
non_biallelic_consistent_pangenome_variations.append(consistent_pangenome_variation)
inconsistent_pangenome_variations = []
for _ in range(3):
inconsistent_pangenome_variation = Mock()
inconsistent_pangenome_variation.is_consistent.return_value = False
inconsistent_pangenome_variations.append(inconsistent_pangenome_variation)
list_of_pangenome_variations = [
biallelic_consistent_pangenome_variations[0],
inconsistent_pangenome_variations[0],
non_biallelic_consistent_pangenome_variations[0],
inconsistent_pangenome_variations[1],
inconsistent_pangenome_variations[2],
biallelic_consistent_pangenome_variations[1],
non_biallelic_consistent_pangenome_variations[1],
non_biallelic_consistent_pangenome_variations[2],
]
pangenome_variations = PangenomeVariations()
pangenome_variations._pangenome_variations = list_of_pangenome_variations
actual_consistent_pangenome_variations = ConsistentPangenomeVariations(pangenome_variations, filter_for_biallelic=True)
self.assertListEqual(actual_consistent_pangenome_variations.consistent_pangenome_variations,
biallelic_consistent_pangenome_variations)
self.assertDictEqual(actual_consistent_pangenome_variations.alleles_to_consistent_pangenome_variations,
alleles_to_biallelic_consistent_pangenome_variations)
self.assertEqual(actual_consistent_pangenome_variations.number_of_pangenome_variations, 8)
self.assertEqual(actual_consistent_pangenome_variations.number_of_consistent_pangenome_variations, 5)
self.assertEqual(actual_consistent_pangenome_variations.number_of_consistent_biallelic_pangenome_variations, 2)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None, {"allele_1": "CPV1", "allele_2": "CPV1"}))
def test___get_consistent_pangenome_variation___both_alleles_present_and_in_same_CPV(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
actual = self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
expected = "CPV1"
self.assertEqual(actual, expected)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None, {"allele_1": "CPV1", "allele_2": "CPV2"}))
def test___get_consistent_pangenome_variation___both_alleles_present_but_in_different_CPVs(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
with self.assertRaises(InconsistentPangenomeVariations):
self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None, {"allele_1": "CPV1"}))
def test___get_consistent_pangenome_variation___only_first_allele_present(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
with self.assertRaises(InconsistentPangenomeVariations):
self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None, {"allele_2": "CPV1"}))
def test___get_consistent_pangenome_variation___only_second_allele_present(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
with self.assertRaises(InconsistentPangenomeVariations):
self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None))
def test___get_consistent_pangenome_variation___no_alleles_present(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
actual = self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
expected = None
self.assertEqual(actual, expected)
@patch.object(PangenomeVariation, "get_number_of_alleles", side_effect=[2, 4])
@patch.object(PangenomeVariation, "get_allele_index", side_effect=[1, 0, 2, 3])
@patch.object(PangenomeVariation, "get_number_of_different_allele_sequences", side_effect=[2, 3])
@patch.object(PangenomeVariation, "get_allele_sequence_index", side_effect=[0, 0, 2, 1])
@patch.object(PangenomeVariation, "get_number_of_samples", side_effect=[10, 15])
@patch.object(ConsistentPangenomeVariations, "get_consistent_pangenome_variation",
side_effect=[None, PangenomeVariation(0, []), PangenomeVariation(1, []), None])
@patch.object(PairwiseVariation, PairwiseVariation.get_PairwiseVariation_from_VarifierDataframe.__name__,
return_value=[Mock(), Mock(), Mock(), Mock()])
def test____get_DeduplicatedVariationsDataframe(self, *mocks):
snps_df = pd.read_csv(StringIO(
"""dummy
0
1
2
3
"""
))
dummy_allele_mphf = AlleleMPHF()
actual = self.dummy_consistent_pangenome_variations._get_DeduplicatedVariationsDataframe("ref", "query",
snps_df, dummy_allele_mphf)
expected = DeduplicatedVariationsDataframe(pd.read_csv(StringIO(
"""dummy,ref_genome,query_genome,present_in_a_consistent_pangenome_variation,pangenome_variation_id,number_of_alleles,ref_allele_id,query_allele_id,number_of_different_allele_sequences,ref_allele_sequence_id,query_allele_sequence_id,nb_of_samples
0,ref,query,False,-1,-1,-1,-1,-1,-1,-1,-1
1,ref,query,True,0,2,1,0,2,0,0,10
2,ref,query,True,1,4,2,3,3,2,1,15
3,ref,query,False,-1,-1,-1,-1,-1,-1,-1,-1
"""
)))
self.assertTrue(actual.equals(expected))
@patch.object(VarifierDataframe,
VarifierDataframe.get_ref_and_query_from_VarifierDataframe_filepath.__name__,
return_value=(None, None))
@patch.object(VarifierDataframe, VarifierDataframe.load_pickled.__name__)
@patch.object(ConsistentPangenomeVariations,
ConsistentPangenomeVariations._get_DeduplicatedVariationsDataframe.__name__)
def test___build_DeduplicatedVariationsDataframe_from_VarifierDataframe(self, _enrich_VarifierDataframe_mock, *other_mocks):
_enrich_VarifierDataframe_mock.return_value = pd.read_csv(StringIO(
"""dummy,ref_genome,query_genome,present_in_a_consistent_pangenome_variation,pangenome_variation_id,number_of_alleles,ref_allele_id,query_allele_id,number_of_different_allele_sequences,ref_allele_sequence_id,query_allele_sequence_id
0,ref,query,False,-1,-1,-1,-1,-1,-1,-1
1,ref,query,True,0,2,1,0,2,0,0
2,ref,query,True,1,4,2,3,3,2,1
3,ref,query,False,-1,-1,-1,-1,-1,-1,-1
"""
))
dummy_allele_mphf = AlleleMPHF()
actual = self.dummy_consistent_pangenome_variations.build_DeduplicatedVariationsDataframe_from_VarifierDataframe(
"dummy", dummy_allele_mphf)
expected = DeduplicatedVariationsDataframe(pd.read_csv(StringIO(
"""dummy,ref_genome,query_genome,present_in_a_consistent_pangenome_variation,pangenome_variation_id,number_of_alleles,ref_allele_id,query_allele_id,number_of_different_allele_sequences,ref_allele_sequence_id,query_allele_sequence_id
1,ref,query,True,0,2,1,0,2,0,0
2,ref,query,True,1,4,2,3,3,2,1
"""
)))
self.assertTrue(actual.equals(expected))
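# --- Illustrative sketch (added for clarity; not part of the original test module) ---
# The constructor behaviour exercised by the two constructor tests above, reduced to
# plain Python. Method names mirror the mocked interface used in the tests; the real
# ConsistentPangenomeVariations implementation may differ in detail.
def _filter_consistent_variations(pangenome_variations, filter_for_biallelic):
    kept = []
    alleles_to_variation = {}
    for variation in pangenome_variations:
        # drop inconsistent variations outright
        if not variation.is_consistent():
            continue
        # optionally keep only biallelic variations (exactly two allele sequences)
        if filter_for_biallelic and variation.get_number_of_different_allele_sequences() != 2:
            continue
        kept.append(variation)
        for allele in variation.alleles:
            alleles_to_variation[allele] = variation
    return kept, alleles_to_variation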
|
from unittest import TestCase
from unittest.mock import Mock, PropertyMock, patch
from collections import defaultdict
import pandas as pd
from io import StringIO
from src.ConsistentPangenomeVariations import ConsistentPangenomeVariations, InconsistentPangenomeVariations
from src.DeduplicatedVariationsDataframe import DeduplicatedVariationsDataframe
from src.PangenomeVariations import PangenomeVariations
from src.PangenomeVariation import PangenomeVariation
from src.PairwiseVariation import PairwiseVariation
from src.VarifierDataframe import VarifierDataframe
from src.AlleleMPHF import AlleleMPHF
class TestConsistentPangenomeVariations(TestCase):
def setUp(self) -> None:
self.dummy_consistent_pangenome_variations = ConsistentPangenomeVariations(PangenomeVariations(), filter_for_biallelic=False)
def test___constructor___filter_inconsistent_variations_out(self):
# setup
consistent_pangenome_variations = []
alleles_to_consistent_pangenome_variations = {}
for i in range(3):
consistent_pangenome_variation = Mock()
consistent_pangenome_variation.is_consistent.return_value = True
consistent_pangenome_variation.alleles = [f"consistent_pangenome_variation_{i}.alleles"]
alleles_to_consistent_pangenome_variations[
f"consistent_pangenome_variation_{i}.alleles"] = consistent_pangenome_variation
consistent_pangenome_variations.append(consistent_pangenome_variation)
inconsistent_pangenome_variations = []
for _ in range(3):
inconsistent_pangenome_variation = Mock()
inconsistent_pangenome_variation.is_consistent.return_value = False
inconsistent_pangenome_variations.append(inconsistent_pangenome_variation)
list_of_pangenome_variations = [
consistent_pangenome_variations[0],
consistent_pangenome_variations[1],
inconsistent_pangenome_variations[0],
inconsistent_pangenome_variations[1],
consistent_pangenome_variations[2],
inconsistent_pangenome_variations[2]
]
pangenome_variations = PangenomeVariations()
pangenome_variations._pangenome_variations = list_of_pangenome_variations
actual_consistent_pangenome_variations = ConsistentPangenomeVariations(pangenome_variations, filter_for_biallelic=False)
self.assertListEqual(actual_consistent_pangenome_variations.consistent_pangenome_variations,
consistent_pangenome_variations)
self.assertDictEqual(actual_consistent_pangenome_variations.alleles_to_consistent_pangenome_variations,
alleles_to_consistent_pangenome_variations)
self.assertEqual(actual_consistent_pangenome_variations.number_of_pangenome_variations, 6)
self.assertEqual(actual_consistent_pangenome_variations.number_of_consistent_pangenome_variations, 3)
def test___constructor___filter_inconsistent_variations_out___keep_only_biallelic_ones(self):
# setup
biallelic_consistent_pangenome_variations = []
non_biallelic_consistent_pangenome_variations = []
alleles_to_biallelic_consistent_pangenome_variations = {}
number_of_alleles_in_each_consistent_variation = [3, 1, 2, 4, 2]
for consistent_variation_index, number_of_alleles in enumerate(number_of_alleles_in_each_consistent_variation):
consistent_pangenome_variation = Mock(get_number_of_different_allele_sequences=Mock(return_value=number_of_alleles))
consistent_pangenome_variation.is_consistent.return_value = True
consistent_pangenome_variation.alleles = [
f"consistent_pangenome_variation_{consistent_variation_index}.allele_{allele_index}"
for allele_index in range(number_of_alleles)]
if number_of_alleles == 2:
for allele in consistent_pangenome_variation.alleles:
alleles_to_biallelic_consistent_pangenome_variations[allele] = consistent_pangenome_variation
biallelic_consistent_pangenome_variations.append(consistent_pangenome_variation)
else:
non_biallelic_consistent_pangenome_variations.append(consistent_pangenome_variation)
inconsistent_pangenome_variations = []
for _ in range(3):
inconsistent_pangenome_variation = Mock()
inconsistent_pangenome_variation.is_consistent.return_value = False
inconsistent_pangenome_variations.append(inconsistent_pangenome_variation)
list_of_pangenome_variations = [
biallelic_consistent_pangenome_variations[0],
inconsistent_pangenome_variations[0],
non_biallelic_consistent_pangenome_variations[0],
inconsistent_pangenome_variations[1],
inconsistent_pangenome_variations[2],
biallelic_consistent_pangenome_variations[1],
non_biallelic_consistent_pangenome_variations[1],
non_biallelic_consistent_pangenome_variations[2],
]
pangenome_variations = PangenomeVariations()
pangenome_variations._pangenome_variations = list_of_pangenome_variations
actual_consistent_pangenome_variations = ConsistentPangenomeVariations(pangenome_variations, filter_for_biallelic=True)
self.assertListEqual(actual_consistent_pangenome_variations.consistent_pangenome_variations,
biallelic_consistent_pangenome_variations)
self.assertDictEqual(actual_consistent_pangenome_variations.alleles_to_consistent_pangenome_variations,
alleles_to_biallelic_consistent_pangenome_variations)
self.assertEqual(actual_consistent_pangenome_variations.number_of_pangenome_variations, 8)
self.assertEqual(actual_consistent_pangenome_variations.number_of_consistent_pangenome_variations, 5)
self.assertEqual(actual_consistent_pangenome_variations.number_of_consistent_biallelic_pangenome_variations, 2)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None, {"allele_1": "CPV1", "allele_2": "CPV1"}))
def test___get_consistent_pangenome_variation___both_alleles_present_and_in_same_CPV(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
actual = self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
expected = "CPV1"
self.assertEqual(actual, expected)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None, {"allele_1": "CPV1", "allele_2": "CPV2"}))
def test___get_consistent_pangenome_variation___both_alleles_present_but_in_different_CPVs(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
with self.assertRaises(InconsistentPangenomeVariations):
self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None, {"allele_1": "CPV1"}))
def test___get_consistent_pangenome_variation___only_first_allele_present(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
with self.assertRaises(InconsistentPangenomeVariations):
self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None, {"allele_2": "CPV1"}))
def test___get_consistent_pangenome_variation___only_second_allele_present(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
with self.assertRaises(InconsistentPangenomeVariations):
self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
@patch.object(ConsistentPangenomeVariations, "alleles_to_consistent_pangenome_variations",
new_callable=PropertyMock,
return_value=defaultdict(lambda: None))
def test___get_consistent_pangenome_variation___no_alleles_present(self, *mocks):
pairwise_variation = Mock(allele_1="allele_1", allele_2="allele_2")
actual = self.dummy_consistent_pangenome_variations.get_consistent_pangenome_variation(pairwise_variation)
expected = None
self.assertEqual(actual, expected)
@patch.object(PangenomeVariation, "get_number_of_alleles", side_effect=[2, 4])
@patch.object(PangenomeVariation, "get_allele_index", side_effect=[1, 0, 2, 3])
@patch.object(PangenomeVariation, "get_number_of_different_allele_sequences", side_effect=[2, 3])
@patch.object(PangenomeVariation, "get_allele_sequence_index", side_effect=[0, 0, 2, 1])
@patch.object(PangenomeVariation, "get_number_of_samples", side_effect=[10, 15])
@patch.object(ConsistentPangenomeVariations, "get_consistent_pangenome_variation",
side_effect=[None, PangenomeVariation(0, []), PangenomeVariation(1, []), None])
@patch.object(PairwiseVariation, PairwiseVariation.get_PairwiseVariation_from_VarifierDataframe.__name__,
return_value=[Mock(), Mock(), Mock(), Mock()])
def test____get_DeduplicatedVariationsDataframe(self, *mocks):
snps_df = pd.read_csv(StringIO(
"""dummy
0
1
2
3
"""
))
dummy_allele_mphf = AlleleMPHF()
actual = self.dummy_consistent_pangenome_variations._get_DeduplicatedVariationsDataframe("ref", "query",
snps_df, dummy_allele_mphf)
expected = DeduplicatedVariationsDataframe(pd.read_csv(StringIO(
"""dummy,ref_genome,query_genome,present_in_a_consistent_pangenome_variation,pangenome_variation_id,number_of_alleles,ref_allele_id,query_allele_id,number_of_different_allele_sequences,ref_allele_sequence_id,query_allele_sequence_id,nb_of_samples
0,ref,query,False,-1,-1,-1,-1,-1,-1,-1,-1
1,ref,query,True,0,2,1,0,2,0,0,10
2,ref,query,True,1,4,2,3,3,2,1,15
3,ref,query,False,-1,-1,-1,-1,-1,-1,-1,-1
"""
)))
self.assertTrue(actual.equals(expected))
@patch.object(VarifierDataframe,
VarifierDataframe.get_ref_and_query_from_VarifierDataframe_filepath.__name__,
return_value=(None, None))
@patch.object(VarifierDataframe, VarifierDataframe.load_pickled.__name__)
@patch.object(ConsistentPangenomeVariations,
ConsistentPangenomeVariations._get_DeduplicatedVariationsDataframe.__name__)
def test___build_DeduplicatedVariationsDataframe_from_VarifierDataframe(self, _enrich_VarifierDataframe_mock, *other_mocks):
_enrich_VarifierDataframe_mock.return_value = pd.read_csv(StringIO(
"""dummy,ref_genome,query_genome,present_in_a_consistent_pangenome_variation,pangenome_variation_id,number_of_alleles,ref_allele_id,query_allele_id,number_of_different_allele_sequences,ref_allele_sequence_id,query_allele_sequence_id
0,ref,query,False,-1,-1,-1,-1,-1,-1,-1
1,ref,query,True,0,2,1,0,2,0,0
2,ref,query,True,1,4,2,3,3,2,1
3,ref,query,False,-1,-1,-1,-1,-1,-1,-1
"""
))
dummy_allele_mphf = AlleleMPHF()
actual = self.dummy_consistent_pangenome_variations.build_DeduplicatedVariationsDataframe_from_VarifierDataframe(
"dummy", dummy_allele_mphf)
expected = DeduplicatedVariationsDataframe(pd.read_csv(StringIO(
"""dummy,ref_genome,query_genome,present_in_a_consistent_pangenome_variation,pangenome_variation_id,number_of_alleles,ref_allele_id,query_allele_id,number_of_different_allele_sequences,ref_allele_sequence_id,query_allele_sequence_id
1,ref,query,True,0,2,1,0,2,0,0
2,ref,query,True,1,4,2,3,3,2,1
"""
)))
self.assertTrue(actual.equals(expected))
|
en
| 0.295022
|
# setup # setup dummy 0 1 2 3 dummy,ref_genome,query_genome,present_in_a_consistent_pangenome_variation,pangenome_variation_id,number_of_alleles,ref_allele_id,query_allele_id,number_of_different_allele_sequences,ref_allele_sequence_id,query_allele_sequence_id,nb_of_samples 0,ref,query,False,-1,-1,-1,-1,-1,-1,-1,-1 1,ref,query,True,0,2,1,0,2,0,0,10 2,ref,query,True,1,4,2,3,3,2,1,15 3,ref,query,False,-1,-1,-1,-1,-1,-1,-1,-1 dummy,ref_genome,query_genome,present_in_a_consistent_pangenome_variation,pangenome_variation_id,number_of_alleles,ref_allele_id,query_allele_id,number_of_different_allele_sequences,ref_allele_sequence_id,query_allele_sequence_id 0,ref,query,False,-1,-1,-1,-1,-1,-1,-1 1,ref,query,True,0,2,1,0,2,0,0 2,ref,query,True,1,4,2,3,3,2,1 3,ref,query,False,-1,-1,-1,-1,-1,-1,-1 dummy,ref_genome,query_genome,present_in_a_consistent_pangenome_variation,pangenome_variation_id,number_of_alleles,ref_allele_id,query_allele_id,number_of_different_allele_sequences,ref_allele_sequence_id,query_allele_sequence_id 1,ref,query,True,0,2,1,0,2,0,0 2,ref,query,True,1,4,2,3,3,2,1
| 2.577021
| 3
|
local/lib/python3.6/site-packages/pgadmin4/pgadmin/browser/server_groups/servers/databases/schemas/tables/rules/__init__.py
|
sahilsdei/django_ecommerce
| 2
|
6627078
|
<gh_stars>1-10
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements Rule Node"""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases.schemas as schemas
from flask import render_template, make_response, request, jsonify
from flask_babelex import gettext
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.utils import \
parse_rule_definition
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
from pgadmin.utils import IS_PY2
# If we are in Python3
if not IS_PY2:
unicode = str
class RuleModule(CollectionNodeModule):
"""
class RuleModule(CollectionNodeModule):
A rule collection Node which inherits CollectionNodeModule
        class and defines methods:
get_nodes - To generate collection node.
script_load - tells when to load js file.
          csssnippets - add css to the page
"""
NODE_TYPE = 'rule'
COLLECTION_LABEL = gettext("Rules")
def __init__(self, *args, **kwargs):
self.min_ver = None
self.max_ver = None
super(RuleModule, self).__init__(*args, **kwargs)
def BackendSupported(self, manager, **kwargs):
"""
        Load this module if tid is a view; we will not load it under a
        materialized view.
"""
if super(RuleModule, self).BackendSupported(manager, **kwargs):
conn = manager.connection(did=kwargs['did'])
if 'vid' not in kwargs:
return True
self.template_path = 'rules/sql'
SQL = render_template("/".join(
[self.template_path, 'backend_support.sql']
), vid=kwargs['vid'])
status, res = conn.execute_scalar(SQL)
# check if any errors
if not status:
return internal_server_error(errormsg=res)
# Check tid is view not material view
            # then true, otherwise false
            return res
def get_nodes(self, gid, sid, did, scid, **kwargs):
"""
Generate the collection node
"""
assert ('tid' in kwargs or 'vid' in kwargs)
yield self.generate_browser_collection_node(
kwargs['tid'] if 'tid' in kwargs else kwargs['vid']
)
@property
def node_inode(self):
"""
If a node has children return True otherwise False
"""
return False
@property
def script_load(self):
"""
Load the module script for rule, when any of the database nodes are
initialized.
"""
return schemas.SchemaModule.NODE_TYPE
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
snippets = [
render_template(
"browser/css/collection.css",
node_type=self.node_type,
_=gettext
),
render_template(
"rules/css/rule.css",
node_type=self.node_type,
_=gettext
)
]
for submodule in self.submodules:
snippets.extend(submodule.csssnippets)
return snippets
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
# Create blueprint of RuleModule.
blueprint = RuleModule(__name__)
class RuleView(PGChildNodeView):
"""
This is a class for rule node which inherits the
    properties and methods from PGChildNodeView class and defines
various methods to list, create, update and delete rule.
Variables:
---------
* node_type - tells which type of node it is
* parent_ids - id with its type and name of parent nodes
* ids - id with type and name of extension module being used.
* operations - function routes mappings defined.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'rid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'children': [{
'get': 'children'
}],
'delete': [{'delete': 'delete'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'configs': [{'get': 'configs'}]
})
def module_js(self):
"""
This property defines whether Javascript exists for this node.
"""
return make_response(
render_template(
"rules/js/rules.js",
_=gettext
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
This function will behave as a decorator which will check the
database connection before running a view. It will also attach
manager, conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.manager = get_driver(
PG_DEFAULT_DRIVER).connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.datlastsysoid = self.manager.db_info[
kwargs['did']
]['datlastsysoid'] if self.manager.db_info is not None and \
kwargs['did'] in self.manager.db_info else 0
self.template_path = 'rules/sql'
return f(*args, **kwargs)
return wrap
@check_precondition
def list(self, gid, sid, did, scid, tid):
"""
Fetch all rule properties and render into properties tab
"""
# fetch schema name by schema id
SQL = render_template("/".join(
[self.template_path, 'properties.sql']), tid=tid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def node(self, gid, sid, did, scid, tid, rid):
"""
return single node
"""
SQL = render_template("/".join(
[self.template_path, 'nodes.sql']), rid=rid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
if len(rset['rows']) == 0:
return gone(gettext("""Could not find the rule in the table."""))
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon="icon-rule"
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
List all the rules under the Rules Collection node
"""
res = []
SQL = render_template("/".join(
[self.template_path, 'nodes.sql']), tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon="icon-rule"
))
return make_json_response(
data=res,
status=200
)
@check_precondition
def properties(self, gid, sid, did, scid, tid, rid):
"""
Fetch the properties of an individual rule and render in properties tab
"""
SQL = render_template("/".join(
[self.template_path, 'properties.sql']
), rid=rid, datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the rule in the table."""))
return ajax_response(
response=parse_rule_definition(res),
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid):
"""
This function will create a new rule object
"""
required_args = [
'name',
]
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for arg in required_args:
if arg not in data:
return make_json_response(
status=410,
success=0,
errormsg=gettext(
"Could not find the required parameter (%s)." % arg
)
)
try:
SQL = render_template("/".join(
[self.template_path, 'create.sql']), data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
# Fetch the rule id against rule name to display node
# in tree browser
SQL = render_template("/".join(
[self.template_path, 'rule_id.sql']), rule_name=data['name'])
status, rule_id = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=rule_id)
return jsonify(
node=self.blueprint.generate_browser_node(
rule_id,
tid,
data['name'],
icon="icon-rule"
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, rid):
"""
This function will update a rule object
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
SQL, name = self.getSQL(gid, sid, data, tid, rid)
if not isinstance(SQL, (str, unicode)):
return SQL
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return jsonify(
node=self.blueprint.generate_browser_node(
rid,
tid,
name,
icon="icon-%s" % self.node_type
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def delete(self, gid, sid, did, scid, tid, rid):
"""
This function will drop a rule object
"""
# Below will decide if it's simple drop or drop with cascade call
cascade = True if self.cmd == 'delete' else False
try:
# Get name for rule from did
SQL = render_template("/".join(
[self.template_path, 'delete.sql']), rid=rid)
status, res_data = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res_data)
if not res_data['rows']:
return make_json_response(
success=0,
errormsg=gettext(
'Error: Object not found.'
),
info=gettext(
'The specified rule could not be found.\n'
)
)
# drop rule
rset = res_data['rows'][0]
SQL = render_template("/".join(
[self.template_path, 'delete.sql']),
rulename=rset['rulename'],
relname=rset['relname'],
nspname=rset['nspname'],
cascade=cascade
)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=gettext("Rule dropped"),
data={
'id': tid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def msql(self, gid, sid, did, scid, tid, rid=None):
"""
This function returns modified SQL
"""
data = request.args
sql, name = self.getSQL(gid, sid, data, tid, rid)
if not isinstance(sql, (str, unicode)):
return sql
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
@check_precondition
def sql(self, gid, sid, did, scid, tid, rid):
"""
This function will generate sql to render into the sql panel
"""
SQL = render_template("/".join(
[self.template_path, 'properties.sql']), rid=rid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the rule in the table."""))
res_data = parse_rule_definition(res)
SQL = render_template("/".join(
[self.template_path, 'create.sql']),
data=res_data, display_comments=True)
return ajax_response(response=SQL)
def getSQL(self, gid, sid, data, tid, rid):
"""
This function will generate sql from model data
"""
if rid is not None:
SQL = render_template("/".join(
[self.template_path, 'properties.sql']), rid=rid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("""Could not find the rule in the table.""")
)
res_data = parse_rule_definition(res)
old_data = res_data
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data
)
else:
SQL = render_template("/".join(
[self.template_path, 'create.sql']), data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, rid):
"""
This function gets the dependents and returns an ajax response
for the rule node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
tid: View ID
rid: Rule ID
"""
dependents_result = self.get_dependents(self.conn, rid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, rid):
"""
        This function gets the dependencies and returns an ajax response
for the rule node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
tid: View ID
rid: Rule ID
"""
dependencies_result = self.get_dependencies(self.conn, rid)
return ajax_response(
response=dependencies_result,
status=200
)
RuleView.register_node_view(blueprint)
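# --- Illustrative sketch (added for clarity; not part of the original pgAdmin module) ---
# check_precondition above follows a common decorator pattern: wrap a view method,
# attach shared state (manager, connection, template path) to `self`, then call the
# original handler. The minimal, self-contained version below uses placeholder names
# and does not touch any real pgAdmin API.
def _attach_template_path(view_method):
    @wraps(view_method)
    def wrapper(self, *args, **kwargs):
        # Attach shared state to the view instance before running the handler.
        self.template_path = 'rules/sql'
        return view_method(self, *args, **kwargs)
    return wrapper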
|
##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2018, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
"""Implements Rule Node"""
import simplejson as json
from functools import wraps
import pgadmin.browser.server_groups.servers.databases.schemas as schemas
from flask import render_template, make_response, request, jsonify
from flask_babelex import gettext
from pgadmin.browser.collection import CollectionNodeModule
from pgadmin.browser.server_groups.servers.databases.schemas.utils import \
parse_rule_definition
from pgadmin.browser.utils import PGChildNodeView
from pgadmin.utils.ajax import make_json_response, internal_server_error, \
make_response as ajax_response, gone
from pgadmin.utils.driver import get_driver
from config import PG_DEFAULT_DRIVER
from pgadmin.utils import IS_PY2
# If we are in Python3
if not IS_PY2:
unicode = str
class RuleModule(CollectionNodeModule):
"""
class RuleModule(CollectionNodeModule):
A rule collection Node which inherits CollectionNodeModule
        class and defines methods:
get_nodes - To generate collection node.
script_load - tells when to load js file.
          csssnippets - add css to the page
"""
NODE_TYPE = 'rule'
COLLECTION_LABEL = gettext("Rules")
def __init__(self, *args, **kwargs):
self.min_ver = None
self.max_ver = None
super(RuleModule, self).__init__(*args, **kwargs)
def BackendSupported(self, manager, **kwargs):
"""
        Load this module if tid is a view; we will not load it under a
        materialized view.
"""
if super(RuleModule, self).BackendSupported(manager, **kwargs):
conn = manager.connection(did=kwargs['did'])
if 'vid' not in kwargs:
return True
self.template_path = 'rules/sql'
SQL = render_template("/".join(
[self.template_path, 'backend_support.sql']
), vid=kwargs['vid'])
status, res = conn.execute_scalar(SQL)
# check if any errors
if not status:
return internal_server_error(errormsg=res)
# Check tid is view not material view
            # then true, otherwise false
            return res
def get_nodes(self, gid, sid, did, scid, **kwargs):
"""
Generate the collection node
"""
assert ('tid' in kwargs or 'vid' in kwargs)
yield self.generate_browser_collection_node(
kwargs['tid'] if 'tid' in kwargs else kwargs['vid']
)
@property
def node_inode(self):
"""
If a node has children return True otherwise False
"""
return False
@property
def script_load(self):
"""
Load the module script for rule, when any of the database nodes are
initialized.
"""
return schemas.SchemaModule.NODE_TYPE
@property
def csssnippets(self):
"""
Returns a snippet of css to include in the page
"""
snippets = [
render_template(
"browser/css/collection.css",
node_type=self.node_type,
_=gettext
),
render_template(
"rules/css/rule.css",
node_type=self.node_type,
_=gettext
)
]
for submodule in self.submodules:
snippets.extend(submodule.csssnippets)
return snippets
@property
def module_use_template_javascript(self):
"""
Returns whether Jinja2 template is used for generating the javascript
module.
"""
return False
# Create blueprint of RuleModule.
blueprint = RuleModule(__name__)
class RuleView(PGChildNodeView):
"""
This is a class for rule node which inherits the
    properties and methods from PGChildNodeView class and defines
various methods to list, create, update and delete rule.
Variables:
---------
* node_type - tells which type of node it is
* parent_ids - id with its type and name of parent nodes
* ids - id with type and name of extension module being used.
* operations - function routes mappings defined.
"""
node_type = blueprint.node_type
parent_ids = [
{'type': 'int', 'id': 'gid'},
{'type': 'int', 'id': 'sid'},
{'type': 'int', 'id': 'did'},
{'type': 'int', 'id': 'scid'},
{'type': 'int', 'id': 'tid'}
]
ids = [
{'type': 'int', 'id': 'rid'}
]
operations = dict({
'obj': [
{'get': 'properties', 'delete': 'delete', 'put': 'update'},
{'get': 'list', 'post': 'create'}
],
'children': [{
'get': 'children'
}],
'delete': [{'delete': 'delete'}],
'nodes': [{'get': 'node'}, {'get': 'nodes'}],
'sql': [{'get': 'sql'}],
'msql': [{'get': 'msql'}, {'get': 'msql'}],
'stats': [{'get': 'statistics'}],
'dependency': [{'get': 'dependencies'}],
'dependent': [{'get': 'dependents'}],
'module.js': [{}, {}, {'get': 'module_js'}],
'configs': [{'get': 'configs'}]
})
def module_js(self):
"""
This property defines whether Javascript exists for this node.
"""
return make_response(
render_template(
"rules/js/rules.js",
_=gettext
),
200, {'Content-Type': 'application/x-javascript'}
)
def check_precondition(f):
"""
This function will behave as a decorator which will check the
database connection before running a view. It will also attach
manager, conn & template_path properties to self
"""
@wraps(f)
def wrap(*args, **kwargs):
# Here args[0] will hold self & kwargs will hold gid,sid,did
self = args[0]
self.manager = get_driver(
PG_DEFAULT_DRIVER).connection_manager(kwargs['sid'])
self.conn = self.manager.connection(did=kwargs['did'])
self.datlastsysoid = self.manager.db_info[
kwargs['did']
]['datlastsysoid'] if self.manager.db_info is not None and \
kwargs['did'] in self.manager.db_info else 0
self.template_path = 'rules/sql'
return f(*args, **kwargs)
return wrap
@check_precondition
def list(self, gid, sid, did, scid, tid):
"""
Fetch all rule properties and render into properties tab
"""
# fetch schema name by schema id
SQL = render_template("/".join(
[self.template_path, 'properties.sql']), tid=tid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
return ajax_response(
response=res['rows'],
status=200
)
@check_precondition
def node(self, gid, sid, did, scid, tid, rid):
"""
return single node
"""
SQL = render_template("/".join(
[self.template_path, 'nodes.sql']), rid=rid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
if len(rset['rows']) == 0:
return gone(gettext("""Could not find the rule in the table."""))
res = self.blueprint.generate_browser_node(
rset['rows'][0]['oid'],
tid,
rset['rows'][0]['name'],
icon="icon-rule"
)
return make_json_response(
data=res,
status=200
)
@check_precondition
def nodes(self, gid, sid, did, scid, tid):
"""
List all the rules under the Rules Collection node
"""
res = []
SQL = render_template("/".join(
[self.template_path, 'nodes.sql']), tid=tid)
status, rset = self.conn.execute_2darray(SQL)
if not status:
return internal_server_error(errormsg=rset)
for row in rset['rows']:
res.append(
self.blueprint.generate_browser_node(
row['oid'],
tid,
row['name'],
icon="icon-rule"
))
return make_json_response(
data=res,
status=200
)
@check_precondition
def properties(self, gid, sid, did, scid, tid, rid):
"""
Fetch the properties of an individual rule and render in properties tab
"""
SQL = render_template("/".join(
[self.template_path, 'properties.sql']
), rid=rid, datlastsysoid=self.datlastsysoid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the rule in the table."""))
return ajax_response(
response=parse_rule_definition(res),
status=200
)
@check_precondition
def create(self, gid, sid, did, scid, tid):
"""
This function will create a new rule object
"""
required_args = [
'name',
]
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
for arg in required_args:
if arg not in data:
return make_json_response(
status=410,
success=0,
errormsg=gettext(
"Could not find the required parameter (%s)." % arg
)
)
try:
SQL = render_template("/".join(
[self.template_path, 'create.sql']), data=data)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
# Fetch the rule id against rule name to display node
# in tree browser
SQL = render_template("/".join(
[self.template_path, 'rule_id.sql']), rule_name=data['name'])
status, rule_id = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=rule_id)
return jsonify(
node=self.blueprint.generate_browser_node(
rule_id,
tid,
data['name'],
icon="icon-rule"
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def update(self, gid, sid, did, scid, tid, rid):
"""
This function will update a rule object
"""
data = request.form if request.form else json.loads(
request.data, encoding='utf-8'
)
try:
SQL, name = self.getSQL(gid, sid, data, tid, rid)
if not isinstance(SQL, (str, unicode)):
return SQL
SQL = SQL.strip('\n').strip(' ')
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return jsonify(
node=self.blueprint.generate_browser_node(
rid,
tid,
name,
icon="icon-%s" % self.node_type
)
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def delete(self, gid, sid, did, scid, tid, rid):
"""
This function will drop a rule object
"""
# Below will decide if it's simple drop or drop with cascade call
cascade = True if self.cmd == 'delete' else False
try:
            # Fetch the rule's name, table and schema using rid
SQL = render_template("/".join(
[self.template_path, 'delete.sql']), rid=rid)
status, res_data = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res_data)
if not res_data['rows']:
return make_json_response(
success=0,
errormsg=gettext(
'Error: Object not found.'
),
info=gettext(
'The specified rule could not be found.\n'
)
)
# drop rule
rset = res_data['rows'][0]
SQL = render_template("/".join(
[self.template_path, 'delete.sql']),
rulename=rset['rulename'],
relname=rset['relname'],
nspname=rset['nspname'],
cascade=cascade
)
status, res = self.conn.execute_scalar(SQL)
if not status:
return internal_server_error(errormsg=res)
return make_json_response(
success=1,
info=gettext("Rule dropped"),
data={
'id': tid,
'sid': sid,
'gid': gid,
'did': did
}
)
except Exception as e:
return internal_server_error(errormsg=str(e))
@check_precondition
def msql(self, gid, sid, did, scid, tid, rid=None):
"""
This function returns modified SQL
"""
data = request.args
sql, name = self.getSQL(gid, sid, data, tid, rid)
if not isinstance(sql, (str, unicode)):
return sql
sql = sql.strip('\n').strip(' ')
if sql == '':
sql = "--modified SQL"
return make_json_response(
data=sql,
status=200
)
@check_precondition
def sql(self, gid, sid, did, scid, tid, rid):
"""
This function will generate sql to render into the sql panel
"""
SQL = render_template("/".join(
[self.template_path, 'properties.sql']), rid=rid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(gettext("""Could not find the rule in the table."""))
res_data = parse_rule_definition(res)
SQL = render_template("/".join(
[self.template_path, 'create.sql']),
data=res_data, display_comments=True)
return ajax_response(response=SQL)
def getSQL(self, gid, sid, data, tid, rid):
"""
This function will generate sql from model data
"""
if rid is not None:
SQL = render_template("/".join(
[self.template_path, 'properties.sql']), rid=rid)
status, res = self.conn.execute_dict(SQL)
if not status:
return internal_server_error(errormsg=res)
if len(res['rows']) == 0:
return gone(
gettext("""Could not find the rule in the table.""")
)
res_data = parse_rule_definition(res)
old_data = res_data
SQL = render_template(
"/".join([self.template_path, 'update.sql']),
data=data, o_data=old_data
)
else:
SQL = render_template("/".join(
[self.template_path, 'create.sql']), data=data)
return SQL, data['name'] if 'name' in data else old_data['name']
@check_precondition
def dependents(self, gid, sid, did, scid, tid, rid):
"""
This function gets the dependents and returns an ajax response
for the rule node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
tid: View ID
rid: Rule ID
"""
dependents_result = self.get_dependents(self.conn, rid)
return ajax_response(
response=dependents_result,
status=200
)
@check_precondition
def dependencies(self, gid, sid, did, scid, tid, rid):
"""
        This function gets the dependencies and returns an ajax response
for the rule node.
Args:
gid: Server Group ID
sid: Server ID
did: Database ID
tid: View ID
rid: Rule ID
"""
dependencies_result = self.get_dependencies(self.conn, rid)
return ajax_response(
response=dependencies_result,
status=200
)
RuleView.register_node_view(blueprint)
|
en
| 0.708253
|
########################################################################## # # pgAdmin 4 - PostgreSQL Tools # # Copyright (C) 2013 - 2018, The pgAdmin Development Team # This software is released under the PostgreSQL Licence # ########################################################################## Implements Rule Node # If we are in Python3 class RuleModule(CollectionNodeModule): A rule collection Node which inherits CollectionNodeModule class and define methods: get_nodes - To generate collection node. script_load - tells when to load js file. csssnppets - add css to page Load this module if tid is view, we will not load it under material view # check if any errors # Check tid is view not material view # then true, othewise false Generate the collection node If a node has children return True otherwise False Load the module script for rule, when any of the database nodes are initialized. Returns a snippet of css to include in the page Returns whether Jinja2 template is used for generating the javascript module. # Create blueprint of RuleModule. This is a class for rule node which inherits the properties and methods from PGChildNodeView class and define various methods to list, create, update and delete rule. Variables: --------- * node_type - tells which type of node it is * parent_ids - id with its type and name of parent nodes * ids - id with type and name of extension module being used. * operations - function routes mappings defined. This property defines whether Javascript exists for this node. This function will behave as a decorator which will check the database connection before running a view. It will also attach manager, conn & template_path properties to self # Here args[0] will hold self & kwargs will hold gid,sid,did Fetch all rule properties and render into properties tab # fetch schema name by schema id return single node Could not find the rule in the table. List all the rules under the Rules Collection node Fetch the properties of an individual rule and render in properties tab Could not find the rule in the table. This function will create a new rule object # Fetch the rule id against rule name to display node # in tree browser This function will update a rule object This function will drop a rule object # Below will decide if it's simple drop or drop with cascade call # Get name for rule from did # drop rule This function returns modified SQL This function will generate sql to render into the sql panel Could not find the rule in the table. This function will generate sql from model data Could not find the rule in the table. This function gets the dependents and returns an ajax response for the rule node. Args: gid: Server Group ID sid: Server ID did: Database ID tid: View ID rid: Rule ID This function gets the dependencies and returns sn ajax response for the rule node. Args: gid: Server Group ID sid: Server ID did: Database ID tid: View ID rid: Rule ID
| 1.752408
| 2
|
corrscope/channel.py
|
Lactozilla/corrscope
| 0
|
6627079
|
<gh_stars>0
from enum import unique, auto
from os.path import abspath
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union, Dict, Any
import attr
from ruamel.yaml.comments import CommentedMap
from corrscope.config import (
DumpableAttrs,
Alias,
CorrError,
evolve_compat,
TypedEnumDump,
)
from corrscope.triggers import MainTriggerConfig
from corrscope.util import coalesce
from corrscope.wave import Wave, FlattenOrStr
if TYPE_CHECKING:
from corrscope.corrscope import Config
class ChannelConfig(DumpableAttrs):
wav_path: str
label: str = ""
# Supplying a dict inherits attributes from global trigger.
# TODO test channel-specific triggers
trigger: Union[MainTriggerConfig, Dict[str, Any], None] = attr.Factory(dict)
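    # Illustrative note (added): a partial dict such as
    # trigger={"edge_strength": 2.0} would override only that field of the
    # global trigger config via evolve_compat() in Channel.__init__; the
    # field name is only an example and may not exist on every
    # MainTriggerConfig subclass.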
# Multiplies how wide the window is, in milliseconds.
trigger_width: int = 1
render_width: int = 1
# Overrides global amplification.
amplification: Optional[float] = None
# Stereo config
trigger_stereo: Optional[FlattenOrStr] = None
render_stereo: Optional[FlattenOrStr] = None
line_color: Optional[str] = None
# region Legacy Fields
trigger_width_ratio = Alias("trigger_width")
render_width_ratio = Alias("render_width")
# endregion
@unique
class DefaultLabel(TypedEnumDump):
NoLabel = 0
FileName = auto()
Number = auto()
class Channel:
# trigger_samp is unneeded, since __init__ (not CorrScope) constructs triggers.
_render_samp: int
# Product of corr_cfg.trigger/render_subsampling and trigger/render_width.
_trigger_stride: int
render_stride: int
def __init__(self, cfg: ChannelConfig, corr_cfg: "Config", channel_idx: int = 0):
"""channel_idx counts from 0."""
self.cfg = cfg
self.label = cfg.label
if not self.label:
if corr_cfg.default_label is DefaultLabel.FileName:
self.label = Path(cfg.wav_path).stem
elif corr_cfg.default_label is DefaultLabel.Number:
self.label = str(channel_idx + 1)
# Create a Wave object.
wave = Wave(
abspath(cfg.wav_path),
amplification=coalesce(cfg.amplification, corr_cfg.amplification),
)
# Flatten wave stereo for trigger and render.
tflat = coalesce(cfg.trigger_stereo, corr_cfg.trigger_stereo)
rflat = coalesce(cfg.render_stereo, corr_cfg.render_stereo)
self.trigger_wave = wave.with_flatten(tflat, return_channels=False)
self.render_wave = wave.with_flatten(rflat, return_channels=True)
# `subsampling` increases `stride` and decreases `nsamp`.
# `width` increases `stride` without changing `nsamp`.
tsub = corr_cfg.trigger_subsampling
tw = cfg.trigger_width
rsub = corr_cfg.render_subsampling
rw = cfg.render_width
# nsamp = orig / subsampling
# stride = subsampling * width
def calculate_nsamp(width_ms, sub):
width_s = width_ms / 1000
return round(width_s * wave.smp_s / sub)
trigger_samp = calculate_nsamp(corr_cfg.trigger_ms, tsub)
self._render_samp = calculate_nsamp(corr_cfg.render_ms, rsub)
self._trigger_stride = tsub * tw
self.render_stride = rsub * rw
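        # Worked example (illustrative numbers, not taken from any real
        # config): with wave.smp_s = 48000, trigger_ms = 40,
        # trigger_subsampling = 2 and trigger_width = 1, calculate_nsamp
        # gives round(0.040 * 48000 / 2) = 960 samples and the stride is
        # 2 * 1 = 2, matching nsamp = orig / subsampling and
        # stride = subsampling * width above.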
# Create a Trigger object.
if isinstance(cfg.trigger, MainTriggerConfig):
tcfg = cfg.trigger
elif isinstance(
cfg.trigger, (CommentedMap, dict)
): # CommentedMap may/not be subclass of dict.
tcfg = evolve_compat(corr_cfg.trigger, **cfg.trigger)
elif cfg.trigger is None:
tcfg = corr_cfg.trigger
else:
raise CorrError(
f"invalid per-channel trigger {cfg.trigger}, type={type(cfg.trigger)}, "
f"must be (*)TriggerConfig, dict, or None"
)
self.trigger = tcfg(
wave=self.trigger_wave,
tsamp=trigger_samp,
stride=self._trigger_stride,
fps=corr_cfg.fps,
wave_idx=channel_idx,
)
def get_render_around(self, trigger_sample: int):
return self.render_wave.get_around(
trigger_sample, self._render_samp, self.render_stride
)
|
from enum import unique, auto
from os.path import abspath
from pathlib import Path
from typing import TYPE_CHECKING, Optional, Union, Dict, Any
import attr
from ruamel.yaml.comments import CommentedMap
from corrscope.config import (
DumpableAttrs,
Alias,
CorrError,
evolve_compat,
TypedEnumDump,
)
from corrscope.triggers import MainTriggerConfig
from corrscope.util import coalesce
from corrscope.wave import Wave, FlattenOrStr
if TYPE_CHECKING:
from corrscope.corrscope import Config
class ChannelConfig(DumpableAttrs):
wav_path: str
label: str = ""
# Supplying a dict inherits attributes from global trigger.
# TODO test channel-specific triggers
trigger: Union[MainTriggerConfig, Dict[str, Any], None] = attr.Factory(dict)
# Multiplies how wide the window is, in milliseconds.
trigger_width: int = 1
render_width: int = 1
# Overrides global amplification.
amplification: Optional[float] = None
# Stereo config
trigger_stereo: Optional[FlattenOrStr] = None
render_stereo: Optional[FlattenOrStr] = None
line_color: Optional[str] = None
# region Legacy Fields
trigger_width_ratio = Alias("trigger_width")
render_width_ratio = Alias("render_width")
# endregion
@unique
class DefaultLabel(TypedEnumDump):
NoLabel = 0
FileName = auto()
Number = auto()
class Channel:
# trigger_samp is unneeded, since __init__ (not CorrScope) constructs triggers.
_render_samp: int
# Product of corr_cfg.trigger/render_subsampling and trigger/render_width.
_trigger_stride: int
render_stride: int
def __init__(self, cfg: ChannelConfig, corr_cfg: "Config", channel_idx: int = 0):
"""channel_idx counts from 0."""
self.cfg = cfg
self.label = cfg.label
if not self.label:
if corr_cfg.default_label is DefaultLabel.FileName:
self.label = Path(cfg.wav_path).stem
elif corr_cfg.default_label is DefaultLabel.Number:
self.label = str(channel_idx + 1)
# Create a Wave object.
wave = Wave(
abspath(cfg.wav_path),
amplification=coalesce(cfg.amplification, corr_cfg.amplification),
)
# Flatten wave stereo for trigger and render.
tflat = coalesce(cfg.trigger_stereo, corr_cfg.trigger_stereo)
rflat = coalesce(cfg.render_stereo, corr_cfg.render_stereo)
self.trigger_wave = wave.with_flatten(tflat, return_channels=False)
self.render_wave = wave.with_flatten(rflat, return_channels=True)
# `subsampling` increases `stride` and decreases `nsamp`.
# `width` increases `stride` without changing `nsamp`.
tsub = corr_cfg.trigger_subsampling
tw = cfg.trigger_width
rsub = corr_cfg.render_subsampling
rw = cfg.render_width
# nsamp = orig / subsampling
# stride = subsampling * width
def calculate_nsamp(width_ms, sub):
width_s = width_ms / 1000
return round(width_s * wave.smp_s / sub)
trigger_samp = calculate_nsamp(corr_cfg.trigger_ms, tsub)
self._render_samp = calculate_nsamp(corr_cfg.render_ms, rsub)
self._trigger_stride = tsub * tw
self.render_stride = rsub * rw
# Create a Trigger object.
if isinstance(cfg.trigger, MainTriggerConfig):
tcfg = cfg.trigger
elif isinstance(
cfg.trigger, (CommentedMap, dict)
): # CommentedMap may/not be subclass of dict.
tcfg = evolve_compat(corr_cfg.trigger, **cfg.trigger)
elif cfg.trigger is None:
tcfg = corr_cfg.trigger
else:
raise CorrError(
f"invalid per-channel trigger {cfg.trigger}, type={type(cfg.trigger)}, "
f"must be (*)TriggerConfig, dict, or None"
)
self.trigger = tcfg(
wave=self.trigger_wave,
tsamp=trigger_samp,
stride=self._trigger_stride,
fps=corr_cfg.fps,
wave_idx=channel_idx,
)
def get_render_around(self, trigger_sample: int):
return self.render_wave.get_around(
trigger_sample, self._render_samp, self.render_stride
)
|
en
| 0.719044
|
# Supplying a dict inherits attributes from global trigger. # TODO test channel-specific triggers # Multiplies how wide the window is, in milliseconds. # Overrides global amplification. # Stereo config # region Legacy Fields # endregion # trigger_samp is unneeded, since __init__ (not CorrScope) constructs triggers. # Product of corr_cfg.trigger/render_subsampling and trigger/render_width. channel_idx counts from 0. # Create a Wave object. # Flatten wave stereo for trigger and render. # `subsampling` increases `stride` and decreases `nsamp`. # `width` increases `stride` without changing `nsamp`. # nsamp = orig / subsampling # stride = subsampling * width # Create a Trigger object. # CommentedMap may/not be subclass of dict.
| 2.059231
| 2
|
tensor2tensor/models/research/aligned.py
|
xueeinstein/tensor2tensor
| 0
|
6627080
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single stack of transformations with no masking.
Produces output aligned with inputs.
Configurable using hyperparameters to use some combination of convolutions,
attention, mixtures of experts, etc.
A good problem for this model is languagemodel_wiki_scramble1k50 .
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import diet
from tensor2tensor.utils import expert_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
ModeKeys = tf.estimator.ModeKeys # pylint: disable=invalid-name
def _should_preprocess(layer_type):
return layer_type not in ["timing", "pos_emb", "att_memory_efficient"]
def _should_postprocess(layer_type):
return layer_type not in ["timing", "pos_emb"]
@registry.register_model
class Aligned(t2t_model.T2TModel):
"""Attention net. See file docstring."""
@property
def use_body_sharded(self):
return True
def body_sharded(self, sharded_features):
# Remove dropout if not training
hparams = self._hparams
dp = self._data_parallelism
x = dp(tf.squeeze, sharded_features["inputs"], 2)
def preprocess(x):
return dp(common_layers.layer_preprocess, x, hparams)
def postprocess(x, y):
return dp(common_layers.layer_postprocess, x, y, hparams)
x = dp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)
extra_loss = 0.0
ffn_hidden_sizes = [int(s) for s in hparams.ffn_hidden_sizes.split(",")]
moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")]
if hparams.mask_right:
def _bias(x):
return common_attention.attention_bias_lower_triangle(
common_layers.shape_list(x)[1])
bias = dp(_bias, x)
else:
bias = tf.zeros([1, 1, 1, 1])
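    # Note (added, hedged): with mask_right the lower-triangle bias above is
    # roughly of shape [1, 1, length, length] with large negative values
    # above the diagonal, suppressing attention to future positions; the
    # zero bias in the else branch broadcasts and leaves attention unmasked.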
if hparams.diet_experts:
hsize, = moe_hidden_sizes
def _diet_expert(x):
return diet.diet_expert(x, hsize, diet.diet_adam_optimizer_params())
expert_fn = _diet_expert
else:
expert_fn = expert_utils.ffn_expert_fn(
hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size)
batch_coordinate = dp(get_batch_coordinate, x)
layers = hparams.layers.strip(",").split(",")
for layer_num, layer_type in enumerate(layers):
with tf.variable_scope("%s_%d" % (layer_type, layer_num)):
if _should_preprocess(layer_type):
x = preprocess(x)
if layer_type == "timing":
y = dp(common_attention.add_timing_signal_nd, x)
elif layer_type == "pos_emb":
y = dp(
common_attention.add_positional_embedding_nd,
x,
hparams.max_length,
name="pos_emb")
elif layer_type == "att":
y = dp(
common_attention.multihead_attention,
x,
None,
bias, # bias
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout)
elif layer_type == "att_grouped":
multiplicative_overhead = (
hparams.multiplicative_overhead if hparams.mode == ModeKeys.TRAIN
else hparams.multiplicative_overhead_eval)
y, loss = dp(
common_attention.grouped_attention_multihead,
x,
x,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
num_groups=hparams.attention_num_groups,
memory_target_density=hparams.memory_target_density,
multiplicative_overhead=multiplicative_overhead,
make_image_summary=hparams.attention_image_summary,
mask_right=hparams.mask_right,
)
extra_loss += tf.add_n(loss) / dp.n
elif layer_type == "att_memory_efficient":
assert hparams.layer_preprocess_sequence == "n"
y = dp(common_attention.multihead_self_attention_memory_efficient, x,
bias, hparams.num_heads)
elif layer_type == "att_local":
y = dp(
common_attention.multihead_attention,
x,
None,
None, # bias
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=("local_mask_right"
if hparams.mask_right else "local_unmasked"),
block_length=hparams.local_attention_window,
block_width=hparams.local_attention_window)
elif layer_type == "att_pseudolocal":
# This is an inefficient implementation of local attention, for the
# purpose of testing model quality.
def _pseudolocal_bias(x):
return common_attention.attention_bias_local(
common_layers.shape_list(x)[1], hparams.local_attention_window,
0 if hparams.mask_right else hparams.local_attention_window)
pseudolocal_bias = dp(_pseudolocal_bias, x)
y = dp(common_attention.multihead_attention, x, None,
pseudolocal_bias, hparams.attention_key_channels or
hparams.hidden_size, hparams.attention_value_channels or
hparams.hidden_size, hparams.hidden_size, hparams.num_heads,
hparams.attention_dropout)
elif layer_type == "att_local_expert":
y, loss = dp(
common_attention.local_expert_attention,
x,
k=hparams.attention_moe_k,
loss_coef=hparams.attention_load_balance,
attention_num_experts=hparams.attention_num_experts,
train=hparams.mode == ModeKeys.TRAIN,
batch_coordinate=batch_coordinate,
mask_right=hparams.mask_right,
split_batch=bool(hparams.attention_split_batch),
attention_kq_size=hparams.attention_kq_size,
attention_v_size=hparams.attention_v_size)
# TODO(avaswani, epot, noam): Do we need to divide by num shards ?
extra_loss += tf.add_n(loss) / dp.n
elif layer_type == "att_lsh":
if hparams.lsh_truncated:
attention_fn = common_attention.multihead_attention_sparse_truncated
else:
attention_fn = common_attention.multihead_attention_sparse_dot_prod
y, loss = dp(
attention_fn,
x,
None,
None, # Bias is computed inside
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
# Additional parameters
bi=[
common_attention.BatchInfo(
coordinates=batch_coordinate[i],
order=None, # No future mask
) for i in range(dp.n)
],
use_map_fn=False,
experts_params=dict(nb_hyperplanes=4,))
extra_loss += tf.add_n(loss) / dp.n
elif layer_type == "moe":
y, loss = expert_utils.distributed_moe(
dp,
self._ps_devices,
x,
hparams.mode == ModeKeys.TRAIN,
input_size=hparams.hidden_size,
expert_fn=expert_fn,
num_experts=hparams.moe_num_experts,
k=hparams.moe_k,
loss_coef=hparams.moe_loss_coef)
extra_loss += loss
elif layer_type == "ffn":
y = dp(
expert_utils.ffn_expert_fn(hparams.hidden_size, ffn_hidden_sizes,
hparams.hidden_size),
dp(expert_utils.flatten_all_but_last, x))
y = dp(expert_utils.reshape_like, y, x)
elif layer_type == "conv":
y = dp(
common_layers.conv1d,
x,
hparams.hidden_size,
hparams.kernel_height,
activation=tf.nn.relu,
padding="SAME",
)
else:
assert False, "unknown sublayer %s" % layer_type
if _should_postprocess(layer_type):
x = postprocess(x, y)
else:
x = y
x = preprocess(x)
decoder_output = dp(tf.expand_dims, x, 2)
return decoder_output, extra_loss
def get_batch_coordinate(x):
"""Return a flat int32 tensor of shape [1, batch_size*length, 1]."""
# Compute the batch coordinate before flattening all batches
batch_coordinate = tf.expand_dims(
common_attention.coordinate_tensor(
common_layers.shape_list(x)[:-1], axis=0),
axis=-1)
return batch_coordinate
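# Illustrative note (added, hedged): for x of shape [2, 3, depth],
# coordinate_tensor over the leading dims would give [[0, 0, 0], [1, 1, 1]]
# (each entry is its batch index) and expand_dims makes that [2, 3, 1]; the
# flattening into the [1, batch_size*length, 1] form described in the
# docstring happens in the attention code that consumes it. These shapes are
# an assumption for illustration, not verified output.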
@registry.register_hparams
def aligned_base():
"""Set of hyperparameters.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps (10min): log(ppl)_eval = 2.60
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 5000
hparams.max_length = 0
hparams.min_length_bucket = 1024
hparams.dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.label_smoothing = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.shared_embedding_and_softmax_weights = True
hparams.add_hparam("ffn_hidden_sizes", "2048") # Add new ones like this.
hparams.moe_num_experts = 32
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.add_hparam("layers", "timing," + "conv,att,ffn," * 2)
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
# moe params. local attention moe.
hparams.add_hparam("attention_local", False)
hparams.add_hparam("attention_moe_k", 2)
hparams.add_hparam("attention_num_experts", 16)
hparams.add_hparam("attention_split_batch", False)
# Key, query and value dimensions for the attention
hparams.add_hparam("attention_kq_size", 128)
hparams.add_hparam("attention_v_size", 256)
# Loss coef for load balancing
hparams.add_hparam("attention_load_balance", 2e-2)
hparams.add_hparam("diet_experts", False)
hparams.add_hparam("memory_efficient_ffn", False)
hparams.add_hparam("local_attention_window", 128)
hparams.add_hparam("attention_num_groups", 8)
hparams.add_hparam("memory_target_density", 2.0)
hparams.add_hparam("multiplicative_overhead", 1.25)
hparams.add_hparam("multiplicative_overhead_eval", 2.0)
hparams.add_hparam("attention_image_summary", True)
# LSH params
hparams.add_hparam("lsh_truncated", True)
# For testing right-masking.
# This is not implemented in all layers.
hparams.add_hparam("mask_right", False)
return hparams
@registry.register_hparams
def aligned_memory_efficient():
"""Use multihead_self_attention_memory_efficient.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.59
8.7 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.02
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_memory_efficient,ffn," * 2
return hparams
@registry.register_hparams
def aligned_local_expert():
"""Use local_expert_attention.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.72
10.2 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.27
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_local_expert,ffn," * 2
return hparams
@registry.register_hparams
def aligned_grouped():
"""Use local_expert_attention.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.63
10.2 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.04
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_grouped,ffn," * 2
return hparams
@registry.register_hparams
def aligned_local():
"""Use local attention code.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57
12.8 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.08
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_local,ffn," * 2
return hparams
@registry.register_hparams
def aligned_local_1k():
"""Use local attention code, attend to full sequence.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57
7.5 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = aligned_local()
hparams.local_attention_window = 1024
return hparams
@registry.register_hparams
def aligned_pseudolocal():
"""Use a bias to simulate local attention. attention radius 128.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.06
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_pseudolocal,ffn," * 2
return hparams
@registry.register_hparams
def aligned_pseudolocal_256():
"""Use a bias to simulate local attention. attentio radius 256.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.56
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.05
Returns:
a hparams object
"""
hparams = aligned_pseudolocal()
hparams.local_attention_window = 256
return hparams
@registry.register_hparams
def aligned_no_timing():
"""No timing signal.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.75
12.3 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.39
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "conv,att,ffn," * 2
return hparams
@registry.register_hparams
def aligned_no_att():
"""No attention at all.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.89
20.8 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.70
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "conv,ffn," * 2
return hparams
@registry.register_hparams
def aligned_pos_emb():
"""positional embedding insead of timing signal.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.67
12.1 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "pos_emb," + "conv,att,ffn," * 2
return hparams
@registry.register_hparams
def aligned_moe():
"""mixture of experts instead of ffn.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.62
6.7 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 1.94
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att,moe," * 2
return hparams
@registry.register_hparams
def aligned_lsh():
"""Use multihead_attention_sparse_dot_prod.
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_lsh,ffn," * 2
return hparams
@registry.register_hparams
def aligned_8k():
"""version for languagemodel_wiki_scramble8k50.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.93
1.5 steps/sec on P100
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.batch_size = 8192
return hparams
@registry.register_hparams
def aligned_8k_grouped():
"""version for languagemodel_wiki_scramble8k50.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.92
3.3 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.15
Returns:
a hparams object
"""
hparams = aligned_grouped()
hparams.batch_size = 8192
# hparams.attention_image_summary = False
hparams.num_groups = 16
hparams.multiplicative_overhead = 1.1
return hparams
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Single stack of transformations with no masking.
Produces output aligned with inputs.
Configurable using hyperparameters to use some combination of convolutions,
attention, mixtures of experts, etc.
A good problem for this model is languagemodel_wiki_scramble1k50 .
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
from tensor2tensor.layers import common_attention
from tensor2tensor.layers import common_hparams
from tensor2tensor.layers import common_layers
from tensor2tensor.utils import diet
from tensor2tensor.utils import expert_utils
from tensor2tensor.utils import registry
from tensor2tensor.utils import t2t_model
import tensorflow as tf
ModeKeys = tf.estimator.ModeKeys # pylint: disable=invalid-name
def _should_preprocess(layer_type):
return layer_type not in ["timing", "pos_emb", "att_memory_efficient"]
def _should_postprocess(layer_type):
return layer_type not in ["timing", "pos_emb"]
@registry.register_model
class Aligned(t2t_model.T2TModel):
"""Attention net. See file docstring."""
@property
def use_body_sharded(self):
return True
def body_sharded(self, sharded_features):
# Remove dropout if not training
hparams = self._hparams
dp = self._data_parallelism
x = dp(tf.squeeze, sharded_features["inputs"], 2)
def preprocess(x):
return dp(common_layers.layer_preprocess, x, hparams)
def postprocess(x, y):
return dp(common_layers.layer_postprocess, x, y, hparams)
x = dp(tf.nn.dropout, x, 1.0 - hparams.layer_prepostprocess_dropout)
extra_loss = 0.0
ffn_hidden_sizes = [int(s) for s in hparams.ffn_hidden_sizes.split(",")]
moe_hidden_sizes = [int(s) for s in hparams.moe_hidden_sizes.split(",")]
if hparams.mask_right:
def _bias(x):
return common_attention.attention_bias_lower_triangle(
common_layers.shape_list(x)[1])
bias = dp(_bias, x)
else:
bias = tf.zeros([1, 1, 1, 1])
if hparams.diet_experts:
hsize, = moe_hidden_sizes
def _diet_expert(x):
return diet.diet_expert(x, hsize, diet.diet_adam_optimizer_params())
expert_fn = _diet_expert
else:
expert_fn = expert_utils.ffn_expert_fn(
hparams.hidden_size, moe_hidden_sizes, hparams.hidden_size)
batch_coordinate = dp(get_batch_coordinate, x)
layers = hparams.layers.strip(",").split(",")
for layer_num, layer_type in enumerate(layers):
with tf.variable_scope("%s_%d" % (layer_type, layer_num)):
if _should_preprocess(layer_type):
x = preprocess(x)
if layer_type == "timing":
y = dp(common_attention.add_timing_signal_nd, x)
elif layer_type == "pos_emb":
y = dp(
common_attention.add_positional_embedding_nd,
x,
hparams.max_length,
name="pos_emb")
elif layer_type == "att":
y = dp(
common_attention.multihead_attention,
x,
None,
bias, # bias
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout)
elif layer_type == "att_grouped":
multiplicative_overhead = (
hparams.multiplicative_overhead if hparams.mode == ModeKeys.TRAIN
else hparams.multiplicative_overhead_eval)
y, loss = dp(
common_attention.grouped_attention_multihead,
x,
x,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
num_groups=hparams.attention_num_groups,
memory_target_density=hparams.memory_target_density,
multiplicative_overhead=multiplicative_overhead,
make_image_summary=hparams.attention_image_summary,
mask_right=hparams.mask_right,
)
extra_loss += tf.add_n(loss) / dp.n
elif layer_type == "att_memory_efficient":
assert hparams.layer_preprocess_sequence == "n"
y = dp(common_attention.multihead_self_attention_memory_efficient, x,
bias, hparams.num_heads)
elif layer_type == "att_local":
y = dp(
common_attention.multihead_attention,
x,
None,
None, # bias
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=("local_mask_right"
if hparams.mask_right else "local_unmasked"),
block_length=hparams.local_attention_window,
block_width=hparams.local_attention_window)
elif layer_type == "att_pseudolocal":
# This is an inefficient implementation of local attention, for the
# purpose of testing model quality.
def _pseudolocal_bias(x):
return common_attention.attention_bias_local(
common_layers.shape_list(x)[1], hparams.local_attention_window,
0 if hparams.mask_right else hparams.local_attention_window)
pseudolocal_bias = dp(_pseudolocal_bias, x)
y = dp(common_attention.multihead_attention, x, None,
pseudolocal_bias, hparams.attention_key_channels or
hparams.hidden_size, hparams.attention_value_channels or
hparams.hidden_size, hparams.hidden_size, hparams.num_heads,
hparams.attention_dropout)
elif layer_type == "att_local_expert":
y, loss = dp(
common_attention.local_expert_attention,
x,
k=hparams.attention_moe_k,
loss_coef=hparams.attention_load_balance,
attention_num_experts=hparams.attention_num_experts,
train=hparams.mode == ModeKeys.TRAIN,
batch_coordinate=batch_coordinate,
mask_right=hparams.mask_right,
split_batch=bool(hparams.attention_split_batch),
attention_kq_size=hparams.attention_kq_size,
attention_v_size=hparams.attention_v_size)
# TODO(avaswani, epot, noam): Do we need to divide by num shards ?
extra_loss += tf.add_n(loss) / dp.n
elif layer_type == "att_lsh":
if hparams.lsh_truncated:
attention_fn = common_attention.multihead_attention_sparse_truncated
else:
attention_fn = common_attention.multihead_attention_sparse_dot_prod
y, loss = dp(
attention_fn,
x,
None,
None, # Bias is computed inside
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
# Additional parameters
bi=[
common_attention.BatchInfo(
coordinates=batch_coordinate[i],
order=None, # No future mask
) for i in range(dp.n)
],
use_map_fn=False,
experts_params=dict(nb_hyperplanes=4,))
extra_loss += tf.add_n(loss) / dp.n
elif layer_type == "moe":
y, loss = expert_utils.distributed_moe(
dp,
self._ps_devices,
x,
hparams.mode == ModeKeys.TRAIN,
input_size=hparams.hidden_size,
expert_fn=expert_fn,
num_experts=hparams.moe_num_experts,
k=hparams.moe_k,
loss_coef=hparams.moe_loss_coef)
extra_loss += loss
elif layer_type == "ffn":
y = dp(
expert_utils.ffn_expert_fn(hparams.hidden_size, ffn_hidden_sizes,
hparams.hidden_size),
dp(expert_utils.flatten_all_but_last, x))
y = dp(expert_utils.reshape_like, y, x)
elif layer_type == "conv":
y = dp(
common_layers.conv1d,
x,
hparams.hidden_size,
hparams.kernel_height,
activation=tf.nn.relu,
padding="SAME",
)
else:
assert False, "unknown sublayer %s" % layer_type
if _should_postprocess(layer_type):
x = postprocess(x, y)
else:
x = y
x = preprocess(x)
decoder_output = dp(tf.expand_dims, x, 2)
return decoder_output, extra_loss
def get_batch_coordinate(x):
"""Return a flat int32 tensor of shape [1, batch_size*length, 1]."""
# Compute the batch coordinate before flattening all batches
batch_coordinate = tf.expand_dims(
common_attention.coordinate_tensor(
common_layers.shape_list(x)[:-1], axis=0),
axis=-1)
return batch_coordinate
@registry.register_hparams
def aligned_base():
"""Set of hyperparameters.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps (10min): log(ppl)_eval = 2.60
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = common_hparams.basic_params1()
hparams.hidden_size = 512
hparams.batch_size = 5000
hparams.max_length = 0
hparams.min_length_bucket = 1024
hparams.dropout = 0.0
hparams.layer_prepostprocess_dropout = 0.0
hparams.label_smoothing = 0.0
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 2000
hparams.initializer_gain = 1.0
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.shared_embedding_and_softmax_weights = True
hparams.add_hparam("ffn_hidden_sizes", "2048") # Add new ones like this.
hparams.moe_num_experts = 32
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.add_hparam("layers", "timing," + "conv,att,ffn," * 2)
# attention-related flags
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("pos", "timing") # timing, none
# moe params. local attention moe.
hparams.add_hparam("attention_local", False)
hparams.add_hparam("attention_moe_k", 2)
hparams.add_hparam("attention_num_experts", 16)
hparams.add_hparam("attention_split_batch", False)
# Key, query and value dimensions for the attention
hparams.add_hparam("attention_kq_size", 128)
hparams.add_hparam("attention_v_size", 256)
# Loss coef for load balancing
hparams.add_hparam("attention_load_balance", 2e-2)
hparams.add_hparam("diet_experts", False)
hparams.add_hparam("memory_efficient_ffn", False)
hparams.add_hparam("local_attention_window", 128)
hparams.add_hparam("attention_num_groups", 8)
hparams.add_hparam("memory_target_density", 2.0)
hparams.add_hparam("multiplicative_overhead", 1.25)
hparams.add_hparam("multiplicative_overhead_eval", 2.0)
hparams.add_hparam("attention_image_summary", True)
# LSH params
hparams.add_hparam("lsh_truncated", True)
# For testing right-masking.
# This is not implemented in all layers.
hparams.add_hparam("mask_right", False)
return hparams
@registry.register_hparams
def aligned_memory_efficient():
"""Use multihead_self_attention_memory_efficient.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.59
8.7 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.02
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_memory_efficient,ffn," * 2
return hparams
@registry.register_hparams
def aligned_local_expert():
"""Use local_expert_attention.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.72
10.2 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.27
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_local_expert,ffn," * 2
return hparams
@registry.register_hparams
def aligned_grouped():
"""Use local_expert_attention.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.63
10.2 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.04
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_grouped,ffn," * 2
return hparams
@registry.register_hparams
def aligned_local():
"""Use local attention code.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57
12.8 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.08
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_local,ffn," * 2
return hparams
@registry.register_hparams
def aligned_local_1k():
"""Use local attention code, attend to full sequence.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57
7.5 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = aligned_local()
hparams.local_attention_window = 1024
return hparams
@registry.register_hparams
def aligned_pseudolocal():
"""Use a bias to simulate local attention. attention radius 128.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.06
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_pseudolocal,ffn," * 2
return hparams
@registry.register_hparams
def aligned_pseudolocal_256():
"""Use a bias to simulate local attention. attentio radius 256.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.56
12.0 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.05
Returns:
a hparams object
"""
hparams = aligned_pseudolocal()
hparams.local_attention_window = 256
return hparams
@registry.register_hparams
def aligned_no_timing():
"""No timing signal.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.75
12.3 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.39
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "conv,att,ffn," * 2
return hparams
@registry.register_hparams
def aligned_no_att():
"""No attention at all.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.89
20.8 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.70
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "conv,ffn," * 2
return hparams
@registry.register_hparams
def aligned_pos_emb():
"""positional embedding insead of timing signal.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.67
12.1 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "pos_emb," + "conv,att,ffn," * 2
return hparams
@registry.register_hparams
def aligned_moe():
"""mixture of experts instead of ffn.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.62
6.7 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 1.94
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att,moe," * 2
return hparams
@registry.register_hparams
def aligned_lsh():
"""Use multihead_attention_sparse_dot_prod.
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.layers = "timing," + "conv,att_lsh,ffn," * 2
return hparams
@registry.register_hparams
def aligned_8k():
"""version for languagemodel_wiki_scramble8k50.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.93
1.5 steps/sec on P100
Returns:
a hparams object
"""
hparams = aligned_base()
hparams.batch_size = 8192
return hparams
@registry.register_hparams
def aligned_8k_grouped():
"""version for languagemodel_wiki_scramble8k50.
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.92
3.3 steps/sec on P100
8gpu (8x batch), 7k steps: log(ppl)_eval = 2.15
Returns:
a hparams object
"""
hparams = aligned_grouped()
hparams.batch_size = 8192
# hparams.attention_image_summary = False
hparams.num_groups = 16
hparams.multiplicative_overhead = 1.1
return hparams
|
en
| 0.654219
|
# coding=utf-8 # Copyright 2018 The Tensor2Tensor Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Single stack of transformations with no masking. Produces output aligned with inputs. Configurable using hyperparameters to use some combination of convolutions, attention, mixtures of experts, etc. A good problem for this model is languagemodel_wiki_scramble1k50 . # Dependency imports # pylint: disable=invalid-name Attention net. See file docstring. # Remove dropout if not training # bias # bias # This is an inefficient implementation of local attention, for the # purpose of testing model quality. # TODO(avaswani, epot, noam): Do we need to divide by num shards ? # Bias is computed inside # Additional parameters # No future mask Return a flat int32 tensor of shape [1, batch_size*length, 1]. # Compute the batch coordinate before flattening all batches Set of hyperparameters. languagemodel_wiki_scramble1k50, 1gpu, 7k steps (10min): log(ppl)_eval = 2.60 12.0 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00 Returns: a hparams object # i.e. no gradient clipping # Add new ones like this. # attention-related flags # All hyperparameters ending in "dropout" are automatically set to 0.0 # when not in training mode. # timing, none # moe params. local attention moe. # Key, query and value dimensions for the attention # Loss coef for load balancing # LSH params # For testing right-masking. # This is not implemented in all layers. Use multihead_self_attention_memory_efficient. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.59 8.7 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.02 Returns: a hparams object Use local_expert_attention. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.72 10.2 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.27 Returns: a hparams object Use local_expert_attention. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.63 10.2 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.04 Returns: a hparams object Use local attention code. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57 12.8 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.08 Returns: a hparams object Use local attention code, attend to full sequence. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57 7.5 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00 Returns: a hparams object Use a bias to simulate local attention. attention radius 128. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.57 12.0 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.06 Returns: a hparams object Use a bias to simulate local attention. attentio radius 256. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.56 12.0 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.05 Returns: a hparams object No timing signal. 
languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.75 12.3 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.39 Returns: a hparams object No attention at all. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.89 20.8 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.70 Returns: a hparams object positional embedding insead of timing signal. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.67 12.1 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.00 Returns: a hparams object mixture of experts instead of ffn. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.62 6.7 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 1.94 Returns: a hparams object Use multihead_attention_sparse_dot_prod. Returns: a hparams object version for languagemodel_wiki_scramble8k50. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.93 1.5 steps/sec on P100 Returns: a hparams object version for languagemodel_wiki_scramble8k50. languagemodel_wiki_scramble1k50, 1gpu, 7k steps: log(ppl)_eval = 2.92 3.3 steps/sec on P100 8gpu (8x batch), 7k steps: log(ppl)_eval = 2.15 Returns: a hparams object # hparams.attention_image_summary = False
| 1.673375
| 2
|
podcast-backend/src/app/pcasts/controllers/search_series_controller.py
|
cuappdev/archives
| 0
|
6627081
|
from . import *
class SearchSeriesController(AppDevController):
def get_path(self):
return '/search/series/<query>/'
def get_methods(self):
return ['GET']
@authorize
def content(self, **kwargs):
user_id = kwargs.get('user').id
search_name = request.view_args['query']
offset = request.args['offset']
max_search = request.args['max']
possible_series = series_dao.\
search_series(search_name, offset, max_search, user_id)
return {'series': [series_schema.dump(s).data for s in possible_series]}
|
from . import *
class SearchSeriesController(AppDevController):
def get_path(self):
return '/search/series/<query>/'
def get_methods(self):
return ['GET']
@authorize
def content(self, **kwargs):
user_id = kwargs.get('user').id
search_name = request.view_args['query']
offset = request.args['offset']
max_search = request.args['max']
possible_series = series_dao.\
search_series(search_name, offset, max_search, user_id)
return {'series': [series_schema.dump(s).data for s in possible_series]}
|
none
| 1
| 2.072625
| 2
|
|
scripts/version.py
|
framawiki/pywikibot
| 0
|
6627082
|
<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script to determine the Pywikibot version (tag, revision and date)."""
#
# (C) Pywikibot team, 2007-2020
#
# Distributed under the terms of the MIT license.
#
import codecs
import os
import sys
import pywikibot
from pywikibot.version import getversion, get_toolforge_hostname
try:
import requests
except ImportError:
class DummyRequests:
"""Fake requests instance."""
__version__ = 'n/a'
requests = DummyRequests()
WMF_CACERT = '<KEY>'
def check_environ(environ_name) -> None:
"""Print environment variable."""
pywikibot.output('{0}: {1}'.format(environ_name,
os.environ.get(environ_name,
'Not set')))
def main(*args) -> None:
"""Print pywikibot version and important settings."""
pywikibot.output('Pywikibot: ' + getversion())
pywikibot.output('Release version: ' + pywikibot.__version__)
pywikibot.output('requests version: ' + requests.__version__)
has_wikimedia_cert = False
if (not hasattr(requests, 'certs')
or not hasattr(requests.certs, 'where')
or not callable(requests.certs.where)):
pywikibot.output(' cacerts: not defined')
elif not os.path.isfile(requests.certs.where()):
pywikibot.output(' cacerts: {} (missing)'.format(
requests.certs.where()))
else:
pywikibot.output(' cacerts: ' + requests.certs.where())
with codecs.open(requests.certs.where(), 'r', 'utf-8') as cert_file:
text = cert_file.read()
if WMF_CACERT in text:
has_wikimedia_cert = True
pywikibot.output(' certificate test: {}'
.format(('ok' if has_wikimedia_cert else 'not ok')))
if not has_wikimedia_cert:
pywikibot.output(' Please reinstall requests!')
pywikibot.output('Python: ' + sys.version)
toolforge_env_hostname = get_toolforge_hostname()
if toolforge_env_hostname:
pywikibot.output('Toolforge hostname: ' + toolforge_env_hostname)
check_environ('PYWIKIBOT_DIR')
check_environ('PYWIKIBOT_DIR_PWB')
check_environ('PYWIKIBOT_NO_USER_CONFIG')
pywikibot.output('Config base dir: ' + pywikibot.config2.base_dir)
for family, usernames in pywikibot.config2.usernames.items():
if not usernames:
continue
pywikibot.output('Usernames for family "{0}":'.format(family))
for lang, username in usernames.items():
pywikibot.output('\t{0}: {1}'.format(lang, username))
if __name__ == '__main__':
main()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script to determine the Pywikibot version (tag, revision and date)."""
#
# (C) Pywikibot team, 2007-2020
#
# Distributed under the terms of the MIT license.
#
import codecs
import os
import sys
import pywikibot
from pywikibot.version import getversion, get_toolforge_hostname
try:
import requests
except ImportError:
class DummyRequests:
"""Fake requests instance."""
__version__ = 'n/a'
requests = DummyRequests()
WMF_CACERT = '<KEY>'
def check_environ(environ_name) -> None:
"""Print environment variable."""
pywikibot.output('{0}: {1}'.format(environ_name,
os.environ.get(environ_name,
'Not set')))
def main(*args) -> None:
"""Print pywikibot version and important settings."""
pywikibot.output('Pywikibot: ' + getversion())
pywikibot.output('Release version: ' + pywikibot.__version__)
pywikibot.output('requests version: ' + requests.__version__)
has_wikimedia_cert = False
if (not hasattr(requests, 'certs')
or not hasattr(requests.certs, 'where')
or not callable(requests.certs.where)):
pywikibot.output(' cacerts: not defined')
elif not os.path.isfile(requests.certs.where()):
pywikibot.output(' cacerts: {} (missing)'.format(
requests.certs.where()))
else:
pywikibot.output(' cacerts: ' + requests.certs.where())
with codecs.open(requests.certs.where(), 'r', 'utf-8') as cert_file:
text = cert_file.read()
if WMF_CACERT in text:
has_wikimedia_cert = True
pywikibot.output(' certificate test: {}'
.format(('ok' if has_wikimedia_cert else 'not ok')))
if not has_wikimedia_cert:
pywikibot.output(' Please reinstall requests!')
pywikibot.output('Python: ' + sys.version)
toolforge_env_hostname = get_toolforge_hostname()
if toolforge_env_hostname:
pywikibot.output('Toolforge hostname: ' + toolforge_env_hostname)
check_environ('PYWIKIBOT_DIR')
check_environ('PYWIKIBOT_DIR_PWB')
check_environ('PYWIKIBOT_NO_USER_CONFIG')
pywikibot.output('Config base dir: ' + pywikibot.config2.base_dir)
for family, usernames in pywikibot.config2.usernames.items():
if not usernames:
continue
pywikibot.output('Usernames for family "{0}":'.format(family))
for lang, username in usernames.items():
pywikibot.output('\t{0}: {1}'.format(lang, username))
if __name__ == '__main__':
main()
|
en
| 0.654394
|
#!/usr/bin/python # -*- coding: utf-8 -*- Script to determine the Pywikibot version (tag, revision and date). # # (C) Pywikibot team, 2007-2020 # # Distributed under the terms of the MIT license. # Fake requests instance. Print environment variable. Print pywikibot version and important settings.
| 2.60877
| 3
|
hexrd/ui/color_map_editor.py
|
bnmajor/hexrdgui
| 0
|
6627083
|
<reponame>bnmajor/hexrdgui
import copy
from matplotlib import cm
import matplotlib.colors
import numpy as np
import hexrd.ui.constants
from hexrd.ui.ui_loader import UiLoader
class ColorMapEditor:
def __init__(self, image_object, parent=None):
# The image_object can be any object with the following functions:
# 1. set_cmap: a function to set the cmap on the image
# 2. set_norm: a function to set the norm on the image
self.image_object = image_object
loader = UiLoader()
self.ui = loader.load_file('color_map_editor.ui', parent)
self.bounds = (0, 16384)
self.load_cmaps()
self.setup_connections()
def load_cmaps(self):
cmaps = sorted(i[:-2] for i in dir(cm) if i.endswith('_r'))
self.ui.color_map.addItems(cmaps)
# Set the combobox to be the default
self.ui.color_map.setCurrentText(hexrd.ui.constants.DEFAULT_CMAP)
def setup_connections(self):
self.ui.maximum.valueChanged.connect(self.update_mins_and_maxes)
self.ui.minimum.valueChanged.connect(self.update_mins_and_maxes)
self.ui.color_map.currentIndexChanged.connect(self.update_cmap)
self.ui.reverse.toggled.connect(self.update_cmap)
self.ui.show_under.toggled.connect(self.update_cmap)
self.ui.show_over.toggled.connect(self.update_cmap)
self.ui.maximum.valueChanged.connect(self.update_norm)
self.ui.minimum.valueChanged.connect(self.update_norm)
self.ui.reset_range.pressed.connect(self.reset_range)
self.ui.log_scale.toggled.connect(self.update_norm)
def update_mins_and_maxes(self):
# We can't do this in PySide2 for some reason:
# self.ui.maximum.valueChanged.connect(self.ui.minimum.setMaximum)
# self.ui.minimum.valueChanged.connect(self.ui.maximum.setMinimum)
self.ui.maximum.setMinimum(self.ui.minimum.value())
self.ui.minimum.setMaximum(self.ui.maximum.value())
def block_updates(self, blocked):
self.updates_blocked = blocked
def update_bounds(self, data):
if hasattr(self, 'updates_blocked') and self.updates_blocked:
# We don't want to adjust the bounds
return
bounds = self.percentile_range(data)
self.ui.minimum.setValue(bounds[0])
self.ui.minimum.setToolTip('Min: ' + str(bounds[0]))
self.ui.maximum.setValue(bounds[1])
self.ui.maximum.setToolTip('Max: ' + str(bounds[1]))
self.bounds = bounds
@staticmethod
def percentile_range(data, low=69.0, high=99.9):
        if isinstance(data, dict):
            values = data.values()
        elif not isinstance(data, (list, tuple)):
            values = [data]
        else:
            # data is already a list or tuple of arrays
            values = data
l = min([np.nanpercentile(v, low) for v in values])
h = min([np.nanpercentile(v, high) for v in values])
if h - l < 5:
h = l + 5
print('Range to be used: ', l, ' -> ', h)
return l, h
def reset_range(self):
if hasattr(self, 'updates_blocked') and self.updates_blocked:
# We don't want to adjust the range
return
if self.ui.minimum.maximum() < self.bounds[0]:
# Make sure we can actually set the value...
self.ui.minimum.setMaximum(self.bounds[0])
self.ui.minimum.setValue(self.bounds[0])
self.ui.maximum.setValue(self.bounds[1])
def update_cmap(self):
# Get the Colormap object from the name
cmap = cm.get_cmap(self.ui.color_map.currentText())
if self.ui.reverse.isChecked():
cmap = cmap.reversed()
# For set_under() and set_over(), we don't want to edit the
# original color map, so make a copy
cmap = copy.copy(cmap)
if self.ui.show_under.isChecked():
cmap.set_under('b')
if self.ui.show_over.isChecked():
cmap.set_over('r')
self.image_object.set_cmap(cmap)
def update_norm(self):
min = self.ui.minimum.value()
max = self.ui.maximum.value()
if self.ui.log_scale.isChecked():
# The min cannot be 0 here, or this will raise an exception
min = 1.e-8 if min < 1.e-8 else min
norm = matplotlib.colors.LogNorm(vmin=min, vmax=max)
else:
norm = matplotlib.colors.Normalize(vmin=min, vmax=max)
self.image_object.set_norm(norm)
|
import copy
from matplotlib import cm
import matplotlib.colors
import numpy as np
import hexrd.ui.constants
from hexrd.ui.ui_loader import UiLoader
class ColorMapEditor:
def __init__(self, image_object, parent=None):
# The image_object can be any object with the following functions:
# 1. set_cmap: a function to set the cmap on the image
# 2. set_norm: a function to set the norm on the image
self.image_object = image_object
loader = UiLoader()
self.ui = loader.load_file('color_map_editor.ui', parent)
self.bounds = (0, 16384)
self.load_cmaps()
self.setup_connections()
def load_cmaps(self):
cmaps = sorted(i[:-2] for i in dir(cm) if i.endswith('_r'))
self.ui.color_map.addItems(cmaps)
# Set the combobox to be the default
self.ui.color_map.setCurrentText(hexrd.ui.constants.DEFAULT_CMAP)
def setup_connections(self):
self.ui.maximum.valueChanged.connect(self.update_mins_and_maxes)
self.ui.minimum.valueChanged.connect(self.update_mins_and_maxes)
self.ui.color_map.currentIndexChanged.connect(self.update_cmap)
self.ui.reverse.toggled.connect(self.update_cmap)
self.ui.show_under.toggled.connect(self.update_cmap)
self.ui.show_over.toggled.connect(self.update_cmap)
self.ui.maximum.valueChanged.connect(self.update_norm)
self.ui.minimum.valueChanged.connect(self.update_norm)
self.ui.reset_range.pressed.connect(self.reset_range)
self.ui.log_scale.toggled.connect(self.update_norm)
def update_mins_and_maxes(self):
# We can't do this in PySide2 for some reason:
# self.ui.maximum.valueChanged.connect(self.ui.minimum.setMaximum)
# self.ui.minimum.valueChanged.connect(self.ui.maximum.setMinimum)
self.ui.maximum.setMinimum(self.ui.minimum.value())
self.ui.minimum.setMaximum(self.ui.maximum.value())
def block_updates(self, blocked):
self.updates_blocked = blocked
def update_bounds(self, data):
if hasattr(self, 'updates_blocked') and self.updates_blocked:
# We don't want to adjust the bounds
return
bounds = self.percentile_range(data)
self.ui.minimum.setValue(bounds[0])
self.ui.minimum.setToolTip('Min: ' + str(bounds[0]))
self.ui.maximum.setValue(bounds[1])
self.ui.maximum.setToolTip('Max: ' + str(bounds[1]))
self.bounds = bounds
@staticmethod
def percentile_range(data, low=69.0, high=99.9):
        if isinstance(data, dict):
            values = data.values()
        elif not isinstance(data, (list, tuple)):
            values = [data]
        else:
            # data is already a list or tuple of arrays
            values = data
l = min([np.nanpercentile(v, low) for v in values])
h = min([np.nanpercentile(v, high) for v in values])
if h - l < 5:
h = l + 5
print('Range to be used: ', l, ' -> ', h)
return l, h
def reset_range(self):
if hasattr(self, 'updates_blocked') and self.updates_blocked:
# We don't want to adjust the range
return
if self.ui.minimum.maximum() < self.bounds[0]:
# Make sure we can actually set the value...
self.ui.minimum.setMaximum(self.bounds[0])
self.ui.minimum.setValue(self.bounds[0])
self.ui.maximum.setValue(self.bounds[1])
def update_cmap(self):
# Get the Colormap object from the name
cmap = cm.get_cmap(self.ui.color_map.currentText())
if self.ui.reverse.isChecked():
cmap = cmap.reversed()
# For set_under() and set_over(), we don't want to edit the
# original color map, so make a copy
cmap = copy.copy(cmap)
if self.ui.show_under.isChecked():
cmap.set_under('b')
if self.ui.show_over.isChecked():
cmap.set_over('r')
self.image_object.set_cmap(cmap)
def update_norm(self):
min = self.ui.minimum.value()
max = self.ui.maximum.value()
if self.ui.log_scale.isChecked():
# The min cannot be 0 here, or this will raise an exception
min = 1.e-8 if min < 1.e-8 else min
norm = matplotlib.colors.LogNorm(vmin=min, vmax=max)
else:
norm = matplotlib.colors.Normalize(vmin=min, vmax=max)
self.image_object.set_norm(norm)
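# --- Hedged usage sketch (not part of the original module) -------------------
# ColorMapEditor only needs an image_object exposing set_cmap() and set_norm(),
# as noted in __init__. A minimal adapter around a matplotlib AxesImage could
# look like the class below; the adapter name and wiring are assumptions for
# illustration, not taken from hexrdgui itself.
class AxesImageAdapter:
    """Forward cmap/norm changes to a matplotlib AxesImage and redraw."""
    def __init__(self, axes_image):
        self.axes_image = axes_image
    def set_cmap(self, cmap):
        self.axes_image.set_cmap(cmap)
        self.axes_image.figure.canvas.draw_idle()
    def set_norm(self, norm):
        self.axes_image.set_norm(norm)
        self.axes_image.figure.canvas.draw_idle()
# Example (also requires a running Qt application and the .ui file):
#   import matplotlib.pyplot as plt
#   fig, ax = plt.subplots()
#   adapter = AxesImageAdapter(ax.imshow(np.random.rand(64, 64)))
#   editor = ColorMapEditor(adapter)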
|
en
| 0.747344
|
# The image_object can be any object with the following functions: # 1. set_cmap: a function to set the cmap on the image # 2. set_norm: a function to set the norm on the image # Set the combobox to be the default # We can't do this in PySide2 for some reason: # self.ui.maximum.valueChanged.connect(self.ui.minimum.setMaximum) # self.ui.minimum.valueChanged.connect(self.ui.maximum.setMinimum) # We don't want to adjust the bounds # We don't want to adjust the range # Make sure we can actually set the value... # Get the Colormap object from the name # For set_under() and set_over(), we don't want to edit the # original color map, so make a copy # The min cannot be 0 here, or this will raise an exception
| 2.575002
| 3
|
dbpedia_links_rating/rating/views.py
|
RPOD/DBpedia-Links-Rating
| 0
|
6627084
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.views.generic import TemplateView, UpdateView, DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.shortcuts import redirect
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse # NOQA
from django.shortcuts import get_object_or_404
from .models import Link, Rating
from .forms import CreateRatingForm
# renamed from "Rating" so the view does not shadow the Rating model imported above
class RatingView(LoginRequiredMixin, DetailView):
template_name = 'rating/rating_index.html'
model = Link
def create(request):
content_type_id = request.POST.get('content_type_id')
object_id = request.POST.get('object_id')
ct = ContentType.objects.get_for_id(content_type_id)
return_url = request.POST.get('return_url')
try:
# Current user HAS rated this object
# Updates his rating and total score
rating = Rating.objects.get(author=request.user, object_id=object_id, content_type_id=content_type_id)
rating.vote = request.POST.get('rating')
rating.save()
votes = Rating.objects.filter(object_id=object_id, content_type_id=content_type_id)
            total_score = RatingView.calculate_score(votes)
            score = Link.objects.get(object_id=object_id, content_type_id=content_type_id)
            score.total_score = total_score
            score.save(force_update=True)
            messages.success(request, 'Score updated successfully')
except Rating.DoesNotExist:
# Current user has NOT rated this object
# Saves first new rating
rating = Rating()
rating.content_type = ct
rating.object_id = object_id
rating.vote = request.POST.get('rating')
rating.author = request.user
rating.save()
# Saves first new total score, same value as new rating
try:
score = Link.objects.get(object_id=object_id, content_type_id=content_type_id)
votes = Rating.objects.filter(object_id=object_id, content_type_id=content_type_id)
                total_score = RatingView.calculate_score(votes)
                score.total_score = total_score
                score.save(force_update=True)
                messages.success(request, 'Score updated successfully')
except Link.DoesNotExist:
messages.error(request, 'Link does not exist')
return redirect(return_url)
def calculate_score(rating):
score = []
for vote in rating:
score.append(vote.vote)
score = [item for item in score if item != 0]
n = len(score)
if n == 0:
n = 1
score = float(sum(score))
score = float(score / n)
score = round(score, 1)
return score
def random_link(self):
return get_object_or_404(Link, Link.randomComplete())
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.views.generic import TemplateView, UpdateView, DetailView
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.shortcuts import redirect
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse # NOQA
from django.shortcuts import get_object_or_404
from .models import Link, Rating
from .forms import CreateRatingForm
# renamed from "Rating" so the view does not shadow the Rating model imported above
class RatingView(LoginRequiredMixin, DetailView):
template_name = 'rating/rating_index.html'
model = Link
def create(request):
content_type_id = request.POST.get('content_type_id')
object_id = request.POST.get('object_id')
ct = ContentType.objects.get_for_id(content_type_id)
return_url = request.POST.get('return_url')
try:
# Current user HAS rated this object
# Updates his rating and total score
rating = Rating.objects.get(author=request.user, object_id=object_id, content_type_id=content_type_id)
rating.vote = request.POST.get('rating')
rating.save()
votes = Rating.objects.filter(object_id=object_id, content_type_id=content_type_id)
            total_score = RatingView.calculate_score(votes)
            score = Link.objects.get(object_id=object_id, content_type_id=content_type_id)
            score.total_score = total_score
            score.save(force_update=True)
            messages.success(request, 'Score updated successfully')
except Rating.DoesNotExist:
# Current user has NOT rated this object
# Saves first new rating
rating = Rating()
rating.content_type = ct
rating.object_id = object_id
rating.vote = request.POST.get('rating')
rating.author = request.user
rating.save()
# Saves first new total score, same value as new rating
try:
score = Link.objects.get(object_id=object_id, content_type_id=content_type_id)
votes = Rating.objects.filter(object_id=object_id, content_type_id=content_type_id)
                total_score = RatingView.calculate_score(votes)
                score.total_score = total_score
                score.save(force_update=True)
                messages.success(request, 'Score updated successfully')
except Link.DoesNotExist:
messages.error(request, 'Link does not exist')
return redirect(return_url)
def calculate_score(rating):
score = []
for vote in rating:
score.append(vote.vote)
score = [item for item in score if item != 0]
n = len(score)
if n == 0:
n = 1
score = float(sum(score))
score = float(score / n)
score = round(score, 1)
return score
def random_link(self):
return get_object_or_404(Link, Link.randomComplete())
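# --- Hedged usage sketch (not part of the original module) -------------------
# calculate_score() above averages the non-zero votes and rounds to one decimal
# place (a zero denominator is treated as 1, so no votes yields 0.0). The same
# arithmetic as a self-contained helper working on plain numbers:
def average_nonzero_votes(votes):
    """Average the non-zero votes, rounded to one decimal; 0.0 if none."""
    nonzero = [v for v in votes if v != 0]
    if not nonzero:
        return 0.0
    return round(float(sum(nonzero)) / len(nonzero), 1)
# Example: votes of 4, 0, 5 and 3 -> (4 + 5 + 3) / 3 = 4.0
#   average_nonzero_votes([4, 0, 5, 3])  # -> 4.0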
|
en
| 0.941722
|
# -*- coding: utf-8 -*- # NOQA # Current user HAS rated this object # Updates his rating and total score # Current user has NOT rated this object # Saves first new rating # Saves first new total score, same value as new rating
| 1.992928
| 2
|
marvelo_adapter.py
|
timokau/task-placement
| 1
|
6627085
|
"""Imports marvelo problems and results from csv"""
import os
import math
import re
import csv
import numpy as np
from infrastructure import InfrastructureNetwork
from overlay import OverlayNetwork
from embedding import PartialEmbedding
def csv_to_list(csvfile, sep=","):
"""Parses a csv file into a 2d array, ignoring the header"""
with open(csvfile, "r") as f:
lines = list(csv.reader(f, delimiter=sep))
# skip header
return lines[1:]
def parse_overlay(blocks_file, links_file, datarate):
    """Reads an overlay in MARVELO's format from file"""
# read the files
names = []
requirements = []
links = []
for (_id, srcapp, _port, demand) in csv_to_list(blocks_file):
names.append(srcapp)
requirements.append(float(demand))
for (_id, srcapp, _srcport, dstapp) in csv_to_list(links_file):
links.append((srcapp, dstapp))
block_specifications = list(zip(names, requirements))
# construct the overlay from the gathered info
overlay = OverlayNetwork()
for (name, requirement) in block_specifications[:1]:
overlay.add_source(requirement, datarate, name)
for (name, requirement) in block_specifications[1:-1]:
overlay.add_intermediate(requirement, datarate, name)
for (name, requirement) in block_specifications[-1:]:
overlay.set_sink(requirement, datarate, name)
for (source, target) in links:
overlay.add_link(source, target)
return overlay
def parse_infra(
nodes_file,
sink_source_mapping,
positions_file,
source_seed,
transmit_power_dbm,
):
"""Reads an infrastructure definition in MARVELO format from csvs"""
# read the files
names = []
capacities = []
positions = []
for (_id, name, capacity) in csv_to_list(nodes_file):
names.append(name)
capacities.append(float(capacity))
for csvline in csv_to_list(positions_file, sep=";"):
positions.append([float(pos) for pos in csvline[1:]])
# the positions are saved in a weird format, probably a mistake when
# saving
positions = np.transpose(positions)
specs = list(zip(names, capacities, positions))
(sink_idx, source_idx) = sink_source_mapping[(source_seed, len(specs))]
if source_idx == sink_idx:
return None
# make sure source is always first, sink always last
specs[0], specs[source_idx] = specs[source_idx], specs[0]
specs[-1], specs[sink_idx] = specs[sink_idx], specs[-1]
# construct the infrastructure from the gathered info
infra = InfrastructureNetwork(bandwidth=1, noise_floor_dbm=-30)
for (name, capacity, pos) in specs[:1]:
infra.add_source(pos, transmit_power_dbm, capacity, name)
for (name, capacity, pos) in specs[1:-1]:
infra.add_intermediate(pos, transmit_power_dbm, capacity, name)
for (name, capacity, pos) in specs[-1:]:
infra.set_sink(pos, transmit_power_dbm, capacity, name)
return infra
# pylint: disable=too-many-arguments
def parse_embedding(
nodes_file,
sink_source_mapping,
positions_file,
source_seed,
blocks_file,
links_file,
transmit_power_dbm,
datarate,
):
"""Reads a problem instance in MARVELO format from csv files"""
infra = parse_infra(
nodes_file,
sink_source_mapping,
positions_file,
source_seed,
transmit_power_dbm,
)
if infra is None:
return None
overlay = parse_overlay(blocks_file, links_file, datarate)
# otherwise the mapping wouldn't be specified
assert len(overlay.sources) == 1
assert len(infra.sources) == 1
overlay_source = list(overlay.sources)[0]
infra_source = list(infra.sources)[0]
source_mapping = [(overlay_source, infra_source)]
return PartialEmbedding(infra, overlay, source_mapping)
def load_from_dir(basedir):
"""Loads a set of results from a directory with an assumed format"""
# at bitrate 1 equivalent to a linear SINR threshold of 20
datarate = math.log(1 + 20, 2)
transmit_power_dbm = 30 # == 1W
results_dir = f"{basedir}/resultsTxt"
param_dir = f"{basedir}/param"
result_name_pat = re.compile(r"n(\d+)b(\d+)s(\d+).txt")
# pre-generated with the python2 RNG by simply setting the seed and
# then taking random.choice(range(nr_nodes)) twice
sink_source_mapping = dict()
sink_source_file = f"{basedir}/sink_source_mapping.csv"
for (seed, nodes, sink_idx, source_idx) in csv_to_list(sink_source_file):
sink_source_mapping[(int(seed), int(nodes))] = (
int(sink_idx),
int(source_idx),
)
result = []
for result_file in os.listdir(results_dir):
match = result_name_pat.match(result_file)
nodes = int(match.group(1))
blocks = int(match.group(2))
seed = int(match.group(3))
marvelo_result = int(
float(open(f"{results_dir}/{result_file}").read())
)
info = (nodes, blocks, seed)
nodes_file = f"{param_dir}/n{nodes}{seed}.csv"
# arcsfile = f'{param_dir}/a{nodes}{seed}.csv'
links_file = f"{param_dir}/chain_linear_{blocks}.csv"
blocks_file = f"{param_dir}/b{blocks}{seed}.csv"
positions_file = f"{param_dir}/pos{nodes}{seed}.csv"
embedding = parse_embedding(
nodes_file,
sink_source_mapping,
positions_file,
seed,
blocks_file,
links_file,
transmit_power_dbm,
datarate,
)
result.append((embedding, marvelo_result, info))
return result
def main():
"""Some testing"""
count = 0
other = 0
for (embedding, result, _info) in load_from_dir("marvelo_data"):
if embedding is not None:
count += 1
print(result)
else:
other += 1
# 1047, 152
print(count, other)
if __name__ == "__main__":
main()
|
"""Imports marvelo problems and results from csv"""
import os
import math
import re
import csv
import numpy as np
from infrastructure import InfrastructureNetwork
from overlay import OverlayNetwork
from embedding import PartialEmbedding
def csv_to_list(csvfile, sep=","):
"""Parses a csv file into a 2d array, ignoring the header"""
with open(csvfile, "r") as f:
lines = list(csv.reader(f, delimiter=sep))
# skip header
return lines[1:]
def parse_overlay(blocks_file, links_file, datarate):
    """Reads an overlay in MARVELO's format from file"""
# read the files
names = []
requirements = []
links = []
for (_id, srcapp, _port, demand) in csv_to_list(blocks_file):
names.append(srcapp)
requirements.append(float(demand))
for (_id, srcapp, _srcport, dstapp) in csv_to_list(links_file):
links.append((srcapp, dstapp))
block_specifications = list(zip(names, requirements))
# construct the overlay from the gathered info
overlay = OverlayNetwork()
for (name, requirement) in block_specifications[:1]:
overlay.add_source(requirement, datarate, name)
for (name, requirement) in block_specifications[1:-1]:
overlay.add_intermediate(requirement, datarate, name)
for (name, requirement) in block_specifications[-1:]:
overlay.set_sink(requirement, datarate, name)
for (source, target) in links:
overlay.add_link(source, target)
return overlay
def parse_infra(
nodes_file,
sink_source_mapping,
positions_file,
source_seed,
transmit_power_dbm,
):
"""Reads an infrastructure definition in MARVELO format from csvs"""
# read the files
names = []
capacities = []
positions = []
for (_id, name, capacity) in csv_to_list(nodes_file):
names.append(name)
capacities.append(float(capacity))
for csvline in csv_to_list(positions_file, sep=";"):
positions.append([float(pos) for pos in csvline[1:]])
# the positions are saved in a weird format, probably a mistake when
# saving
positions = np.transpose(positions)
specs = list(zip(names, capacities, positions))
(sink_idx, source_idx) = sink_source_mapping[(source_seed, len(specs))]
if source_idx == sink_idx:
return None
# make sure source is always first, sink always last
specs[0], specs[source_idx] = specs[source_idx], specs[0]
specs[-1], specs[sink_idx] = specs[sink_idx], specs[-1]
# construct the infrastructure from the gathered info
infra = InfrastructureNetwork(bandwidth=1, noise_floor_dbm=-30)
for (name, capacity, pos) in specs[:1]:
infra.add_source(pos, transmit_power_dbm, capacity, name)
for (name, capacity, pos) in specs[1:-1]:
infra.add_intermediate(pos, transmit_power_dbm, capacity, name)
for (name, capacity, pos) in specs[-1:]:
infra.set_sink(pos, transmit_power_dbm, capacity, name)
return infra
# pylint: disable=too-many-arguments
def parse_embedding(
nodes_file,
sink_source_mapping,
positions_file,
source_seed,
blocks_file,
links_file,
transmit_power_dbm,
datarate,
):
"""Reads a problem instance in MARVELO format from csv files"""
infra = parse_infra(
nodes_file,
sink_source_mapping,
positions_file,
source_seed,
transmit_power_dbm,
)
if infra is None:
return None
overlay = parse_overlay(blocks_file, links_file, datarate)
# otherwise the mapping wouldn't be specified
assert len(overlay.sources) == 1
assert len(infra.sources) == 1
overlay_source = list(overlay.sources)[0]
infra_source = list(infra.sources)[0]
source_mapping = [(overlay_source, infra_source)]
return PartialEmbedding(infra, overlay, source_mapping)
def load_from_dir(basedir):
"""Loads a set of results from a directory with an assumed format"""
# at bitrate 1 equivalent to a linear SINR threshold of 20
datarate = math.log(1 + 20, 2)
transmit_power_dbm = 30 # == 1W
results_dir = f"{basedir}/resultsTxt"
param_dir = f"{basedir}/param"
result_name_pat = re.compile(r"n(\d+)b(\d+)s(\d+).txt")
# pre-generated with the python2 RNG by simply setting the seed and
# then taking random.choice(range(nr_nodes)) twice
sink_source_mapping = dict()
sink_source_file = f"{basedir}/sink_source_mapping.csv"
for (seed, nodes, sink_idx, source_idx) in csv_to_list(sink_source_file):
sink_source_mapping[(int(seed), int(nodes))] = (
int(sink_idx),
int(source_idx),
)
result = []
for result_file in os.listdir(results_dir):
match = result_name_pat.match(result_file)
nodes = int(match.group(1))
blocks = int(match.group(2))
seed = int(match.group(3))
marvelo_result = int(
float(open(f"{results_dir}/{result_file}").read())
)
info = (nodes, blocks, seed)
nodes_file = f"{param_dir}/n{nodes}{seed}.csv"
# arcsfile = f'{param_dir}/a{nodes}{seed}.csv'
links_file = f"{param_dir}/chain_linear_{blocks}.csv"
blocks_file = f"{param_dir}/b{blocks}{seed}.csv"
positions_file = f"{param_dir}/pos{nodes}{seed}.csv"
embedding = parse_embedding(
nodes_file,
sink_source_mapping,
positions_file,
seed,
blocks_file,
links_file,
transmit_power_dbm,
datarate,
)
result.append((embedding, marvelo_result, info))
return result
def main():
"""Some testing"""
count = 0
other = 0
for (embedding, result, _info) in load_from_dir("marvelo_data"):
if embedding is not None:
count += 1
print(result)
else:
other += 1
# 1047, 152
print(count, other)
if __name__ == "__main__":
main()
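# --- Hedged usage sketch (not part of the original module) -------------------
# csv_to_list() above drops the header row and returns the remaining rows as
# lists of strings. A quick demonstration with a throwaway file; the file
# contents here are made up purely for illustration.
import tempfile
def _demo_csv_to_list():
    with tempfile.NamedTemporaryFile('w', suffix='.csv', delete=False) as f:
        f.write('id,name,capacity\n0,node0,1.5\n1,node1,2.0\n')
        path = f.name
    return csv_to_list(path)
    # -> [['0', 'node0', '1.5'], ['1', 'node1', '2.0']]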
|
en
| 0.822035
|
Imports marvelo problems and results from csv Parses a csv file into a 2d array, ignoring the header # skip header Reads an overlay in MARVELOs format from file # read the files # construct the overlay from the gathered info Reads an infrastructure definition in MARVELO format from csvs # read the files # the positions are saved in a weird format, probably a mistake when # saving # make sure source is always first, sink always last # construct the infrastructure from the gathered info # pylint: disable=too-many-arguments Reads a problem instance in MARVELO format from csv files # otherwise the mapping wouldn't be specified Loads a set of results from a directory with an assumed format # at bitrate 1 equivalent to a linear SINR threshold of 20 # == 1W # pre-generated with the python2 RNG by simply setting the seed and # then taking random.choice(range(nr_nodes)) twice # arcsfile = f'{param_dir}/a{nodes}{seed}.csv' Some testing # 1047, 152
| 2.873246
| 3
|
Scrapers/pcmagUrlsScraper.py
|
sydneyhill3901/CSI2999
| 0
|
6627086
|
import requests, time, csv
from bs4 import BeautifulSoup
# listPageRootUrl is "https://www.pcmag.com/categories/mobile-phones?page="
# pageNumber is the page number on PCMag site to scrape
# write=True writes scraped URLs to CSV file in format phoneName|url
def getReviews(listPageRootUrl, pageNumber, write=True):
pageList = []
currentPage = requests.get(listPageRootUrl + str(pageNumber))
soup = BeautifulSoup(currentPage.content, "html.parser")
x = soup.find_all("div", class_="w-full flex flex-wrap md:flex-no-wrap py-4 border-b border-gray-lighter")
for y in x:
z = y.find("span", class_="ml-1 mr-3")
if z is not None:
rowList = []
k = y.find("h2", class_="text-base md:text-xl font-brand font-bold")
link = "https://www.pcmag.com" + k.find("a")['href']
p = k.find("a")['data-item']
phoneName = p.replace(" Review", "").lower().strip()
if "(" in phoneName and ")" in phoneName:
q = phoneName.split("(")
phoneName = q[0].strip()
if "+" in phoneName:
phoneName = phoneName.replace("+", " plus")
rowList.append(phoneName)
rowList.append(link)
pageList.append(phoneName)
pageList.append(link)
if write:
writeCsvRow(rowList)
return pageList
# listPageRootUrl is "https://www.pcmag.com/categories/mobile-phones?page="
# pageNumber is the page number on PCMag site to start scraping reviews from
# timeSleep is time to sleep in seconds between making each request
# if any interruption occurs, function can be called with pageNumber = page after the last page scraped before interruption
# csv writer will append to csv file as if no interruption occurred
def writeAllReviews(listPageRootUrl, pageNumber, timeSleep):
fullPhoneList = []
startTime = time.time()
if timeSleep < 3:
timeSleep = 5
timeSleep = float(timeSleep)
while requests.get(listPageRootUrl + str(pageNumber)).ok:
pagePhoneList = getReviews(listPageRootUrl, pageNumber)
for x in pagePhoneList:
fullPhoneList.append(x)
print("Reviews on page " + str(pageNumber) + ":")
print(pagePhoneList)
fancySleep(timeSleep)
pageNumber += 1
print("Reached end of reviews.")
print("RUNTIME: " + str(time.time() - startTime) + " seconds.")
return fullPhoneList
# appends one row to CSV
def writeCsvRow(rowList):
    # open with a context manager so the file is flushed and closed after each row
    with open("PCMagURLs.csv", "a+", encoding="utf8") as dataOutput:
        writer = csv.writer(dataOutput, delimiter='|', lineterminator="\r", quoting=csv.QUOTE_NONE)
        writer.writerow(rowList)
# for sleeping fancy
def fancySleep(timeSleep):
print("sleeping " + str(int(timeSleep)) + " seconds", end="", flush=True) # https://stackoverflow.com/questions/5598181/multiple-prints-on-the-same-line-in-python
time.sleep(timeSleep / 4)
print(" .", end="", flush=True)
time.sleep(timeSleep / 4)
print(" .", end="", flush=True)
time.sleep(timeSleep / 4)
print(" .")
time.sleep(timeSleep / 4)
writeAllReviews("https://www.pcmag.com/categories/mobile-phones?page=", 1, 10)
|
import requests, time, csv
from bs4 import BeautifulSoup
# listPageRootUrl is "https://www.pcmag.com/categories/mobile-phones?page="
# pageNumber is the page number on PCMag site to scrape
# write=True writes scraped URLs to CSV file in format phoneName|url
def getReviews(listPageRootUrl, pageNumber, write=True):
pageList = []
currentPage = requests.get(listPageRootUrl + str(pageNumber))
soup = BeautifulSoup(currentPage.content, "html.parser")
x = soup.find_all("div", class_="w-full flex flex-wrap md:flex-no-wrap py-4 border-b border-gray-lighter")
for y in x:
z = y.find("span", class_="ml-1 mr-3")
if z is not None:
rowList = []
k = y.find("h2", class_="text-base md:text-xl font-brand font-bold")
link = "https://www.pcmag.com" + k.find("a")['href']
p = k.find("a")['data-item']
phoneName = p.replace(" Review", "").lower().strip()
if "(" in phoneName and ")" in phoneName:
q = phoneName.split("(")
phoneName = q[0].strip()
if "+" in phoneName:
phoneName = phoneName.replace("+", " plus")
rowList.append(phoneName)
rowList.append(link)
pageList.append(phoneName)
pageList.append(link)
if write:
writeCsvRow(rowList)
return pageList
# listPageRootUrl is "https://www.pcmag.com/categories/mobile-phones?page="
# pageNumber is the page number on PCMag site to start scraping reviews from
# timeSleep is time to sleep in seconds between making each request
# if any interruption occurs, function can be called with pageNumber = page after the last page scraped before interruption
# csv writer will append to csv file as if no interruption occurred
def writeAllReviews(listPageRootUrl, pageNumber, timeSleep):
fullPhoneList = []
startTime = time.time()
if timeSleep < 3:
timeSleep = 5
timeSleep = float(timeSleep)
while requests.get(listPageRootUrl + str(pageNumber)).ok:
pagePhoneList = getReviews(listPageRootUrl, pageNumber)
for x in pagePhoneList:
fullPhoneList.append(x)
print("Reviews on page " + str(pageNumber) + ":")
print(pagePhoneList)
fancySleep(timeSleep)
pageNumber += 1
print("Reached end of reviews.")
print("RUNTIME: " + str(time.time() - startTime) + " seconds.")
return fullPhoneList
# appends one row to CSV
def writeCsvRow(rowList):
    # open with a context manager so the file is flushed and closed after each row
    with open("PCMagURLs.csv", "a+", encoding="utf8") as dataOutput:
        writer = csv.writer(dataOutput, delimiter='|', lineterminator="\r", quoting=csv.QUOTE_NONE)
        writer.writerow(rowList)
# for sleeping fancy
def fancySleep(timeSleep):
print("sleeping " + str(int(timeSleep)) + " seconds", end="", flush=True) # https://stackoverflow.com/questions/5598181/multiple-prints-on-the-same-line-in-python
time.sleep(timeSleep / 4)
print(" .", end="", flush=True)
time.sleep(timeSleep / 4)
print(" .", end="", flush=True)
time.sleep(timeSleep / 4)
print(" .")
time.sleep(timeSleep / 4)
writeAllReviews("https://www.pcmag.com/categories/mobile-phones?page=", 1, 10)
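# --- Hedged usage sketch (not part of the original script) -------------------
# getReviews() above normalises each scraped title: strip " Review", lower-case,
# drop a trailing "(...)" qualifier and spell "+" as " plus". The same steps as
# a standalone helper; the example title is made up for illustration.
def normalize_phone_name(title):
    name = title.replace(" Review", "").lower().strip()
    if "(" in name and ")" in name:
        name = name.split("(")[0].strip()
    if "+" in name:
        name = name.replace("+", " plus")
    return name
# Example:
#   normalize_phone_name("Samsung Galaxy S21+ Review (AT&T)")
#   # -> "samsung galaxy s21 plus"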
|
en
| 0.852576
|
# listPageRootUrl is "https://www.pcmag.com/categories/mobile-phones?page=" # pageNumber is the page number on PCMag site to scrape # write=True writes scraped URLs to CSV file in format phoneName|url # listPageRootUrl is "https://www.pcmag.com/categories/mobile-phones?page=" # pageNumber is the page number on PCMag site to start scraping reviews from # timeSleep is time to sleep in seconds between making each request # if any interruption occurs, function can be called with pageNumber = page after the last page scraped before interruption # csv writer will append to csv file as if no interruption occured # appends one row to CSV # for sleeping fancy # https://stackoverflow.com/questions/5598181/multiple-prints-on-the-same-line-in-python
| 3.186912
| 3
|
solidata_api/_auth/auth_distant_protocols.py
|
co-demos/solidata-backend
| 2
|
6627087
|
"""
auth_distant_protocols.py
"""
from log_config import log, pprint, pformat
log.debug (">>> _auth ... loading auth_distant_protocols ...")
functions_protocols = {
### DONE
"token_claims" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "token_claims",
},
# TESTS TO DO
"confirm_access" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "confirm_access",
},
"new_access_token" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "new_access_token",
},
"fresh_access_token" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "fresh_access_token",
},
"new_refresh_token" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "new_refresh_token",
},
### DONE
"login_user" : {
"endpoint_config" : "user_login",
"endpoint_code" : "login",
},
"login_anonymous" : {
"endpoint_config" : "user_login",
"endpoint_code" : "login_anonymous",
},
### DONE
"register_user" : {
"endpoint_config" : "user_edit",
"endpoint_code" : "register",
},
"confirm_email_user" : {
"endpoint_config" : "user_edit",
"endpoint_code" : "confirm_email",
},
"update_user" : {
"endpoint_config" : "user_edit",
"endpoint_code" : "user_update",
},
"delete_user" : {
"endpoint_config" : "user_edit",
"endpoint_code" : "user_delete",
},
### TESTS TO DO
"password_forgotten" : {
"endpoint_config" : "auth_password",
"endpoint_code" : "pwd_forgot",
},
"password_reset_get" : {
"endpoint_config" : "auth_password",
"endpoint_code" : "pwd_reset_link",
},
"password_reset_post" : {
"endpoint_config" : "auth_password",
"endpoint_code" : "pwd_reset",
},
### TESTS TO DO
"users_get_one" : {
"endpoint_config" : "users_list",
"endpoint_code" : "get_one",
},
"users_get_list" : {
"endpoint_config" : "users_list",
"endpoint_code" : "users_get_list",
},
# ### TO DO
# "add_claims_to_access_token" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
# ### TO DO
# "user_identity_lookup" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
# ### TO DO
# "my_expired_token_callback" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### WORKING ON IT
# "anonymous_required" : {
# "endpoint_config" : "auth_tokens",
# "endpoint_code" : "token_claims",
# },
### TO DO
# "anonymous_or_guest_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "guest_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "admin_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "staff_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "renew_pwd_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "reset_pwd_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "confirm_email_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "current_user_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
}
|
"""
auth_distant_protocols.py
"""
from log_config import log, pprint, pformat
log.debug (">>> _auth ... loading auth_distant_protocols ...")
functions_protocols = {
### DONE
"token_claims" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "token_claims",
},
# TESTS TO DO
"confirm_access" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "confirm_access",
},
"new_access_token" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "new_access_token",
},
"fresh_access_token" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "fresh_access_token",
},
"new_refresh_token" : {
"endpoint_config" : "auth_tokens",
"endpoint_code" : "new_refresh_token",
},
### DONE
"login_user" : {
"endpoint_config" : "user_login",
"endpoint_code" : "login",
},
"login_anonymous" : {
"endpoint_config" : "user_login",
"endpoint_code" : "login_anonymous",
},
### DONE
"register_user" : {
"endpoint_config" : "user_edit",
"endpoint_code" : "register",
},
"confirm_email_user" : {
"endpoint_config" : "user_edit",
"endpoint_code" : "confirm_email",
},
"update_user" : {
"endpoint_config" : "user_edit",
"endpoint_code" : "user_update",
},
"delete_user" : {
"endpoint_config" : "user_edit",
"endpoint_code" : "user_delete",
},
### TESTS TO DO
"password_forgotten" : {
"endpoint_config" : "auth_password",
"endpoint_code" : "pwd_forgot",
},
"password_reset_get" : {
"endpoint_config" : "auth_password",
"endpoint_code" : "pwd_reset_link",
},
"password_reset_post" : {
"endpoint_config" : "auth_password",
"endpoint_code" : "pwd_reset",
},
### TESTS TO DO
"users_get_one" : {
"endpoint_config" : "users_list",
"endpoint_code" : "get_one",
},
"users_get_list" : {
"endpoint_config" : "users_list",
"endpoint_code" : "users_get_list",
},
# ### TO DO
# "add_claims_to_access_token" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
# ### TO DO
# "user_identity_lookup" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
# ### TO DO
# "my_expired_token_callback" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### WORKING ON IT
# "anonymous_required" : {
# "endpoint_config" : "auth_tokens",
# "endpoint_code" : "token_claims",
# },
### TO DO
# "anonymous_or_guest_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "guest_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "admin_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "staff_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "renew_pwd_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "reset_pwd_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "confirm_email_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
### TO DO
# "current_user_required" : {
# "endpoint_config" : "users_list",
# "endpoint_code" : "get_one",
# },
}
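# --- Hedged usage sketch (not part of the original module) -------------------
# functions_protocols maps an auth function name to the endpoint config block
# and endpoint code it should use. A lookup helper built on the dict above
# could look like this; the helper itself is illustrative, not part of solidata.
def get_endpoint(function_name):
    """Return (endpoint_config, endpoint_code) for a known auth function."""
    protocol = functions_protocols[function_name]
    return protocol["endpoint_config"], protocol["endpoint_code"]
# Example:
#   get_endpoint("login_user")  # -> ("user_login", "login")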
|
en
| 0.190646
|
auth_distant_protocols.py ### DONE # TESTS TO DO ### DONE ### DONE ### TESTS TO DO ### TESTS TO DO # ### TO DO # "add_claims_to_access_token" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, # ### TO DO # "user_identity_lookup" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, # ### TO DO # "my_expired_token_callback" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, ### WORKING ON IT # "anonymous_required" : { # "endpoint_config" : "auth_tokens", # "endpoint_code" : "token_claims", # }, ### TO DO # "anonymous_or_guest_required" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, ### TO DO # "guest_required" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, ### TO DO # "admin_required" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, ### TO DO # "staff_required" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, ### TO DO # "renew_pwd_required" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, ### TO DO # "reset_pwd_required" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, ### TO DO # "confirm_email_required" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # }, ### TO DO # "current_user_required" : { # "endpoint_config" : "users_list", # "endpoint_code" : "get_one", # },
| 2.237887
| 2
|
api/drinks/tests/tags.py
|
gthole/drink-stash
| 7
|
6627088
|
<reponame>gthole/drink-stash<filename>api/drinks/tests/tags.py
from datetime import timedelta
from django.utils.timezone import now
from rest_framework.test import APIClient
from drinks.models import Tag
from .base import BaseTestCase
class TagTestCase(BaseTestCase):
def test_fetch_tags(self):
Tag.objects.create(name='sour')
Tag.objects.create(name='bitter')
resp = self.client.get('/api/v1/tags/')
self.assertEqual(
len(resp.json()['results']),
Tag.objects.all().count()
)
self.assertEqual(
resp.json()['results'],
['bitter', 'sour']
)
def test_304_unmodified(self):
Tag.objects.create(name='sour')
resp = self.client.get(
'/api/v1/tags/',
HTTP_IF_MODIFIED_SINCE=now().isoformat(),
HTTP_X_COUNT='1'
)
self.assertEqual(resp.status_code, 304)
def test_no_304_if_modified(self):
Tag.objects.create(name='sour')
resp = self.client.get(
'/api/v1/tags/',
HTTP_IF_MODIFIED_SINCE=(now() - timedelta(minutes=5)).isoformat(),
HTTP_X_COUNT='1'
)
self.assertEqual(resp.status_code, 200)
def test_no_304_if_inaccurate_last_count(self):
Tag.objects.create(name='sour')
resp = self.client.get(
'/api/v1/tags/',
HTTP_IF_MODIFIED_SINCE=now().isoformat(),
HTTP_X_COUNT='2'
)
self.assertEqual(resp.status_code, 200)
def test_no_post(self):
resp = self.client.post('/api/v1/tags/')
self.assertEqual(resp.status_code, 405)
def test_no_put(self):
resp = self.client.put('/api/v1/tags/1/')
self.assertEqual(resp.status_code, 405)
def test_no_delete(self):
resp = self.client.delete('/api/v1/tags/1/')
self.assertEqual(resp.status_code, 405)
|
from datetime import timedelta
from django.utils.timezone import now
from rest_framework.test import APIClient
from drinks.models import Tag
from .base import BaseTestCase
class TagTestCase(BaseTestCase):
def test_fetch_tags(self):
Tag.objects.create(name='sour')
Tag.objects.create(name='bitter')
resp = self.client.get('/api/v1/tags/')
self.assertEqual(
len(resp.json()['results']),
Tag.objects.all().count()
)
self.assertEqual(
resp.json()['results'],
['bitter', 'sour']
)
def test_304_unmodified(self):
Tag.objects.create(name='sour')
resp = self.client.get(
'/api/v1/tags/',
HTTP_IF_MODIFIED_SINCE=now().isoformat(),
HTTP_X_COUNT='1'
)
self.assertEqual(resp.status_code, 304)
def test_no_304_if_modified(self):
Tag.objects.create(name='sour')
resp = self.client.get(
'/api/v1/tags/',
HTTP_IF_MODIFIED_SINCE=(now() - timedelta(minutes=5)).isoformat(),
HTTP_X_COUNT='1'
)
self.assertEqual(resp.status_code, 200)
def test_no_304_if_inaccurate_last_count(self):
Tag.objects.create(name='sour')
resp = self.client.get(
'/api/v1/tags/',
HTTP_IF_MODIFIED_SINCE=now().isoformat(),
HTTP_X_COUNT='2'
)
self.assertEqual(resp.status_code, 200)
def test_no_post(self):
resp = self.client.post('/api/v1/tags/')
self.assertEqual(resp.status_code, 405)
def test_no_put(self):
resp = self.client.put('/api/v1/tags/1/')
self.assertEqual(resp.status_code, 405)
def test_no_delete(self):
resp = self.client.delete('/api/v1/tags/1/')
self.assertEqual(resp.status_code, 405)
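# --- Hedged usage sketch (not part of the original tests) --------------------
# The tests above exercise a conditional GET: the endpoint replies 304 only when
# If-Modified-Since is newer than the data AND X-Count matches the current tag
# count. An illustrative client-side helper using the same headers (the helper
# and its caching policy are assumptions, not part of drink-stash):
def fetch_tags_if_changed(client, last_fetch_iso, last_count):
    resp = client.get(
        '/api/v1/tags/',
        HTTP_IF_MODIFIED_SINCE=last_fetch_iso,
        HTTP_X_COUNT=str(last_count),
    )
    if resp.status_code == 304:
        return None  # the cached copy is still valid
    return resp.json()['results']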
|
none
| 1
| 2.221169
| 2
|
|
san_antonio/san_antonio.py
|
NicolasFlandrois/My-Mini-Py-Scripts-Training
| 0
|
6627089
|
<reponame>NicolasFlandrois/My-Mini-Py-Scripts-Training<gh_stars>0
# -*- coding: utf8 -*-
quotes = [
"Ecoutez-moi, <NAME>, nous avons beau être ou ne pas être, nous sommes !",
"On doit pouvoir choisir entre s'écouter parler et se faire entendre."
]
characters = [
"alvin et les Chipmunks",
"Babar",
"<NAME>",
"calimero",
"casper",
"le chat potté",
"Kirikou"
]
def get_random_quote(my_list):
    """Return a quote from the list (intended to be random; currently the first item)."""
# import random
# n = random.randrange(0, 2, 2)
item = my_list[0]
return item
user_answer = "A"
while user_answer != "B":
user_answer = input("Tappez 'Entrer' pour executer le programme, et 'B' pour quitter le programme. ")
print(get_random_quote(quotes))
for character in characters:
n_character = character.capitalize()
print(n_character)
|
# -*- coding: utf8 -*-
quotes = [
"Ecoutez-moi, <NAME>, nous avons beau être ou ne pas être, nous sommes !",
"On doit pouvoir choisir entre s'écouter parler et se faire entendre."
]
characters = [
"alvin et les Chipmunks",
"Babar",
"<NAME>",
"calimero",
"casper",
"le chat potté",
"Kirikou"
]
def get_random_quote(my_list):
    """Return a quote from the list (intended to be random; currently the first item)."""
# import random
# n = random.randrange(0, 2, 2)
item = my_list[0]
return item
user_answer = "A"
while user_answer != "B":
user_answer = input("Tappez 'Entrer' pour executer le programme, et 'B' pour quitter le programme. ")
print(get_random_quote(quotes))
for character in characters:
n_character = character.capitalize()
print(n_character)
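# --- Hedged usage sketch (not part of the original script) -------------------
# get_random_quote() above always returns the first quote; the truly random
# variant hinted at in its commented-out lines could look like this:
import random
def get_truly_random_quote(my_list):
    """Return one quote chosen uniformly at random from the list."""
    return random.choice(my_list)
# Example:
#   print(get_truly_random_quote(quotes))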
|
en
| 0.347731
|
# -*- coding: utf8 -*- This fonction show a random quote from a list. # import random # n = random.randrange(0, 2, 2)
| 4.0644
| 4
|
src/compas_ghpython/artists/mixins/faceartist.py
|
elidim/compas
| 1
|
6627090
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas
from compas.utilities import color_to_colordict
import compas_ghpython
try:
import Rhino
except ImportError:
compas.raise_if_ironpython()
__all__ = ['FaceArtist']
class FaceArtist(object):
def draw_faces(self, keys=None, color=None, join_faces=False):
"""Draw a selection of faces.
Parameters
----------
fkeys : list
A list of face keys identifying which faces to draw.
The default is ``None``, in which case all faces are drawn.
color : str, tuple, dict
            The color specification for the faces.
Colors should be specified in the form of a string (hex colors) or
as a tuple of RGB components.
To apply the same color to all faces, provide a single color
specification. Individual colors can be assigned using a dictionary
of key-color pairs. Missing keys will be assigned the default face
color (``self.defaults['face.color']``).
The default is ``None``, in which case all faces are assigned the
default face color.
Notes
-----
The faces are named using the following template:
``"{}.face.{}".format(self.datastructure.name, key)``.
"""
keys = keys or list(self.datastructure.faces())
colordict = color_to_colordict(color,
keys,
default=self.defaults.get('color.face'),
colorformat='rgb',
normalize=False)
faces = []
for fkey in keys:
faces.append({
'points': self.datastructure.face_coordinates(fkey),
'name' : self.datastructure.face_name(fkey),
'color' : colordict[fkey],
'layer' : self.datastructure.get_face_attribute(fkey, 'layer', None)
})
meshes = compas_ghpython.xdraw_faces(faces)
if not join_faces:
return meshes
else:
joined_mesh = Rhino.Geometry.Mesh()
[joined_mesh.Append(mesh) for mesh in meshes]
return joined_mesh
def draw_facelabels(self, text=None, color=None):
"""Draw labels for a selection of faces.
Parameters
----------
text : dict
A dictionary of face labels as key-text pairs.
The default value is ``None``, in which case every face will be labelled with its key.
color : str, tuple, dict
            The color specification of the labels.
            String values are interpreted as hex colors (e.g. ``'#ff0000'`` for red).
            Tuples are interpreted as RGB component specifications (e.g. ``(255, 0, 0)`` for red).
            If a dictionary of specifications is provided, the keys of the dictionary
            should refer to face keys and the values should be color
            specifications in the form of strings or tuples.
The default value is ``None``, in which case the labels are assigned
the default face color (``self.defaults['color.face']``).
Notes
-----
The face labels are named using the following template:
``"{}.face.label.{}".format(self.datastructure.name, key)``.
This name is used afterwards to identify faces and face labels in the Rhino model.
"""
if text is None:
textdict = {key: str(key) for key in self.datastructure.faces()}
elif isinstance(text, dict):
textdict = text
else:
raise NotImplementedError
colordict = color_to_colordict(color,
textdict.keys(),
default=self.defaults.get('color.face'),
colorformat='rgb',
normalize=False)
labels = []
for key, text in iter(textdict.items()):
labels.append({
'pos' : self.datastructure.face_center(key),
'name' : "{}.face.label.{}".format(self.datastructure.name, key),
'color' : colordict[key],
'text' : textdict[key],
'layer' : self.datastructure.get_face_attribute(key, 'layer', None)
})
return compas_ghpython.xdraw_labels(labels)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
|
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import compas
from compas.utilities import color_to_colordict
import compas_ghpython
try:
import Rhino
except ImportError:
compas.raise_if_ironpython()
__all__ = ['FaceArtist']
class FaceArtist(object):
def draw_faces(self, keys=None, color=None, join_faces=False):
"""Draw a selection of faces.
Parameters
----------
fkeys : list
A list of face keys identifying which faces to draw.
The default is ``None``, in which case all faces are drawn.
color : str, tuple, dict
            The color specification for the faces.
Colors should be specified in the form of a string (hex colors) or
as a tuple of RGB components.
To apply the same color to all faces, provide a single color
specification. Individual colors can be assigned using a dictionary
of key-color pairs. Missing keys will be assigned the default face
color (``self.defaults['face.color']``).
The default is ``None``, in which case all faces are assigned the
default face color.
Notes
-----
The faces are named using the following template:
``"{}.face.{}".format(self.datastructure.name, key)``.
"""
keys = keys or list(self.datastructure.faces())
colordict = color_to_colordict(color,
keys,
default=self.defaults.get('color.face'),
colorformat='rgb',
normalize=False)
faces = []
for fkey in keys:
faces.append({
'points': self.datastructure.face_coordinates(fkey),
'name' : self.datastructure.face_name(fkey),
'color' : colordict[fkey],
'layer' : self.datastructure.get_face_attribute(fkey, 'layer', None)
})
meshes = compas_ghpython.xdraw_faces(faces)
if not join_faces:
return meshes
else:
joined_mesh = Rhino.Geometry.Mesh()
[joined_mesh.Append(mesh) for mesh in meshes]
return joined_mesh
def draw_facelabels(self, text=None, color=None):
"""Draw labels for a selection of faces.
Parameters
----------
text : dict
A dictionary of face labels as key-text pairs.
The default value is ``None``, in which case every face will be labelled with its key.
color : str, tuple, dict
            The color specification of the labels.
            String values are interpreted as hex colors (e.g. ``'#ff0000'`` for red).
            Tuples are interpreted as RGB component specifications (e.g. ``(255, 0, 0)`` for red).
            If a dictionary of specifications is provided, the keys of the dictionary
            should refer to face keys and the values should be color
            specifications in the form of strings or tuples.
The default value is ``None``, in which case the labels are assigned
the default face color (``self.defaults['color.face']``).
Notes
-----
The face labels are named using the following template:
``"{}.face.label.{}".format(self.datastructure.name, key)``.
This name is used afterwards to identify faces and face labels in the Rhino model.
"""
if text is None:
textdict = {key: str(key) for key in self.datastructure.faces()}
elif isinstance(text, dict):
textdict = text
else:
raise NotImplementedError
colordict = color_to_colordict(color,
textdict.keys(),
default=self.defaults.get('color.face'),
colorformat='rgb',
normalize=False)
labels = []
for key, text in iter(textdict.items()):
labels.append({
'pos' : self.datastructure.face_center(key),
'name' : "{}.face.label.{}".format(self.datastructure.name, key),
'color' : colordict[key],
'text' : textdict[key],
'layer' : self.datastructure.get_face_attribute(key, 'layer', None)
})
return compas_ghpython.xdraw_labels(labels)
# ==============================================================================
# Main
# ==============================================================================
if __name__ == "__main__":
pass
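# ==============================================================================
# Hedged usage sketch (not part of the original module)
# ==============================================================================
# draw_faces() accepts either a single color for every face or a dict keyed by
# face key, as its docstring describes. An illustrative call on a mesh artist
# that mixes in FaceArtist; the artist class and mesh method names below are
# assumptions, not verified against compas_ghpython:
#
#   artist = MeshArtist(mesh)
#   highlight = {fkey: (255, 0, 0) for fkey in mesh.faces_on_boundary()}
#   artist.draw_faces(color=highlight)   # boundary faces red, others default
#   artist.draw_facelabels()             # label every face with its key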
|
en
| 0.712484
|
Draw a selection of faces. Parameters ---------- fkeys : list A list of face keys identifying which faces to draw. The default is ``None``, in which case all faces are drawn. color : str, tuple, dict The color specififcation for the faces. Colors should be specified in the form of a string (hex colors) or as a tuple of RGB components. To apply the same color to all faces, provide a single color specification. Individual colors can be assigned using a dictionary of key-color pairs. Missing keys will be assigned the default face color (``self.defaults['face.color']``). The default is ``None``, in which case all faces are assigned the default face color. Notes ----- The faces are named using the following template: ``"{}.face.{}".format(self.datastructure.name, key)``. Draw labels for a selection of faces. Parameters ---------- text : dict A dictionary of face labels as key-text pairs. The default value is ``None``, in which case every face will be labelled with its key. color : str, tuple, dict The color sepcification of the labels. String values are interpreted as hex colors (e.g. ``'#ff0000'`` for red). Tuples are interpreted as RGB component specifications (e.g. ``(255, 0, 0) for red``. If a dictionary of specififcations is provided, the keys of the should refer to face keys and the values should be color specifications in the form of strings or tuples. The default value is ``None``, in which case the labels are assigned the default face color (``self.defaults['color.face']``). Notes ----- The face labels are named using the following template: ``"{}.face.label.{}".format(self.datastructure.name, key)``. This name is used afterwards to identify faces and face labels in the Rhino model. # ============================================================================== # Main # ==============================================================================
| 2.726663
| 3
|
treeio/account/migrations/0001_initial.py
|
Andrea-MariaDB-2/treeio
| 242
|
6627091
|
<reponame>Andrea-MariaDB-2/treeio<filename>treeio/account/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('body', models.TextField(default=b'', null=True, blank=True)),
('ntype', models.CharField(max_length=1, choices=[(b'd', 'Daily'), (b'w', 'Weekly'), (b'm', 'Monthly')])),
('date_created', models.DateTimeField(default=datetime.datetime.now)),
('recipient', models.ForeignKey(to='core.User')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NotificationSetting',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ntype', models.CharField(max_length=1, verbose_name=b'Type', choices=[(b'd', 'Daily'), (b'w', 'Weekly'), (b'm', 'Monthly')])),
('next_date', models.DateField(null=True, blank=True)),
('last_datetime', models.DateTimeField(default=datetime.datetime.now)),
('enabled', models.BooleanField(default=True)),
('modules', models.ManyToManyField(to='core.Module')),
('owner', models.ForeignKey(to='core.User', unique=True)),
],
options={
},
bases=(models.Model,),
),
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Notification',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('body', models.TextField(default=b'', null=True, blank=True)),
('ntype', models.CharField(max_length=1, choices=[(b'd', 'Daily'), (b'w', 'Weekly'), (b'm', 'Monthly')])),
('date_created', models.DateTimeField(default=datetime.datetime.now)),
('recipient', models.ForeignKey(to='core.User')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='NotificationSetting',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('ntype', models.CharField(max_length=1, verbose_name=b'Type', choices=[(b'd', 'Daily'), (b'w', 'Weekly'), (b'm', 'Monthly')])),
('next_date', models.DateField(null=True, blank=True)),
('last_datetime', models.DateTimeField(default=datetime.datetime.now)),
('enabled', models.BooleanField(default=True)),
('modules', models.ManyToManyField(to='core.Module')),
('owner', models.ForeignKey(to='core.User', unique=True)),
],
options={
},
bases=(models.Model,),
),
]
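# --- Hedged usage sketch (not part of the original migration) ----------------
# The migration above creates the Notification and NotificationSetting tables.
# Once applied (python manage.py migrate account), creating a notification row
# could look like the lines below; the import paths are assumptions based on
# the app layout, and ntype='w' is one of the choices declared in the migration.
#
#   from treeio.account.models import Notification
#   from treeio.core.models import User
#   user = User.objects.first()
#   Notification.objects.create(recipient=user, body='Weekly digest', ntype='w')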
|
en
| 0.769321
|
# -*- coding: utf-8 -*-
| 1.855903
| 2
|
nanoAPI/handler/router.py
|
Nandan-unni/Nano
| 2
|
6627092
|
<filename>nanoAPI/handler/router.py
from nanoAPI.utils import parse_route
class Router:
def __init__(self):
self.routes = []
def get(self, route: str, controller):
parsed_route = parse_route(route)
parsed_route["method"] = "GET"
parsed_route["controller"] = controller
self.routes.append(parsed_route)
def post(self, route: str, controller):
parsed_route = parse_route(route)
parsed_route["method"] = "POST"
parsed_route["controller"] = controller
self.routes.append(parsed_route)
def search_route(self, req):
for route in self.routes:
if req.method == route['method']:
if req.url == route['url']:
return route['controller'], req
else:
isSame = False
req_modes = req.url.split("/")
self_mods = route['url'].split("/")
if len(req_modes) == len(self_mods):
isSame = True
                        for i in range(len(self_mods)):
                            # a ":param" segment matches anything; otherwise the
                            # two path segments must be identical
                            if not str(self_mods[i]).startswith(":") and self_mods[i] != req_modes[i]:
                                isSame = False
if isSame:
for i in range(len(self_mods)):
if str(self_mods[i]).startswith(":"):
param = self_mods[i].replace(":", "")
req.params[param] = req_modes[i]
return route["controller"], req
else:
pass
return None, req
|
<filename>nanoAPI/handler/router.py
from nanoAPI.utils import parse_route
class Router:
def __init__(self):
self.routes = []
def get(self, route: str, controller):
parsed_route = parse_route(route)
parsed_route["method"] = "GET"
parsed_route["controller"] = controller
self.routes.append(parsed_route)
def post(self, route: str, controller):
parsed_route = parse_route(route)
parsed_route["method"] = "POST"
parsed_route["controller"] = controller
self.routes.append(parsed_route)
def search_route(self, req):
for route in self.routes:
if req.method == route['method']:
if req.url == route['url']:
return route['controller'], req
else:
isSame = False
req_modes = req.url.split("/")
self_mods = route['url'].split("/")
if len(req_modes) == len(self_mods):
isSame = True
for i in range(len(self_mods)):
if str(self_mods[i]).startswith(":"):
pass
else:
if self_mods[i] == req_modes[i]:
pass
else:
isSame = False
if isSame:
for i in range(len(self_mods)):
if str(self_mods[i]).startswith(":"):
param = self_mods[i].replace(":", "")
req.params[param] = req_modes[i]
return route["controller"], req
else:
pass
return None, req
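# A self-contained sketch (not part of nanoAPI) of the matching rule that
# search_route applies above: URL segments are compared one by one, and any
# pattern segment starting with ":" acts as a wildcard whose value is captured
# as a path parameter.
def _match_path_example(pattern: str, url: str):
    pattern_parts = pattern.split("/")
    url_parts = url.split("/")
    if len(pattern_parts) != len(url_parts):
        return None  # a different number of segments can never match
    params = {}
    for pat, seg in zip(pattern_parts, url_parts):
        if pat.startswith(":"):
            params[pat[1:]] = seg  # capture ":id" -> {"id": seg}
        elif pat != seg:
            return None  # a literal segment differs
    return params
# e.g. _match_path_example("/users/:id/posts", "/users/42/posts") == {"id": "42"}
# while _match_path_example("/users/:id/posts", "/users/42") is None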
|
none
| 1
| 2.871114
| 3
|
|
EXIFnaming/nameop.py
|
mvolkert/EXIFnaming
| 0
|
6627093
|
<filename>EXIFnaming/nameop.py
#!/usr/bin/env python3
"""
Organizing photos according to naming conventions defined by readexif.rename and external programs
dependencies: -
"""
import codecs
import csv
import datetime as dt
import os
import re
from typing import Optional, Match, Iterable, Any, IO, Tuple, List
import numpy as np
from EXIFnaming.helpers import settings, fileop
import EXIFnaming.helpers.constants as c
from EXIFnaming.helpers.date import dateformating
from EXIFnaming.helpers.fileop import renameInPlace, renameTemp, moveBracketSeries, moveSeries, move, removeIfEmtpy, \
get_relpath_depth, move_media, copyFilesTo, writeToFile, is_invalid_path, filterFiles, isfile, \
file_has_ext, remove_ext, get_plain_filenames_of_type
from EXIFnaming.helpers.misc import askToContinue
from EXIFnaming.helpers.program_dir import get_saves_dir, get_info_dir, get_setexif_dir, log, log_function_call
from EXIFnaming.helpers.settings import image_types
from EXIFnaming.helpers.tag_conversion import FilenameAccessor
from sortedcollections import OrderedSet
__all__ = ["filter_series", "filter_primary", "copy_subdirectories", "copy_files", "copy_new_files", "replace_in_file",
"folders_to_main", "rename_HDR", "sanitize_filename", "rename_temp_back", "rename_back", "create_tags_csv",
"create_tags_csv_per_dir", "create_counters_csv", "create_counters_csv_per_dir", 'create_names_csv_per_dir',
"create_example_csvs", "create_rating_csv", "move_each_pretag_to_folder"]
def filter_series():
"""
put each kind of series in its own directory
"""
log_function_call(filter_series.__name__)
inpath = os.getcwd()
skipdirs = ["B" + str(i) for i in range(1, 8)]
skipdirs += ["S", "SM", "TL", "mp4", "HDR", "single", "PANO", "others", "TLM"]
# TLM: Timelapse manual - pictures on different days to be combined to a Timelapse
skipdirs += [model for model in c.CameraModelShort.values() if model]
log().info(inpath)
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, skipdirs): continue
log().info("%s #dirs:%d #files:%d", dirpath, len(dirnames), len(filenames))
filenames = moveBracketSeries(dirpath, filenames)
filenames = moveSeries(dirpath, filenames, "S")
filenames = moveSeries(dirpath, filenames, "SM")
filenames = moveSeries(dirpath, filenames, "TL")
filenames = move_media(dirpath, filenames, settings.video_types, "mp4")
# filter process types to separate folders - attention: ordering of statements matters
filenames = move_media(dirpath, filenames, ["PANO"], "PANO")
filenames = move_media(dirpath, filenames, ["ANIMA"], "ANIMA")
filenames = move_media(dirpath, filenames, ["RET"], "RET")
filenames = move_media(dirpath, filenames, ["ZOOM"], "ZOOM")
filenames = move_media(dirpath, filenames, ["SMALL"], "SMALL")
filenames = move_media(dirpath, filenames, ["CUT"], "CUT")
filenames = move_media(dirpath, filenames, ["HDR"], "HDR")
move_media(dirpath, filenames, settings.image_types, "single")
def filter_primary():
"""
put single and B1 in same directory
"""
log_function_call(filter_primary.__name__)
inpath = os.getcwd()
skipdirs = ["S", "SM", "TL", "mp4", "HDR", "single", "PANO", "others"]
skipdirs += [model for model in c.CameraModelShort.values() if model]
log().info(inpath)
folders_to_main(dirs=["B" + str(i) for i in range(1, 8)])
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, skipdirs): continue
log().info("%s #dirs:%d #files:%d", dirpath, len(dirnames), len(filenames))
filenames = moveSeries(dirpath, filenames, "S")
filenames = moveSeries(dirpath, filenames, "SM")
filenames = moveSeries(dirpath, filenames, "TL")
filenames = move_media(dirpath, filenames, settings.video_types, "mp4")
filenames = move_media(dirpath, filenames, ["HDR"], "HDR")
filenames = moveSeries(dirpath, filenames, "B", "1", "primary")
filenames = moveSeries(dirpath, filenames, "B")
move_media(dirpath, filenames, settings.image_types, "primary")
def copy_subdirectories(dest: str, dir_names: []):
"""
copy sub folders of specified names to dest without directory structure
:param dest: copy destination
:param dir_names: directory names to copy
"""
inpath = os.getcwd()
log().info(inpath)
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, whitelist=dir_names): continue
copyFilesTo(filenames, dest, False)
def copy_files(dest: str, sub_name: str = None):
"""
copy files which have names containing sub_name to dest without directory structure
:param dest: copy destination
:param sub_name: name part to search
"""
inpath = os.getcwd()
log().info(inpath)
found_files = []
for (dirpath, dirnames, filenames) in os.walk(inpath):
for filename in filenames:
if not sub_name or sub_name in filename:
found_files.append(os.path.join(dirpath, filename))
copyFilesTo(found_files, dest, False)
def copy_new_files(dest: str, playlist: str):
"""
sorting music files - FIXME maybe not the right place here
:param dest: copy destination
:param playlist: name part to search
"""
csv.register_dialect('tab', delimiter='\t', lineterminator='\r\n')
with codecs.open(playlist, "rb", "utf-16") as csvfile:
reader = csv.DictReader(csvfile, dialect="tab")
places = [remove_ext(row["Ort"]) for row in reader if row is not None]
places = [os.path.basename(place) for place in places if place != ""]
inpath = os.getcwd()
mp3_files = []
m4a_files = []
for (dirpath, dirnames, filenames) in os.walk(inpath):
for filename in filenames:
fullname = os.path.join(dirpath, filename)
if not remove_ext(filename) in places:
if file_has_ext(filename, [".mp3"]):
mp3_files.append(fullname)
if file_has_ext(filename, [".m4a"]):
m4a_files.append(fullname)
copyFilesTo(mp3_files, os.path.join(dest, "mp3", remove_ext(playlist)), False)
copyFilesTo(m4a_files, os.path.join(dest, "m4a", remove_ext(playlist)), False)
def replace_in_file(search: str, replace: str, fileext: str):
"""
replace search with replace in files ending with fileext
:param search: string to search for
:param replace: string to replace
:param fileext: type of file to search in
"""
inpath = os.getcwd()
log().info(inpath)
for (dirpath, dirnames, filenames) in os.walk(inpath):
for filename in filenames:
if filename.endswith(fileext):
log().info(filename)
fullfilename = os.path.join(dirpath, filename)
with open(fullfilename, 'r') as file:
content = file.read()
content = content.replace(search, replace)
with open(fullfilename, 'w') as file:
file.write(content)
def folders_to_main(series: bool = False, primary: bool = False, blurry: bool = False, dirs: list = None,
one_level: bool = True, not_inpath: bool = True):
"""
reverses filtering/sorting into directories
:param series: restrict to reverse of filterSeries
:param primary: restrict to reverse of filterPrimary
:param blurry: restrict to reverse of detectBlurry
:param dirs: restrict to reverse other dirs
:param one_level: reverse only one directory up
:param not_inpath: leave all directories in inpath as they are, only change subdirectories
"""
log_function_call(folders_to_main.__name__, series, primary, blurry, dirs, one_level, not_inpath)
inpath = os.getcwd()
reverseDirs = []
if series: reverseDirs += ["B" + str(i) for i in range(1, 8)] + ["S", "single"]
if primary: reverseDirs += ["B", "S", "TL", "SM", "primary"]
if blurry: reverseDirs += ["blurry"]
if dirs: reverseDirs += list(dirs)
deepest = 0
for (dirpath, dirnames, filenames) in os.walk(inpath):
if not_inpath and dirpath == inpath: continue
if is_invalid_path(dirpath, whitelist=reverseDirs): continue
depth = get_relpath_depth(dirpath, inpath)
deepest = max(deepest, depth)
if not_inpath:
deepest -= 1
if not reverseDirs and deepest > 1:
log().warning("A folder structure with a depth of %2d will be flattened", deepest)
askToContinue()
elif deepest > 3:
log().warning("The folder structure has a depth of %2d", deepest)
log().info("chosen directory names: %r", reverseDirs)
askToContinue()
for (dirpath, dirnames, filenames) in os.walk(inpath):
if not_inpath and dirpath == inpath: continue
if is_invalid_path(dirpath, whitelist=reverseDirs): continue
if one_level:
destination = os.path.dirname(dirpath)
else:
destination = inpath
log().info("%s #dirs:%d #files:%d", dirpath, len(dirnames), len(filenames))
for filename in filenames:
if not file_has_ext(filename, settings.image_types + settings.video_types): continue
move(filename, dirpath, destination)
removeIfEmtpy(dirpath)
def move_each_pretag_to_folder():
"""
"""
log_function_call(move_each_pretag_to_folder.__name__)
inpath = os.getcwd()
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath): continue
for filename in filenames:
filenameAccessor = FilenameAccessor(filename)
if not filenameAccessor.pre in dirpath:
move(filename, dirpath, os.path.join(dirpath, filenameAccessor.pre))
if len(filenameAccessor.primtags) > 0 and not filenameAccessor.primtags[0] in dirpath:
move(filename, dirpath, os.path.join(dirpath, *filenameAccessor.primtags))
def rename_HDR(mode="HDRT", folder=r"HDR\w*"):
"""
rename HDR pictures generated by FRANZIS HDR projects to a nicer form
:param mode: name for HDR-Mode written to file
:param folder: only files in folders of this name are renamed
"""
log_function_call(rename_HDR.__name__, mode, folder)
matchreg = r"^([-\w]+_[0-9]+)B\d(.*)_(?:\d+B)?\d\2"
inpath = os.getcwd()
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, regex=folder): continue
log().info("Folder: %s", dirpath)
for filename in filenames:
if mode in filename: continue
match = re.search(matchreg, filename)
if match:
_rename_match(dirpath, filename, mode, match)
else:
log().info("no match: %s", filename)
for dirname in dirnames:
match = re.search(matchreg, dirname)
if match:
_rename_match(dirpath, dirname, mode, match)
def _rename_match(dirpath: str, filename: str, mode: str, match: Optional[Match[str]]):
extension = filename[filename.rfind("."):]
filename_new_part1 = match.group(1) + "_" + mode
filename_new_part2 = match.group(2) + extension
filename_new = filename_new_part1 + filename_new_part2
i = 2
while os.path.isfile(os.path.join(dirpath, filename_new)):
filename_new = filename_new_part1 + "%d" % i + filename_new_part2
i += 1
renameInPlace(dirpath, filename, filename_new)
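# Illustration of the matchreg pattern used by rename_HDR (the filename below is
# hypothetical, purely for demonstration): for "trip_042B1_3B1.JPG" the regex
# captures group(1) == "trip_042" and an empty group(2), so _rename_match would
# rename the file to "trip_042_HDRT.JPG" with the default mode.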
def sanitize_filename(folder=r"", posttags_to_end: List[str] = None, onlyprint=False):
"""
sanitize order of Scene and Process tags
sanitize counter to be split by $
    sanitize sub-process names added by an external program so they are concatenated to the main process name (only Hugin)
    :param folder: optional regex to restrict to matching folders
    :param posttags_to_end: optional list of special posttags to resort to the end
    :param onlyprint: if true, renaming is only printed to the log and no files are renamed; useful for testing
:return:
"""
inpath = os.getcwd()
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, regex=folder): continue
for filename in (filenames + dirnames):
filename = filename.replace("panorama", "PANO")
filenameAccessor = FilenameAccessor(filename)
_sanitize_posttags(filenameAccessor, posttags_to_end)
_sanitize_process_counter(filenameAccessor)
_sanitize_pano(filenameAccessor)
filename_new = filenameAccessor.sorted_filename()
if not filename == filename_new:
log().info("rename: %s to %s", filename, filename_new)
if not onlyprint:
renameInPlace(dirpath, filename, filename_new)
def _sanitize_posttags(filenameAccessor: FilenameAccessor, posttags_to_end: List[str] = None):
if not posttags_to_end: return
for posttag in posttags_to_end:
if posttag in filenameAccessor.posttags:
filenameAccessor.posttags.remove(posttag)
filenameAccessor.posttags.append(posttag)
def _sanitize_pano(filenameAccessor: FilenameAccessor):
matches = [tag for tag in filenameAccessor.processes if tag.startswith("PANO")]
if not matches: return
pano_name = matches[0]
pano_split = pano_name.split("$")
pano_newname = pano_split[0]
pano_modi = ["blended", "fused", "hdr"]
for pano_modus in pano_modi:
if pano_modus in filenameAccessor.posttags:
pano_newname += "-" + pano_modus
filenameAccessor.posttags.remove(pano_modus)
if len(pano_split) > 0:
pano_newname = "$".join([pano_newname] + pano_split[1:])
filenameAccessor.processes.remove(matches[0])
filenameAccessor.processes = [pano_newname] + filenameAccessor.processes
def _sanitize_process_counter(filenameAccessor: FilenameAccessor):
processes_new = []
for process_mode in filenameAccessor.processes:
if not "$" in process_mode:
match = re.search(r'([^\d]+)(\d.*)', process_mode)
if match:
process_mode = match.group(1) + "$" + match.group(2)
processes_new.append(process_mode)
filenameAccessor.processes = processes_new
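# Example of the counter-splitting rule above (hypothetical tags for
# illustration): a process tag without "$" such as "HDRT3" is rewritten to
# "HDRT$3", while an already split tag like "PANO$2" is left unchanged.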
def _get_new_filename_from_dict(filename_dict: dict):
filename_new_list = filename_dict["main"] + filename_dict["scene"] + \
filename_dict["process"] + filename_dict["tags"]
return "_".join(filename_new_list)
def rename_temp_back():
"""
rename temporary renamed files back
"""
inpath = os.getcwd()
matchreg = 'temp$'
for (dirpath, dirnames, filenames) in os.walk(inpath):
for filename in filenames:
match = re.search(matchreg, filename)
if not match: continue
newFilename = re.sub(matchreg, '', filename)
renameInPlace(dirpath, filename, newFilename)
def rename_back(timestring="", fileext=".JPG"):
"""
    rename back using a backup in saves; change into the directory you want to rename back
:param timestring: time of backup
:param fileext: file extension
:return:
"""
log_function_call(rename_back.__name__)
dirname = get_saves_dir()
tagFile = os.path.join(dirname, "Tags" + fileext + "_" + timestring + ".npz")
if not timestring or os.path.isfile(tagFile):
tagFiles = [x for x in os.listdir(dirname) if ".npz" in x]
tagFile = os.path.join(dirname, tagFiles[-1])
Tagdict = np.load(tagFile, allow_pickle=True)["Tagdict"].item()
temppostfix = renameTemp(Tagdict["Directory"], Tagdict["File Name new"])
log().debug("length of Tagdict: %d", len(list(Tagdict.values())[0]))
for i in range(len(list(Tagdict.values())[0])):
filename = Tagdict["File Name new"][i] + temppostfix
if not os.path.isfile(os.path.join(Tagdict["Directory"][i], filename)): continue
filename_old = Tagdict["File Name"][i]
renameInPlace(Tagdict["Directory"][i], filename, filename_old)
Tagdict["File Name new"][i], Tagdict["File Name"][i] = Tagdict["File Name"][i], Tagdict["File Name new"][i]
timestring = dateformating(dt.datetime.now(), "_MMDDHHmmss")
np.savez_compressed(os.path.join(dirname, "Tags" + fileext + timestring), Tagdict=Tagdict)
def rename_in_csvs(timestring="", fileext=".JPG"):
"""
    use backup in saves; rename file references in the csv files in the setexif directory
:param timestring: time of backup
:param fileext: file extension
"""
log_function_call(rename_in_csvs.__name__)
dirname = get_saves_dir()
tagFile = os.path.join(dirname, "Tags" + fileext + "_" + timestring + ".npz")
if not timestring or os.path.isfile(tagFile):
tagFiles = [x for x in os.listdir(dirname) if ".npz" in x]
tagFile = os.path.join(dirname, tagFiles[-1])
Tagdict = np.load(tagFile, allow_pickle=True)["Tagdict"].item()
log().debug("length of Tagdict: %d", len(list(Tagdict.values())[0]))
csv_filenames = filterFiles(os.listdir(get_setexif_dir()), [".csv"])
csv_filenames = [os.path.join(get_setexif_dir(), csv_filename) for csv_filename in csv_filenames]
for csv_filename in csv_filenames:
with open(csv_filename, "r", encoding="utf-8") as file:
data = file.read()
for i in range(len(list(Tagdict.values())[0])):
data = data.replace(Tagdict["File Name"][i], Tagdict["File Name new"][i])
with open(csv_filename, "w", encoding="utf-8") as file:
file.write(data)
def create_tags_csv(location: str = ""):
"""
extract tags from the file name
write a csv file with those tags
:param location: optional content of directory column
This csv can be modified to be used with :func:`write_exif_using_csv` or :func:`placeinfo.write_infos`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
inpath = os.getcwd()
tag_set = OrderedSet()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_places.csv")
tags_places_file, writer = fileop.create_csv_writer(out_filename, ["directory", "name_part"])
filenameAccessors = [FilenameAccessor(filename) for filename in get_plain_filenames_of_type(image_types, inpath)]
for fileNameAccessor in filenameAccessors:
for tag in fileNameAccessor.tags():
tag_set.add(tag)
writeToFile(get_info_dir("tags.txt"), location + "\n\t" + "\n\t".join(tag_set) + "\n")
for tag in tag_set:
tag_set_names.add((location, tag))
writer.writerows(tag_set_names)
tags_places_file.close()
def create_tags_csv_per_dir():
"""
extract tags from the file name
write a csv file with those tags and group them by toplevel directory
This csv can be modified to be used with :func:`write_exif_using_csv` or :func:`placeinfo.write_infos`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
log_function_call(create_tags_csv_per_dir.__name__)
inpath = os.getcwd()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_per_dir.csv")
tags_places_file, writer = fileop.create_csv_writer(out_filename, ["directory", "name_part"])
for (dirpath, dirnames, filenames) in os.walk(inpath):
if not inpath == dirpath: continue
for dirname in dirnames:
tag_set = OrderedSet()
filenameAccessors = [FilenameAccessor(filename) for filename in
get_plain_filenames_of_type(image_types, dirpath, dirname)]
if len(filenameAccessors) == 0: continue
for fileNameAccessor in filenameAccessors:
for tag in fileNameAccessor.tags():
tag_set.add(tag)
writeToFile(get_info_dir("tags.txt"), dirname + "\n\t" + "\n\t".join(tag_set) + "\n")
dirname_split = dirname.split("_")
subnames = [subname for subname in dirname_split if not subname.isnumeric()]
dirname = "_".join(subnames)
for tag in tag_set:
tag_set_names.add((dirname, tag))
writer.writerows(tag_set_names)
tags_places_file.close()
def create_counters_csv():
"""
extract counter from the file name
write a csv file with those counters
This csv can be modified to be used with :func:`write_exif_using_csv`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
log_function_call(create_counters_csv.__name__)
inpath = os.getcwd()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_counters.csv")
csvfile, writer = fileop.create_csv_writer(out_filename,
["directory", "name_main", "name_part", "first", "last", "tags3",
"description"])
filenameAccessors = [FilenameAccessor(filename) for filename in get_plain_filenames_of_type(image_types, inpath)]
_add_counter_csv_entries("", filenameAccessors, tag_set_names)
writer.writerows(tag_set_names)
csvfile.close()
def create_counters_csv_per_dir():
"""
extract counter from the file name
write a csv file with those counters for each directory
This csv can be modified to be used with :func:`write_exif_using_csv`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
log_function_call(create_tags_csv_per_dir.__name__)
inpath = os.getcwd()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_counters.csv")
csvfile, writer = fileop.create_csv_writer(out_filename,
["directory", "name_main", "name_part", "first", "last", "tags3",
"description"])
for (dirpath, dirnames, filenames) in os.walk(inpath):
if not inpath == dirpath: continue
for dirname in dirnames:
filenameAccessors = [FilenameAccessor(filename) for filename in
get_plain_filenames_of_type(image_types, dirpath, dirname)]
if len(filenameAccessors) == 0: continue
_add_counter_csv_entries(dirname, filenameAccessors, tag_set_names)
writer.writerows(tag_set_names)
csvfile.close()
def _add_counter_csv_entries(dirname: str, filenameAccessors: List[FilenameAccessor], tag_set_names: OrderedSet):
fileNameAccessorFirst = filenameAccessors[0]
fileNameAccessorLast = filenameAccessors[0]
for filenameAccessor in filenameAccessors[1:-1]:
if not filenameAccessor.is_direct_successor_of(fileNameAccessorLast):
tag_set_names.add((dirname, fileNameAccessorFirst.pre, fileNameAccessorFirst.first_posttag(),
fileNameAccessorFirst.counter_main(), fileNameAccessorLast.counter_main()))
fileNameAccessorFirst = filenameAccessor
fileNameAccessorLast = filenameAccessor
tag_set_names.add((dirname, fileNameAccessorFirst.pre, fileNameAccessorFirst.first_posttag(),
fileNameAccessorFirst.counter_main(), fileNameAccessorLast.counter_main()))
def create_names_csv_per_dir(start_after_dir=''):
"""
extract names from the file path
write a csv file with those names for each directory
This csv can be modified to be used with :func:`write_exif_using_csv`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
log_function_call(create_names_csv_per_dir.__name__)
inpath = os.getcwd()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_names.csv")
csvfile, writer = fileop.create_csv_writer(out_filename, ["directory", "name_main", "tags"])
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath): continue
filenameAccessors = [FilenameAccessor(filename) for filename in
filterFiles(filenames, image_types)]
if len(filenameAccessors) == 0: continue
tags = []
found = False
for part in dirpath.split(os.sep):
if found:
tags += part.split(', ')
else:
found = part == start_after_dir
filenameAccessorLast = filenameAccessors[0]
tag_set_names.add(
(", ".join(tags), filenameAccessorLast.pre, ', '.join(OrderedSet(tags + [filenameAccessorLast.pre]))))
for filenameAccessor in filenameAccessors[1:]:
if not filenameAccessor.pre == filenameAccessorLast.pre:
tag_set_names.add(
(", ".join(tags), filenameAccessor.pre, ', '.join(OrderedSet(tags + [filenameAccessor.pre]))))
filenameAccessorLast = filenameAccessor
writer.writerows(tag_set_names)
csvfile.close()
def create_rating_csv(rating: int = 4, subdir: str = ""):
"""
creates a csv file with all files in the directory
the rating column is filled with param rating
:param rating: rating to be written
:param subdir: sub directory to make rating file of, if empty all directories will be taken
"""
log_function_call(create_rating_csv.__name__, rating, subdir)
inpath = os.getcwd()
out_filebasename = "rating"
if subdir: out_filebasename += "_" + subdir
out_filename = get_setexif_dir(out_filebasename + ".csv")
rating_file, writer = fileop.create_csv_writer(out_filename, ["name_part", "rating"])
for (dirpath, dirnames, filenames) in os.walk(os.path.join(inpath, subdir)):
if is_invalid_path(dirpath): continue
for filename in filterFiles(filenames, settings.image_types):
writer.writerow([filename, rating])
rating_file.close()
def create_example_csvs():
"""
creates some examples for csv files
"""
_create_empty_csv("rating", ["name_part", "rating"])
_create_empty_csv("gps", ["directory", "name_part", "Location", "gps", "City", "State", "Country", "tags3"])
_create_empty_csv("tags", ["name_main", "first", "last", "tags", "tags3"])
_create_empty_csv("processing", ["directory", "name_part", "tags2", "HDR-ghosting", "HDR-strength"])
def _create_empty_csv(name: str, columns: Iterable):
filename = get_setexif_dir(name + ".csv")
if isfile(filename): return
csv_file, writer = fileop.create_csv_writer(filename, columns)
csv_file.close()
|
<filename>EXIFnaming/nameop.py
#!/usr/bin/env python3
"""
Organizing photos according to naming conventions defined by readexif.rename and external programs
dependencies: -
"""
import codecs
import csv
import datetime as dt
import os
import re
from typing import Optional, Match, Iterable, Any, IO, Tuple, List
import numpy as np
from EXIFnaming.helpers import settings, fileop
import EXIFnaming.helpers.constants as c
from EXIFnaming.helpers.date import dateformating
from EXIFnaming.helpers.fileop import renameInPlace, renameTemp, moveBracketSeries, moveSeries, move, removeIfEmtpy, \
get_relpath_depth, move_media, copyFilesTo, writeToFile, is_invalid_path, filterFiles, isfile, \
file_has_ext, remove_ext, get_plain_filenames_of_type
from EXIFnaming.helpers.misc import askToContinue
from EXIFnaming.helpers.program_dir import get_saves_dir, get_info_dir, get_setexif_dir, log, log_function_call
from EXIFnaming.helpers.settings import image_types
from EXIFnaming.helpers.tag_conversion import FilenameAccessor
from sortedcollections import OrderedSet
__all__ = ["filter_series", "filter_primary", "copy_subdirectories", "copy_files", "copy_new_files", "replace_in_file",
"folders_to_main", "rename_HDR", "sanitize_filename", "rename_temp_back", "rename_back", "create_tags_csv",
"create_tags_csv_per_dir", "create_counters_csv", "create_counters_csv_per_dir", 'create_names_csv_per_dir',
"create_example_csvs", "create_rating_csv", "move_each_pretag_to_folder"]
def filter_series():
"""
put each kind of series in its own directory
"""
log_function_call(filter_series.__name__)
inpath = os.getcwd()
skipdirs = ["B" + str(i) for i in range(1, 8)]
skipdirs += ["S", "SM", "TL", "mp4", "HDR", "single", "PANO", "others", "TLM"]
# TLM: Timelapse manual - pictures on different days to be combined to a Timelapse
skipdirs += [model for model in c.CameraModelShort.values() if model]
log().info(inpath)
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, skipdirs): continue
log().info("%s #dirs:%d #files:%d", dirpath, len(dirnames), len(filenames))
filenames = moveBracketSeries(dirpath, filenames)
filenames = moveSeries(dirpath, filenames, "S")
filenames = moveSeries(dirpath, filenames, "SM")
filenames = moveSeries(dirpath, filenames, "TL")
filenames = move_media(dirpath, filenames, settings.video_types, "mp4")
# filter process types to separate folders - attention: ordering of statements matters
filenames = move_media(dirpath, filenames, ["PANO"], "PANO")
filenames = move_media(dirpath, filenames, ["ANIMA"], "ANIMA")
filenames = move_media(dirpath, filenames, ["RET"], "RET")
filenames = move_media(dirpath, filenames, ["ZOOM"], "ZOOM")
filenames = move_media(dirpath, filenames, ["SMALL"], "SMALL")
filenames = move_media(dirpath, filenames, ["CUT"], "CUT")
filenames = move_media(dirpath, filenames, ["HDR"], "HDR")
move_media(dirpath, filenames, settings.image_types, "single")
def filter_primary():
"""
put single and B1 in same directory
"""
log_function_call(filter_primary.__name__)
inpath = os.getcwd()
skipdirs = ["S", "SM", "TL", "mp4", "HDR", "single", "PANO", "others"]
skipdirs += [model for model in c.CameraModelShort.values() if model]
log().info(inpath)
folders_to_main(dirs=["B" + str(i) for i in range(1, 8)])
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, skipdirs): continue
log().info("%s #dirs:%d #files:%d", dirpath, len(dirnames), len(filenames))
filenames = moveSeries(dirpath, filenames, "S")
filenames = moveSeries(dirpath, filenames, "SM")
filenames = moveSeries(dirpath, filenames, "TL")
filenames = move_media(dirpath, filenames, settings.video_types, "mp4")
filenames = move_media(dirpath, filenames, ["HDR"], "HDR")
filenames = moveSeries(dirpath, filenames, "B", "1", "primary")
filenames = moveSeries(dirpath, filenames, "B")
move_media(dirpath, filenames, settings.image_types, "primary")
def copy_subdirectories(dest: str, dir_names: []):
"""
copy sub folders of specified names to dest without directory structure
:param dest: copy destination
:param dir_names: directory names to copy
"""
inpath = os.getcwd()
log().info(inpath)
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, whitelist=dir_names): continue
copyFilesTo(filenames, dest, False)
def copy_files(dest: str, sub_name: str = None):
"""
copy files which have names containing sub_name to dest without directory structure
:param dest: copy destination
:param sub_name: name part to search
"""
inpath = os.getcwd()
log().info(inpath)
found_files = []
for (dirpath, dirnames, filenames) in os.walk(inpath):
for filename in filenames:
if not sub_name or sub_name in filename:
found_files.append(os.path.join(dirpath, filename))
copyFilesTo(found_files, dest, False)
def copy_new_files(dest: str, playlist: str):
"""
sorting music files - FIXME maybe not the right place here
:param dest: copy destination
:param playlist: name part to search
"""
csv.register_dialect('tab', delimiter='\t', lineterminator='\r\n')
with codecs.open(playlist, "rb", "utf-16") as csvfile:
reader = csv.DictReader(csvfile, dialect="tab")
places = [remove_ext(row["Ort"]) for row in reader if row is not None]
places = [os.path.basename(place) for place in places if place != ""]
inpath = os.getcwd()
mp3_files = []
m4a_files = []
for (dirpath, dirnames, filenames) in os.walk(inpath):
for filename in filenames:
fullname = os.path.join(dirpath, filename)
if not remove_ext(filename) in places:
if file_has_ext(filename, [".mp3"]):
mp3_files.append(fullname)
if file_has_ext(filename, [".m4a"]):
m4a_files.append(fullname)
copyFilesTo(mp3_files, os.path.join(dest, "mp3", remove_ext(playlist)), False)
copyFilesTo(m4a_files, os.path.join(dest, "m4a", remove_ext(playlist)), False)
def replace_in_file(search: str, replace: str, fileext: str):
"""
replace search with replace in files ending with fileext
:param search: string to search for
:param replace: string to replace
:param fileext: type of file to search in
"""
inpath = os.getcwd()
log().info(inpath)
for (dirpath, dirnames, filenames) in os.walk(inpath):
for filename in filenames:
if filename.endswith(fileext):
log().info(filename)
fullfilename = os.path.join(dirpath, filename)
with open(fullfilename, 'r') as file:
content = file.read()
content = content.replace(search, replace)
with open(fullfilename, 'w') as file:
file.write(content)
def folders_to_main(series: bool = False, primary: bool = False, blurry: bool = False, dirs: list = None,
one_level: bool = True, not_inpath: bool = True):
"""
reverses filtering/sorting into directories
:param series: restrict to reverse of filterSeries
:param primary: restrict to reverse of filterPrimary
:param blurry: restrict to reverse of detectBlurry
:param dirs: restrict to reverse other dirs
:param one_level: reverse only one directory up
:param not_inpath: leave all directories in inpath as they are, only change subdirectories
"""
log_function_call(folders_to_main.__name__, series, primary, blurry, dirs, one_level, not_inpath)
inpath = os.getcwd()
reverseDirs = []
if series: reverseDirs += ["B" + str(i) for i in range(1, 8)] + ["S", "single"]
if primary: reverseDirs += ["B", "S", "TL", "SM", "primary"]
if blurry: reverseDirs += ["blurry"]
if dirs: reverseDirs += list(dirs)
deepest = 0
for (dirpath, dirnames, filenames) in os.walk(inpath):
if not_inpath and dirpath == inpath: continue
if is_invalid_path(dirpath, whitelist=reverseDirs): continue
depth = get_relpath_depth(dirpath, inpath)
deepest = max(deepest, depth)
if not_inpath:
deepest -= 1
if not reverseDirs and deepest > 1:
log().warning("A folder structure with a depth of %2d will be flattened", deepest)
askToContinue()
elif deepest > 3:
log().warning("The folder structure has a depth of %2d", deepest)
log().info("chosen directory names: %r", reverseDirs)
askToContinue()
for (dirpath, dirnames, filenames) in os.walk(inpath):
if not_inpath and dirpath == inpath: continue
if is_invalid_path(dirpath, whitelist=reverseDirs): continue
if one_level:
destination = os.path.dirname(dirpath)
else:
destination = inpath
log().info("%s #dirs:%d #files:%d", dirpath, len(dirnames), len(filenames))
for filename in filenames:
if not file_has_ext(filename, settings.image_types + settings.video_types): continue
move(filename, dirpath, destination)
removeIfEmtpy(dirpath)
def move_each_pretag_to_folder():
"""
"""
log_function_call(move_each_pretag_to_folder.__name__)
inpath = os.getcwd()
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath): continue
for filename in filenames:
filenameAccessor = FilenameAccessor(filename)
if not filenameAccessor.pre in dirpath:
move(filename, dirpath, os.path.join(dirpath, filenameAccessor.pre))
if len(filenameAccessor.primtags) > 0 and not filenameAccessor.primtags[0] in dirpath:
move(filename, dirpath, os.path.join(dirpath, *filenameAccessor.primtags))
def rename_HDR(mode="HDRT", folder=r"HDR\w*"):
"""
rename HDR pictures generated by FRANZIS HDR projects to a nicer form
:param mode: name for HDR-Mode written to file
:param folder: only files in folders of this name are renamed
"""
log_function_call(rename_HDR.__name__, mode, folder)
matchreg = r"^([-\w]+_[0-9]+)B\d(.*)_(?:\d+B)?\d\2"
inpath = os.getcwd()
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, regex=folder): continue
log().info("Folder: %s", dirpath)
for filename in filenames:
if mode in filename: continue
match = re.search(matchreg, filename)
if match:
_rename_match(dirpath, filename, mode, match)
else:
log().info("no match: %s", filename)
for dirname in dirnames:
match = re.search(matchreg, dirname)
if match:
_rename_match(dirpath, dirname, mode, match)
def _rename_match(dirpath: str, filename: str, mode: str, match: Optional[Match[str]]):
extension = filename[filename.rfind("."):]
filename_new_part1 = match.group(1) + "_" + mode
filename_new_part2 = match.group(2) + extension
filename_new = filename_new_part1 + filename_new_part2
i = 2
while os.path.isfile(os.path.join(dirpath, filename_new)):
filename_new = filename_new_part1 + "%d" % i + filename_new_part2
i += 1
renameInPlace(dirpath, filename, filename_new)
def sanitize_filename(folder=r"", posttags_to_end: List[str] = None, onlyprint=False):
"""
sanitize order of Scene and Process tags
sanitize counter to be split by $
    sanitize sub-process names added by an external program so they are concatenated to the main process name (only Hugin)
    :param folder: optional regex to restrict to matching folders
    :param posttags_to_end: optional list of special posttags to resort to the end
    :param onlyprint: if true, renaming is only printed to the log and no files are renamed; useful for testing
:return:
"""
inpath = os.getcwd()
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath, regex=folder): continue
for filename in (filenames + dirnames):
filename = filename.replace("panorama", "PANO")
filenameAccessor = FilenameAccessor(filename)
_sanitize_posttags(filenameAccessor, posttags_to_end)
_sanitize_process_counter(filenameAccessor)
_sanitize_pano(filenameAccessor)
filename_new = filenameAccessor.sorted_filename()
if not filename == filename_new:
log().info("rename: %s to %s", filename, filename_new)
if not onlyprint:
renameInPlace(dirpath, filename, filename_new)
def _sanitize_posttags(filenameAccessor: FilenameAccessor, posttags_to_end: List[str] = None):
if not posttags_to_end: return
for posttag in posttags_to_end:
if posttag in filenameAccessor.posttags:
filenameAccessor.posttags.remove(posttag)
filenameAccessor.posttags.append(posttag)
def _sanitize_pano(filenameAccessor: FilenameAccessor):
matches = [tag for tag in filenameAccessor.processes if tag.startswith("PANO")]
if not matches: return
pano_name = matches[0]
pano_split = pano_name.split("$")
pano_newname = pano_split[0]
pano_modi = ["blended", "fused", "hdr"]
for pano_modus in pano_modi:
if pano_modus in filenameAccessor.posttags:
pano_newname += "-" + pano_modus
filenameAccessor.posttags.remove(pano_modus)
if len(pano_split) > 0:
pano_newname = "$".join([pano_newname] + pano_split[1:])
filenameAccessor.processes.remove(matches[0])
filenameAccessor.processes = [pano_newname] + filenameAccessor.processes
def _sanitize_process_counter(filenameAccessor: FilenameAccessor):
processes_new = []
for process_mode in filenameAccessor.processes:
if not "$" in process_mode:
match = re.search(r'([^\d]+)(\d.*)', process_mode)
if match:
process_mode = match.group(1) + "$" + match.group(2)
processes_new.append(process_mode)
filenameAccessor.processes = processes_new
def _get_new_filename_from_dict(filename_dict: dict):
filename_new_list = filename_dict["main"] + filename_dict["scene"] + \
filename_dict["process"] + filename_dict["tags"]
return "_".join(filename_new_list)
def rename_temp_back():
"""
rename temporary renamed files back
"""
inpath = os.getcwd()
matchreg = 'temp$'
for (dirpath, dirnames, filenames) in os.walk(inpath):
for filename in filenames:
match = re.search(matchreg, filename)
if not match: continue
newFilename = re.sub(matchreg, '', filename)
renameInPlace(dirpath, filename, newFilename)
def rename_back(timestring="", fileext=".JPG"):
"""
    rename back using a backup in saves; change into the directory you want to rename back
:param timestring: time of backup
:param fileext: file extension
:return:
"""
log_function_call(rename_back.__name__)
dirname = get_saves_dir()
tagFile = os.path.join(dirname, "Tags" + fileext + "_" + timestring + ".npz")
if not timestring or os.path.isfile(tagFile):
tagFiles = [x for x in os.listdir(dirname) if ".npz" in x]
tagFile = os.path.join(dirname, tagFiles[-1])
Tagdict = np.load(tagFile, allow_pickle=True)["Tagdict"].item()
temppostfix = renameTemp(Tagdict["Directory"], Tagdict["File Name new"])
log().debug("length of Tagdict: %d", len(list(Tagdict.values())[0]))
for i in range(len(list(Tagdict.values())[0])):
filename = Tagdict["File Name new"][i] + temppostfix
if not os.path.isfile(os.path.join(Tagdict["Directory"][i], filename)): continue
filename_old = Tagdict["File Name"][i]
renameInPlace(Tagdict["Directory"][i], filename, filename_old)
Tagdict["File Name new"][i], Tagdict["File Name"][i] = Tagdict["File Name"][i], Tagdict["File Name new"][i]
timestring = dateformating(dt.datetime.now(), "_MMDDHHmmss")
np.savez_compressed(os.path.join(dirname, "Tags" + fileext + timestring), Tagdict=Tagdict)
def rename_in_csvs(timestring="", fileext=".JPG"):
"""
    use backup in saves; rename file references in the csv files in the setexif directory
:param timestring: time of backup
:param fileext: file extension
"""
log_function_call(rename_in_csvs.__name__)
dirname = get_saves_dir()
tagFile = os.path.join(dirname, "Tags" + fileext + "_" + timestring + ".npz")
if not timestring or os.path.isfile(tagFile):
tagFiles = [x for x in os.listdir(dirname) if ".npz" in x]
tagFile = os.path.join(dirname, tagFiles[-1])
Tagdict = np.load(tagFile, allow_pickle=True)["Tagdict"].item()
log().debug("length of Tagdict: %d", len(list(Tagdict.values())[0]))
csv_filenames = filterFiles(os.listdir(get_setexif_dir()), [".csv"])
csv_filenames = [os.path.join(get_setexif_dir(), csv_filename) for csv_filename in csv_filenames]
for csv_filename in csv_filenames:
with open(csv_filename, "r", encoding="utf-8") as file:
data = file.read()
for i in range(len(list(Tagdict.values())[0])):
data = data.replace(Tagdict["File Name"][i], Tagdict["File Name new"][i])
with open(csv_filename, "w", encoding="utf-8") as file:
file.write(data)
def create_tags_csv(location: str = ""):
"""
extract tags from the file name
write a csv file with those tags
:param location: optional content of directory column
This csv can be modified to be used with :func:`write_exif_using_csv` or :func:`placeinfo.write_infos`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
inpath = os.getcwd()
tag_set = OrderedSet()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_places.csv")
tags_places_file, writer = fileop.create_csv_writer(out_filename, ["directory", "name_part"])
filenameAccessors = [FilenameAccessor(filename) for filename in get_plain_filenames_of_type(image_types, inpath)]
for fileNameAccessor in filenameAccessors:
for tag in fileNameAccessor.tags():
tag_set.add(tag)
writeToFile(get_info_dir("tags.txt"), location + "\n\t" + "\n\t".join(tag_set) + "\n")
for tag in tag_set:
tag_set_names.add((location, tag))
writer.writerows(tag_set_names)
tags_places_file.close()
def create_tags_csv_per_dir():
"""
extract tags from the file name
write a csv file with those tags and group them by toplevel directory
This csv can be modified to be used with :func:`write_exif_using_csv` or :func:`placeinfo.write_infos`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
log_function_call(create_tags_csv_per_dir.__name__)
inpath = os.getcwd()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_per_dir.csv")
tags_places_file, writer = fileop.create_csv_writer(out_filename, ["directory", "name_part"])
for (dirpath, dirnames, filenames) in os.walk(inpath):
if not inpath == dirpath: continue
for dirname in dirnames:
tag_set = OrderedSet()
filenameAccessors = [FilenameAccessor(filename) for filename in
get_plain_filenames_of_type(image_types, dirpath, dirname)]
if len(filenameAccessors) == 0: continue
for fileNameAccessor in filenameAccessors:
for tag in fileNameAccessor.tags():
tag_set.add(tag)
writeToFile(get_info_dir("tags.txt"), dirname + "\n\t" + "\n\t".join(tag_set) + "\n")
dirname_split = dirname.split("_")
subnames = [subname for subname in dirname_split if not subname.isnumeric()]
dirname = "_".join(subnames)
for tag in tag_set:
tag_set_names.add((dirname, tag))
writer.writerows(tag_set_names)
tags_places_file.close()
def create_counters_csv():
"""
extract counter from the file name
write a csv file with those counters
This csv can be modified to be used with :func:`write_exif_using_csv`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
log_function_call(create_counters_csv.__name__)
inpath = os.getcwd()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_counters.csv")
csvfile, writer = fileop.create_csv_writer(out_filename,
["directory", "name_main", "name_part", "first", "last", "tags3",
"description"])
filenameAccessors = [FilenameAccessor(filename) for filename in get_plain_filenames_of_type(image_types, inpath)]
_add_counter_csv_entries("", filenameAccessors, tag_set_names)
writer.writerows(tag_set_names)
csvfile.close()
def create_counters_csv_per_dir():
"""
extract counter from the file name
write a csv file with those counters for each directory
This csv can be modified to be used with :func:`write_exif_using_csv`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
log_function_call(create_tags_csv_per_dir.__name__)
inpath = os.getcwd()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_counters.csv")
csvfile, writer = fileop.create_csv_writer(out_filename,
["directory", "name_main", "name_part", "first", "last", "tags3",
"description"])
for (dirpath, dirnames, filenames) in os.walk(inpath):
if not inpath == dirpath: continue
for dirname in dirnames:
filenameAccessors = [FilenameAccessor(filename) for filename in
get_plain_filenames_of_type(image_types, dirpath, dirname)]
if len(filenameAccessors) == 0: continue
_add_counter_csv_entries(dirname, filenameAccessors, tag_set_names)
writer.writerows(tag_set_names)
csvfile.close()
def _add_counter_csv_entries(dirname: str, filenameAccessors: List[FilenameAccessor], tag_set_names: OrderedSet):
fileNameAccessorFirst = filenameAccessors[0]
fileNameAccessorLast = filenameAccessors[0]
for filenameAccessor in filenameAccessors[1:-1]:
if not filenameAccessor.is_direct_successor_of(fileNameAccessorLast):
tag_set_names.add((dirname, fileNameAccessorFirst.pre, fileNameAccessorFirst.first_posttag(),
fileNameAccessorFirst.counter_main(), fileNameAccessorLast.counter_main()))
fileNameAccessorFirst = filenameAccessor
fileNameAccessorLast = filenameAccessor
tag_set_names.add((dirname, fileNameAccessorFirst.pre, fileNameAccessorFirst.first_posttag(),
fileNameAccessorFirst.counter_main(), fileNameAccessorLast.counter_main()))
def create_names_csv_per_dir(start_after_dir=''):
"""
extract names from the file path
write a csv file with those names for each directory
This csv can be modified to be used with :func:`write_exif_using_csv`
If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text.
"""
log_function_call(create_names_csv_per_dir.__name__)
inpath = os.getcwd()
tag_set_names = OrderedSet()
out_filename = get_info_dir("tags_names.csv")
csvfile, writer = fileop.create_csv_writer(out_filename, ["directory", "name_main", "tags"])
for (dirpath, dirnames, filenames) in os.walk(inpath):
if is_invalid_path(dirpath): continue
filenameAccessors = [FilenameAccessor(filename) for filename in
filterFiles(filenames, image_types)]
if len(filenameAccessors) == 0: continue
tags = []
found = False
for part in dirpath.split(os.sep):
if found:
tags += part.split(', ')
else:
found = part == start_after_dir
filenameAccessorLast = filenameAccessors[0]
tag_set_names.add(
(", ".join(tags), filenameAccessorLast.pre, ', '.join(OrderedSet(tags + [filenameAccessorLast.pre]))))
for filenameAccessor in filenameAccessors[1:]:
if not filenameAccessor.pre == filenameAccessorLast.pre:
tag_set_names.add(
(", ".join(tags), filenameAccessor.pre, ', '.join(OrderedSet(tags + [filenameAccessor.pre]))))
filenameAccessorLast = filenameAccessor
writer.writerows(tag_set_names)
csvfile.close()
def create_rating_csv(rating: int = 4, subdir: str = ""):
"""
creates a csv file with all files in the directory
the rating column is filled with param rating
:param rating: rating to be written
:param subdir: sub directory to make rating file of, if empty all directories will be taken
"""
log_function_call(create_rating_csv.__name__, rating, subdir)
inpath = os.getcwd()
out_filebasename = "rating"
if subdir: out_filebasename += "_" + subdir
out_filename = get_setexif_dir(out_filebasename + ".csv")
rating_file, writer = fileop.create_csv_writer(out_filename, ["name_part", "rating"])
for (dirpath, dirnames, filenames) in os.walk(os.path.join(inpath, subdir)):
if is_invalid_path(dirpath): continue
for filename in filterFiles(filenames, settings.image_types):
writer.writerow([filename, rating])
rating_file.close()
def create_example_csvs():
"""
creates some examples for csv files
"""
_create_empty_csv("rating", ["name_part", "rating"])
_create_empty_csv("gps", ["directory", "name_part", "Location", "gps", "City", "State", "Country", "tags3"])
_create_empty_csv("tags", ["name_main", "first", "last", "tags", "tags3"])
_create_empty_csv("processing", ["directory", "name_part", "tags2", "HDR-ghosting", "HDR-strength"])
def _create_empty_csv(name: str, columns: Iterable):
filename = get_setexif_dir(name + ".csv")
if isfile(filename): return
csv_file, writer = fileop.create_csv_writer(filename, columns)
csv_file.close()
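# Rough usage sketch (an assumption, not part of the module): all of these
# helpers operate on the current working directory, so a typical session first
# changes into a photo folder and then calls the sorting and csv helpers.
#
# import os
# import EXIFnaming.nameop as nameop
#
# os.chdir("/path/to/photos")          # placeholder path
# nameop.filter_series()               # sort bracket/series/video shots into subfolders
# nameop.create_tags_csv_per_dir()     # write tags_per_dir.csv grouped by top-level folder
# nameop.create_rating_csv(rating=4)   # prefill a rating csv for all images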
|
en
| 0.803134
|
#!/usr/bin/env python3 Organizing fotos according to naming conventions definied by readexif.rename and external programs dependencies: - put each kind of series in its own directory # TLM: Timelapse manual - pictures on different days to be combined to a Timelapse #dirs:%d #files:%d", dirpath, len(dirnames), len(filenames)) # filter process types to separate folders - attention: ordering of statements matters put single and B1 in same directory #dirs:%d #files:%d", dirpath, len(dirnames), len(filenames)) copy sub folders of specified names to dest without directory structure :param dest: copy destination :param dir_names: directory names to copy copy files which have names containing sub_name to dest without directory structure :param dest: copy destination :param sub_name: name part to search sorting music files - FIXME maybe not the right place here :param dest: copy destination :param playlist: name part to search replace search with replace in files ending with fileext :param search: string to search for :param replace: string to replace :param fileext: type of file to search in reverses filtering/sorting into directories :param series: restrict to reverse of filterSeries :param primary: restrict to reverse of filterPrimary :param blurry: restrict to reverse of detectBlurry :param dirs: restrict to reverse other dirs :param one_level: reverse only one directory up :param not_inpath: leave all directories in inpath as they are, only change subdirectories #dirs:%d #files:%d", dirpath, len(dirnames), len(filenames)) rename HDR pictures generated by FRANZIS HDR projects to a nicer form :param mode: name for HDR-Mode written to file :param folder: only files in folders of this name are renamed sanitize order of Scene and Process tags sanitize counter to be split by $ sanitize sub process names added by a external program to by concat to main processname (only Hugin) :param folder: optional regex for restrict to folders :param posttags_to_end: optional for resorting special posttags to end :param onlyprint: if true, renaming will only printed to log and no files are renamed, good for testing :return: rename temporary renamed files back rename back using backup in saves; change to directory you want to rename back :param timestring: time of backup :param fileext: file extension :return: use backup in saves; rename file reverences in csv in setexif directory :param timestring: time of backup :param fileext: file extension extract tags from the file name write a csv file with those tags :param location: optional content of directory column This csv can be modified to be used with :func:`write_exif_using_csv` or :func:`placeinfo.write_infos` If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text. extract tags from the file name write a csv file with those tags and group them by toplevel directory This csv can be modified to be used with :func:`write_exif_using_csv` or :func:`placeinfo.write_infos` If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text. extract counter from the file name write a csv file with those counters This csv can be modified to be used with :func:`write_exif_using_csv` If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text. 
extract counter from the file name write a csv file with those counters for each directory This csv can be modified to be used with :func:`write_exif_using_csv` If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text. extract names from the file path write a csv file with those names for each directory This csv can be modified to be used with :func:`write_exif_using_csv` If you want to modify it with EXCEL or Calc take care to import all columns of the csv as text. creates a csv file with all files in the directory the rating column is filled with param rating :param rating: rating to be written :param subdir: sub directory to make rating file of, if empty all directories will be taken creates some examples for csv files
| 1.99111
| 2
|
core/backend/djacket/urls.py
|
Djacket/djacket
| 85
|
6627094
|
<reponame>Djacket/djacket
from django.conf import settings
from django.contrib import admin
from django.views.static import serve
from django.conf.urls import include, url
from user.views import user_deposit
from djacket.views import index
# Djacket main urls will be addressed here.
urlpatterns = [
url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # project docs view.
url(r'^admin/', include(admin.site.urls)), # admin interface.
url(r'^', include('repository.urls')), # import repository app urls with no prefix.
url(r'^account/', include('user.urls')), # import user app urls with 'account' prefix.
url(r'^(?P<username>\w+)$', user_deposit, name='user_deposit'), # show user deposit for url '/username'.
    url(r'^$', index, name='index'), # index shows the logged-in user's view if authenticated, or the Djacket intro otherwise.
]
# serve static media on /media/* while the DEBUG setting is on
if settings.DEBUG:
urlpatterns = urlpatterns + \
[url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),]
|
from django.conf import settings
from django.contrib import admin
from django.views.static import serve
from django.conf.urls import include, url
from user.views import user_deposit
from djacket.views import index
# Djacket main urls will be addressed here.
urlpatterns = [
url(r'^admin/doc/', include('django.contrib.admindocs.urls')), # project docs view.
url(r'^admin/', include(admin.site.urls)), # admin interface.
url(r'^', include('repository.urls')), # import repository app urls with no prefix.
url(r'^account/', include('user.urls')), # import user app urls with 'account' prefix.
url(r'^(?P<username>\w+)$', user_deposit, name='user_deposit'), # show user deposit for url '/username'.
    url(r'^$', index, name='index'), # index shows the logged-in user's view if authenticated, or the Djacket intro otherwise.
]
# serve static media on /media/* while the DEBUG setting is on
if settings.DEBUG:
urlpatterns = urlpatterns + \
[url(r'^media/(?P<path>.*)$', serve, {'document_root': settings.MEDIA_ROOT}),]
|
en
| 0.815429
|
# Djacket main urls will be addressed here. # project docs view. # admin interface. # import repository app urls with no prefix. # import user app urls with 'account' prefix. # show user deposit for url '/username'. # index changes to logged in user if he/she is authenticated or to djacket intro if not. # serve static medias on /media/* while DEBUG settings are on
| 2.063095
| 2
|
submissions/abc156/b.py
|
m-star18/atcoder
| 1
|
6627095
|
<gh_stars>1-10
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
def Base_10_to_n(X, n):
if int(X / n):
return Base_10_to_n(int(X / n), n) + str(X % n)
return str(X % n)
n, k = map(int, readline().split())
print(len(Base_10_to_n(n, k)))
|
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
def Base_10_to_n(X, n):
if int(X / n):
return Base_10_to_n(int(X / n), n) + str(X % n)
return str(X % n)
n, k = map(int, readline().split())
print(len(Base_10_to_n(n, k)))
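# For clarity: the answer is the number of digits of n written in base k.
# An equivalent iterative sketch (assumes n >= 1 and k >= 2):
def digit_count(x: int, base: int) -> int:
    digits = 0
    while x:
        x //= base
        digits += 1
    return digits
# e.g. digit_count(11, 2) == 4, matching len(Base_10_to_n(11, 2)) == len("1011")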
|
none
| 1
| 2.953619
| 3
|
|
pywizlight/__init__.py
|
UH-60/pywizlight
| 221
|
6627096
|
<filename>pywizlight/__init__.py<gh_stars>100-1000
from pywizlight.bulb import PilotBuilder, PilotParser, wizlight
from pywizlight import discovery
from pywizlight.scenes import SCENES
from pywizlight.bulblibrary import BulbType
__all__ = [
"BulbType",
"discovery",
"PilotBuilder",
"PilotParser",
"SCENES",
"wizlight",
]
|
<filename>pywizlight/__init__.py<gh_stars>100-1000
from pywizlight.bulb import PilotBuilder, PilotParser, wizlight
from pywizlight import discovery
from pywizlight.scenes import SCENES
from pywizlight.bulblibrary import BulbType
__all__ = [
"BulbType",
"discovery",
"PilotBuilder",
"PilotParser",
"SCENES",
"wizlight",
]
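# Rough usage sketch based on the names re-exported above (the IP address and
# exact method signatures are assumptions; check the pywizlight documentation
# for the installed version):
#
# import asyncio
# from pywizlight import wizlight, PilotBuilder
#
# async def main():
#     bulb = wizlight("192.168.1.27")                  # placeholder bulb IP
#     await bulb.turn_on(PilotBuilder(brightness=128)) # set brightness to ~50%
#     state = await bulb.updateState()
#     print(state.get_state())
#
# asyncio.run(main())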
|
none
| 1
| 1.483598
| 1
|
|
layers/__init__.py
|
exityan/reid-strong-baseline
| 0
|
6627097
|
<gh_stars>0
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
import torch.nn.functional as F
from .triplet_loss import TripletLoss, CrossEntropyLabelSmooth
from .center_loss import CenterLoss
def make_loss(cfg, num_classes): # modified by gu
sampler = cfg.DATALOADER.SAMPLER
if cfg.MODEL.METRIC_LOSS_TYPE == 'triplet':
triplet = TripletLoss(cfg.SOLVER.MARGIN) # triplet loss
else:
        print('expected METRIC_LOSS_TYPE should be triplet, '
'but got {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
if cfg.MODEL.IF_LABELSMOOTH == 'on':
xent = CrossEntropyLabelSmooth(num_classes=num_classes) # new add by luo
print("label smooth on, numclasses:", num_classes)
if sampler == 'softmax':
def loss_func(score, feat, target):
return F.cross_entropy(score, target)
elif cfg.DATALOADER.SAMPLER == 'triplet':
def loss_func(score, feat, target):
return triplet(feat, target)[0]
elif cfg.DATALOADER.SAMPLER == 'softmax_triplet':
def loss_func(score, feat, target):
if cfg.MODEL.METRIC_LOSS_TYPE == 'triplet':
if cfg.MODEL.IF_LABELSMOOTH == 'on':
return xent(score, target) + triplet(feat, target)[0]
else:
return F.cross_entropy(score, target) + triplet(feat, target)[0]
else:
                print('expected METRIC_LOSS_TYPE should be triplet, '
'but got {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
else:
print('expected sampler should be softmax, triplet or softmax_triplet, '
'but got {}'.format(cfg.DATALOADER.SAMPLER))
return loss_func
def make_loss_with_center(cfg, num_classes, use_gpu=True): # modified by gu
if cfg.MODEL.NAME == 'resnet18' or cfg.MODEL.NAME == 'resnet34':
feat_dim = 512
else:
feat_dim = 2048
if cfg.MODEL.METRIC_LOSS_TYPE == 'center':
center_criterion = CenterLoss(num_classes=num_classes, feat_dim=feat_dim, use_gpu=use_gpu) # center loss
elif cfg.MODEL.METRIC_LOSS_TYPE == 'triplet_center':
triplet = TripletLoss(cfg.SOLVER.MARGIN) # triplet loss
center_criterion = CenterLoss(num_classes=num_classes, feat_dim=feat_dim, use_gpu=use_gpu) # center loss
else:
print('expected METRIC_LOSS_TYPE with center should be center, triplet_center'
'but got {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
if cfg.MODEL.IF_LABELSMOOTH == 'on':
xent = CrossEntropyLabelSmooth(num_classes=num_classes, use_gpu=use_gpu) # new add by luo
print("label smooth on, numclasses:", num_classes)
def loss_func(score, feat, target):
if cfg.MODEL.METRIC_LOSS_TYPE == 'center':
if cfg.MODEL.IF_LABELSMOOTH == 'on':
return xent(score, target) + \
cfg.SOLVER.CENTER_LOSS_WEIGHT * center_criterion(feat, target)
else:
return F.cross_entropy(score, target) + \
cfg.SOLVER.CENTER_LOSS_WEIGHT * center_criterion(feat, target)
elif cfg.MODEL.METRIC_LOSS_TYPE == 'triplet_center':
if cfg.MODEL.IF_LABELSMOOTH == 'on':
return xent(score, target) + \
triplet(feat, target)[0] + \
cfg.SOLVER.CENTER_LOSS_WEIGHT * center_criterion(feat, target)
else:
return F.cross_entropy(score, target) + \
triplet(feat, target)[0] + \
cfg.SOLVER.CENTER_LOSS_WEIGHT * center_criterion(feat, target)
else:
print('expected METRIC_LOSS_TYPE with center should be center, triplet_center'
'but got {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
return loss_func, center_criterion
|
# encoding: utf-8
"""
@author: liaoxingyu
@contact: <EMAIL>
"""
import torch.nn.functional as F
from .triplet_loss import TripletLoss, CrossEntropyLabelSmooth
from .center_loss import CenterLoss
def make_loss(cfg, num_classes): # modified by gu
sampler = cfg.DATALOADER.SAMPLER
if cfg.MODEL.METRIC_LOSS_TYPE == 'triplet':
triplet = TripletLoss(cfg.SOLVER.MARGIN) # triplet loss
else:
print('expected METRIC_LOSS_TYPE should be triplet'
'but got {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
if cfg.MODEL.IF_LABELSMOOTH == 'on':
xent = CrossEntropyLabelSmooth(num_classes=num_classes) # new add by luo
print("label smooth on, numclasses:", num_classes)
if sampler == 'softmax':
def loss_func(score, feat, target):
return F.cross_entropy(score, target)
elif cfg.DATALOADER.SAMPLER == 'triplet':
def loss_func(score, feat, target):
return triplet(feat, target)[0]
elif cfg.DATALOADER.SAMPLER == 'softmax_triplet':
def loss_func(score, feat, target):
if cfg.MODEL.METRIC_LOSS_TYPE == 'triplet':
if cfg.MODEL.IF_LABELSMOOTH == 'on':
return xent(score, target) + triplet(feat, target)[0]
else:
return F.cross_entropy(score, target) + triplet(feat, target)[0]
else:
print('expected METRIC_LOSS_TYPE should be triplet'
'but got {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
else:
print('expected sampler should be softmax, triplet or softmax_triplet, '
'but got {}'.format(cfg.DATALOADER.SAMPLER))
return loss_func
def make_loss_with_center(cfg, num_classes, use_gpu=True): # modified by gu
if cfg.MODEL.NAME == 'resnet18' or cfg.MODEL.NAME == 'resnet34':
feat_dim = 512
else:
feat_dim = 2048
if cfg.MODEL.METRIC_LOSS_TYPE == 'center':
center_criterion = CenterLoss(num_classes=num_classes, feat_dim=feat_dim, use_gpu=use_gpu) # center loss
elif cfg.MODEL.METRIC_LOSS_TYPE == 'triplet_center':
triplet = TripletLoss(cfg.SOLVER.MARGIN) # triplet loss
center_criterion = CenterLoss(num_classes=num_classes, feat_dim=feat_dim, use_gpu=use_gpu) # center loss
else:
print('expected METRIC_LOSS_TYPE with center should be center, triplet_center'
'but got {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
if cfg.MODEL.IF_LABELSMOOTH == 'on':
xent = CrossEntropyLabelSmooth(num_classes=num_classes, use_gpu=use_gpu) # new add by luo
print("label smooth on, numclasses:", num_classes)
def loss_func(score, feat, target):
if cfg.MODEL.METRIC_LOSS_TYPE == 'center':
if cfg.MODEL.IF_LABELSMOOTH == 'on':
return xent(score, target) + \
cfg.SOLVER.CENTER_LOSS_WEIGHT * center_criterion(feat, target)
else:
return F.cross_entropy(score, target) + \
cfg.SOLVER.CENTER_LOSS_WEIGHT * center_criterion(feat, target)
elif cfg.MODEL.METRIC_LOSS_TYPE == 'triplet_center':
if cfg.MODEL.IF_LABELSMOOTH == 'on':
return xent(score, target) + \
triplet(feat, target)[0] + \
cfg.SOLVER.CENTER_LOSS_WEIGHT * center_criterion(feat, target)
else:
return F.cross_entropy(score, target) + \
triplet(feat, target)[0] + \
cfg.SOLVER.CENTER_LOSS_WEIGHT * center_criterion(feat, target)
else:
print('expected METRIC_LOSS_TYPE with center should be center, triplet_center'
'but got {}'.format(cfg.MODEL.METRIC_LOSS_TYPE))
return loss_func, center_criterion
|
en
| 0.793926
|
# encoding: utf-8 @author: liaoxingyu @contact: <EMAIL> # modified by gu # triplet loss # new add by luo # modified by gu # center loss # triplet loss # center loss # new add by luo
| 2.183752
| 2
|
GununMaddesi.py
|
ahmetlii/GununMaddesi
| 0
|
6627098
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
import mavri
import datetime
from random import randint
wiki = 'tr.wikipedia'
username='Mavrikant Bot'
xx = mavri.login(wiki, username)
one_day = datetime.timedelta(days=1)
baslangic = datetime.date(2015, 9, 1) # Bu tarihten öncesinde GM sorunlu.
bugun = datetime.date(datetime.datetime.now().year, datetime.datetime.now().month, datetime.datetime.now().day)
gelecekTarih = bugun + 2*one_day # 2 gün sonrası için kontrol yap
gelecekTarihStr = gelecekTarih.strftime("%Y-%m-%d")
kaynakTarih = baslangic + randint(0, int(str(bugun - baslangic).split(' days')[0])) * one_day # Başlangıç ve Bugün arasında rasgele bir tarih seç
logPage = 'User:'+username+'/Log/Günün Maddesi'
YarinSayfa = mavri.content_of_page(wiki, 'Şablon:GM/' + gelecekTarihStr)
if (YarinSayfa == ''): # Yarının GM sayfası yok
Summary = 'Olumsuz'
Durum = '\n* {{Çapraz}}'
# Kaynak sayfa bul ve içeriğini kopyala
kaynakSayfa = mavri.content_of_page(wiki, 'Şablon:GM/' + kaynakTarih.strftime("%Y-%m-%d"))
while kaynakSayfa == '':
kaynakTarih += one_day # Sayfa boş çıktı. Sonraki güne geç.
kaynakSayfa = mavri.content_of_page(wiki, 'Şablon:GM/' + kaynakTarih.strftime("%Y-%m-%d"))
# Kaynak sayfa ile gelecek GM sayfasını oluştur
mavri.change_page(wiki, 'Şablon:GM/' + gelecekTarihStr, kaynakSayfa, '[[Şablon:GM/' + kaynakTarih.strftime("%Y-%m-%d") + ']] sayfasından kopyalandı.', xx)
else: # Yarının GM sayfası oluşturulmuş. Süper.
Summary = 'Olumlu'
Durum = '\n* {{Tamam}}'
# Log sayfasına rapor yaz
Durum += " [[Şablon:GM/%s | %s]]" %(gelecekTarihStr,gelecekTarihStr)
mavri.appendtext_on_page(wiki, logPage, Durum, Summary, xx)
|
# -*- coding: utf-8 -*-
# !/usr/bin/python
import mavri
import datetime
from random import randint
wiki = 'tr.wikipedia'
username='Mavrikant Bot'
xx = mavri.login(wiki, username)
one_day = datetime.timedelta(days=1)
baslangic = datetime.date(2015, 9, 1) # Bu tarihten öncesinde GM sorunlu.
bugun = datetime.date(datetime.datetime.now().year, datetime.datetime.now().month, datetime.datetime.now().day)
gelecekTarih = bugun + 2*one_day # 2 gün sonrası için kontrol yap
gelecekTarihStr = gelecekTarih.strftime("%Y-%m-%d")
kaynakTarih = baslangic + randint(0, int(str(bugun - baslangic).split(' days')[0])) * one_day # Başlangıç ve Bugün arasında rasgele bir tarih seç
logPage = 'User:'+username+'/Log/Günün Maddesi'
YarinSayfa = mavri.content_of_page(wiki, 'Şablon:GM/' + gelecekTarihStr)
if (YarinSayfa == ''): # Yarının GM sayfası yok
Summary = 'Olumsuz'
Durum = '\n* {{Çapraz}}'
# Kaynak sayfa bul ve içeriğini kopyala
kaynakSayfa = mavri.content_of_page(wiki, 'Şablon:GM/' + kaynakTarih.strftime("%Y-%m-%d"))
while kaynakSayfa == '':
kaynakTarih += one_day # Sayfa boş çıktı. Sonraki güne geç.
kaynakSayfa = mavri.content_of_page(wiki, 'Şablon:GM/' + kaynakTarih.strftime("%Y-%m-%d"))
# Kaynak sayfa ile gelecek GM sayfasını oluştur
mavri.change_page(wiki, 'Şablon:GM/' + gelecekTarihStr, kaynakSayfa, '[[Şablon:GM/' + kaynakTarih.strftime("%Y-%m-%d") + ']] sayfasından kopyalandı.', xx)
else: # Yarının GM sayfası oluşturulmuş. Süper.
Summary = 'Olumlu'
Durum = '\n* {{Tamam}}'
# Log sayfasına rapor yaz
Durum += " [[Şablon:GM/%s | %s]]" %(gelecekTarihStr,gelecekTarihStr)
mavri.appendtext_on_page(wiki, logPage, Durum, Summary, xx)
|
tr
| 0.999197
|
# -*- coding: utf-8 -*- # !/usr/bin/python # Bu tarihten öncesinde GM sorunlu. # 2 gün sonrası için kontrol yap # Başlangıç ve Bugün arasında rasgele bir tarih seç # Yarının GM sayfası yok # Kaynak sayfa bul ve içeriğini kopyala # Sayfa boş çıktı. Sonraki güne geç. # Kaynak sayfa ile gelecek GM sayfasını oluştur # Yarının GM sayfası oluşturulmuş. Süper. # Log sayfasına rapor yaz
| 2.800514
| 3
|
pytests/test_BoundaryDetector.py
|
calumcorrie/Meraki-Crowd-Interface
| 0
|
6627099
|
import os
import sys
import numpy as np
# import BoundaryDetector from lib directory
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from lib.BoundaryDetector import BoundaryDetector
# generate expected map
expected_sym_map = np.ones((100,100),dtype=np.bool_)
expected_sym_map[ 11:89 , 11:89 ] = 0
expected_sym_map = ( expected_sym_map == 1)
def test_getBoundaryMask():
b = BoundaryDetector(os.path.join("testing","simple_fp.png"))
b.run()
x = b.getBoundaryMask()
assert ( np.array_equal( x, expected_sym_map ) )
|
import os
import sys
import numpy as np
# import BoundaryDetector from lib directory
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from lib.BoundaryDetector import BoundaryDetector
# generate expected map
expected_sym_map = np.ones((100,100),dtype=np.bool_)
expected_sym_map[ 11:89 , 11:89 ] = 0
expected_sym_map = ( expected_sym_map == 1)
def test_getBoundaryMask():
b = BoundaryDetector(os.path.join("testing","simple_fp.png"))
b.run()
x = b.getBoundaryMask()
assert ( np.array_equal( x, expected_sym_map ) )
|
en
| 0.475491
|
# import BoundaryDetector from lib directory # generate expected map
| 2.355141
| 2
|
tests/test_maybe.py
|
darkfeline/mir.monads
| 0
|
6627100
|
<gh_stars>0
# Copyright (C) 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Maybe Monad."""
import mir.monads.maybe as maybe
@maybe.monadic
def invert(a):
return 1 / a
def test_bind():
assert maybe.Just(2).bind(invert) == maybe.Just(0.5)
def test_bind_nothing():
assert maybe.Just(0).bind(invert).bind(invert) == maybe.Nothing()
def test_fmap_just():
assert maybe.Just(1).fmap(lambda x: x + 1) == maybe.Just(2)
def test_fmap_just_none():
assert maybe.Just(1).fmap(lambda x: None) == maybe.Nothing()
def test_fmap_just_exception():
assert maybe.Just(1).fmap(lambda x: 1 + '') == maybe.Nothing()
def test_fmap_nothing():
assert maybe.Nothing().fmap(lambda x: x + 1) == maybe.Nothing() # pragma: no branch
def test_apply_just():
assert maybe.Just(lambda x: x + 1).apply(maybe.Just(1)) == maybe.Just(2)
def test_apply_nothing():
assert maybe.Nothing().apply(maybe.Just(1)) == maybe.Nothing()
|
# Copyright (C) 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Maybe Monad."""
import mir.monads.maybe as maybe
@maybe.monadic
def invert(a):
return 1 / a
def test_bind():
assert maybe.Just(2).bind(invert) == maybe.Just(0.5)
def test_bind_nothing():
assert maybe.Just(0).bind(invert).bind(invert) == maybe.Nothing()
def test_fmap_just():
assert maybe.Just(1).fmap(lambda x: x + 1) == maybe.Just(2)
def test_fmap_just_none():
assert maybe.Just(1).fmap(lambda x: None) == maybe.Nothing()
def test_fmap_just_exception():
assert maybe.Just(1).fmap(lambda x: 1 + '') == maybe.Nothing()
def test_fmap_nothing():
assert maybe.Nothing().fmap(lambda x: x + 1) == maybe.Nothing() # pragma: no branch
def test_apply_just():
assert maybe.Just(lambda x: x + 1).apply(maybe.Just(1)) == maybe.Just(2)
def test_apply_nothing():
assert maybe.Nothing().apply(maybe.Just(1)) == maybe.Nothing()
|
en
| 0.844457
|
# Copyright (C) 2016 <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Tests for Maybe Monad. # pragma: no branch
| 2.316909
| 2
|
users-profils-Q/Tests/test_profiles_service_request.py
|
Enaifuos/Recommandation-System-Food-Eatrack-Application
| 0
|
6627101
|
import sys
import unittest
from collections import OrderedDict
#sys.path.append("..")
from src.client_side import profiles_service_requester as rq
import pandas as pd
import os
package_dir = os.path.dirname(os.path.abspath(__file__))
class profiles_service_requester_test(unittest.TestCase):
"""Test case utilisé pour tester les fonctionnalités du module profile_service_requester"""
def setUp(self):
self.frigo_content_depart = ['fraise', 'lait', 'banane']
self.existant_ids_group = [675719, 539102, 2810501]
self.existant_id = 675719
self.existant_id_empty_fridge = 1896099 # pas utilisé pour l'instant !
self.incorrect_ids_group = [33, 25495, 675719]
self.inexistant_id = 000
self.resource_profiles_file = os.path.join(package_dir, '../Res/generated_profiles.csv')
self.resource_fridge_file = os.path.join(package_dir, '../Res/fridge_content.csv')
self.resource_history_file = os.path.join(package_dir, '../Res/clean_reviews.csv')
profiles_file = pd.read_csv(self.resource_profiles_file, sep=";")
fridge_file = pd.read_csv(self.resource_fridge_file, sep=";")
history_file = pd.read_csv(self.resource_history_file, sep=",")
#print("\n")
#print(profiles_file[profiles_file['user_id'] == int(self.existant_id)])
df_profiles = profiles_file[profiles_file['user_id'] == int(self.existant_id)]
df_fridge = fridge_file[(fridge_file['user_id'] == int(self.existant_id))]
df_history = history_file[(history_file['profileID'] == int(self.existant_id))]
self.actual_preferences_existant_user = df_profiles['preferences'].values.tolist()[0].split(',')
self.actual_allergies_existant_user = df_profiles['undesirable'].values.tolist()[0].split(',')
self.actual_fridge_existant_user = df_fridge['content'].values.tolist()[0].split(',')
self.actual_history_existant_user = df_history['RecipeID'].values.tolist()
self.actual_preferences_existant_group_user = []
self.actual_allergies_existant_group_user = []
self.actual_fridge_existant_group_user = []
self.actual_history_existant_group_user = []
for id in self.existant_ids_group:
df_profiles = profiles_file[(profiles_file['user_id'] == int(id))]
df_fridge = fridge_file[(fridge_file['user_id'] == int(id))]
df_history = history_file[(history_file['profileID'] == int(id))]
self.actual_preferences_existant_group_user += df_profiles['preferences'].values.tolist()[0].split(',')
self.actual_allergies_existant_group_user += df_profiles['undesirable'].values.tolist()[0].split(',')
self.actual_history_existant_group_user += df_history['RecipeID'].values.tolist()
self.actual_history_existant_group_user = list(OrderedDict.fromkeys(self.actual_history_existant_group_user))
if str(id) == str(self.existant_ids_group[0]) :
self.actual_fridge_existant_group_user += df_fridge['content'].values.tolist()[0].split(',')
def test_get_user_profil_existant(self):
"""Test case utilisé pour tester récupération du profil d'un utilisateur existant"""
profil = rq.get_user_profil(self.existant_id)
self.assertEqual(profil[0], self.actual_preferences_existant_user)
self.assertEqual(profil[1], self.actual_allergies_existant_user)
self.assertEqual(profil[2], self.actual_fridge_existant_user)
self.assertEqual(profil[3], self.actual_history_existant_user)
self.assertEqual(profil[4], 0)
def test_get_user_profil_inexistant(self):
"""Test case utilisé pour tester récupération du profil d'un utilisateur inexistant"""
profil = rq.get_user_profil(self.inexistant_id)
self.assertEqual(profil[4], -1)
def test_get_group_users_profil_existant(self):
"""Test case utilisé pour tester récupération du profil d'un utilisateur inexistant"""
profils = rq.get_group_profils(self.existant_ids_group)
self.assertEqual(profils[0], self.actual_preferences_existant_group_user)
self.assertEqual(profils[1], self.actual_allergies_existant_group_user)
self.assertEqual(profils[2], self.actual_fridge_existant_group_user)
self.assertEqual(profils[3], self.actual_history_existant_group_user)
def test_get_group_users_profil_not_existant(self):
"""Test case utilisé pour tester récupération du profil d'un groupe d'utilisateurs inexistants"""
profils = rq.get_group_profils(self.incorrect_ids_group)
self.assertEqual(profils[4], -1)
if __name__ == '__main__':
unittest.main()
|
import sys
import unittest
from collections import OrderedDict
#sys.path.append("..")
from src.client_side import profiles_service_requester as rq
import pandas as pd
import os
package_dir = os.path.dirname(os.path.abspath(__file__))
class profiles_service_requester_test(unittest.TestCase):
"""Test case utilisé pour tester les fonctionnalités du module profile_service_requester"""
def setUp(self):
self.frigo_content_depart = ['fraise', 'lait', 'banane']
self.existant_ids_group = [675719, 539102, 2810501]
self.existant_id = 675719
self.existant_id_empty_fridge = 1896099 # pas utilisé pour l'instant !
self.incorrect_ids_group = [33, 25495, 675719]
self.inexistant_id = 000
self.resource_profiles_file = os.path.join(package_dir, '../Res/generated_profiles.csv')
self.resource_fridge_file = os.path.join(package_dir, '../Res/fridge_content.csv')
self.resource_history_file = os.path.join(package_dir, '../Res/clean_reviews.csv')
profiles_file = pd.read_csv(self.resource_profiles_file, sep=";")
fridge_file = pd.read_csv(self.resource_fridge_file, sep=";")
history_file = pd.read_csv(self.resource_history_file, sep=",")
#print("\n")
#print(profiles_file[profiles_file['user_id'] == int(self.existant_id)])
df_profiles = profiles_file[profiles_file['user_id'] == int(self.existant_id)]
df_fridge = fridge_file[(fridge_file['user_id'] == int(self.existant_id))]
df_history = history_file[(history_file['profileID'] == int(self.existant_id))]
self.actual_preferences_existant_user = df_profiles['preferences'].values.tolist()[0].split(',')
self.actual_allergies_existant_user = df_profiles['undesirable'].values.tolist()[0].split(',')
self.actual_fridge_existant_user = df_fridge['content'].values.tolist()[0].split(',')
self.actual_history_existant_user = df_history['RecipeID'].values.tolist()
self.actual_preferences_existant_group_user = []
self.actual_allergies_existant_group_user = []
self.actual_fridge_existant_group_user = []
self.actual_history_existant_group_user = []
for id in self.existant_ids_group:
df_profiles = profiles_file[(profiles_file['user_id'] == int(id))]
df_fridge = fridge_file[(fridge_file['user_id'] == int(id))]
df_history = history_file[(history_file['profileID'] == int(id))]
self.actual_preferences_existant_group_user += df_profiles['preferences'].values.tolist()[0].split(',')
self.actual_allergies_existant_group_user += df_profiles['undesirable'].values.tolist()[0].split(',')
self.actual_history_existant_group_user += df_history['RecipeID'].values.tolist()
self.actual_history_existant_group_user = list(OrderedDict.fromkeys(self.actual_history_existant_group_user))
if str(id) == str(self.existant_ids_group[0]) :
self.actual_fridge_existant_group_user += df_fridge['content'].values.tolist()[0].split(',')
def test_get_user_profil_existant(self):
"""Test case utilisé pour tester récupération du profil d'un utilisateur existant"""
profil = rq.get_user_profil(self.existant_id)
self.assertEqual(profil[0], self.actual_preferences_existant_user)
self.assertEqual(profil[1], self.actual_allergies_existant_user)
self.assertEqual(profil[2], self.actual_fridge_existant_user)
self.assertEqual(profil[3], self.actual_history_existant_user)
self.assertEqual(profil[4], 0)
def test_get_user_profil_inexistant(self):
"""Test case utilisé pour tester récupération du profil d'un utilisateur inexistant"""
profil = rq.get_user_profil(self.inexistant_id)
self.assertEqual(profil[4], -1)
def test_get_group_users_profil_existant(self):
"""Test case utilisé pour tester récupération du profil d'un utilisateur inexistant"""
profils = rq.get_group_profils(self.existant_ids_group)
self.assertEqual(profils[0], self.actual_preferences_existant_group_user)
self.assertEqual(profils[1], self.actual_allergies_existant_group_user)
self.assertEqual(profils[2], self.actual_fridge_existant_group_user)
self.assertEqual(profils[3], self.actual_history_existant_group_user)
def test_get_group_users_profil_not_existant(self):
"""Test case utilisé pour tester récupération du profil d'un groupe d'utilisateurs inexistants"""
profils = rq.get_group_profils(self.incorrect_ids_group)
self.assertEqual(profils[4], -1)
if __name__ == '__main__':
unittest.main()
|
fr
| 0.984008
|
#sys.path.append("..") Test case utilisé pour tester les fonctionnalités du module profile_service_requester # pas utilisé pour l'instant ! #print("\n") #print(profiles_file[profiles_file['user_id'] == int(self.existant_id)]) Test case utilisé pour tester récupération du profil d'un utilisateur existant Test case utilisé pour tester récupération du profil d'un utilisateur inexistant Test case utilisé pour tester récupération du profil d'un utilisateur inexistant Test case utilisé pour tester récupération du profil d'un groupe d'utilisateurs inexistants
| 2.700447
| 3
|
src/models/debug_fetch_data.py
|
voreille/plc_seg
| 0
|
6627102
|
<reponame>voreille/plc_seg
import os
from pathlib import Path
from random import shuffle
import datetime
import dotenv
import h5py
import pandas as pd
from src.models.fetch_data_from_hdf5 import get_tf_data
project_dir = Path(__file__).resolve().parents[2]
dotenv_path = project_dir / ".env"
dotenv.load_dotenv(str(dotenv_path))
log_dir = project_dir / ("logs/fit/" +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
path_data_nii = Path(os.environ["NII_PATH"])
path_mask_lung_nii = Path(os.environ["NII_LUNG_PATH"])
path_clinical_info = Path(os.environ["CLINIC_INFO_PATH"])
image_size = (256, 256)
bs = 2
def get_trainval_patient_list(df, patient_list):
id_list = [int(p.split('_')[1]) for p in patient_list]
df = df.loc[id_list, :]
id_patient_plc_neg_training = list(df[(df["is_chuv"] == 1)
& (df["plc_status"] == 0)].index)
id_patient_plc_pos_training = list(df[(df["is_chuv"] == 1)
& (df["plc_status"] == 1)].index)
shuffle(id_patient_plc_neg_training)
shuffle(id_patient_plc_pos_training)
id_patient_plc_neg_val = id_patient_plc_neg_training[:2]
id_patient_plc_pos_val = id_patient_plc_pos_training[:4]
id_val = id_patient_plc_neg_val + id_patient_plc_pos_val
id_patient_plc_neg_train = id_patient_plc_neg_training[2:]
id_patient_plc_pos_train = id_patient_plc_pos_training[4:]
id_train = id_patient_plc_neg_train + id_patient_plc_pos_train
patient_list_val = [f"PatientLC_{i}" for i in id_val]
patient_list_train = [f"PatientLC_{i}" for i in id_train]
return patient_list_train, patient_list_val
def main():
file_train = h5py.File(
"/home/val/python_wkspce/plc_seg/data/processed/2d_pet_normalized/train.hdf5",
"r")
clinical_df = pd.read_csv(path_clinical_info).set_index("patient_id")
patient_list = list(file_train.keys())
patient_list = [p for p in patient_list if p not in ["PatientLC_63"]]
patient_list_train, patient_list_val = get_trainval_patient_list(
clinical_df, patient_list)
data_val = get_tf_data(
file_train,
clinical_df,
output_shape_image=(256, 256),
random_slice=False,
centered_on_gtvt=True,
patient_list_copy=patient_list_val,
).cache().batch(2)
data_train = get_tf_data(file_train,
clinical_df,
output_shape_image=(256, 256),
random_slice=True,
random_shift=20,
n_repeat=10,
num_parallel_calls='auto',
oversample_plc_neg=True,
patient_list_copy=patient_list_train).batch(bs)
for x, y, plc_status in data_val.as_numpy_iterator():
print(
f"voici, voilé le x {x.shape}, le y {y.shape} et le plc_status {plc_status}"
)
file_train.close()
if __name__ == '__main__':
main()
|
import os
from pathlib import Path
from random import shuffle
import datetime
import dotenv
import h5py
import pandas as pd
from src.models.fetch_data_from_hdf5 import get_tf_data
project_dir = Path(__file__).resolve().parents[2]
dotenv_path = project_dir / ".env"
dotenv.load_dotenv(str(dotenv_path))
log_dir = project_dir / ("logs/fit/" +
datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
path_data_nii = Path(os.environ["NII_PATH"])
path_mask_lung_nii = Path(os.environ["NII_LUNG_PATH"])
path_clinical_info = Path(os.environ["CLINIC_INFO_PATH"])
image_size = (256, 256)
bs = 2
def get_trainval_patient_list(df, patient_list):
id_list = [int(p.split('_')[1]) for p in patient_list]
df = df.loc[id_list, :]
id_patient_plc_neg_training = list(df[(df["is_chuv"] == 1)
& (df["plc_status"] == 0)].index)
id_patient_plc_pos_training = list(df[(df["is_chuv"] == 1)
& (df["plc_status"] == 1)].index)
shuffle(id_patient_plc_neg_training)
shuffle(id_patient_plc_pos_training)
id_patient_plc_neg_val = id_patient_plc_neg_training[:2]
id_patient_plc_pos_val = id_patient_plc_pos_training[:4]
id_val = id_patient_plc_neg_val + id_patient_plc_pos_val
id_patient_plc_neg_train = id_patient_plc_neg_training[2:]
id_patient_plc_pos_train = id_patient_plc_pos_training[4:]
id_train = id_patient_plc_neg_train + id_patient_plc_pos_train
patient_list_val = [f"PatientLC_{i}" for i in id_val]
patient_list_train = [f"PatientLC_{i}" for i in id_train]
return patient_list_train, patient_list_val
def main():
file_train = h5py.File(
"/home/val/python_wkspce/plc_seg/data/processed/2d_pet_normalized/train.hdf5",
"r")
clinical_df = pd.read_csv(path_clinical_info).set_index("patient_id")
patient_list = list(file_train.keys())
patient_list = [p for p in patient_list if p not in ["PatientLC_63"]]
patient_list_train, patient_list_val = get_trainval_patient_list(
clinical_df, patient_list)
data_val = get_tf_data(
file_train,
clinical_df,
output_shape_image=(256, 256),
random_slice=False,
centered_on_gtvt=True,
patient_list_copy=patient_list_val,
).cache().batch(2)
data_train = get_tf_data(file_train,
clinical_df,
output_shape_image=(256, 256),
random_slice=True,
random_shift=20,
n_repeat=10,
num_parallel_calls='auto',
oversample_plc_neg=True,
patient_list_copy=patient_list_train).batch(bs)
for x, y, plc_status in data_val.as_numpy_iterator():
print(
f"voici, voilé le x {x.shape}, le y {y.shape} et le plc_status {plc_status}"
)
file_train.close()
if __name__ == '__main__':
main()
|
none
| 1
| 2.160693
| 2
|
|
recurring_content_detector/keras_rmac/__init__.py
|
busterbeam/recurring-content-detector
| 1
|
6627103
|
from . import rmac
|
from . import rmac
|
none
| 1
| 1.096897
| 1
|
|
entropylab/graph_experiment.py
|
IgorQM/entropy
| 0
|
6627104
|
<reponame>IgorQM/entropy<filename>entropylab/graph_experiment.py
import asyncio
import enum
import sys
import traceback
from copy import deepcopy
from datetime import datetime
from inspect import signature, iscoroutinefunction, getfullargspec
from itertools import count
from typing import Optional, Dict, Any, Set, Union, Callable, Coroutine, Iterable
from graphviz import Digraph
from entropylab.api.data_reader import (
DataReader,
NodeResults,
ExperimentReader,
)
from entropylab.api.data_writer import DataWriter, NodeData
from entropylab.api.errors import EntropyError
from entropylab.api.execution import (
ExperimentExecutor,
EntropyContext,
_EntropyContextFactory,
)
from entropylab.api.experiment import (
ExperimentDefinition,
ExperimentHandle,
_Experiment,
)
from entropylab.api.graph import GraphHelper, Node, Output, _NodeExecutionInfo
from entropylab.instruments.lab_topology import ExperimentResources
from entropylab.logger import logger
def pynode(
label: str,
input_vars: Dict[str, Output] = None,
output_vars: Set[str] = None,
must_run_after: Set[Node] = None,
):
"""
decorator for running the given python function as a PyNode
:param label: node label
:param input_vars: dictionary of node inputs, keys are input names.
the input values should be defined as the following:
>>>input_vars={"a": node.outputs["x"]}
:param output_vars: a set of output names. The given python function should
return a dictionary whose keys are the same as the output vars
:param must_run_after: A set of nodes. If those nodes are in the same graph,
current node will run after they finish execution.
:return: node instance
"""
def decorate(fn):
return PyNode(label, fn, input_vars, output_vars, must_run_after)
return decorate
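# A minimal usage sketch of the decorator above (illustrative only; the node
# labels, functions, and values are hypothetical and not part of this module):
#
#     @pynode("produce", output_vars={"x"})
#     def produce():
#         return {"x": 1}
#
#     @pynode("consume", input_vars={"a": produce.outputs["x"]}, output_vars={"y"})
#     def consume(a):
#         return {"y": a + 1}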
class PyNode(Node):
"""
Node that gets a python function or coroutine and wraps it with an entropy graph node.
Node is defined by a python function, input and outputs.
Entropy will call the function, filling the function parameters:
1. If function parameter has the same name as node input,
Entropy will pass the input value
2. If function parameter is of type EntropyContext, Entropy will pass current context
3. If function parameter has the same name as given experiment run kwargs,
Entropy will pass the kwarg value
4. if function parameter is of type *args,
Entropy will pass all inputs that were not specified by name
5. If function parameter name is "is_last",
Entropy will pass True if this is the last node in the graph
"""
def __init__(
self,
label: str = None,
program: Union[Callable, Coroutine] = None,
input_vars: Dict[str, Output] = None,
output_vars: Set[str] = None,
must_run_after: Set[Node] = None,
save_results: bool = True,
):
"""
Node that gets a python function or coroutine and wraps
it with an entropy graph node.
Node is defined by a python function, input and outputs.
Entropy will call the function, filling the function parameters:
1. If function parameter has the same name as node input,
Entropy will pass the input value
2. If function parameter is of type EntropyContext,
Entropy will pass current context
3. If function parameter has the same name as given
experiment run kwargs, Entropy will pass the kwarg value
4. if function parameter is of type *args,
Entropy will pass all inputs that were not specified by name
5. If function parameter name is "is_last",
Entropy will pass True if this is the last node in the graph
:param label: node label
:param program: the node program in a python function or coroutine
:param input_vars: dictionary of node inputs, keys are input names.
the input values should be defined as the following:
>>>input_vars={"a": node.outputs["x"]}
:param output_vars: a set of output names. The given python function should
return a dictionary whose keys are the same as the output vars
:param must_run_after: A set of nodes. If those nodes are in the same graph,
current node will run after they finish execution.
"""
super().__init__(label, input_vars, output_vars, must_run_after, save_results)
self._program = program
async def _execute_async(
self,
input_values: Dict[str, Any],
context: EntropyContext,
is_last,
**kwargs,
) -> Dict[str, Any]:
args_parameters, keyword_function_parameters = self._prepare_for_execution(
context, is_last, kwargs, input_values
)
try:
if iscoroutinefunction(self._program):
results = await self._program(
*args_parameters, **keyword_function_parameters
)
else:
results = self._program(*args_parameters, **keyword_function_parameters)
return self._handle_results(results)
except BaseException as e:
raise e
def _execute(
self,
input_values: Dict[str, Any],
context: EntropyContext,
is_last,
**kwargs,
) -> Dict[str, Any]:
args_parameters, keyword_function_parameters = self._prepare_for_execution(
context, is_last, kwargs, input_values
)
try:
if iscoroutinefunction(self._program):
results = asyncio.run(
self._program(*args_parameters, **keyword_function_parameters)
)
else:
results = self._program(*args_parameters, **keyword_function_parameters)
return self._handle_results(results)
except BaseException as e:
raise e
def _handle_results(self, results):
outputs = {}
if isinstance(results, Dict):
for var in self._output_vars:
try:
outputs[var] = results[var]
except KeyError:
logger.error(
f"WARNING Could not fetch variable '{var}' "
f"from the results of node <{self.label}>"
)
pass
else:
if results:
raise EntropyError(
f"node {self.label} result should be a "
f"dictionary but is {type(results)}"
)
return outputs
def _prepare_for_execution(
self, context, is_last, kwargs, input_values: Dict[str, Any]
):
sig = signature(self._program)
keyword_function_parameters = {}
for param in sig.parameters:
if sig.parameters[param].annotation is EntropyContext:
keyword_function_parameters[param] = context
if "is_last" in sig.parameters:
keyword_function_parameters["is_last"] = is_last
(
args,
varargs,
varkw,
defaults,
kwonlyargs,
kwonlydefaults,
annotations,
) = getfullargspec(self._program)
for arg in args + kwonlyargs:
if (
arg not in input_values
and arg not in keyword_function_parameters
and arg not in kwargs
and (
(defaults and arg not in defaults)
or (kwonlydefaults and arg not in kwonlydefaults)
)
):
logger.error(f"Error in node {self.label} - {arg} is not in parameters")
raise KeyError(arg)
if arg in input_values:
keyword_function_parameters[arg] = input_values[arg]
elif arg in kwargs:
keyword_function_parameters[arg] = kwargs[arg]
args_parameters = []
if varargs is not None:
for item in input_values:
if item not in keyword_function_parameters:
args_parameters.append(input_values[item])
return args_parameters, keyword_function_parameters
def _create_actual_graph(nodes: Set[Node], key_nodes: Set[Node]):
nodes_copy: Dict[Node, Node] = {node: deepcopy(node) for node in nodes}
for node in nodes_copy:
for input_var in nodes_copy[node]._input_vars:
output = node._input_vars[input_var]
nodes_copy[node]._input_vars[input_var] = nodes_copy[output.node].outputs[
output.name
]
nodes_copy[node]._must_run_after = {nodes_copy[m] for m in node._must_run_after}
return {_NodeExecutionInfo(nodes_copy[node], node in key_nodes) for node in nodes}
class SubGraphNode(Node):
"""
Node that holds a complete graph and runs as a single node within
another graph.
"""
def __init__(
self,
graph: Union[GraphHelper, Node, Set[Node]],
label: str = None,
input_vars: Dict[str, Output] = None,
output_vars: Set[str] = None,
must_run_after: Set[Node] = None,
key_nodes: Optional[Set[Node]] = None,
save_results: bool = True,
):
"""
:param graph: the graph model
:param label: node label
:param input_vars: dictionary of node inputs, keys are input names.
the input values should be defined as the following:
>>>input_vars={"a": node.outputs["x"]}
:param output_vars: a set of output names. The given python function should
return a dictionary whose keys are the same as the output vars
:param must_run_after: A set of nodes. If those nodes are in the same graph,
current node will run after they finish execution.
"""
super().__init__(label, input_vars, output_vars, must_run_after, save_results)
self._key_nodes = key_nodes
if self._key_nodes is None:
self._key_nodes = set()
if isinstance(graph, GraphHelper):
self._graph: Set[_NodeExecutionInfo] = _create_actual_graph(
graph.nodes, self._key_nodes
)
elif isinstance(graph, Node):
self._graph: Set[_NodeExecutionInfo] = _create_actual_graph(
{graph}, self._key_nodes
)
elif isinstance(graph, Set):
self._graph: Set[_NodeExecutionInfo] = _create_actual_graph(
graph, self._key_nodes
)
else:
raise Exception(
"graph parameter type is not supported, please pass a Node or set of nodes"
)
async def _execute_async(
self,
input_values: Dict[str, Any],
context: EntropyContext,
is_last,
**kwargs,
) -> Dict[str, Any]:
executors = {node.node: _NodeExecutor(node) for node in self._graph}
return await _AsyncGraphExecutor(
self._graph, executors, **kwargs
).execute_async(context._context_factory)
def _execute(
self,
input_values: Dict[str, Any],
context: EntropyContext,
is_last,
**kwargs,
) -> Dict[str, Any]:
executors = {node.node: _NodeExecutor(node) for node in self._graph}
return _GraphExecutor(self._graph, executors, **kwargs).execute(
context._context_factory
)
class _NodeExecutor:
def __init__(self, node_execution_info: _NodeExecutionInfo) -> None:
super().__init__()
self._node: Node = node_execution_info.node
self._start_time: Optional[datetime] = None
self._end_time: Optional[datetime] = None
self.result: Dict[str, Any] = {}
self.to_run = True
self._is_key_node = node_execution_info.is_key_node
def run(
self,
input_values: Dict[str, Any],
context_factory: _EntropyContextFactory,
is_last: int,
**kwargs,
) -> Dict[str, Any]:
if self.to_run:
context = context_factory.create()
self._prepare_for_run(context)
self.result = self._node._execute(
input_values,
context,
is_last,
**kwargs,
)
return self._handle_result(context)
async def run_async(
self,
input_values: Dict[str, Any],
context_factory: _EntropyContextFactory,
is_last: int,
**kwargs,
) -> Dict[str, Any]:
if self.to_run:
context = context_factory.create()
self._prepare_for_run(context)
self.result = await self._node._execute_async(
input_values,
context,
is_last,
**kwargs,
)
return self._handle_result(context)
def _handle_result(self, context):
if self._node._should_save_results():
# logger fetching results
for output_id in self.result:
output = self.result[output_id]
context.add_result(label=f"{output_id}", data=output)
self._end_time = datetime.now()
logger.debug(
f"Done running node <{self._node.__class__.__name__}> {self._node.label}"
)
return self.result
def _prepare_for_run(self, context: EntropyContext):
logger.info(
f"Running node <{self._node.__class__.__name__}> {self._node.label}"
)
self._start_time = datetime.now()
logger.debug(
f"Saving metadata before running node "
f"<{self._node.__class__.__name__}> {self._node.label} id={context._get_stage_id()}"
)
context._data_writer.save_node(
context._exp_id,
NodeData(
context._get_stage_id(),
self._start_time,
self._node.label,
self._is_key_node,
),
)
class _AsyncGraphExecutor(ExperimentExecutor):
def __init__(
self,
nodes_execution_info: Set[_NodeExecutionInfo],
nodes: Dict[Node, _NodeExecutor],
**kwargs,
) -> None:
super().__init__()
self._graph: GraphHelper = GraphHelper(nodes_execution_info)
self._node_kwargs = kwargs
self._tasks: Dict[Node, Any] = dict()
self._results: Dict = dict()
self._stopped = False
self._node_id_iter = count(start=0, step=1)
self._executors: Dict[Node, _NodeExecutor] = nodes
def execute(self, context: _EntropyContextFactory) -> Any:
async_result = asyncio.run(self.execute_async(context))
return async_result
@property
def failed(self) -> bool:
return self._stopped
async def execute_async(self, context_factory: _EntropyContextFactory):
# traverse the graph and run the nodes
end_nodes = self._graph.leaves
chains = []
for node in end_nodes:
chains.append(self._run_node_and_ancestors(node, context_factory, 0))
result = await asyncio.gather(*chains)
result = [x for x in result if x is not None]
combined_result = {}
if result:
combined_result = {k: v for d in result for k, v in d.items()}
return combined_result
async def _run_node_and_ancestors(
self, node: Node, context_factory: _EntropyContextFactory, is_last: int
):
tasks = []
for input_name in node.get_parents():
if input_name not in self._tasks:
task = self._run_node_and_ancestors(
input_name, context_factory, is_last + 1
)
tasks.append(task)
self._tasks[input_name] = task
else:
tasks.append(self._tasks[input_name])
results = []
if len(tasks) > 0:
await asyncio.wait(tasks)
if self._stopped:
return None
results = {}
inputs_by_name = node.get_inputs_by_name()
for input_name in inputs_by_name:
parent_node = inputs_by_name[input_name].node
parent_output_name = inputs_by_name[input_name].name
if (
parent_node not in self._executors
or parent_output_name not in self._executors[parent_node].result
):
raise EntropyError(
f"node {node.label} input is missing: {parent_output_name}"
)
results[input_name] = self._executors[parent_node].result[
parent_output_name
]
node_executor = self._executors[node]
try:
return await node_executor.run_async(
results,
context_factory,
node in self._graph.leaves,
**self._node_kwargs,
)
except BaseException as e:
self._stopped = True
trace = "\n".join(traceback.format_exception(*sys.exc_info()))
logger.error(
f"Stopping GraphHelper, Error in node {node.label} "
f"of type {e.__class__.__qualname__}. message: {e}\ntrace:\n{trace}"
)
return
class _GraphExecutor(ExperimentExecutor):
def __init__(
self,
nodes_execution_info: Set[_NodeExecutionInfo],
nodes: Dict[Node, _NodeExecutor],
**kwargs,
) -> None:
super().__init__()
self._graph: GraphHelper = GraphHelper(nodes_execution_info)
self._node_kwargs = kwargs
self._tasks: Dict[Node, Any] = dict()
self._results: Dict = dict()
self._stopped = False
self._executors: Dict[Node, _NodeExecutor] = nodes
def execute(self, context_factory: _EntropyContextFactory) -> Any:
sorted_nodes = self._graph.nodes_in_topological_order()
for node in sorted_nodes:
results = {}
inputs_by_name = node.get_inputs_by_name()
for input_name in inputs_by_name:
parent_node = inputs_by_name[input_name].node
parent_output_name = inputs_by_name[input_name].name
if (
parent_node not in self._executors
or parent_output_name not in self._executors[parent_node].result
):
raise EntropyError(
f"node {node.label} input is missing: {parent_output_name}"
)
results[input_name] = self._executors[parent_node].result[
parent_output_name
]
node_executor = self._executors[node]
try:
node_executor.run(
results,
context_factory,
node in self._graph.leaves,
**self._node_kwargs,
)
except BaseException as e:
self._stopped = True
trace = "\n".join(traceback.format_exception(*sys.exc_info()))
logger.error(
f"Stopping GraphHelper, Error in node {node.label} of type "
f"{e.__class__.__qualname__}. message: {e}\ntrace:\n{trace}"
)
return
combined_result = {}
for node in self._graph.leaves:
result = self._executors[node].result
if result:
for key in result:
combined_result[key] = result[key]
return combined_result
@property
def failed(self) -> bool:
return self._stopped
class GraphReader(ExperimentReader):
"""
Reads results and data from a single graph experiment
"""
def __init__(self, experiment_id: int, db: DataReader) -> None:
"""
Reads results and data from a single graph experiment
:param experiment_id: the id of the experiment
:param db: results database that implements the DataReader abstract class
"""
super().__init__(experiment_id, db)
def get_results_from_node(
self, node_label: str, result_label: Optional[str] = None
) -> Iterable[NodeResults]:
"""
returns an iterable of all results from a node with the given label
and results with the given result_label
:param node_label: label of node to get results from
:param result_label: label of result records
"""
return self._data_reader.get_results_from_node(
node_label, self._experiment_id, result_label
)
class GraphExperimentHandle(ExperimentHandle):
"""
A handle of the graph experiment execution;
can be used to get information and read results
"""
def __init__(self, experiment: _Experiment, graph: GraphHelper) -> None:
super().__init__()
self._experiment = experiment
self._graph = graph
@property
def id(self):
return self._experiment.exp_id
@property
def results(self) -> GraphReader:
"""
returns a reader for reading results from a graph experiment
"""
return GraphReader(self.id, self._experiment.data_reader())
def dot_graph(self):
return self._graph.export_dot_graph()
class GraphExecutionType(enum.Enum):
Sync = 1
Async = 2
class Graph(ExperimentDefinition):
"""
Experiment defined by a graph model that runs within Entropy.
Information, results and metadata will be saved during every execution.
Nodes within the graph will be executed in a topological order.
Every node is declared with a set of inputs and outputs that
are transferred from a node to its children.
"""
def __init__(
self,
resources: Optional[ExperimentResources],
graph: Union[Node, Set[Node], GraphHelper],
label: Optional[str] = None,
story: str = None,
key_nodes: Optional[Set[Node]] = None,
execution_type: GraphExecutionType = GraphExecutionType.Sync,
) -> None:
"""
Experiment defined by a graph model that runs within Entropy.
:param resources: shared lab resources or temporary resources that
are used in the experiment.
:param graph: the experiment model, can be a graph or a single node.
:param label: experiment label
:param story: a description of the experiment, which will create an experiment story
with all other parts of the experiment
:param key_nodes: a set of graph key nodes. Those nodes will be marked as graph results.
:param execution_type: specify whether to run the graph in sync mode, one node
at a time, or asynchronously, which will run nodes in parallel
according to their dependencies and implementation (using asyncio)
"""
super().__init__(resources, label, story)
self._key_nodes = key_nodes
if self._key_nodes is None:
self._key_nodes = set()
if isinstance(graph, GraphHelper):
self._original_nodes: Set[Node] = graph.nodes
elif isinstance(graph, Node):
self._original_nodes: Set[Node] = {graph}
elif isinstance(graph, Set):
self._original_nodes: Set[Node] = graph
else:
raise Exception(
"graph parameter type is not supported, please pass a Node or set of nodes"
)
all_ancestors = set(
[anc for node in self._original_nodes for anc in node.ancestors()]
)
if all_ancestors != self._original_nodes:
raise Exception(
f"nodes has inputs that are not part of this graph: "
f"{[node.label for node in all_ancestors.difference(self._original_nodes)]}"
)
self._actual_graph: Set[_NodeExecutionInfo] = _create_actual_graph(
self._original_nodes, self._key_nodes
)
self._to_node: Optional[Node] = None
self._execution_type: GraphExecutionType = execution_type
def _get_execution_instructions(self) -> ExperimentExecutor:
executors = {node.node: _NodeExecutor(node) for node in self._actual_graph}
if self._execution_type == GraphExecutionType.Sync:
return _GraphExecutor(self._actual_graph, executors, **self._kwargs)
elif self._execution_type == GraphExecutionType.Async:
return _AsyncGraphExecutor(self._actual_graph, executors, **self._kwargs)
else:
raise Exception(f"Execution type {self._execution_type} is not supported")
def serialize(self) -> str:
"""
dot graph representing the experiment
"""
return str(GraphHelper(self._actual_graph).export_dot_graph())
def run(self, db: Optional[DataWriter] = None, **kwargs) -> GraphExperimentHandle:
experiment = self._run(db, **kwargs)
return GraphExperimentHandle(experiment, GraphHelper(self._actual_graph))
def run_to_node(
self,
node: Node,
db: Optional[DataWriter] = None,
label: Optional[str] = None,
**kwargs,
) -> GraphExperimentHandle:
"""
Run the experiment in Entropy environment, only with the given node and
its ancestors.
Every call to this function creates a new run and returns a different handle.
:param node: the node object you want to run to.
:param db: results DB. If given, results will be saved in this DB; otherwise
results will only be saved during this python session
:param label: label for the current execution
:param kwargs: keyword arguments that will be passed to the experiment code as well.
The user can specify extra arguments here and request them in the
function declarations.
:return: a handle of the new graph experiment run
"""
if node not in self._original_nodes:
raise KeyError("Node is not in graph")
logger.info(f"Running node {node.label} and dependencies")
nodes = self._calculate_ancestors(node)
full_graph = self._actual_graph
self._actual_graph = _create_actual_graph(nodes, self._key_nodes)
old_label = self.label
if label:
self.label = label
try:
return self.run(db, **kwargs)
finally:
self.label = old_label
self._actual_graph = full_graph
def _calculate_ancestors(self, node):
ancestors: Set = set()
for parent in node.ancestors():
if parent in self._original_nodes:
ancestors.add(parent)
return ancestors
def dot_graph(self) -> Digraph:
return GraphHelper(self._actual_graph).export_dot_graph()
|
import asyncio
import enum
import sys
import traceback
from copy import deepcopy
from datetime import datetime
from inspect import signature, iscoroutinefunction, getfullargspec
from itertools import count
from typing import Optional, Dict, Any, Set, Union, Callable, Coroutine, Iterable
from graphviz import Digraph
from entropylab.api.data_reader import (
DataReader,
NodeResults,
ExperimentReader,
)
from entropylab.api.data_writer import DataWriter, NodeData
from entropylab.api.errors import EntropyError
from entropylab.api.execution import (
ExperimentExecutor,
EntropyContext,
_EntropyContextFactory,
)
from entropylab.api.experiment import (
ExperimentDefinition,
ExperimentHandle,
_Experiment,
)
from entropylab.api.graph import GraphHelper, Node, Output, _NodeExecutionInfo
from entropylab.instruments.lab_topology import ExperimentResources
from entropylab.logger import logger
def pynode(
label: str,
input_vars: Dict[str, Output] = None,
output_vars: Set[str] = None,
must_run_after: Set[Node] = None,
):
"""
decorator for running the given python function as a PyNode
:param label: node label
:param input_vars: dictionary of node inputs, keys are input names.
the input values should be defined as the following:
>>>input_vars={"a": node.outputs["x"]}
:param output_vars: a set of output names. The given python function should
return a dictionary whose keys are the same as the output vars
:param must_run_after: A set of nodes. If those nodes are in the same graph,
current node will run after they finish execution.
:return: node instance
"""
def decorate(fn):
return PyNode(label, fn, input_vars, output_vars, must_run_after)
return decorate
class PyNode(Node):
"""
Node that gets a python function or coroutine and wraps it with an entropy graph node.
Node is defined by a python function, input and outputs.
Entropy will call the function, filling the function parameters:
1. If function parameter has the same name as node input,
Entropy will pass the input value
2. If function parameter is of type EntropyContext, Entropy will pass current context
3. If function parameter has the same name as given experiment run kwargs,
Entropy will pass the kwarg value
4. if function parameter is of type *args,
Entropy will pass all inputs that were not specified by name
5. If function parameter name is "is_last",
Entropy will pass True if this is the last node in the graph
"""
def __init__(
self,
label: str = None,
program: Union[Callable, Coroutine] = None,
input_vars: Dict[str, Output] = None,
output_vars: Set[str] = None,
must_run_after: Set[Node] = None,
save_results: bool = True,
):
"""
Node that gets a python function or coroutine and wraps
it with an entropy graph node.
Node is defined by a python function, input and outputs.
Entropy will call the function, filling the function parameters:
1. If function parameter has the same name as node input,
Entropy will pass the input value
2. If function parameter is of type EntropyContext,
Entropy will pass current context
3. If function parameter has the same name as given
experiment run kwargs, Entropy will pass the kwarg value
4. if function parameter is of type *args,
Entropy will pass all inputs that were not specified by name
5. If function parameter name is "is_last",
Entropy will pass True if this is the last node in the graph
:param label: node label
:param program: the node program in a python function or coroutine
:param input_vars: dictionary of node inputs, keys are input names.
the input values should be defined as the following:
>>>input_vars={"a": node.outputs["x"]}
:param output_vars: a set of output names. The given python function should
return a dictionary whose keys are the same as the output vars
:param must_run_after: A set of nodes. If those nodes are in the same graph,
current node will run after they finish execution.
"""
super().__init__(label, input_vars, output_vars, must_run_after, save_results)
self._program = program
async def _execute_async(
self,
input_values: Dict[str, Any],
context: EntropyContext,
is_last,
**kwargs,
) -> Dict[str, Any]:
args_parameters, keyword_function_parameters = self._prepare_for_execution(
context, is_last, kwargs, input_values
)
try:
if iscoroutinefunction(self._program):
results = await self._program(
*args_parameters, **keyword_function_parameters
)
else:
results = self._program(*args_parameters, **keyword_function_parameters)
return self._handle_results(results)
except BaseException as e:
raise e
def _execute(
self,
input_values: Dict[str, Any],
context: EntropyContext,
is_last,
**kwargs,
) -> Dict[str, Any]:
args_parameters, keyword_function_parameters = self._prepare_for_execution(
context, is_last, kwargs, input_values
)
try:
if iscoroutinefunction(self._program):
results = asyncio.run(
self._program(*args_parameters, **keyword_function_parameters)
)
else:
results = self._program(*args_parameters, **keyword_function_parameters)
return self._handle_results(results)
except BaseException as e:
raise e
def _handle_results(self, results):
outputs = {}
if isinstance(results, Dict):
for var in self._output_vars:
try:
outputs[var] = results[var]
except KeyError:
logger.error(
f"WARNING Could not fetch variable '{var}' "
f"from the results of node <{self.label}>"
)
pass
else:
if results:
raise EntropyError(
f"node {self.label} result should be a "
f"dictionary but is {type(results)}"
)
return outputs
def _prepare_for_execution(
self, context, is_last, kwargs, input_values: Dict[str, Any]
):
sig = signature(self._program)
keyword_function_parameters = {}
for param in sig.parameters:
if sig.parameters[param].annotation is EntropyContext:
keyword_function_parameters[param] = context
if "is_last" in sig.parameters:
keyword_function_parameters["is_last"] = is_last
(
args,
varargs,
varkw,
defaults,
kwonlyargs,
kwonlydefaults,
annotations,
) = getfullargspec(self._program)
for arg in args + kwonlyargs:
if (
arg not in input_values
and arg not in keyword_function_parameters
and arg not in kwargs
and (
(defaults and arg not in defaults)
or (kwonlydefaults and arg not in kwonlydefaults)
)
):
logger.error(f"Error in node {self.label} - {arg} is not in parameters")
raise KeyError(arg)
if arg in input_values:
keyword_function_parameters[arg] = input_values[arg]
elif arg in kwargs:
keyword_function_parameters[arg] = kwargs[arg]
args_parameters = []
if varargs is not None:
for item in input_values:
if item not in keyword_function_parameters:
args_parameters.append(input_values[item])
return args_parameters, keyword_function_parameters
def _create_actual_graph(nodes: Set[Node], key_nodes: Set[Node]):
nodes_copy: Dict[Node, Node] = {node: deepcopy(node) for node in nodes}
for node in nodes_copy:
for input_var in nodes_copy[node]._input_vars:
output = node._input_vars[input_var]
nodes_copy[node]._input_vars[input_var] = nodes_copy[output.node].outputs[
output.name
]
nodes_copy[node]._must_run_after = {nodes_copy[m] for m in node._must_run_after}
return {_NodeExecutionInfo(nodes_copy[node], node in key_nodes) for node in nodes}
class SubGraphNode(Node):
"""
Node that holds a complete graph and runs as a single node within
another graph.
"""
def __init__(
self,
graph: Union[GraphHelper, Node, Set[Node]],
label: str = None,
input_vars: Dict[str, Output] = None,
output_vars: Set[str] = None,
must_run_after: Set[Node] = None,
key_nodes: Optional[Set[Node]] = None,
save_results: bool = True,
):
"""
:param graph: the graph model
:param label: node label
:param input_vars: dictionary of node inputs, keys are input names.
the input values should be defined as the following:
>>>input_vars={"a": node.outputs["x"]}
:param output_vars: a set of output names. The given python function should
return a dictionary whose keys are the same as the output vars
:param must_run_after: A set of nodes. If those nodes are in the same graph,
current node will run after they finish execution.
"""
super().__init__(label, input_vars, output_vars, must_run_after, save_results)
self._key_nodes = key_nodes
if self._key_nodes is None:
self._key_nodes = set()
if isinstance(graph, GraphHelper):
self._graph: Set[_NodeExecutionInfo] = _create_actual_graph(
graph.nodes, self._key_nodes
)
elif isinstance(graph, Node):
self._graph: Set[_NodeExecutionInfo] = _create_actual_graph(
{graph}, self._key_nodes
)
elif isinstance(graph, Set):
self._graph: Set[_NodeExecutionInfo] = _create_actual_graph(
graph, self._key_nodes
)
else:
raise Exception(
"graph parameter type is not supported, please pass a Node or set of nodes"
)
async def _execute_async(
self,
input_values: Dict[str, Any],
context: EntropyContext,
is_last,
**kwargs,
) -> Dict[str, Any]:
executors = {node.node: _NodeExecutor(node) for node in self._graph}
return await _AsyncGraphExecutor(
self._graph, executors, **kwargs
).execute_async(context._context_factory)
def _execute(
self,
input_values: Dict[str, Any],
context: EntropyContext,
is_last,
**kwargs,
) -> Dict[str, Any]:
executors = {node.node: _NodeExecutor(node) for node in self._graph}
return _GraphExecutor(self._graph, executors, **kwargs).execute(
context._context_factory
)
class _NodeExecutor:
def __init__(self, node_execution_info: _NodeExecutionInfo) -> None:
super().__init__()
self._node: Node = node_execution_info.node
self._start_time: Optional[datetime] = None
self._end_time: Optional[datetime] = None
self.result: Dict[str, Any] = {}
self.to_run = True
self._is_key_node = node_execution_info.is_key_node
def run(
self,
input_values: Dict[str, Any],
context_factory: _EntropyContextFactory,
is_last: int,
**kwargs,
) -> Dict[str, Any]:
if self.to_run:
context = context_factory.create()
self._prepare_for_run(context)
self.result = self._node._execute(
input_values,
context,
is_last,
**kwargs,
)
return self._handle_result(context)
async def run_async(
self,
input_values: Dict[str, Any],
context_factory: _EntropyContextFactory,
is_last: int,
**kwargs,
) -> Dict[str, Any]:
if self.to_run:
context = context_factory.create()
self._prepare_for_run(context)
self.result = await self._node._execute_async(
input_values,
context,
is_last,
**kwargs,
)
return self._handle_result(context)
def _handle_result(self, context):
if self._node._should_save_results():
            # save every output of the node as a result record
for output_id in self.result:
output = self.result[output_id]
context.add_result(label=f"{output_id}", data=output)
self._end_time = datetime.now()
logger.debug(
f"Done running node <{self._node.__class__.__name__}> {self._node.label}"
)
return self.result
def _prepare_for_run(self, context: EntropyContext):
logger.info(
f"Running node <{self._node.__class__.__name__}> {self._node.label}"
)
self._start_time = datetime.now()
logger.debug(
f"Saving metadata before running node "
f"<{self._node.__class__.__name__}> {self._node.label} id={context._get_stage_id()}"
)
context._data_writer.save_node(
context._exp_id,
NodeData(
context._get_stage_id(),
self._start_time,
self._node.label,
self._is_key_node,
),
)
class _AsyncGraphExecutor(ExperimentExecutor):
def __init__(
self,
nodes_execution_info: Set[_NodeExecutionInfo],
nodes: Dict[Node, _NodeExecutor],
**kwargs,
) -> None:
super().__init__()
self._graph: GraphHelper = GraphHelper(nodes_execution_info)
self._node_kwargs = kwargs
self._tasks: Dict[Node, Any] = dict()
self._results: Dict = dict()
self._stopped = False
self._node_id_iter = count(start=0, step=1)
self._executors: Dict[Node, _NodeExecutor] = nodes
def execute(self, context: _EntropyContextFactory) -> Any:
async_result = asyncio.run(self.execute_async(context))
return async_result
@property
def failed(self) -> bool:
return self._stopped
async def execute_async(self, context_factory: _EntropyContextFactory):
# traverse the graph and run the nodes
end_nodes = self._graph.leaves
chains = []
for node in end_nodes:
chains.append(self._run_node_and_ancestors(node, context_factory, 0))
result = await asyncio.gather(*chains)
result = [x for x in result if x is not None]
combined_result = {}
if result:
combined_result = {k: v for d in result for k, v in d.items()}
return combined_result
async def _run_node_and_ancestors(
self, node: Node, context_factory: _EntropyContextFactory, is_last: int
):
tasks = []
for input_name in node.get_parents():
if input_name not in self._tasks:
task = self._run_node_and_ancestors(
input_name, context_factory, is_last + 1
)
tasks.append(task)
self._tasks[input_name] = task
else:
tasks.append(self._tasks[input_name])
results = []
if len(tasks) > 0:
await asyncio.wait(tasks)
if self._stopped:
return None
results = {}
inputs_by_name = node.get_inputs_by_name()
for input_name in inputs_by_name:
parent_node = inputs_by_name[input_name].node
parent_output_name = inputs_by_name[input_name].name
if (
parent_node not in self._executors
or parent_output_name not in self._executors[parent_node].result
):
raise EntropyError(
f"node {node.label} input is missing: {parent_output_name}"
)
results[input_name] = self._executors[parent_node].result[
parent_output_name
]
node_executor = self._executors[node]
try:
return await node_executor.run_async(
results,
context_factory,
node in self._graph.leaves,
**self._node_kwargs,
)
except BaseException as e:
self._stopped = True
trace = "\n".join(traceback.format_exception(*sys.exc_info()))
logger.error(
f"Stopping GraphHelper, Error in node {node.label} "
f"of type {e.__class__.__qualname__}. message: {e}\ntrace:\n{trace}"
)
return
class _GraphExecutor(ExperimentExecutor):
def __init__(
self,
nodes_execution_info: Set[_NodeExecutionInfo],
nodes: Dict[Node, _NodeExecutor],
**kwargs,
) -> None:
super().__init__()
self._graph: GraphHelper = GraphHelper(nodes_execution_info)
self._node_kwargs = kwargs
self._tasks: Dict[Node, Any] = dict()
self._results: Dict = dict()
self._stopped = False
self._executors: Dict[Node, _NodeExecutor] = nodes
def execute(self, context_factory: _EntropyContextFactory) -> Any:
sorted_nodes = self._graph.nodes_in_topological_order()
for node in sorted_nodes:
results = {}
inputs_by_name = node.get_inputs_by_name()
for input_name in inputs_by_name:
parent_node = inputs_by_name[input_name].node
parent_output_name = inputs_by_name[input_name].name
if (
parent_node not in self._executors
or parent_output_name not in self._executors[parent_node].result
):
raise EntropyError(
f"node {node.label} input is missing: {parent_output_name}"
)
results[input_name] = self._executors[parent_node].result[
parent_output_name
]
node_executor = self._executors[node]
try:
node_executor.run(
results,
context_factory,
node in self._graph.leaves,
**self._node_kwargs,
)
except BaseException as e:
self._stopped = True
trace = "\n".join(traceback.format_exception(*sys.exc_info()))
logger.error(
f"Stopping GraphHelper, Error in node {node.label} of type "
f"{e.__class__.__qualname__}. message: {e}\ntrace:\n{trace}"
)
return
combined_result = {}
for node in self._graph.leaves:
result = self._executors[node].result
if result:
for key in result:
combined_result[key] = result[key]
return combined_result
@property
def failed(self) -> bool:
return self._stopped
class GraphReader(ExperimentReader):
"""
Reads results and data from a single graph experiment
"""
def __init__(self, experiment_id: int, db: DataReader) -> None:
"""
Reads results and data from a single graph experiment
:param experiment_id: the id of experiment
:param db: results database that implemented the DataReader abstract class
"""
super().__init__(experiment_id, db)
def get_results_from_node(
self, node_label: str, result_label: Optional[str] = None
) -> Iterable[NodeResults]:
"""
returns an iterable of all results from a node with the given label
and result with the given result_label
:param node_label: label of node to get results from
:param result_label: label of result records
"""
return self._data_reader.get_results_from_node(
node_label, self._experiment_id, result_label
)
class GraphExperimentHandle(ExperimentHandle):
"""
    A handle of the graph experiment execution;
    can be used to get information and read results
"""
def __init__(self, experiment: _Experiment, graph: GraphHelper) -> None:
super().__init__()
self._experiment = experiment
self._graph = graph
@property
def id(self):
return self._experiment.exp_id
@property
def results(self) -> GraphReader:
"""
        returns a reader for reading results from a graph experiment
"""
return GraphReader(self.id, self._experiment.data_reader())
def dot_graph(self):
return self._graph.export_dot_graph()
class GraphExecutionType(enum.Enum):
Sync = 1
Async = 2
class Graph(ExperimentDefinition):
"""
Experiment defined by a graph model and runs within entropy.
Information, results and metadata will be saved during every execution.
Nodes within the graph will be executed in a topological order.
    Every node is declared with a set of inputs and outputs that
    are transferred from a node to its children.
"""
def __init__(
self,
resources: Optional[ExperimentResources],
graph: Union[Node, Set[Node], GraphHelper],
label: Optional[str] = None,
story: str = None,
key_nodes: Optional[Set[Node]] = None,
execution_type: GraphExecutionType = GraphExecutionType.Sync,
) -> None:
"""
Experiment defined by a graph model and runs within entropy.
:param resources: shared lab resources or temporary resources that
are used in the experiment.
:param graph: the experiment model, can be a graph or a single node.
:param label: experiment label
:param story: a description of the experiment, which will create an experiment story
with all other parts of the experiment
        :param key_nodes: a set of graph key nodes. These nodes will be marked as the graph result.
        :param execution_type: specify whether to run the graph synchronously, a single node
            at a time, or asynchronously, which runs nodes in parallel
            according to their dependencies and implementation (using asyncio)
"""
super().__init__(resources, label, story)
self._key_nodes = key_nodes
if self._key_nodes is None:
self._key_nodes = set()
if isinstance(graph, GraphHelper):
self._original_nodes: Set[Node] = graph.nodes
elif isinstance(graph, Node):
self._original_nodes: Set[Node] = {graph}
elif isinstance(graph, Set):
self._original_nodes: Set[Node] = graph
else:
raise Exception(
"graph parameter type is not supported, please pass a Node or set of nodes"
)
all_ancestors = set(
[anc for node in self._original_nodes for anc in node.ancestors()]
)
if all_ancestors != self._original_nodes:
raise Exception(
f"nodes has inputs that are not part of this graph: "
f"{[node.label for node in all_ancestors.difference(self._original_nodes)]}"
)
self._actual_graph: Set[_NodeExecutionInfo] = _create_actual_graph(
self._original_nodes, self._key_nodes
)
self._to_node: Optional[Node] = None
self._execution_type: GraphExecutionType = execution_type
def _get_execution_instructions(self) -> ExperimentExecutor:
executors = {node.node: _NodeExecutor(node) for node in self._actual_graph}
if self._execution_type == GraphExecutionType.Sync:
return _GraphExecutor(self._actual_graph, executors, **self._kwargs)
elif self._execution_type == GraphExecutionType.Async:
return _AsyncGraphExecutor(self._actual_graph, executors, **self._kwargs)
else:
raise Exception(f"Execution type {self._execution_type} is not supported")
def serialize(self) -> str:
"""
dot graph representing the experiment
"""
return str(GraphHelper(self._actual_graph).export_dot_graph())
def run(self, db: Optional[DataWriter] = None, **kwargs) -> GraphExperimentHandle:
experiment = self._run(db, **kwargs)
return GraphExperimentHandle(experiment, GraphHelper(self._actual_graph))
def run_to_node(
self,
node: Node,
db: Optional[DataWriter] = None,
label: Optional[str] = None,
**kwargs,
) -> GraphExperimentHandle:
"""
Run the experiment in Entropy environment, only with the given node and
its ancestors.
Every call to this function creates a new run and returns a different handle.
:param node: the node object you want to run to.
:param db: results db. if given, results will be saved in this DB. otherwise
results will only be saved during this python session
:param label: label for the current execution
        :param kwargs: keyword arguments that will be passed to the experiment code as well.
            The user can specify extra arguments here and request them in the
            function declarations.
        :return: a handle of the new graph experiment run
"""
if node not in self._original_nodes:
raise KeyError("Node is not in graph")
logger.info(f"Running node {node.label} and dependencies")
nodes = self._calculate_ancestors(node)
full_graph = self._actual_graph
self._actual_graph = _create_actual_graph(nodes, self._key_nodes)
old_label = self.label
if label:
self.label = label
try:
return self.run(db, **kwargs)
finally:
self.label = old_label
self._actual_graph = full_graph
def _calculate_ancestors(self, node):
ancestors: Set = set()
for parent in node.ancestors():
if parent in self._original_nodes:
ancestors.add(parent)
return ancestors
def dot_graph(self) -> Digraph:
return GraphHelper(self._actual_graph).export_dot_graph()
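# --- Usage sketch (added for illustration only; not part of the original module). ---
# It assumes a PyNode wrapper whose keyword arguments match the docstrings above
# (label, program, input_vars, output_vars); the real constructor signature may differ.
if __name__ == "__main__":

    def make_a():
        return {"x": 1}

    def add_one(x):
        return {"y": x + 1}

    a = PyNode(label="a", program=make_a, output_vars={"x"})
    b = PyNode(label="b", program=add_one, input_vars={"x": a.outputs["x"]}, output_vars={"y"})
    demo = Graph(resources=None, graph={a, b}, label="demo", key_nodes={b})
    handle = demo.run()        # db=None: results are kept only for this python session
    print(handle.dot_graph())  # dot representation of the executed graph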
|
en
| 0.782901
|
decorator for running using the given python function as a PyNode
:param label: node label
:param input_vars: dictionary of node inputs, keys are input names.
the input values should be defined as the following:
>>>input_vars={"a": node.outputs["x"]}
:param output_vars: a set of output names. The given python function should
return a dictionary whose keys are the same as the output vars
:param must_run_after: A set of nodes. If those nodes are in the same graph,
current node will run after they finish execution.
:return: node instance Node that gets a python function or coroutine and wraps it with an entropy graph node.
Node is defined by a python function, input and outputs.
Entropy will call the function, filling the function parameters:
1. If function parameter has the same name as node input,
Entropy will pass the input value
2. If function parameter is of type EntropyContext, Entropy will pass current context
3. If function parameter has the same name as given experiment run kwargs,
Entropy will pass the kwarg value
4. if function parameter is of type *args,
Entropy will pass all inputs that were not specified by name
5. If function parameter name is "is_last",
Entropy will pass True if this is the last node in the graph Node that gets a python function or coroutine and wraps
it with an entropy graph node.
Node is defined by a python function, input and outputs.
Entropy will call the function, filling the function parameters:
1. If function parameter has the same name as node input,
Entropy will pass the input value
2. If function parameter is of type EntropyContext,
Entropy will pass current context
3. If function parameter has the same name as given
experiment run kwargs, Entropy will pass the kwarg value
4. if function parameter is of type *args,
Entropy will pass all inputs that were not specified by name
5. If function parameter name is "is_last",
Entropy will pass True if this is the last node in the graph
:param label: node label
:param program: the node program in a python function or coroutine
:param input_vars: dictionary of node inputs, keys are input names.
the input values should be defined as the following:
>>>input_vars={"a": node.outputs["x"]}
:param output_vars: a set of output names. The given python function should
return a dictionary whose keys are the same as the output vars
:param must_run_after: A set of nodes. If those nodes are in the same graph,
current node will run after they finish execution. Node that holds a complete graph and runs as a single node within
another graph. :param graph: the graph model
:param label: node label
:param input_vars: dictionary of node inputs, keys are input names.
the input values should be defined as the following:
>>>input_vars={"a": node.outputs["x"]}
:param output_vars: a set of output names. The given python function should
return a dictionary whose keys are the same as the output vars
:param must_run_after: A set of nodes. If those nodes are in the same graph,
current node will run after they finish execution. # logger fetching results # traverse the graph and run the nodes Reads results and data from a single graph experiment Reads results and data from a single graph experiment
:param experiment_id: the id of experiment
:param db: results database that implemented the DataReader abstract class returns an iterable of all results from a node with the given label
and result with the given result_label
:param node_label: label of node to get results from
:param result_label: label of result records. A handle of the graph experiment execution
can be used to get information and read results returns a reader for reading results from a graph experiments Experiment defined by a graph model and runs within entropy.
Information, results and metadata will be saved during every execution.
Nodes within the graph will be executed in a topological order.
Every node is declared with a set of inputs and outputs that
are transferred from a node to its children. Experiment defined by a graph model and runs within entropy.
:param resources: shared lab resources or temporary resources that
are used in the experiment.
:param graph: the experiment model, can be a graph or a single node.
:param label: experiment label
:param story: a description of the experiment, which will create an experiment story
with all other parts of the experiment
:param key_nodes: a set of graph key nodes. those nodes will be marked as graph result.
:param execution_type: specify whether to run the graph in sync mode, a single node
at a time, or asynchronously - which will run nodes in parallel
according to their dependency and implementation (using async.io) dot graph representing the experiment Run the experiment in Entropy environment, only with the given node and
its ancestors.
Every call to this function creates a new run and returns a different handle.
:param node: the node object you want to run to.
:param db: results db. if given, results will be saved in this DB. otherwise
results will only be saved during this python session
:param label: label for the current execution
:param kwargs: key word arguments that will be passed to the experiment code as well.
user can specify here extra arguments, and request them in the
functions declarations.
:return: a handle of the new graph experiment run
| 2.635437
| 3
|
livecoin/spiders/crypto.py
|
Mario-D93/livecoin
| 0
|
6627105
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy_splash import SplashRequest
from datetime import date
class CryptoSpider(scrapy.Spider):
name = 'crypto'
    allowed_domains = ['livecoin.net']
script = '''
function main(splash, args)
splash.private_mode_enabled = False
assert(splash:go(args.url))
splash:wait(1)
btn = splash:select_all(".filterPanelItem___2z5Gb")
btn[3]:mouse_click()
splash:wait(1)
btn_more = splash:select("div.showMoreContainer___2HlS0 button")
btn_more:mouse_click()
assert(splash:wait(2))
splash:set_viewport_full()
splash:wait(1)
return splash:html()
end
'''
def start_requests(self):
yield SplashRequest(url="https://www.livecoin.net/en", callback=self.parse, endpoint="execute",args={
'lua_source':self.script
})
def parse(self, response):
dt = date.today()
yield{
'Date: ':dt
}
for currency in response.xpath("//div[contains(@class, 'ReactVirtualized__Table__row tableRow___3EtiS ')]"):
yield{
'coin_pair':currency.xpath(".//div[1]/div/text()").get(),
'volume_24h':currency.xpath(".//div[2]/span/text()").get(),
'last_price':currency.xpath(".//div[3]/span/text()").get(),
'change_24h':currency.xpath(".//div[4]/span/span/text()").get(),
}
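# Note (illustration only): this spider relies on scrapy-splash being wired into the
# project settings; a typical settings.py fragment looks roughly like the following
# (the Splash URL is an assumption about the local setup):
#
#   SPLASH_URL = 'http://localhost:8050'
#   DOWNLOADER_MIDDLEWARES = {
#       'scrapy_splash.SplashCookiesMiddleware': 723,
#       'scrapy_splash.SplashMiddleware': 725,
#       'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
#   }
#   SPIDER_MIDDLEWARES = {'scrapy_splash.SplashDeduplicateArgsMiddleware': 100}
#   DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
#
# The spider can then be run with: scrapy crawl crypto -o prices.json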
|
# -*- coding: utf-8 -*-
import scrapy
from scrapy_splash import SplashRequest
from datetime import date
class CryptoSpider(scrapy.Spider):
name = 'crypto'
    allowed_domains = ['livecoin.net']
script = '''
function main(splash, args)
splash.private_mode_enabled = False
assert(splash:go(args.url))
splash:wait(1)
btn = splash:select_all(".filterPanelItem___2z5Gb")
btn[3]:mouse_click()
splash:wait(1)
btn_more = splash:select("div.showMoreContainer___2HlS0 button")
btn_more:mouse_click()
assert(splash:wait(2))
splash:set_viewport_full()
splash:wait(1)
return splash:html()
end
'''
def start_requests(self):
yield SplashRequest(url="https://www.livecoin.net/en", callback=self.parse, endpoint="execute",args={
'lua_source':self.script
})
def parse(self, response):
dt = date.today()
yield{
'Date: ':dt
}
for currency in response.xpath("//div[contains(@class, 'ReactVirtualized__Table__row tableRow___3EtiS ')]"):
yield{
'coin_pair':currency.xpath(".//div[1]/div/text()").get(),
'volume_24h':currency.xpath(".//div[2]/span/text()").get(),
'last_price':currency.xpath(".//div[3]/span/text()").get(),
'change_24h':currency.xpath(".//div[4]/span/span/text()").get(),
}
|
en
| 0.333476
|
# -*- coding: utf-8 -*- function main(splash, args) splash.private_mode_enabled = False assert(splash:go(args.url)) splash:wait(1) btn = splash:select_all(".filterPanelItem___2z5Gb") btn[3]:mouse_click() splash:wait(1) btn_more = splash:select("div.showMoreContainer___2HlS0 button") btn_more:mouse_click() assert(splash:wait(2)) splash:set_viewport_full() splash:wait(1) return splash:html() end
| 2.706578
| 3
|
src/Random.py
|
eatPorkAndSeePigRun/ReliableTransport
| 0
|
6627106
|
# coding: utf-8
import random
class Random(object):
""" 这个类的代码不能修改 """
""" 一个均匀的随机数生成器 """
def __init__(self, size=100):
self.size = 0
self.seeds = []
for i in range(size):
self.seeds.append(0)
def random(self):
if len(self.seeds) == 0:
return 0
if self.size == 0:
for i in range(len(self.seeds)):
self.seeds[i] = i
self.size = len(self.seeds)
i = random.randint(0, 0xffffffff) % self.size
x = self.seeds[i]
self.size -= 1
self.seeds[i] = self.seeds[self.size]
return x
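# Usage sketch (illustration only): the generator draws each value in [0, size)
# exactly once, in random order, and refills the pool after it is exhausted.
if __name__ == "__main__":
    r = Random(size=100)
    first_round = [r.random() for _ in range(100)]
    assert sorted(first_round) == list(range(100))  # every value appears exactly once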
|
# coding: utf-8
import random
class Random(object):
""" 这个类的代码不能修改 """
""" 一个均匀的随机数生成器 """
def __init__(self, size=100):
self.size = 0
self.seeds = []
for i in range(size):
self.seeds.append(0)
def random(self):
if len(self.seeds) == 0:
return 0
if self.size == 0:
for i in range(len(self.seeds)):
self.seeds[i] = i
self.size = len(self.seeds)
i = random.randint(0, 0xffffffff) % self.size
x = self.seeds[i]
self.size -= 1
self.seeds[i] = self.seeds[self.size]
return x
|
zh
| 0.992744
|
# coding: utf-8 The code in this class must not be modified. A uniform random number generator.
| 3.408604
| 3
|
app/ingest/domain/services/process_service.py
|
harvard-lts/import-management-service
| 0
|
6627107
|
<filename>app/ingest/domain/services/process_service.py
"""
This module defines a ProcessService, which is a domain service that defines Process operations.
"""
from logging import Logger
from app.ingest.domain.services.exceptions.ingest_service_exception import IngestServiceException
from app.ingest.domain.services.exceptions.message_body_field_exception import MessageBodyFieldException
from app.ingest.domain.services.exceptions.process_status_message_handling_exception import \
ProcessStatusMessageHandlingException
from app.ingest.domain.services.ingest_service import IngestService
from app.ingest.domain.services.message_body_transformer import MessageBodyTransformer
class ProcessService:
def __init__(
self,
ingest_service: IngestService,
logger: Logger
) -> None:
self.__ingest_service = ingest_service
self.__logger = logger
def handle_process_status_message(self, message_body: dict, message_id: str) -> None:
"""
Handles a Process Status message.
:param message_body: message body
:type message_body: dict
:param message_id: message id
:type message_id: str
        :raises ProcessStatusMessageHandlingException
"""
message_body_transformer = MessageBodyTransformer()
try:
package_id = message_body_transformer.get_message_body_field_value(
'package_id',
message_body,
message_id
)
process_status = message_body_transformer.get_message_body_field_value(
'batch_ingest_status',
message_body,
message_id
)
self.__logger.info("Obtaining ingest by the package id of the received message " + package_id + "...")
ingest = self.__ingest_service.get_ingest_by_package_id(package_id)
if process_status == "failure":
self.__logger.info("Setting ingest as processed failed...")
self.__ingest_service.set_ingest_as_processed_failed(ingest)
return
self.__logger.info("Setting ingest as processed...")
drs_url = message_body_transformer.get_message_body_field_value('drs_url', message_body, message_id)
self.__ingest_service.set_ingest_as_processed(ingest, drs_url)
except (MessageBodyFieldException, IngestServiceException) as e:
raise ProcessStatusMessageHandlingException(message_id, str(e))
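# Wiring sketch (illustration only): the IngestService below is a stand-in mock;
# in the real application the dependency is injected by the service container.
# The message fields ("package_id", "batch_ingest_status", "drs_url") are the ones
# this handler reads; the concrete values are hypothetical.
#
#   from logging import getLogger
#   from unittest.mock import Mock
#
#   service = ProcessService(ingest_service=Mock(spec=IngestService), logger=getLogger(__name__))
#   service.handle_process_status_message(
#       message_body={
#           "package_id": "pkg-123",
#           "batch_ingest_status": "success",
#           "drs_url": "https://example.org/drs/object",
#       },
#       message_id="msg-1",
#   )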
|
<filename>app/ingest/domain/services/process_service.py
"""
This module defines a ProcessService, which is a domain service that defines Process operations.
"""
from logging import Logger
from app.ingest.domain.services.exceptions.ingest_service_exception import IngestServiceException
from app.ingest.domain.services.exceptions.message_body_field_exception import MessageBodyFieldException
from app.ingest.domain.services.exceptions.process_status_message_handling_exception import \
ProcessStatusMessageHandlingException
from app.ingest.domain.services.ingest_service import IngestService
from app.ingest.domain.services.message_body_transformer import MessageBodyTransformer
class ProcessService:
def __init__(
self,
ingest_service: IngestService,
logger: Logger
) -> None:
self.__ingest_service = ingest_service
self.__logger = logger
def handle_process_status_message(self, message_body: dict, message_id: str) -> None:
"""
Handles a Process Status message.
:param message_body: message body
:type message_body: dict
:param message_id: message id
:type message_id: str
        :raises ProcessStatusMessageHandlingException
"""
message_body_transformer = MessageBodyTransformer()
try:
package_id = message_body_transformer.get_message_body_field_value(
'package_id',
message_body,
message_id
)
process_status = message_body_transformer.get_message_body_field_value(
'batch_ingest_status',
message_body,
message_id
)
self.__logger.info("Obtaining ingest by the package id of the received message " + package_id + "...")
ingest = self.__ingest_service.get_ingest_by_package_id(package_id)
if process_status == "failure":
self.__logger.info("Setting ingest as processed failed...")
self.__ingest_service.set_ingest_as_processed_failed(ingest)
return
self.__logger.info("Setting ingest as processed...")
drs_url = message_body_transformer.get_message_body_field_value('drs_url', message_body, message_id)
self.__ingest_service.set_ingest_as_processed(ingest, drs_url)
except (MessageBodyFieldException, IngestServiceException) as e:
raise ProcessStatusMessageHandlingException(message_id, str(e))
|
en
| 0.445842
|
This module defines a ProcessService, which is a domain service that defines Process operations. Handles a Process Status message. :param message_body: message body :type message_body: dict :param message_id: message id :type message_id: str :raises ProcessServiceException
| 2.385827
| 2
|
haystack/fields.py
|
pbs/django-haystack
| 2
|
6627108
|
<reponame>pbs/django-haystack<gh_stars>1-10
import re
from django.utils import datetime_safe
from django.template import loader, Context
from haystack.exceptions import SearchFieldError
class NOT_PROVIDED:
pass
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the SearchFields variants.
class SearchField(object):
"""The base implementation of a search field."""
field_type = None
def __init__(self, model_attr=None, use_template=False, template_name=None,
document=False, indexed=True, stored=True, faceted=False,
default=NOT_PROVIDED, null=False, index_fieldname=None,
facet_class=None, boost=1.0, weight=None):
# Track what the index thinks this field is called.
self.instance_name = None
self.model_attr = model_attr
self.use_template = use_template
self.template_name = template_name
self.document = document
self.indexed = indexed
self.stored = stored
self.faceted = faceted
self._default = default
self.null = null
self.index_fieldname = index_fieldname
self.boost = weight or boost
self.is_multivalued = False
# We supply the facet_class for making it easy to create a faceted
# field based off of this field.
self.facet_class = facet_class
if self.facet_class is None:
self.facet_class = FacetCharField
self.set_instance_name(None)
def set_instance_name(self, instance_name):
self.instance_name = instance_name
if self.index_fieldname is None:
self.index_fieldname = self.instance_name
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
def prepare(self, obj):
"""
Takes data from the provided object and prepares it for storage in the
index.
"""
# Give priority to a template.
if self.use_template:
return self.prepare_template(obj)
elif self.model_attr is not None:
# Check for `__` in the field for looking through the relation.
attrs = self.model_attr.split('__')
current_object = obj
for attr in attrs:
if not hasattr(current_object, attr):
raise SearchFieldError("The model '%s' does not have a model_attr '%s'." % (repr(obj), attr))
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
                        # Fall out of the loop, since any further attempts at
                        # access will fail miserably.
break
elif self.null:
current_object = None
                        # Fall out of the loop, since any further attempts at
                        # access will fail miserably.
break
else:
raise SearchFieldError("The model '%s' has an empty model_attr '%s' and doesn't allow a default or null value." % (repr(obj), attr))
if callable(current_object):
return current_object()
return current_object
if self.has_default():
return self.default
else:
return None
def prepare_template(self, obj):
"""
Flattens an object for indexing.
This loads a template
(``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and
returns the result of rendering that template. ``object`` will be in
its context.
"""
if self.instance_name is None and self.template_name is None:
raise SearchFieldError("This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.")
if self.template_name is not None:
template_names = self.template_name
if not isinstance(template_names, (list, tuple)):
template_names = [template_names]
else:
template_names = ['search/indexes/%s/%s_%s.txt' % (obj._meta.app_label, obj._meta.module_name, self.instance_name)]
t = loader.select_template(template_names)
return t.render(Context({'object': obj}))
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
class CharField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetCharField
super(CharField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(CharField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return unicode(value)
class LocationField(SearchField):
field_type = 'location'
def prepare(self, obj):
from haystack.utils.geo import ensure_point
value = super(LocationField, self).prepare(obj)
if value is None:
return None
pnt = ensure_point(value)
pnt_lng, pnt_lat = pnt.get_coords()
return "%s,%s" % (pnt_lat, pnt_lng)
def convert(self, value):
from haystack.utils.geo import ensure_point, Point
if value is None:
return None
if hasattr(value, 'geom_type'):
value = ensure_point(value)
return value
if isinstance(value, basestring):
lat, lng = value.split(',')
elif isinstance(value, (list, tuple)):
# GeoJSON-alike
lat, lng = value[1], value[0]
elif isinstance(value, dict):
lat = value.get('lat', 0)
lng = value.get('lon', 0)
value = Point(float(lng), float(lat))
return value
class NgramField(CharField):
field_type = 'ngram'
def __init__(self, **kwargs):
if kwargs.get('faceted') is True:
raise SearchFieldError("%s can not be faceted." % self.__class__.__name__)
super(NgramField, self).__init__(**kwargs)
class EdgeNgramField(NgramField):
field_type = 'edge_ngram'
class IntegerField(SearchField):
field_type = 'integer'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetIntegerField
super(IntegerField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(IntegerField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(SearchField):
field_type = 'float'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetFloatField
super(FloatField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(FloatField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDecimalField
super(DecimalField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(DecimalField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return unicode(value)
class BooleanField(SearchField):
field_type = 'boolean'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetBooleanField
super(BooleanField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(BooleanField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return bool(value)
class DateField(SearchField):
field_type = 'date'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateField
super(DateField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise SearchFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
class DateTimeField(SearchField):
field_type = 'datetime'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateTimeField
super(DateTimeField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second']))
else:
raise SearchFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
class MultiValueField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetMultiValueField
if kwargs.get('use_template') is True:
raise SearchFieldError("'%s' fields can not use templates to prepare their data." % self.__class__.__name__)
super(MultiValueField, self).__init__(**kwargs)
self.is_multivalued = True
def prepare(self, obj):
return self.convert(super(MultiValueField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return list(value)
class FacetField(SearchField):
"""
``FacetField`` is slightly different than the other fields because it can
work in conjunction with other fields as its data source.
Accepts an optional ``facet_for`` kwarg, which should be the field name
(not ``index_fieldname``) of the field it should pull data from.
"""
instance_name = None
def __init__(self, **kwargs):
handled_kwargs = self.handle_facet_parameters(kwargs)
super(FacetField, self).__init__(**handled_kwargs)
def handle_facet_parameters(self, kwargs):
if kwargs.get('faceted', False):
raise SearchFieldError("FacetField (%s) does not accept the 'faceted' argument." % self.instance_name)
if not kwargs.get('null', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'null' argument." % self.instance_name)
if not kwargs.get('indexed', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'indexed' argument." % self.instance_name)
if kwargs.get('facet_class'):
raise SearchFieldError("FacetField (%s) does not accept the 'facet_class' argument." % self.instance_name)
self.facet_for = None
self.facet_class = None
# Make sure the field is nullable.
kwargs['null'] = True
if 'facet_for' in kwargs:
self.facet_for = kwargs['facet_for']
del(kwargs['facet_for'])
return kwargs
def get_facet_for_name(self):
return self.facet_for or self.instance_name
class FacetCharField(FacetField, CharField):
pass
class FacetIntegerField(FacetField, IntegerField):
pass
class FacetFloatField(FacetField, FloatField):
pass
class FacetDecimalField(FacetField, DecimalField):
pass
class FacetBooleanField(FacetField, BooleanField):
pass
class FacetDateField(FacetField, DateField):
pass
class FacetDateTimeField(FacetField, DateTimeField):
pass
class FacetMultiValueField(FacetField, MultiValueField):
pass
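# Usage sketch (illustration only): these field classes are normally used from a
# SearchIndex definition. The Note model and the exact registration step depend on
# the Haystack version in use; this roughly follows the Haystack tutorial.
#
#   from haystack import indexes
#   from myapp.models import Note  # hypothetical model
#
#   class NoteIndex(indexes.SearchIndex, indexes.Indexable):
#       text = indexes.CharField(document=True, use_template=True)
#       author = indexes.CharField(model_attr='user__username', faceted=True)
#       pub_date = indexes.DateTimeField(model_attr='pub_date')
#
#       def get_model(self):
#           return Note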
|
import re
from django.utils import datetime_safe
from django.template import loader, Context
from haystack.exceptions import SearchFieldError
class NOT_PROVIDED:
pass
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the SearchFields variants.
class SearchField(object):
"""The base implementation of a search field."""
field_type = None
def __init__(self, model_attr=None, use_template=False, template_name=None,
document=False, indexed=True, stored=True, faceted=False,
default=NOT_PROVIDED, null=False, index_fieldname=None,
facet_class=None, boost=1.0, weight=None):
# Track what the index thinks this field is called.
self.instance_name = None
self.model_attr = model_attr
self.use_template = use_template
self.template_name = template_name
self.document = document
self.indexed = indexed
self.stored = stored
self.faceted = faceted
self._default = default
self.null = null
self.index_fieldname = index_fieldname
self.boost = weight or boost
self.is_multivalued = False
# We supply the facet_class for making it easy to create a faceted
# field based off of this field.
self.facet_class = facet_class
if self.facet_class is None:
self.facet_class = FacetCharField
self.set_instance_name(None)
def set_instance_name(self, instance_name):
self.instance_name = instance_name
if self.index_fieldname is None:
self.index_fieldname = self.instance_name
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
def prepare(self, obj):
"""
Takes data from the provided object and prepares it for storage in the
index.
"""
# Give priority to a template.
if self.use_template:
return self.prepare_template(obj)
elif self.model_attr is not None:
# Check for `__` in the field for looking through the relation.
attrs = self.model_attr.split('__')
current_object = obj
for attr in attrs:
if not hasattr(current_object, attr):
raise SearchFieldError("The model '%s' does not have a model_attr '%s'." % (repr(obj), attr))
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
                        # Fall out of the loop, since any further attempts at
                        # access will fail miserably.
break
elif self.null:
current_object = None
                        # Fall out of the loop, since any further attempts at
                        # access will fail miserably.
break
else:
raise SearchFieldError("The model '%s' has an empty model_attr '%s' and doesn't allow a default or null value." % (repr(obj), attr))
if callable(current_object):
return current_object()
return current_object
if self.has_default():
return self.default
else:
return None
def prepare_template(self, obj):
"""
Flattens an object for indexing.
This loads a template
(``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and
returns the result of rendering that template. ``object`` will be in
its context.
"""
if self.instance_name is None and self.template_name is None:
raise SearchFieldError("This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.")
if self.template_name is not None:
template_names = self.template_name
if not isinstance(template_names, (list, tuple)):
template_names = [template_names]
else:
template_names = ['search/indexes/%s/%s_%s.txt' % (obj._meta.app_label, obj._meta.module_name, self.instance_name)]
t = loader.select_template(template_names)
return t.render(Context({'object': obj}))
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
class CharField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetCharField
super(CharField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(CharField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return unicode(value)
class LocationField(SearchField):
field_type = 'location'
def prepare(self, obj):
from haystack.utils.geo import ensure_point
value = super(LocationField, self).prepare(obj)
if value is None:
return None
pnt = ensure_point(value)
pnt_lng, pnt_lat = pnt.get_coords()
return "%s,%s" % (pnt_lat, pnt_lng)
def convert(self, value):
from haystack.utils.geo import ensure_point, Point
if value is None:
return None
if hasattr(value, 'geom_type'):
value = ensure_point(value)
return value
if isinstance(value, basestring):
lat, lng = value.split(',')
elif isinstance(value, (list, tuple)):
# GeoJSON-alike
lat, lng = value[1], value[0]
elif isinstance(value, dict):
lat = value.get('lat', 0)
lng = value.get('lon', 0)
value = Point(float(lng), float(lat))
return value
class NgramField(CharField):
field_type = 'ngram'
def __init__(self, **kwargs):
if kwargs.get('faceted') is True:
raise SearchFieldError("%s can not be faceted." % self.__class__.__name__)
super(NgramField, self).__init__(**kwargs)
class EdgeNgramField(NgramField):
field_type = 'edge_ngram'
class IntegerField(SearchField):
field_type = 'integer'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetIntegerField
super(IntegerField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(IntegerField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(SearchField):
field_type = 'float'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetFloatField
super(FloatField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(FloatField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDecimalField
super(DecimalField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(DecimalField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return unicode(value)
class BooleanField(SearchField):
field_type = 'boolean'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetBooleanField
super(BooleanField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(BooleanField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return bool(value)
class DateField(SearchField):
field_type = 'date'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateField
super(DateField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise SearchFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
class DateTimeField(SearchField):
field_type = 'datetime'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateTimeField
super(DateTimeField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second']))
else:
raise SearchFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
class MultiValueField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetMultiValueField
if kwargs.get('use_template') is True:
raise SearchFieldError("'%s' fields can not use templates to prepare their data." % self.__class__.__name__)
super(MultiValueField, self).__init__(**kwargs)
self.is_multivalued = True
def prepare(self, obj):
return self.convert(super(MultiValueField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return list(value)
class FacetField(SearchField):
"""
``FacetField`` is slightly different than the other fields because it can
work in conjunction with other fields as its data source.
Accepts an optional ``facet_for`` kwarg, which should be the field name
(not ``index_fieldname``) of the field it should pull data from.
"""
instance_name = None
def __init__(self, **kwargs):
handled_kwargs = self.handle_facet_parameters(kwargs)
super(FacetField, self).__init__(**handled_kwargs)
def handle_facet_parameters(self, kwargs):
if kwargs.get('faceted', False):
raise SearchFieldError("FacetField (%s) does not accept the 'faceted' argument." % self.instance_name)
if not kwargs.get('null', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'null' argument." % self.instance_name)
if not kwargs.get('indexed', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'indexed' argument." % self.instance_name)
if kwargs.get('facet_class'):
raise SearchFieldError("FacetField (%s) does not accept the 'facet_class' argument." % self.instance_name)
self.facet_for = None
self.facet_class = None
# Make sure the field is nullable.
kwargs['null'] = True
if 'facet_for' in kwargs:
self.facet_for = kwargs['facet_for']
del(kwargs['facet_for'])
return kwargs
def get_facet_for_name(self):
return self.facet_for or self.instance_name
class FacetCharField(FacetField, CharField):
pass
class FacetIntegerField(FacetField, IntegerField):
pass
class FacetFloatField(FacetField, FloatField):
pass
class FacetDecimalField(FacetField, DecimalField):
pass
class FacetBooleanField(FacetField, BooleanField):
pass
class FacetDateField(FacetField, DateField):
pass
class FacetDateTimeField(FacetField, DateTimeField):
pass
class FacetMultiValueField(FacetField, MultiValueField):
pass
|
en
| 0.840489
|
# All the SearchFields variants. The base implementation of a search field. # Track what the index thinks this field is called. # We supply the facet_class for making it easy to create a faceted # field based off of this field. Returns a boolean of whether this field has a default value. Returns the default value for the field. Takes data from the provided object and prepares it for storage in the index. # Give priority to a template. # Check for `__` in the field for looking through the relation. # Fall out of the loop, given any further attempts at # accesses will fail misreably. # Fall out of the loop, given any further attempts at # accesses will fail misreably. Flattens an object for indexing. This loads a template (``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and returns the result of rendering that template. ``object`` will be in its context. Handles conversion between the data found and the type of the field. Extending classes should override this method and provide correct data coercion. # GeoJSON-alike ``FacetField`` is slightly different than the other fields because it can work in conjunction with other fields as its data source. Accepts an optional ``facet_for`` kwarg, which should be the field name (not ``index_fieldname``) of the field it should pull data from. # Make sure the field is nullable.
| 2.466636
| 2
|
pyshley/sys/backup.py
|
IndiBowstring/pyshley
| 0
|
6627109
|
import os
from datetime import datetime
# TODO: delete hint
# The current full container list is pyshley.lib.config.foundrySettings['dockerContainers'].keys()
def dockerStop(containers: list) -> None:
"""
Stops each listed container.
Parameters:
arg1 (list): List of container names.
"""
pass
def dockerStart(containers: list) -> None:
"""
Starts each listed container.
Parameters:
arg1 (list): List of container names.
"""
pass
def makeTarBall(filePaths: list, type: str) -> str:
""""
Creates a tarball including all the files listed in filePaths
Parameters:
arg1 (list): List of all the folders needing to be packed into a tarball
Returns:
str: Absolute path to the tarball
"""
year = datetime.now()
pass
def fullBackup() -> str:
"""
Performs a full backup of foundry data.
Deposits Player, World, Module, and Asset data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-full.tar.gz
Returns:
str: Absolute path to the tarball.
"""
# Run the individual backups
playerBackupLocation = playerBackup()
assetBackupLocation = assetBackup()
worldBackupLocation = worldBackup()
moduleBackupLocation = moduleBackup()
backupList = [playerBackupLocation, assetBackupLocation, worldBackupLocation, moduleBackupLocation]
# Run makeTarBall() and return the absolute path of where the tarball is
return makeTarBall(backupList, 'full')
def playerBackup() -> str:
"""
Performs a partial backup of player data.
Deposits Player data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-player.tar.gz
Returns:
str: Absolute path to the tarball.
"""
pass
def assetBackup() -> str:
"""
Performs a partial backup of asset data.
Deposits Asset data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-asset.tar.gz
Returns:
str: Absolute path to the tarball.
"""
pass
def worldBackup() -> str:
"""
Performs a partial backup of world data.
Deposits Asset data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-world.tar.gz
Returns:
str: Absolute path to the tarball.
"""
pass
def moduleBackup() -> str:
"""
Performs a partial backup of module data.
Deposits Asset data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-module.tar.gz
Returns:
str: Absolute path to the tarball.
"""
pass
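# Sketch of a possible makeTarBall() implementation (illustration only; the backup
# directory constant is an assumption, the real location should come from
# pyshley's configuration):
#
#   import tarfile
#
#   BACKUP_DIR = "/opt/pyshley/backups"  # assumed default backup directory
#
#   def makeTarBall(filePaths: list, type: str) -> str:
#       stamp = datetime.now().strftime("%y%m%d")  # YYMMDD
#       tarPath = os.path.join(BACKUP_DIR, f"{stamp}-{type}.tar.gz")
#       with tarfile.open(tarPath, "w:gz") as tar:
#           for path in filePaths:
#               tar.add(path, arcname=os.path.basename(path))
#       return tarPath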
|
import os
from datetime import datetime
# TODO: delete hint
# The current full container list is pyshley.lib.config.foundrySettings['dockerContainers'].keys()
def dockerStop(containers: list) -> None:
"""
Stops each listed container.
Parameters:
arg1 (list): List of container names.
"""
pass
def dockerStart(containers: list) -> None:
"""
Starts each listed container.
Parameters:
arg1 (list): List of container names.
"""
pass
def makeTarBall(filePaths: list, type: str) -> str:
""""
Creates a tarball including all the files listed in filePaths
Parameters:
arg1 (list): List of all the folders needing to be packed into a tarball
Returns:
str: Absolute path to the tarball
"""
year = datetime.now()
pass
def fullBackup() -> str:
"""
Performs a full backup of foundry data.
Deposits Player, World, Module, and Asset data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-full.tar.gz
Returns:
str: Absolute path to the tarball.
"""
# Run the individual backups
playerBackupLocation = playerBackup()
assetBackupLocation = assetBackup()
worldBackupLocation = worldBackup()
moduleBackupLocation = moduleBackup()
backupList = [playerBackupLocation, assetBackupLocation, worldBackupLocation, moduleBackupLocation]
# Run makeTarBall() and return the absolute path of where the tarball is
return makeTarBall(backupList, 'full')
def playerBackup() -> str:
"""
Performs a partial backup of player data.
Deposits Player data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-player.tar.gz
Returns:
str: Absolute path to the tarball.
"""
pass
def assetBackup() -> str:
"""
Performs a partial backup of asset data.
Deposits Asset data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-asset.tar.gz
Returns:
str: Absolute path to the tarball.
"""
pass
def worldBackup() -> str:
"""
Performs a partial backup of world data.
Deposits Asset data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-world.tar.gz
Returns:
str: Absolute path to the tarball.
"""
pass
def moduleBackup() -> str:
"""
Performs a partial backup of module data.
Deposits Asset data into a .tar.gz within the default backup directory.
Naming scheme follows: YYMMDD-module.tar.gz
Returns:
str: Absolute path to the tarball.
"""
pass
|
en
| 0.65728
|
# TODO: delete hint # The current full container list is pyshley.lib.config.foundrySettings['dockerContainers'].keys() Stops each listed container. Parameters: arg1 (list): List of container names. Starts each listed container. Parameters: arg1 (list): List of container names. " Creates a tarball including all the files listed in filePaths Parameters: arg1 (list): List of all the folders needing to be packed into a tarball Returns: str: Absolute path to the tarball Performs a full backup of foundry data. Deposits Player, World, Module, and Asset data into a .tar.gz within the default backup directory. Naming scheme follows: YYMMDD-full.tar.gz Returns: str: Absolute path to the tarball. # Run the individual backups # Run makeTarBall() and return the absolute path of where the tarball is Performs a partial backup of player data. Deposits Player data into a .tar.gz within the default backup directory. Naming scheme follows: YYMMDD-player.tar.gz Returns: str: Absolute path to the tarball. Performs a partial backup of asset data. Deposits Asset data into a .tar.gz within the default backup directory. Naming scheme follows: YYMMDD-asset.tar.gz Returns: str: Absolute path to the tarball. Performs a partial backup of world data. Deposits Asset data into a .tar.gz within the default backup directory. Naming scheme follows: YYMMDD-world.tar.gz Returns: str: Absolute path to the tarball. Performs a partial backup of module data. Deposits Asset data into a .tar.gz within the default backup directory. Naming scheme follows: YYMMDD-module.tar.gz Returns: str: Absolute path to the tarball.
| 2.476569
| 2
|
src/oci/apm_config/models/create_metric_group_details.py
|
LaudateCorpus1/oci-python-sdk
| 0
|
6627110
|
<reponame>LaudateCorpus1/oci-python-sdk
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .create_config_details import CreateConfigDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateMetricGroupDetails(CreateConfigDetails):
"""
A metric group defines a set of metrics to collect from a span. It uses a span filter to specify which spans to
process. The set is then published to a namespace, which is a product level subdivision of metrics.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateMetricGroupDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.apm_config.models.CreateMetricGroupDetails.config_type` attribute
of this class is ``METRIC_GROUP`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param config_type:
The value to assign to the config_type property of this CreateMetricGroupDetails.
Allowed values for this property are: "SPAN_FILTER", "METRIC_GROUP", "APDEX"
:type config_type: str
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateMetricGroupDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this CreateMetricGroupDetails.
:type defined_tags: dict(str, dict(str, object))
:param display_name:
The value to assign to the display_name property of this CreateMetricGroupDetails.
:type display_name: str
:param filter_id:
The value to assign to the filter_id property of this CreateMetricGroupDetails.
:type filter_id: str
:param namespace:
The value to assign to the namespace property of this CreateMetricGroupDetails.
:type namespace: str
:param dimensions:
The value to assign to the dimensions property of this CreateMetricGroupDetails.
:type dimensions: list[oci.apm_config.models.Dimension]
:param metrics:
The value to assign to the metrics property of this CreateMetricGroupDetails.
:type metrics: list[oci.apm_config.models.Metric]
"""
self.swagger_types = {
'config_type': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'display_name': 'str',
'filter_id': 'str',
'namespace': 'str',
'dimensions': 'list[Dimension]',
'metrics': 'list[Metric]'
}
self.attribute_map = {
'config_type': 'configType',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'display_name': 'displayName',
'filter_id': 'filterId',
'namespace': 'namespace',
'dimensions': 'dimensions',
'metrics': 'metrics'
}
self._config_type = None
self._freeform_tags = None
self._defined_tags = None
self._display_name = None
self._filter_id = None
self._namespace = None
self._dimensions = None
self._metrics = None
self._config_type = 'METRIC_GROUP'
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this CreateMetricGroupDetails.
The name of the metric group.
:return: The display_name of this CreateMetricGroupDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateMetricGroupDetails.
The name of the metric group.
:param display_name: The display_name of this CreateMetricGroupDetails.
:type: str
"""
self._display_name = display_name
@property
def filter_id(self):
"""
**[Required]** Gets the filter_id of this CreateMetricGroupDetails.
The `OCID`__ of a Span Filter. The filterId is mandatory for the creation
of MetricGroups. A filterId is generated when a Span Filter is created.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The filter_id of this CreateMetricGroupDetails.
:rtype: str
"""
return self._filter_id
@filter_id.setter
def filter_id(self, filter_id):
"""
Sets the filter_id of this CreateMetricGroupDetails.
The `OCID`__ of a Span Filter. The filterId is mandatory for the creation
of MetricGroups. A filterId is generated when a Span Filter is created.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param filter_id: The filter_id of this CreateMetricGroupDetails.
:type: str
"""
self._filter_id = filter_id
@property
def namespace(self):
"""
Gets the namespace of this CreateMetricGroupDetails.
The namespace to which the metrics are published. It must be one of several predefined namespaces.
:return: The namespace of this CreateMetricGroupDetails.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""
Sets the namespace of this CreateMetricGroupDetails.
The namespace to which the metrics are published. It must be one of several predefined namespaces.
:param namespace: The namespace of this CreateMetricGroupDetails.
:type: str
"""
self._namespace = namespace
@property
def dimensions(self):
"""
Gets the dimensions of this CreateMetricGroupDetails.
A list of dimensions for the metric. This variable should not be used.
:return: The dimensions of this CreateMetricGroupDetails.
:rtype: list[oci.apm_config.models.Dimension]
"""
return self._dimensions
@dimensions.setter
def dimensions(self, dimensions):
"""
Sets the dimensions of this CreateMetricGroupDetails.
A list of dimensions for the metric. This variable should not be used.
:param dimensions: The dimensions of this CreateMetricGroupDetails.
:type: list[oci.apm_config.models.Dimension]
"""
self._dimensions = dimensions
@property
def metrics(self):
"""
**[Required]** Gets the metrics of this CreateMetricGroupDetails.
The list of metrics in this group.
:return: The metrics of this CreateMetricGroupDetails.
:rtype: list[oci.apm_config.models.Metric]
"""
return self._metrics
@metrics.setter
def metrics(self, metrics):
"""
Sets the metrics of this CreateMetricGroupDetails.
The list of metrics in this group.
:param metrics: The metrics of this CreateMetricGroupDetails.
:type: list[oci.apm_config.models.Metric]
"""
self._metrics = metrics
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
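# Hedged usage sketch, not part of the generated SDK file: how a caller might
# populate this model before sending it to the APM config client. The filter
# OCID, namespace, and metric name below are placeholders.
if __name__ == "__main__":
    from oci.apm_config.models import Metric

    example = CreateMetricGroupDetails(
        display_name="checkout-latency",
        filter_id="ocid1.apmspanfilter.oc1..exampleuniqueid",
        namespace="oracle_apm_monitoring",
        metrics=[Metric(name="CheckoutTime")],
    )
    print(example)  # formatted_flat_dict representation of the populated model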
|
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .create_config_details import CreateConfigDetails
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class CreateMetricGroupDetails(CreateConfigDetails):
"""
A metric group defines a set of metrics to collect from a span. It uses a span filter to specify which spans to
process. The set is then published to a namespace, which is a product level subdivision of metrics.
"""
def __init__(self, **kwargs):
"""
Initializes a new CreateMetricGroupDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.apm_config.models.CreateMetricGroupDetails.config_type` attribute
of this class is ``METRIC_GROUP`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param config_type:
The value to assign to the config_type property of this CreateMetricGroupDetails.
Allowed values for this property are: "SPAN_FILTER", "METRIC_GROUP", "APDEX"
:type config_type: str
:param freeform_tags:
The value to assign to the freeform_tags property of this CreateMetricGroupDetails.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this CreateMetricGroupDetails.
:type defined_tags: dict(str, dict(str, object))
:param display_name:
The value to assign to the display_name property of this CreateMetricGroupDetails.
:type display_name: str
:param filter_id:
The value to assign to the filter_id property of this CreateMetricGroupDetails.
:type filter_id: str
:param namespace:
The value to assign to the namespace property of this CreateMetricGroupDetails.
:type namespace: str
:param dimensions:
The value to assign to the dimensions property of this CreateMetricGroupDetails.
:type dimensions: list[oci.apm_config.models.Dimension]
:param metrics:
The value to assign to the metrics property of this CreateMetricGroupDetails.
:type metrics: list[oci.apm_config.models.Metric]
"""
self.swagger_types = {
'config_type': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))',
'display_name': 'str',
'filter_id': 'str',
'namespace': 'str',
'dimensions': 'list[Dimension]',
'metrics': 'list[Metric]'
}
self.attribute_map = {
'config_type': 'configType',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags',
'display_name': 'displayName',
'filter_id': 'filterId',
'namespace': 'namespace',
'dimensions': 'dimensions',
'metrics': 'metrics'
}
self._config_type = None
self._freeform_tags = None
self._defined_tags = None
self._display_name = None
self._filter_id = None
self._namespace = None
self._dimensions = None
self._metrics = None
self._config_type = 'METRIC_GROUP'
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this CreateMetricGroupDetails.
The name of the metric group.
:return: The display_name of this CreateMetricGroupDetails.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this CreateMetricGroupDetails.
The name of the metric group.
:param display_name: The display_name of this CreateMetricGroupDetails.
:type: str
"""
self._display_name = display_name
@property
def filter_id(self):
"""
**[Required]** Gets the filter_id of this CreateMetricGroupDetails.
The `OCID`__ of a Span Filter. The filterId is mandatory for the creation
of MetricGroups. A filterId is generated when a Span Filter is created.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The filter_id of this CreateMetricGroupDetails.
:rtype: str
"""
return self._filter_id
@filter_id.setter
def filter_id(self, filter_id):
"""
Sets the filter_id of this CreateMetricGroupDetails.
The `OCID`__ of a Span Filter. The filterId is mandatory for the creation
of MetricGroups. A filterId is generated when a Span Filter is created.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param filter_id: The filter_id of this CreateMetricGroupDetails.
:type: str
"""
self._filter_id = filter_id
@property
def namespace(self):
"""
Gets the namespace of this CreateMetricGroupDetails.
The namespace to which the metrics are published. It must be one of several predefined namespaces.
:return: The namespace of this CreateMetricGroupDetails.
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""
Sets the namespace of this CreateMetricGroupDetails.
The namespace to which the metrics are published. It must be one of several predefined namespaces.
:param namespace: The namespace of this CreateMetricGroupDetails.
:type: str
"""
self._namespace = namespace
@property
def dimensions(self):
"""
Gets the dimensions of this CreateMetricGroupDetails.
A list of dimensions for the metric. This variable should not be used.
:return: The dimensions of this CreateMetricGroupDetails.
:rtype: list[oci.apm_config.models.Dimension]
"""
return self._dimensions
@dimensions.setter
def dimensions(self, dimensions):
"""
Sets the dimensions of this CreateMetricGroupDetails.
A list of dimensions for the metric. This variable should not be used.
:param dimensions: The dimensions of this CreateMetricGroupDetails.
:type: list[oci.apm_config.models.Dimension]
"""
self._dimensions = dimensions
@property
def metrics(self):
"""
**[Required]** Gets the metrics of this CreateMetricGroupDetails.
The list of metrics in this group.
:return: The metrics of this CreateMetricGroupDetails.
:rtype: list[oci.apm_config.models.Metric]
"""
return self._metrics
@metrics.setter
def metrics(self, metrics):
"""
Sets the metrics of this CreateMetricGroupDetails.
The list of metrics in this group.
:param metrics: The metrics of this CreateMetricGroupDetails.
:type: list[oci.apm_config.models.Metric]
"""
self._metrics = metrics
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
en
| 0.677849
|
# coding: utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. # noqa: F401 A metric group defines a set of metrics to collect from a span. It uses a span filter to specify which spans to process. The set is then published to a namespace, which is a product level subdivision of metrics. Initializes a new CreateMetricGroupDetails object with values from keyword arguments. The default value of the :py:attr:`~oci.apm_config.models.CreateMetricGroupDetails.config_type` attribute of this class is ``METRIC_GROUP`` and it should not be changed. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param config_type: The value to assign to the config_type property of this CreateMetricGroupDetails. Allowed values for this property are: "SPAN_FILTER", "METRIC_GROUP", "APDEX" :type config_type: str :param freeform_tags: The value to assign to the freeform_tags property of this CreateMetricGroupDetails. :type freeform_tags: dict(str, str) :param defined_tags: The value to assign to the defined_tags property of this CreateMetricGroupDetails. :type defined_tags: dict(str, dict(str, object)) :param display_name: The value to assign to the display_name property of this CreateMetricGroupDetails. :type display_name: str :param filter_id: The value to assign to the filter_id property of this CreateMetricGroupDetails. :type filter_id: str :param namespace: The value to assign to the namespace property of this CreateMetricGroupDetails. :type namespace: str :param dimensions: The value to assign to the dimensions property of this CreateMetricGroupDetails. :type dimensions: list[oci.apm_config.models.Dimension] :param metrics: The value to assign to the metrics property of this CreateMetricGroupDetails. :type metrics: list[oci.apm_config.models.Metric] **[Required]** Gets the display_name of this CreateMetricGroupDetails. The name of the metric group. :return: The display_name of this CreateMetricGroupDetails. :rtype: str Sets the display_name of this CreateMetricGroupDetails. The name of the metric group. :param display_name: The display_name of this CreateMetricGroupDetails. :type: str **[Required]** Gets the filter_id of this CreateMetricGroupDetails. The `OCID`__ of a Span Filter. The filterId is mandatory for the creation of MetricGroups. A filterId is generated when a Span Filter is created. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :return: The filter_id of this CreateMetricGroupDetails. :rtype: str Sets the filter_id of this CreateMetricGroupDetails. The `OCID`__ of a Span Filter. The filterId is mandatory for the creation of MetricGroups. A filterId is generated when a Span Filter is created. __ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm :param filter_id: The filter_id of this CreateMetricGroupDetails. :type: str Gets the namespace of this CreateMetricGroupDetails. The namespace to which the metrics are published. It must be one of several predefined namespaces. :return: The namespace of this CreateMetricGroupDetails. :rtype: str Sets the namespace of this CreateMetricGroupDetails. The namespace to which the metrics are published. It must be one of several predefined namespaces. 
:param namespace: The namespace of this CreateMetricGroupDetails. :type: str Gets the dimensions of this CreateMetricGroupDetails. A list of dimensions for the metric. This variable should not be used. :return: The dimensions of this CreateMetricGroupDetails. :rtype: list[oci.apm_config.models.Dimension] Sets the dimensions of this CreateMetricGroupDetails. A list of dimensions for the metric. This variable should not be used. :param dimensions: The dimensions of this CreateMetricGroupDetails. :type: list[oci.apm_config.models.Dimension] **[Required]** Gets the metrics of this CreateMetricGroupDetails. The list of metrics in this group. :return: The metrics of this CreateMetricGroupDetails. :rtype: list[oci.apm_config.models.Metric] Sets the metrics of this CreateMetricGroupDetails. The list of metrics in this group. :param metrics: The metrics of this CreateMetricGroupDetails. :type: list[oci.apm_config.models.Metric]
| 1.993859
| 2
|
equium/__init__.py
|
johnpaulett/equium-py
| 0
|
6627111
|
__version__ = '0.0.1'
class Object:
def __eq__(self, other):
return not super().__eq__(other)
|
__version__ = '0.0.1'
class Object:
def __eq__(self, other):
return not super().__eq__(other)
|
none
| 1
| 1.949007
| 2
|
|
src/sage/categories/category_types.py
|
fchapoton/sage
| 2
|
6627112
|
"""
Specific category classes
This is placed in a separate file from categories.py to avoid circular imports
(as morphisms must be very low in the hierarchy with the new coercion model).
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2005 <NAME> <<EMAIL>> and
# <NAME> <<EMAIL>>
# 2008-2009 <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.unknown import Unknown
from .category import JoinCategory, Category, CategoryWithParameters
from sage.misc.lazy_import import lazy_import
lazy_import('sage.categories.objects', 'Objects')
lazy_import('sage.misc.latex', 'latex')
####################################################################
# Different types of categories
####################################################################
#############################################################
# Category of elements of some object
#############################################################
class Elements(Category):
"""
The category of all elements of a given parent.
EXAMPLES::
sage: a = IntegerRing()(5)
sage: C = a.category(); C
Category of elements of Integer Ring
sage: a in C
True
sage: 2/3 in C
False
sage: loads(C.dumps()) == C
True
"""
def __init__(self, object):
"""
EXAMPLES::
sage: TestSuite(Elements(ZZ)).run()
"""
Category.__init__(self)
self.__object = object
@classmethod
def an_instance(cls):
"""
Returns an instance of this class
EXAMPLES::
sage: Elements.an_instance()
Category of elements of Rational Field
"""
from sage.rings.rational_field import QQ
return cls(QQ)
def _call_(self, x):
"""
EXAMPLES::
sage: V = VectorSpace(QQ,3)
sage: x = V.0
sage: C = x.category()
sage: C
Category of elements of Vector space of dimension 3 over Rational Field
sage: w = C([1,2,3]); w # indirect doctest
(1, 2, 3)
sage: w.category()
Category of elements of Vector space of dimension 3 over Rational Field
"""
return self.__object(x)
def super_categories(self):
"""
EXAMPLES::
sage: Elements(ZZ).super_categories()
[Category of objects]
.. TODO::
Check that this is what we want.
"""
return [Objects()]
def object(self):
"""
EXAMPLES::
sage: Elements(ZZ).object()
Integer Ring
"""
return self.__object
def __reduce__(self):
"""
EXAMPLES::
sage: C = Elements(ZZ)
sage: loads(dumps(C)) == C
True
"""
return Elements, (self.__object, )
def _repr_object_names(self):
"""
EXAMPLES::
sage: Elements(ZZ)._repr_object_names()
'elements of Integer Ring'
"""
return "elements of %s"%self.object()
def _latex_(self):
r"""
EXAMPLES::
sage: V = VectorSpace(QQ,3)
sage: x = V.0
sage: latex(x.category()) # indirect doctest
\mathbf{Elt}_{\Bold{Q}^{3}}
"""
return "\\mathbf{Elt}_{%s}"%latex(self.__object)
#############################################################
# Category of objects over some base object
#############################################################
class Category_over_base(CategoryWithParameters):
r"""
A base class for categories over some base object
INPUT:
- ``base`` -- a category `C` or an object of such a category
Assumption: the classes for the parents, elements, morphisms, of
``self`` should only depend on `C`. See :trac:`11935` for details.
EXAMPLES::
sage: Algebras(GF(2)).element_class is Algebras(GF(3)).element_class
True
sage: C = GF(2).category()
sage: Algebras(GF(2)).parent_class is Algebras(C).parent_class
True
sage: C = ZZ.category()
sage: Algebras(ZZ).element_class is Algebras(C).element_class
True
"""
def __init__(self, base, name=None):
r"""
Initialize ``self``.
EXAMPLES::
sage: S = Spec(ZZ)
sage: C = Schemes(S); C
Category of schemes over Integer Ring
sage: C.__class__.__init__ == sage.categories.category_types.Category_over_base.__init__
True
sage: C.base() is S
True
sage: TestSuite(C).run()
"""
self.__base = base
Category.__init__(self, name)
def _test_category_over_bases(self, **options):
"""
Run generic tests on this category with parameters.
.. SEEALSO:: :class:`TestSuite`.
EXAMPLES::
sage: Modules(QQ)._test_category_over_bases()
"""
tester = self._tester(**options)
from sage.categories.category_singleton import Category_singleton
from .bimodules import Bimodules
from .schemes import Schemes
for cat in self.super_categories():
tester.assertTrue(isinstance(cat, (Category_singleton, Category_over_base,
Bimodules, Schemes)),
"The super categories of a category over base should"
" be a category over base (or the related Bimodules)"
" or a singleton category")
def _make_named_class_key(self, name):
r"""
Return what the element/parent/... classes depend on.
Since :trac:`11935`, the element and parent classes of a
category over base only depend on the category of the base (or
the base itself if it is a category).
.. SEEALSO::
- :meth:`CategoryWithParameters`
- :meth:`CategoryWithParameters._make_named_class_key`
EXAMPLES::
sage: Modules(ZZ)._make_named_class_key('element_class')
Join of Category of euclidean domains
and Category of infinite enumerated sets
and Category of metric spaces
sage: Modules(QQ)._make_named_class_key('parent_class')
Join of Category of number fields
and Category of quotient fields
and Category of metric spaces
sage: Schemes(Spec(ZZ))._make_named_class_key('parent_class')
Category of schemes
sage: ModularAbelianVarieties(QQ)._make_named_class_key('parent_class')
Join of Category of number fields
and Category of quotient fields
and Category of metric spaces
sage: Algebras(Fields())._make_named_class_key('morphism_class')
Category of fields
"""
if isinstance(self.__base, Category):
return self.__base
return self.__base.category()
@classmethod
def an_instance(cls):
"""
Returns an instance of this class
EXAMPLES::
sage: Algebras.an_instance()
Category of algebras over Rational Field
"""
from sage.rings.rational_field import QQ
return cls(QQ)
def base(self):
"""
Return the base over which elements of this category are
defined.
EXAMPLES::
sage: C = Algebras(QQ)
sage: C.base()
Rational Field
"""
return self.__base
def _repr_object_names(self):
r"""
Return the name of the objects of this category.
.. SEEALSO:: :meth:`Category._repr_object_names`
EXAMPLES::
sage: Algebras(QQ)._repr_object_names()
'algebras over Rational Field'
sage: Algebras(Fields())._repr_object_names()
'algebras over fields'
sage: Algebras(GF(2).category())._repr_object_names()
'algebras over (finite enumerated fields and subquotients of monoids and quotients of semigroups)'
"""
base = self.__base
if isinstance(base, Category):
if isinstance(base, JoinCategory):
name = '('+' and '.join(C._repr_object_names() for C in base.super_categories())+')'
else:
name = base._repr_object_names()
else:
name = base
return Category._repr_object_names(self) + " over %s"%name
def _latex_(self):
r"""
EXAMPLES::
sage: latex(ModulesWithBasis(ZZ))
\mathbf{ModulesWithBasis}_{\Bold{Z}}
"""
return "\\mathbf{%s}_{%s}"%(self._label, latex(self.__base))
# def construction(self):
# return (self.__class__, self.__base)
# How to deal with HomsetWithBase
# def _homset(self, X, Y):
# """
# Given two objects X and Y in this category, returns the
# collection of the morphisms of this category between X and Y
# """
# assert(X in self and Y in self)
# from sage.categories.homset import Homset, HomsetWithBase
# if X._base is not X and X._base is not None: # does this ever fail?
# return HomsetWithBase(X, Y, self)
# else:
# return Homset(X, Y, self)
#############################################################
# Category of objects over some base ring
#############################################################
class AbelianCategory(Category):
def is_abelian(self):
"""
Return ``True`` as ``self`` is an abelian category.
EXAMPLES::
sage: CommutativeAdditiveGroups().is_abelian()
True
"""
return True
class Category_over_base_ring(Category_over_base):
def __init__(self, base, name=None):
"""
Initialize ``self``.
EXAMPLES::
sage: C = Algebras(GF(2)); C
Category of algebras over Finite Field of size 2
sage: TestSuite(C).run()
"""
from sage.categories.rings import Rings
if not (base in Rings or
isinstance(base, Category) and base.is_subcategory(Rings())):
raise ValueError("base must be a ring or a subcategory of Rings()")
Category_over_base.__init__(self, base, name)
def base_ring(self):
"""
Return the base ring over which elements of this category are
defined.
EXAMPLES::
sage: C = Algebras(GF(2))
sage: C.base_ring()
Finite Field of size 2
"""
return self.base()
def _subcategory_hook_(self, C):
"""
A quick test whether a category ``C`` may be a subcategory of
this category.
INPUT:
- ``C`` -- a category (type not tested)
OUTPUT:
A boolean if it is certain that ``C`` is (or is not) a
subcategory of self. :obj:`~sage.misc.unknown.Unknown`
otherwise.
EXAMPLES:
The answer is ``False`` if the subcategory class of ``C`` is
not a subclass of the subcategory class of ``self``::
sage: Algebras(QQ)._subcategory_hook_(VectorSpaces(QQ))
False
sage: VectorSpaces(QQ)._subcategory_hook_(Algebras(ZZ))
False
.. WARNING::
This test currently includes some false negatives::
sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(Fields().Finite()))
False
sage: Modules(Rings())._subcategory_hook_(Modules(GroupAlgebras(Rings())))
False
The answer is ``Unknown`` if ``C`` is not a category over base ring::
sage: VectorSpaces(QQ)._subcategory_hook_(VectorSpaces(QQ) & Rings())
Unknown
sage: Sym = SymmetricFunctions(QQ)
sage: from sage.combinat.sf.sfa import SymmetricFunctionsBases
sage: Modules(QQ)._subcategory_hook_(SymmetricFunctionsBases(Sym))
Unknown
sage: SymmetricFunctionsBases(Sym).is_subcategory(Modules(QQ))
True
Case 1: the two bases are categories; then the base of ``C``
shall be a subcategory of the base of ``self``::
sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(Fields()))
True
sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(Fields().Finite())) # todo: not implemented
True
sage: VectorSpaces(Fields().Finite())._subcategory_hook_(Algebras(Fields()))
False
Case 2: the base of ``self`` is a category; then the base of
``C`` shall be a parent in this category::
sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(QQ)) # todo: not implemented
True
sage: VectorSpaces(Fields().Finite())._subcategory_hook_(Algebras(QQ))
False
Case 3: the two bases are parents; then they should coincide::
sage: VectorSpaces(QQ)._subcategory_hook_(Algebras(QQ))
True
sage: VectorSpaces(CC)._subcategory_hook_(Algebras(QQ)) # base ring in different categories
False
sage: VectorSpaces(GF(2))._subcategory_hook_(Algebras(GF(3))) # base ring in the same category
False
Note: we need both previous tests since the distinction is
made respectively using the parent class or the base ring::
sage: issubclass(Algebras(QQ).parent_class, VectorSpaces(CC).parent_class)
False
sage: issubclass(Algebras(GF(2)).parent_class, VectorSpaces(GF(3)).parent_class)
True
Check that :trac:`16618` is fixed: this `_subcategory_hook_`
method is only valid for :class:`Category_over_base_ring`, not
:class:`Category_over_base`::
sage: from sage.categories.category_types import Category_over_base
sage: D = Modules(Rings())
sage: class Cs(Category_over_base):
....: def super_categories(self):
....: return [D]
sage: C = Cs(SymmetricGroup(3))
sage: C.is_subcategory(D)
True
sage: D._subcategory_hook_(C)
Unknown
sage: import __main__
sage: __main__.Cs = Cs # Fake Cs being defined in a python module
sage: TestSuite(C).run()
"""
if not issubclass(C.parent_class, self.parent_class):
return False
if not isinstance(C, Category_over_base_ring):
return Unknown
base_ring = self.base_ring()
if C.base_ring() is base_ring:
return True
if isinstance(base_ring, Category):
if isinstance(C.base(), Category):
return C.base().is_subcategory(base_ring)
# otherwise, C.base() is a parent
return C.base() in base_ring
return False
def __contains__(self, x):
"""
Return whether ``x`` is an object of this category.
In most cases, ``x`` is an object in this category, if and
only if the category of ``x`` is a subcategory of ``self``.
Exception: ``x`` is also an object in this category if ``x``
is in a category over a base ring category ``C``, and ``self``
is a category over a base ring in ``C``.
This method implements this exception.
EXAMPLES::
sage: QQ['x'] in Algebras(QQ)
True
sage: ZZ['x'] in Algebras(ZZ)
True
We also would want the following to hold::
sage: QQ['x'] in Algebras(Fields()) # todo: not implemented
True
"""
try:
# The issubclass test handles extension types or when the
# category is not fully initialized
if isinstance(x, self.parent_class) or \
issubclass(x.category().parent_class, self.parent_class):
if isinstance(self.base(), Category):
return True
else:
return x.base_ring() is self.base_ring()
else:
return super(Category_over_base_ring, self).__contains__(x)
except AttributeError:
return False
#############################################################
# Category of objects in some ambient object
#############################################################
class Category_in_ambient(Category):
def __init__(self, ambient, name=None):
"""
Initialize ``self``.
EXAMPLES::
sage: C = Ideals(IntegerRing())
sage: TestSuite(C).run()
"""
self.__ambient = ambient
Category.__init__(self, name)
def ambient(self):
"""
Return the ambient object in which objects of this category are
embedded.
EXAMPLES::
sage: C = Ideals(IntegerRing())
sage: C.ambient()
Integer Ring
"""
return self.__ambient
def _repr_(self):
"""
EXAMPLES::
sage: Ideals(IntegerRing())
Category of ring ideals in Integer Ring
"""
return Category._repr_(self) + " in %s"%self.__ambient
# def construction(self):
# return (self.__class__, self.__ambient)
class Category_module(AbelianCategory, Category_over_base_ring):
pass
class Category_ideal(Category_in_ambient):
@classmethod
def an_instance(cls):
"""
Return an instance of this class.
EXAMPLES::
sage: AlgebraIdeals.an_instance()
Category of algebra ideals in Univariate Polynomial Ring in x over Rational Field
"""
from sage.rings.rational_field import QQ
return cls(QQ['x'])
def ring(self):
"""
Return the ambient ring used to describe objects ``self``.
EXAMPLES::
sage: C = Ideals(IntegerRing())
sage: C.ring()
Integer Ring
"""
return self.ambient()
def __contains__(self, x):
"""
EXAMPLES::
sage: C = Ideals(IntegerRing())
sage: IntegerRing().zero_ideal() in C
True
"""
if super(Category_ideal, self).__contains__(x):
return True
from sage.rings.ideal import is_Ideal
if is_Ideal(x) and x.ring() == self.ring():
return True
return False
def __call__(self, v):
"""
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: Ig = [x, y]
sage: I = R.ideal(Ig)
sage: C = Ideals(R)
sage: C(Ig)
Ideal (x, y) of Multivariate Polynomial Ring in x, y over Integer Ring
sage: I == C(I)
True
"""
if v in self:
return v
return self.ring().ideal(v)
# TODO: make this into a better category
#############################################################
# ChainComplex
#############################################################
class ChainComplexes(Category_module):
"""
The category of all chain complexes over a base ring.
EXAMPLES::
sage: ChainComplexes(RationalField())
Category of chain complexes over Rational Field
sage: ChainComplexes(Integers(9))
Category of chain complexes over Ring of integers modulo 9
TESTS::
sage: TestSuite(ChainComplexes(RationalField())).run()
"""
def super_categories(self):
"""
EXAMPLES::
sage: ChainComplexes(Integers(9)).super_categories()
[Category of modules over Ring of integers modulo 9]
"""
from sage.categories.all import Fields, Modules, VectorSpaces
base_ring = self.base_ring()
if base_ring in Fields():
return [VectorSpaces(base_ring)]
return [Modules(base_ring)]
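# Hedged sketch, not part of the Sage source above: the minimal shape of a new
# category over a base ring, mirroring the ChainComplexes pattern. The class
# name GradedWidgets is invented purely for illustration.
class GradedWidgets(Category_over_base_ring):
    def super_categories(self):
        # every graded widget is in particular a module over the base ring
        from sage.categories.all import Modules
        return [Modules(self.base_ring())]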
|
"""
Specific category classes
This is placed in a separate file from categories.py to avoid circular imports
(as morphisms must be very low in the hierarchy with the new coercion model).
"""
from __future__ import absolute_import
#*****************************************************************************
# Copyright (C) 2005 <NAME> <<EMAIL>> and
# <NAME> <<EMAIL>>
# 2008-2009 <NAME> <nthiery at users.sf.net>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#*****************************************************************************
from sage.misc.unknown import Unknown
from .category import JoinCategory, Category, CategoryWithParameters
from sage.misc.lazy_import import lazy_import
lazy_import('sage.categories.objects', 'Objects')
lazy_import('sage.misc.latex', 'latex')
####################################################################
# Different types of categories
####################################################################
#############################################################
# Category of elements of some object
#############################################################
class Elements(Category):
"""
The category of all elements of a given parent.
EXAMPLES::
sage: a = IntegerRing()(5)
sage: C = a.category(); C
Category of elements of Integer Ring
sage: a in C
True
sage: 2/3 in C
False
sage: loads(C.dumps()) == C
True
"""
def __init__(self, object):
"""
EXAMPLES::
sage: TestSuite(Elements(ZZ)).run()
"""
Category.__init__(self)
self.__object = object
@classmethod
def an_instance(cls):
"""
Returns an instance of this class
EXAMPLES::
sage: Elements.an_instance()
Category of elements of Rational Field
"""
from sage.rings.rational_field import QQ
return cls(QQ)
def _call_(self, x):
"""
EXAMPLES::
sage: V = VectorSpace(QQ,3)
sage: x = V.0
sage: C = x.category()
sage: C
Category of elements of Vector space of dimension 3 over Rational Field
sage: w = C([1,2,3]); w # indirect doctest
(1, 2, 3)
sage: w.category()
Category of elements of Vector space of dimension 3 over Rational Field
"""
return self.__object(x)
def super_categories(self):
"""
EXAMPLES::
sage: Elements(ZZ).super_categories()
[Category of objects]
.. TODO::
Check that this is what we want.
"""
return [Objects()]
def object(self):
"""
EXAMPLES::
sage: Elements(ZZ).object()
Integer Ring
"""
return self.__object
def __reduce__(self):
"""
EXAMPLES::
sage: C = Elements(ZZ)
sage: loads(dumps(C)) == C
True
"""
return Elements, (self.__object, )
def _repr_object_names(self):
"""
EXAMPLES::
sage: Elements(ZZ)._repr_object_names()
'elements of Integer Ring'
"""
return "elements of %s"%self.object()
def _latex_(self):
r"""
EXAMPLES::
sage: V = VectorSpace(QQ,3)
sage: x = V.0
sage: latex(x.category()) # indirect doctest
\mathbf{Elt}_{\Bold{Q}^{3}}
"""
return "\\mathbf{Elt}_{%s}"%latex(self.__object)
#############################################################
# Category of objects over some base object
#############################################################
class Category_over_base(CategoryWithParameters):
r"""
A base class for categories over some base object
INPUT:
- ``base`` -- a category `C` or an object of such a category
Assumption: the classes for the parents, elements, morphisms, of
``self`` should only depend on `C`. See :trac:`11935` for details.
EXAMPLES::
sage: Algebras(GF(2)).element_class is Algebras(GF(3)).element_class
True
sage: C = GF(2).category()
sage: Algebras(GF(2)).parent_class is Algebras(C).parent_class
True
sage: C = ZZ.category()
sage: Algebras(ZZ).element_class is Algebras(C).element_class
True
"""
def __init__(self, base, name=None):
r"""
Initialize ``self``.
EXAMPLES::
sage: S = Spec(ZZ)
sage: C = Schemes(S); C
Category of schemes over Integer Ring
sage: C.__class__.__init__ == sage.categories.category_types.Category_over_base.__init__
True
sage: C.base() is S
True
sage: TestSuite(C).run()
"""
self.__base = base
Category.__init__(self, name)
def _test_category_over_bases(self, **options):
"""
Run generic tests on this category with parameters.
.. SEEALSO:: :class:`TestSuite`.
EXAMPLES::
sage: Modules(QQ)._test_category_over_bases()
"""
tester = self._tester(**options)
from sage.categories.category_singleton import Category_singleton
from .bimodules import Bimodules
from .schemes import Schemes
for cat in self.super_categories():
tester.assertTrue(isinstance(cat, (Category_singleton, Category_over_base,
Bimodules, Schemes)),
"The super categories of a category over base should"
" be a category over base (or the related Bimodules)"
" or a singleton category")
def _make_named_class_key(self, name):
r"""
Return what the element/parent/... classes depend on.
Since :trac:`11935`, the element and parent classes of a
category over base only depend on the category of the base (or
the base itself if it is a category).
.. SEEALSO::
- :meth:`CategoryWithParameters`
- :meth:`CategoryWithParameters._make_named_class_key`
EXAMPLES::
sage: Modules(ZZ)._make_named_class_key('element_class')
Join of Category of euclidean domains
and Category of infinite enumerated sets
and Category of metric spaces
sage: Modules(QQ)._make_named_class_key('parent_class')
Join of Category of number fields
and Category of quotient fields
and Category of metric spaces
sage: Schemes(Spec(ZZ))._make_named_class_key('parent_class')
Category of schemes
sage: ModularAbelianVarieties(QQ)._make_named_class_key('parent_class')
Join of Category of number fields
and Category of quotient fields
and Category of metric spaces
sage: Algebras(Fields())._make_named_class_key('morphism_class')
Category of fields
"""
if isinstance(self.__base, Category):
return self.__base
return self.__base.category()
@classmethod
def an_instance(cls):
"""
Returns an instance of this class
EXAMPLES::
sage: Algebras.an_instance()
Category of algebras over Rational Field
"""
from sage.rings.rational_field import QQ
return cls(QQ)
def base(self):
"""
Return the base over which elements of this category are
defined.
EXAMPLES::
sage: C = Algebras(QQ)
sage: C.base()
Rational Field
"""
return self.__base
def _repr_object_names(self):
r"""
Return the name of the objects of this category.
.. SEEALSO:: :meth:`Category._repr_object_names`
EXAMPLES::
sage: Algebras(QQ)._repr_object_names()
'algebras over Rational Field'
sage: Algebras(Fields())._repr_object_names()
'algebras over fields'
sage: Algebras(GF(2).category())._repr_object_names()
'algebras over (finite enumerated fields and subquotients of monoids and quotients of semigroups)'
"""
base = self.__base
if isinstance(base, Category):
if isinstance(base, JoinCategory):
name = '('+' and '.join(C._repr_object_names() for C in base.super_categories())+')'
else:
name = base._repr_object_names()
else:
name = base
return Category._repr_object_names(self) + " over %s"%name
def _latex_(self):
r"""
EXAMPLES::
sage: latex(ModulesWithBasis(ZZ))
\mathbf{ModulesWithBasis}_{\Bold{Z}}
"""
return "\\mathbf{%s}_{%s}"%(self._label, latex(self.__base))
# def construction(self):
# return (self.__class__, self.__base)
# How to deal with HomsetWithBase
# def _homset(self, X, Y):
# """
# Given two objects X and Y in this category, returns the
# collection of the morphisms of this category between X and Y
# """
# assert(X in self and Y in self)
# from sage.categories.homset import Homset, HomsetWithBase
# if X._base is not X and X._base is not None: # does this ever fail?
# return HomsetWithBase(X, Y, self)
# else:
# return Homset(X, Y, self)
#############################################################
# Category of objects over some base ring
#############################################################
class AbelianCategory(Category):
def is_abelian(self):
"""
Return ``True`` as ``self`` is an abelian category.
EXAMPLES::
sage: CommutativeAdditiveGroups().is_abelian()
True
"""
return True
class Category_over_base_ring(Category_over_base):
def __init__(self, base, name=None):
"""
Initialize ``self``.
EXAMPLES::
sage: C = Algebras(GF(2)); C
Category of algebras over Finite Field of size 2
sage: TestSuite(C).run()
"""
from sage.categories.rings import Rings
if not (base in Rings or
isinstance(base, Category) and base.is_subcategory(Rings())):
raise ValueError("base must be a ring or a subcategory of Rings()")
Category_over_base.__init__(self, base, name)
def base_ring(self):
"""
Return the base ring over which elements of this category are
defined.
EXAMPLES::
sage: C = Algebras(GF(2))
sage: C.base_ring()
Finite Field of size 2
"""
return self.base()
def _subcategory_hook_(self, C):
"""
A quick test whether a category ``C`` may be a subcategory of
this category.
INPUT:
- ``C`` -- a category (type not tested)
OUTPUT:
A boolean if it is certain that ``C`` is (or is not) a
subcategory of self. :obj:`~sage.misc.unknown.Unknown`
otherwise.
EXAMPLES:
The answer is ``False`` if the subcategory class of ``C`` is
not a subclass of the subcategory class of ``self``::
sage: Algebras(QQ)._subcategory_hook_(VectorSpaces(QQ))
False
sage: VectorSpaces(QQ)._subcategory_hook_(Algebras(ZZ))
False
.. WARNING::
This test currently includes some false negatives::
sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(Fields().Finite()))
False
sage: Modules(Rings())._subcategory_hook_(Modules(GroupAlgebras(Rings())))
False
The answer is ``Unknown`` if ``C`` is not a category over base ring::
sage: VectorSpaces(QQ)._subcategory_hook_(VectorSpaces(QQ) & Rings())
Unknown
sage: Sym = SymmetricFunctions(QQ)
sage: from sage.combinat.sf.sfa import SymmetricFunctionsBases
sage: Modules(QQ)._subcategory_hook_(SymmetricFunctionsBases(Sym))
Unknown
sage: SymmetricFunctionsBases(Sym).is_subcategory(Modules(QQ))
True
Case 1: the two bases are categories; then the base of ``C``
shall be a subcategory of the base of ``self``::
sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(Fields()))
True
sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(Fields().Finite())) # todo: not implemented
True
sage: VectorSpaces(Fields().Finite())._subcategory_hook_(Algebras(Fields()))
False
Case 2: the base of ``self`` is a category; then the base of
``C`` shall be a parent in this category::
sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(QQ)) # todo: not implemented
True
sage: VectorSpaces(Fields().Finite())._subcategory_hook_(Algebras(QQ))
False
Case 3: the two bases are parents; then they should coincide::
sage: VectorSpaces(QQ)._subcategory_hook_(Algebras(QQ))
True
sage: VectorSpaces(CC)._subcategory_hook_(Algebras(QQ)) # base ring in different categories
False
sage: VectorSpaces(GF(2))._subcategory_hook_(Algebras(GF(3))) # base ring in the same category
False
Note: we need both previous tests since the distinction is
made respectively using the parent class or the base ring::
sage: issubclass(Algebras(QQ).parent_class, VectorSpaces(CC).parent_class)
False
sage: issubclass(Algebras(GF(2)).parent_class, VectorSpaces(GF(3)).parent_class)
True
Check that :trac:`16618` is fixed: this `_subcategory_hook_`
method is only valid for :class:`Category_over_base_ring`, not
:class:`Category_over_base`::
sage: from sage.categories.category_types import Category_over_base
sage: D = Modules(Rings())
sage: class Cs(Category_over_base):
....: def super_categories(self):
....: return [D]
sage: C = Cs(SymmetricGroup(3))
sage: C.is_subcategory(D)
True
sage: D._subcategory_hook_(C)
Unknown
sage: import __main__
sage: __main__.Cs = Cs # Fake Cs being defined in a python module
sage: TestSuite(C).run()
"""
if not issubclass(C.parent_class, self.parent_class):
return False
if not isinstance(C, Category_over_base_ring):
return Unknown
base_ring = self.base_ring()
if C.base_ring() is base_ring:
return True
if isinstance(base_ring, Category):
if isinstance(C.base(), Category):
return C.base().is_subcategory(base_ring)
# otherwise, C.base() is a parent
return C.base() in base_ring
return False
def __contains__(self, x):
"""
Return whether ``x`` is an object of this category.
In most cases, ``x`` is an object in this category, if and
only if the category of ``x`` is a subcategory of ``self``.
Exception: ``x`` is also an object in this category if ``x``
is in a category over a base ring category ``C``, and ``self``
is a category over a base ring in ``C``.
This method implements this exception.
EXAMPLES::
sage: QQ['x'] in Algebras(QQ)
True
sage: ZZ['x'] in Algebras(ZZ)
True
We also would want the following to hold::
sage: QQ['x'] in Algebras(Fields()) # todo: not implemented
True
"""
try:
# The issubclass test handles extension types or when the
# category is not fully initialized
if isinstance(x, self.parent_class) or \
issubclass(x.category().parent_class, self.parent_class):
if isinstance(self.base(), Category):
return True
else:
return x.base_ring() is self.base_ring()
else:
return super(Category_over_base_ring, self).__contains__(x)
except AttributeError:
return False
#############################################################
# Category of objects in some ambient object
#############################################################
class Category_in_ambient(Category):
def __init__(self, ambient, name=None):
"""
Initialize ``self``.
EXAMPLES::
sage: C = Ideals(IntegerRing())
sage: TestSuite(C).run()
"""
self.__ambient = ambient
Category.__init__(self, name)
def ambient(self):
"""
Return the ambient object in which objects of this category are
embedded.
EXAMPLES::
sage: C = Ideals(IntegerRing())
sage: C.ambient()
Integer Ring
"""
return self.__ambient
def _repr_(self):
"""
EXAMPLES::
sage: Ideals(IntegerRing())
Category of ring ideals in Integer Ring
"""
return Category._repr_(self) + " in %s"%self.__ambient
# def construction(self):
# return (self.__class__, self.__ambient)
class Category_module(AbelianCategory, Category_over_base_ring):
pass
class Category_ideal(Category_in_ambient):
@classmethod
def an_instance(cls):
"""
Return an instance of this class.
EXAMPLES::
sage: AlgebraIdeals.an_instance()
Category of algebra ideals in Univariate Polynomial Ring in x over Rational Field
"""
from sage.rings.rational_field import QQ
return cls(QQ['x'])
def ring(self):
"""
Return the ambient ring used to describe objects ``self``.
EXAMPLES::
sage: C = Ideals(IntegerRing())
sage: C.ring()
Integer Ring
"""
return self.ambient()
def __contains__(self, x):
"""
EXAMPLES::
sage: C = Ideals(IntegerRing())
sage: IntegerRing().zero_ideal() in C
True
"""
if super(Category_ideal, self).__contains__(x):
return True
from sage.rings.ideal import is_Ideal
if is_Ideal(x) and x.ring() == self.ring():
return True
return False
def __call__(self, v):
"""
EXAMPLES::
sage: R.<x,y> = ZZ[]
sage: Ig = [x, y]
sage: I = R.ideal(Ig)
sage: C = Ideals(R)
sage: C(Ig)
Ideal (x, y) of Multivariate Polynomial Ring in x, y over Integer Ring
sage: I == C(I)
True
"""
if v in self:
return v
return self.ring().ideal(v)
# TODO: make this into a better category
#############################################################
# ChainComplex
#############################################################
class ChainComplexes(Category_module):
"""
The category of all chain complexes over a base ring.
EXAMPLES::
sage: ChainComplexes(RationalField())
Category of chain complexes over Rational Field
sage: ChainComplexes(Integers(9))
Category of chain complexes over Ring of integers modulo 9
TESTS::
sage: TestSuite(ChainComplexes(RationalField())).run()
"""
def super_categories(self):
"""
EXAMPLES::
sage: ChainComplexes(Integers(9)).super_categories()
[Category of modules over Ring of integers modulo 9]
"""
from sage.categories.all import Fields, Modules, VectorSpaces
base_ring = self.base_ring()
if base_ring in Fields():
return [VectorSpaces(base_ring)]
return [Modules(base_ring)]
|
en
| 0.541304
|
Specific category classes This is placed in a separate file from categories.py to avoid circular imports (as morphisms must be very low in the hierarchy with the new coercion model). #***************************************************************************** # Copyright (C) 2005 <NAME> <<EMAIL>> and # <NAME> <<EMAIL>> # 2008-2009 <NAME> <nthiery at users.sf.net> # # Distributed under the terms of the GNU General Public License (GPL) # http://www.gnu.org/licenses/ #***************************************************************************** #################################################################### # Different types of categories #################################################################### ############################################################# # Category of elements of some object ############################################################# The category of all elements of a given parent. EXAMPLES:: sage: a = IntegerRing()(5) sage: C = a.category(); C Category of elements of Integer Ring sage: a in C True sage: 2/3 in C False sage: loads(C.dumps()) == C True EXAMPLES:: sage: TestSuite(Elements(ZZ)).run() Returns an instance of this class EXAMPLES:: sage: Elements.an_instance() Category of elements of Rational Field EXAMPLES:: sage: V = VectorSpace(QQ,3) sage: x = V.0 sage: C = x.category() sage: C Category of elements of Vector space of dimension 3 over Rational Field sage: w = C([1,2,3]); w # indirect doctest (1, 2, 3) sage: w.category() Category of elements of Vector space of dimension 3 over Rational Field EXAMPLES:: sage: Elements(ZZ).super_categories() [Category of objects] .. TODO:: Check that this is what we want. EXAMPLES:: sage: Elements(ZZ).object() Integer Ring EXAMPLES:: sage: C = Elements(ZZ) sage: loads(dumps(C)) == C True EXAMPLES:: sage: Elements(ZZ)._repr_object_names() 'elements of Integer Ring' EXAMPLES:: sage: V = VectorSpace(QQ,3) sage: x = V.0 sage: latex(x.category()) # indirect doctest \mathbf{Elt}_{\Bold{Q}^{3}} ############################################################# # Category of objects over some base object ############################################################# A base class for categories over some base object INPUT: - ``base`` -- a category `C` or an object of such a category Assumption: the classes for the parents, elements, morphisms, of ``self`` should only depend on `C`. See :trac:`11935` for details. EXAMPLES:: sage: Algebras(GF(2)).element_class is Algebras(GF(3)).element_class True sage: C = GF(2).category() sage: Algebras(GF(2)).parent_class is Algebras(C).parent_class True sage: C = ZZ.category() sage: Algebras(ZZ).element_class is Algebras(C).element_class True Initialize ``self``. EXAMPLES:: sage: S = Spec(ZZ) sage: C = Schemes(S); C Category of schemes over Integer Ring sage: C.__class__.__init__ == sage.categories.category_types.Category_over_base.__init__ True sage: C.base() is S True sage: TestSuite(C).run() Run generic tests on this category with parameters. .. SEEALSO:: :class:`TestSuite`. EXAMPLES:: sage: Modules(QQ)._test_category_over_bases() Return what the element/parent/... classes depend on. Since :trac:`11935`, the element and parent classes of a category over base only depend on the category of the base (or the base itself if it is a category). .. 
SEEALSO:: - :meth:`CategoryWithParameters` - :meth:`CategoryWithParameters._make_named_class_key` EXAMPLES:: sage: Modules(ZZ)._make_named_class_key('element_class') Join of Category of euclidean domains and Category of infinite enumerated sets and Category of metric spaces sage: Modules(QQ)._make_named_class_key('parent_class') Join of Category of number fields and Category of quotient fields and Category of metric spaces sage: Schemes(Spec(ZZ))._make_named_class_key('parent_class') Category of schemes sage: ModularAbelianVarieties(QQ)._make_named_class_key('parent_class') Join of Category of number fields and Category of quotient fields and Category of metric spaces sage: Algebras(Fields())._make_named_class_key('morphism_class') Category of fields Returns an instance of this class EXAMPLES:: sage: Algebras.an_instance() Category of algebras over Rational Field Return the base over which elements of this category are defined. EXAMPLES:: sage: C = Algebras(QQ) sage: C.base() Rational Field Return the name of the objects of this category. .. SEEALSO:: :meth:`Category._repr_object_names` EXAMPLES:: sage: Algebras(QQ)._repr_object_names() 'algebras over Rational Field' sage: Algebras(Fields())._repr_object_names() 'algebras over fields' sage: Algebras(GF(2).category())._repr_object_names() 'algebras over (finite enumerated fields and subquotients of monoids and quotients of semigroups)' EXAMPLES:: sage: latex(ModulesWithBasis(ZZ)) \mathbf{ModulesWithBasis}_{\Bold{Z}} # def construction(self): # return (self.__class__, self.__base) # How to deal with HomsetWithBase # def _homset(self, X, Y): # """ # Given two objects X and Y in this category, returns the # collection of the morphisms of this category between X and Y # """ # assert(X in self and Y in self) # from sage.categories.homset import Homset, HomsetWithBase # if X._base is not X and X._base is not None: # does this ever fail? # return HomsetWithBase(X, Y, self) # else: # return Homset(X, Y, self) ############################################################# # Category of objects over some base ring ############################################################# Return ``True`` as ``self`` is an abelian category. EXAMPLES:: sage: CommutativeAdditiveGroups().is_abelian() True Initialize ``self``. EXAMPLES:: sage: C = Algebras(GF(2)); C Category of algebras over Finite Field of size 2 sage: TestSuite(C).run() Return the base ring over which elements of this category are defined. EXAMPLES:: sage: C = Algebras(GF(2)) sage: C.base_ring() Finite Field of size 2 A quick test whether a category ``C`` may be a subcategory of this category. INPUT: - ``C`` -- a category (type not tested) OUTPUT: A boolean if it is certain that ``C`` is (or is not) a subcategory of self. :obj:`~sage.misc.unknown.Unknown` otherwise. EXAMPLES: The answer is ``False`` if the subcategory class of ``C`` is not a subclass of the subcategory class of ``self``:: sage: Algebras(QQ)._subcategory_hook_(VectorSpaces(QQ)) False sage: VectorSpaces(QQ)._subcategory_hook_(Algebras(ZZ)) False .. 
WARNING:: This test currently includes some false negatives:: sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(Fields().Finite())) False sage: Modules(Rings())._subcategory_hook_(Modules(GroupAlgebras(Rings()))) False The answer is ``Unknown`` if ``C`` is not a category over base ring:: sage: VectorSpaces(QQ)._subcategory_hook_(VectorSpaces(QQ) & Rings()) Unknown sage: Sym = SymmetricFunctions(QQ) sage: from sage.combinat.sf.sfa import SymmetricFunctionsBases sage: Modules(QQ)._subcategory_hook_(SymmetricFunctionsBases(Sym)) Unknown sage: SymmetricFunctionsBases(Sym).is_subcategory(Modules(QQ)) True Case 1: the two bases are categories; then the base of ``C`` shall be a subcategory of the base of ``self``:: sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(Fields())) True sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(Fields().Finite())) # todo: not implemented True sage: VectorSpaces(Fields().Finite())._subcategory_hook_(Algebras(Fields())) False Case 2: the base of ``self`` is a category; then the base of ``C`` shall be a parent in this category:: sage: VectorSpaces(Fields())._subcategory_hook_(Algebras(QQ)) # todo: not implemented True sage: VectorSpaces(Fields().Finite())._subcategory_hook_(Algebras(QQ)) False Case 3: the two bases are parents; then they should coincide:: sage: VectorSpaces(QQ)._subcategory_hook_(Algebras(QQ)) True sage: VectorSpaces(CC)._subcategory_hook_(Algebras(QQ)) # base ring in different categories False sage: VectorSpaces(GF(2))._subcategory_hook_(Algebras(GF(3))) # base ring in the same category False Note; we need both previous tests since the distinction is made respectively using the parent class or the base ring:: sage: issubclass(Algebras(QQ).parent_class, VectorSpaces(CC).parent_class) False sage: issubclass(Algebras(GF(2)).parent_class, VectorSpaces(GF(3)).parent_class) True Check that :trac:`16618` is fixed: this `_subcategory_hook_` method is only valid for :class:`Category_over_base_ring`, not :class:`Category_over_base`:: sage: from sage.categories.category_types import Category_over_base sage: D = Modules(Rings()) sage: class Cs(Category_over_base): ....: def super_categories(self): ....: return [D] sage: C = Cs(SymmetricGroup(3)) sage: C.is_subcategory(D) True sage: D._subcategory_hook_(C) Unknown sage: import __main__ sage: __main__.Cs = Cs # Fake Cs being defined in a python module sage: TestSuite(C).run() # otherwise, C.base() is a parent Return whether ``x`` is an object of this category. In most cases, ``x`` is an object in this category, if and only if the category of ``x`` is a subcategory of ``self``. Exception: ``x`` is also an object in this category if ``x`` is in a category over a base ring category ``C``, and ``self`` is a category over a base ring in ``C``. This method implements this exception. EXAMPLES:: sage: QQ['x'] in Algebras(QQ) True sage: ZZ['x'] in Algebras(ZZ) True We also would want the following to hold:: sage: QQ['x'] in Algebras(Fields()) # todo: not implemented True # The issubclass test handles extension types or when the # category is not fully initialized ############################################################# # Category of objects in some ambient object ############################################################# Initialize ``self``. EXAMPLES:: sage: C = Ideals(IntegerRing()) sage: TestSuite(C).run() Return the ambient object in which objects of this category are embedded. 
EXAMPLES:: sage: C = Ideals(IntegerRing()) sage: C.ambient() Integer Ring EXAMPLES:: sage: Ideals(IntegerRing()) Category of ring ideals in Integer Ring # def construction(self): # return (self.__class__, self.__ambient) Return an instance of this class. EXAMPLES:: sage: AlgebraIdeals.an_instance() Category of algebra ideals in Univariate Polynomial Ring in x over Rational Field Return the ambient ring used to describe objects ``self``. EXAMPLES:: sage: C = Ideals(IntegerRing()) sage: C.ring() Integer Ring EXAMPLES:: sage: C = Ideals(IntegerRing()) sage: IntegerRing().zero_ideal() in C True EXAMPLES:: sage: R.<x,y> = ZZ[] sage: Ig = [x, y] sage: I = R.ideal(Ig) sage: C = Ideals(R) sage: C(Ig) Ideal (x, y) of Multivariate Polynomial Ring in x, y over Integer Ring sage: I == C(I) True # TODO: make this into a better category ############################################################# # ChainComplex ############################################################# The category of all chain complexes over a base ring. EXAMPLES:: sage: ChainComplexes(RationalField()) Category of chain complexes over Rational Field sage: ChainComplexes(Integers(9)) Category of chain complexes over Ring of integers modulo 9 TESTS:: sage: TestSuite(ChainComplexes(RationalField())).run() EXAMPLES:: sage: ChainComplexes(Integers(9)).super_categories() [Category of modules over Ring of integers modulo 9]
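The docstrings above describe how categories over a base or a base ring behave (base_ring(), _subcategory_hook_, containment, and the ideal/chain-complex categories). A minimal sketch, assuming a working Sage installation, of a custom category built on Category_over_base_ring; the class name Widgets is purely illustrative:

# Hypothetical category over a base ring; only classes documented above are used.
from sage.all import QQ
from sage.categories.category_types import Category_over_base_ring
from sage.categories.modules import Modules

class Widgets(Category_over_base_ring):
    def super_categories(self):
        # Every widget algebra is at least a module over its base ring
        return [Modules(self.base_ring())]

C = Widgets(QQ)
print(C.base_ring())                   # Rational Field
print(C.is_subcategory(Modules(QQ)))   # True, via super_categories above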
| 1.948597
| 2
|
app/service/learning_svc.py
|
mihaid-b/caldera
| 0
|
6627113
|
import itertools
import glob
import re
from base64 import b64decode
from importlib import import_module
from app.objects.secondclass.c_relationship import Relationship
from app.objects.secondclass.c_link import update_scores
from app.service.interfaces.i_learning_svc import LearningServiceInterface
from app.utility.base_service import BaseService
class LearningService(LearningServiceInterface, BaseService):
def __init__(self):
self.log = self.add_service('learning_svc', self)
self.model = set()
self.parsers = self.add_parsers('app/learning')
self.re_variable = re.compile(r'#{(.*?)}', flags=re.DOTALL)
self.log.debug('Loaded %d parsers' % len(self.parsers))
@staticmethod
def add_parsers(directory):
parsers = []
for filepath in glob.iglob('%s/**.py' % directory):
module = import_module(filepath.replace('/', '.').replace('\\', '.').replace('.py', ''))
parsers.append(module.Parser())
return parsers
async def build_model(self):
for ability in await self.get_service('data_svc').locate('abilities'):
for executor in ability.executors:
if executor.command:
variables = frozenset(re.findall(self.re_variable, executor.test))
if len(variables) > 1: # relationships require at least 2 variables
self.model.add(variables)
self.model = set(self.model)
async def learn(self, facts, link, blob, operation=None):
decoded_blob = b64decode(blob).decode('utf-8')
found_facts = []
for parser in self.parsers:
try:
for fact in parser.parse(decoded_blob):
found_facts.append(fact)
except Exception as e:
self.log.error(e)
await update_scores(operation=None, increment=len(found_facts), used=facts, facts=link.facts)
await self._store_results(link, found_facts, operation)
async def _store_results(self, link, facts, operation=None):
facts_covered = []
for relationship in self.model:
matches = []
for fact in facts:
if fact.trait in relationship:
matches.append(fact)
facts_covered.append(fact)
for pair in itertools.combinations(matches, r=2):
if pair[0].trait != pair[1].trait:
await link.create_relationships([Relationship(source=pair[0], edge='has', target=pair[1])],
operation=operation)
for f in [x for x in facts if x not in facts_covered]:
await link.save_fact(operation=operation, fact=f, score=1, relationship=[])
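add_parsers() above instantiates a Parser class from every module in app/learning, and learn() feeds each parser the decoded command output. A minimal sketch of such a parser; the Fact import path and the 'host.ip.address' trait are illustrative assumptions, not verbatim caldera code:

import re

from app.objects.secondclass.c_fact import Fact


class Parser:
    # Matches IPv4-looking tokens in decoded command output
    ip_pattern = re.compile(r'\b(?:\d{1,3}\.){3}\d{1,3}\b')

    def parse(self, blob):
        # One fact per address; learning_svc turns co-occurring traits into relationships
        return [Fact(trait='host.ip.address', value=ip) for ip in self.ip_pattern.findall(blob)]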
|
import itertools
import glob
import re
from base64 import b64decode
from importlib import import_module
from app.objects.secondclass.c_relationship import Relationship
from app.objects.secondclass.c_link import update_scores
from app.service.interfaces.i_learning_svc import LearningServiceInterface
from app.utility.base_service import BaseService
class LearningService(LearningServiceInterface, BaseService):
def __init__(self):
self.log = self.add_service('learning_svc', self)
self.model = set()
self.parsers = self.add_parsers('app/learning')
self.re_variable = re.compile(r'#{(.*?)}', flags=re.DOTALL)
self.log.debug('Loaded %d parsers' % len(self.parsers))
@staticmethod
def add_parsers(directory):
parsers = []
for filepath in glob.iglob('%s/**.py' % directory):
module = import_module(filepath.replace('/', '.').replace('\\', '.').replace('.py', ''))
parsers.append(module.Parser())
return parsers
async def build_model(self):
for ability in await self.get_service('data_svc').locate('abilities'):
for executor in ability.executors:
if executor.command:
variables = frozenset(re.findall(self.re_variable, executor.test))
if len(variables) > 1: # relationships require at least 2 variables
self.model.add(variables)
self.model = set(self.model)
async def learn(self, facts, link, blob, operation=None):
decoded_blob = b64decode(blob).decode('utf-8')
found_facts = []
for parser in self.parsers:
try:
for fact in parser.parse(decoded_blob):
found_facts.append(fact)
except Exception as e:
self.log.error(e)
await update_scores(operation=None, increment=len(found_facts), used=facts, facts=link.facts)
await self._store_results(link, found_facts, operation)
async def _store_results(self, link, facts, operation=None):
facts_covered = []
for relationship in self.model:
matches = []
for fact in facts:
if fact.trait in relationship:
matches.append(fact)
facts_covered.append(fact)
for pair in itertools.combinations(matches, r=2):
if pair[0].trait != pair[1].trait:
await link.create_relationships([Relationship(source=pair[0], edge='has', target=pair[1])],
operation=operation)
for f in [x for x in facts if x not in facts_covered]:
await link.save_fact(operation=operation, fact=f, score=1, relationship=[])
|
en
| 0.841349
|
# relationships require at least 2 variables
| 2.142203
| 2
|
beluga/continuation/__init__.py
|
doublefloyd/beluga
| 20
|
6627114
|
"""
Module: continuation
"""
from beluga.continuation.continuation import (ContinuationList, ContinuationVariable, ManualStrategy,
ProductStrategy, BisectionStrategy, run_continuation_set)
from beluga.continuation.guess_generators import guess_generator, GuessGenerator, match_constants_to_states
|
"""
Module: continuation
"""
from beluga.continuation.continuation import (ContinuationList, ContinuationVariable, ManualStrategy,
ProductStrategy, BisectionStrategy, run_continuation_set)
from beluga.continuation.guess_generators import guess_generator, GuessGenerator, match_constants_to_states
|
fr
| 0.108381
|
Module: continuation
| 1.475788
| 1
|
describe.py
|
jeffh/describe
| 3
|
6627115
|
<gh_stars>1-10
#!/usr/bin/env python
import os
execfile(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'describe', 'main.py')
)
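execfile() exists only on Python 2; a rough Python 3 equivalent of the same bootstrap, shown as a sketch rather than a drop-in replacement:

#!/usr/bin/env python3
import os

main_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'describe', 'main.py')
with open(main_path) as f:
    exec(compile(f.read(), main_path, 'exec'))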
|
#!/usr/bin/env python
import os
execfile(
os.path.join(os.path.abspath(os.path.dirname(__file__)), 'describe', 'main.py')
)
|
ru
| 0.26433
|
#!/usr/bin/env python
| 1.224636
| 1
|
BeiKeZuFangSpider/spiders/BeiKeSpider.py
|
sunhailin-Leo/BeiKeZuFangSpider
| 4
|
6627116
|
# -*- coding: UTF-8 -*-
"""
Created on 2018-09-17
@author: Leo
"""
import re
import uuid
from collections import OrderedDict
# Scrapy
import scrapy
# Project internal libraries
from BeiKeZuFangSpider.utils.city_util import CityInfoSpider
from BeiKeZuFangSpider.utils.common_utils import DictMatching, flatten
class BeiKeScrapySpider(scrapy.Spider):
    name = "BeiKeErShouFang"
    def __init__(self, city: str, area: str, metro: str):
        """
        Data initialization
        :param city: city name
        :param area: area name
        :param metro: metro line name
        """
        self._area = area
        self._metro = metro
        # Start URLs for the crawler
        self.start_urls = []
        # City
        if city != "":
            # The city name must not be empty
            self._c = CityInfoSpider(city=city)
            self._city_info = self._c.get_city_data()
            if self._city_info[0] != "Error":
                # Second-hand housing data exists for the current city
                if self._area == "" and self._metro == "":
                    # Crawl the full data set for the current city
                    self.start_urls.append(self._city_info[0])
                elif self._area != "" and self._metro == "":
                    dict_m = DictMatching(matching_dict=self._city_info[1], words=self._area, max_result=True)
                    self.start_urls.append(list(dict_m.dict_keys_matching()[0][1].values())[0])
                elif self._area == "" and self._metro != "":
                    dict_m = DictMatching(matching_dict=self._city_info[2], words=self._metro, max_result=True)
                    self.start_urls.append(list(dict_m.dict_keys_matching()[0][2].values())[0])
                else:
                    raise ValueError("The area name and the metro line name cannot both be given!")
            else:
                raise ValueError(self._city_info[1])
        else:
            raise ValueError("The city name must not be empty!")
        super().__init__()
    def start_requests(self):
        self.logger.info("Starting crawl! Current URL: {}".format(self.start_urls[0]))
        yield scrapy.Request(url=self.start_urls[0],
                             callback=self.parse_page)
    def parse_page(self, response):
        self.logger.info("Fetched the page source! Parsing the total number of pages...")
        # Total number of pages
        total_page = response.xpath('//div[@class="content__pg"]/@data-totalpage').extract()[0]
        self.logger.info("Total number of pages for the current listing type: {}".format(total_page))
        # Start crawling the first page
        yield scrapy.Request(url="{}pg{}/#contentList".format(self.start_urls[0], 1),
                             callback=self.parse,
                             meta={'tp': int(total_page), 'cp': 1})
    def parse(self, response):
        self.logger.info("Fetched the page source! Parsing...")
        # House data dict (an OrderedDict is used for compatibility with other 3.x versions)
        house_data = OrderedDict()
        # House list
        house_list = response.xpath('//div[@class="content__list"]/div')
        # Parse the list
        for house in house_list:
            # ID
            house_data['_id'] = str(uuid.uuid4()).replace("-", "")
            # District the house belongs to
            house_data['area'] = self._area
            # House detail page
            house_data['house_url'] = \
                "https://{}{}".format(response.meta['download_slot'], house.xpath('a[1]/@href').extract_first())
            # House cover image
            house_data['cover_pic'] = house.xpath('a[2]/img/@data-src').extract_first()
            # House title
            house_data['title'] = house.xpath('string(div/p[1]/a)').extract_first().strip()
            # Basic house information
            house_data['house_base_info'] = re.sub(r'\s+', '', house.xpath('string(div/p[2])').extract_first())
            # House source
            house_data['house_src'] = re.sub(r'\s+', '', house.xpath('string(div/p[3])').extract_first())
            # House publish date
            house_data['publish_date'] = house.xpath('string(div/p[4])').extract_first()
            # House features
            house_feature = house.xpath('string(div/p[5])').extract_first().split("\n")
            house_data['house_feature'] = [d for d in [d.strip() for d in house_feature] if d != ""]
            # Rental price
            house_price = house.xpath('string(div/span)').extract_first().split(" ")
            house_price = ["{}-{}".format(d, d)
                           if len(d.split("-")) == 1 and i != 1
                           else d for i, d in enumerate(house_price)]
            house_price = [d.split("-") if i != 1 else d for i, d in enumerate(house_price)]
            house_price = flatten(nested_list=house_price)
            # Pop the unit field off the list
            house_unit = house_price.pop()
            house_data['price'] = ["{}{}".format(d, house_unit) for d in house_price]
            # Output the data
            yield house_data
        self.logger.info("############################################################################################")
        if response.meta['cp'] < response.meta['tp']:
            next_page = response.meta['cp'] + 1
            self.logger.info("Moving to the next page! Next page: {}, total pages: {}!".format(next_page, response.meta['tp']))
            yield scrapy.Request(url="{}pg{}/#contentList".format(self.start_urls[0], next_page),
                                 callback=self.parse,
                                 meta={'tp': response.meta['tp'], 'cp': next_page})
        else:
            self.logger.info("Crawl finished!")
|
# -*- coding: UTF-8 -*-
"""
Created on 2018-09-17
@author: Leo
"""
import re
import uuid
from collections import OrderedDict
# Scrapy
import scrapy
# Project internal libraries
from BeiKeZuFangSpider.utils.city_util import CityInfoSpider
from BeiKeZuFangSpider.utils.common_utils import DictMatching, flatten
class BeiKeScrapySpider(scrapy.Spider):
    name = "BeiKeErShouFang"
    def __init__(self, city: str, area: str, metro: str):
        """
        Data initialization
        :param city: city name
        :param area: area name
        :param metro: metro line name
        """
        self._area = area
        self._metro = metro
        # Start URLs for the crawler
        self.start_urls = []
        # City
        if city != "":
            # The city name must not be empty
            self._c = CityInfoSpider(city=city)
            self._city_info = self._c.get_city_data()
            if self._city_info[0] != "Error":
                # Second-hand housing data exists for the current city
                if self._area == "" and self._metro == "":
                    # Crawl the full data set for the current city
                    self.start_urls.append(self._city_info[0])
                elif self._area != "" and self._metro == "":
                    dict_m = DictMatching(matching_dict=self._city_info[1], words=self._area, max_result=True)
                    self.start_urls.append(list(dict_m.dict_keys_matching()[0][1].values())[0])
                elif self._area == "" and self._metro != "":
                    dict_m = DictMatching(matching_dict=self._city_info[2], words=self._metro, max_result=True)
                    self.start_urls.append(list(dict_m.dict_keys_matching()[0][2].values())[0])
                else:
                    raise ValueError("The area name and the metro line name cannot both be given!")
            else:
                raise ValueError(self._city_info[1])
        else:
            raise ValueError("The city name must not be empty!")
        super().__init__()
    def start_requests(self):
        self.logger.info("Starting crawl! Current URL: {}".format(self.start_urls[0]))
        yield scrapy.Request(url=self.start_urls[0],
                             callback=self.parse_page)
    def parse_page(self, response):
        self.logger.info("Fetched the page source! Parsing the total number of pages...")
        # Total number of pages
        total_page = response.xpath('//div[@class="content__pg"]/@data-totalpage').extract()[0]
        self.logger.info("Total number of pages for the current listing type: {}".format(total_page))
        # Start crawling the first page
        yield scrapy.Request(url="{}pg{}/#contentList".format(self.start_urls[0], 1),
                             callback=self.parse,
                             meta={'tp': int(total_page), 'cp': 1})
    def parse(self, response):
        self.logger.info("Fetched the page source! Parsing...")
        # House data dict (an OrderedDict is used for compatibility with other 3.x versions)
        house_data = OrderedDict()
        # House list
        house_list = response.xpath('//div[@class="content__list"]/div')
        # Parse the list
        for house in house_list:
            # ID
            house_data['_id'] = str(uuid.uuid4()).replace("-", "")
            # District the house belongs to
            house_data['area'] = self._area
            # House detail page
            house_data['house_url'] = \
                "https://{}{}".format(response.meta['download_slot'], house.xpath('a[1]/@href').extract_first())
            # House cover image
            house_data['cover_pic'] = house.xpath('a[2]/img/@data-src').extract_first()
            # House title
            house_data['title'] = house.xpath('string(div/p[1]/a)').extract_first().strip()
            # Basic house information
            house_data['house_base_info'] = re.sub(r'\s+', '', house.xpath('string(div/p[2])').extract_first())
            # House source
            house_data['house_src'] = re.sub(r'\s+', '', house.xpath('string(div/p[3])').extract_first())
            # House publish date
            house_data['publish_date'] = house.xpath('string(div/p[4])').extract_first()
            # House features
            house_feature = house.xpath('string(div/p[5])').extract_first().split("\n")
            house_data['house_feature'] = [d for d in [d.strip() for d in house_feature] if d != ""]
            # Rental price
            house_price = house.xpath('string(div/span)').extract_first().split(" ")
            house_price = ["{}-{}".format(d, d)
                           if len(d.split("-")) == 1 and i != 1
                           else d for i, d in enumerate(house_price)]
            house_price = [d.split("-") if i != 1 else d for i, d in enumerate(house_price)]
            house_price = flatten(nested_list=house_price)
            # Pop the unit field off the list
            house_unit = house_price.pop()
            house_data['price'] = ["{}{}".format(d, house_unit) for d in house_price]
            # Output the data
            yield house_data
        self.logger.info("############################################################################################")
        if response.meta['cp'] < response.meta['tp']:
            next_page = response.meta['cp'] + 1
            self.logger.info("Moving to the next page! Next page: {}, total pages: {}!".format(next_page, response.meta['tp']))
            yield scrapy.Request(url="{}pg{}/#contentList".format(self.start_urls[0], next_page),
                                 callback=self.parse,
                                 meta={'tp': response.meta['tp'], 'cp': next_page})
        else:
            self.logger.info("Crawl finished!")
|
zh
| 0.860542
|
# -*- coding: UTF-8 -*- Created on 2018-09-17 @author: Leo # Scrapy # Project internal libraries Data initialization :param city: city name :param area: area name :param metro: metro line name # Start URLs for the crawler # City # The city name must not be empty # Second-hand housing data exists for the current city # Crawl the full data set for the current city # Total number of pages # Start crawling the first page #contentList".format(self.start_urls[0], 1), # House data dict (an OrderedDict is used for compatibility with other 3.x versions) # House list # Parse the list # ID # District the house belongs to # House detail page # House cover image # House title # Basic house information # House source # House publish date # House features # Rental price # Pop the unit field off the list # Output the data ###########################################################################################") #contentList".format(self.start_urls[0], next_page),
| 2.519023
| 3
|
test/trainer_class_test.py
|
SamiIbishi/applied-machine-learning
| 7
|
6627117
|
import torch
import torchvision
from src.utils import utils_tensorboard
from src.data_loader.FaceRecognitionDataset import FaceRecognitionDataset
from src.data_loader import DataSplitter
from src.model.FaceNet import FaceNet
from src.trainer.FaceNetTrainer import FaceNetTrainer
import time
import src.utils.utils_images as img_util
batch_size = 16
if __name__ == '__main__':
to_pil_image = torchvision.transforms.ToPILImage()
dataset = FaceRecognitionDataset(dataset_dir="../src/data/celeba_dataset/images/")
print("Created dataset, len:", len(dataset))
train_dataset, val_dataset = DataSplitter.split_train_test(dataset=dataset, val_ratio=0.1)
print("Splitted dataset")
model = FaceNet()
print("Created model")
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=4,
shuffle=True, sampler=None,
collate_fn=None)
print("Created train loader")
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=batch_size,
num_workers=4,
shuffle=False, sampler=None,
collate_fn=None)
print("Created val_loader")
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
print("Created optimizer")
tensorboard_writer = utils_tensorboard.MySummaryWriter(
numb_batches=len(train_loader), batch_size=batch_size, experiment_name="FaceNet")
print("Created tensorboard_writer")
trainer = FaceNetTrainer(model=model, train_loader=train_loader,
valid_loader=val_loader, test_loader=val_loader,
optimizer=optimizer, tensorboard_writer=tensorboard_writer,
device="cuda")
print("Created trainer")
dataiter = iter(train_loader)
    images, labels = next(dataiter)
anchors = images[0]
positives = images[1]
negatives = images[2]
# write graph of model to tensorboard
# tensorboard_writer.add_graph(model, images)
# write sample images to tensorboard
# Deleting image variables to free RAM
anchors_grid = None
positives_grid = None
negatives_grid = None
total_grid = None
fig = None
print("start training")
trainer.train(epochs=5)
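The grid variables above are placeholders for the commented-out image logging. A sketch of building those grids from the anchors/positives/negatives batches defined above, using a plain SummaryWriter; the custom MySummaryWriter interface is not shown in this script, so it is deliberately not used here:

from torchvision.utils import make_grid
from torch.utils.tensorboard import SummaryWriter

sample_writer = SummaryWriter(log_dir="runs/facenet_samples")
anchors_grid = make_grid(anchors, nrow=4)
positives_grid = make_grid(positives, nrow=4)
negatives_grid = make_grid(negatives, nrow=4)
sample_writer.add_image("samples/anchors", anchors_grid)
sample_writer.add_image("samples/positives", positives_grid)
sample_writer.add_image("samples/negatives", negatives_grid)
sample_writer.close()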
|
import torch
import torchvision
from src.utils import utils_tensorboard
from src.data_loader.FaceRecognitionDataset import FaceRecognitionDataset
from src.data_loader import DataSplitter
from src.model.FaceNet import FaceNet
from src.trainer.FaceNetTrainer import FaceNetTrainer
import time
import src.utils.utils_images as img_util
batch_size = 16
if __name__ == '__main__':
to_pil_image = torchvision.transforms.ToPILImage()
dataset = FaceRecognitionDataset(dataset_dir="../src/data/celeba_dataset/images/")
print("Created dataset, len:", len(dataset))
train_dataset, val_dataset = DataSplitter.split_train_test(dataset=dataset, val_ratio=0.1)
print("Splitted dataset")
model = FaceNet()
print("Created model")
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
batch_size=batch_size,
num_workers=4,
shuffle=True, sampler=None,
collate_fn=None)
print("Created train loader")
val_loader = torch.utils.data.DataLoader(dataset=val_dataset,
batch_size=batch_size,
num_workers=4,
shuffle=False, sampler=None,
collate_fn=None)
print("Created val_loader")
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
print("Created optimizer")
tensorboard_writer = utils_tensorboard.MySummaryWriter(
numb_batches=len(train_loader), batch_size=batch_size, experiment_name="FaceNet")
print("Created tensorboard_writer")
trainer = FaceNetTrainer(model=model, train_loader=train_loader,
valid_loader=val_loader, test_loader=val_loader,
optimizer=optimizer, tensorboard_writer=tensorboard_writer,
device="cuda")
print("Created trainer")
dataiter = iter(train_loader)
    images, labels = next(dataiter)
anchors = images[0]
positives = images[1]
negatives = images[2]
# write graph of model to tensorboard
# tensorboard_writer.add_graph(model, images)
# write sample images to tensorboard
# Deleting image variables to free RAM
anchors_grid = None
positives_grid = None
negatives_grid = None
total_grid = None
fig = None
print("start training")
trainer.train(epochs=5)
|
en
| 0.829459
|
# write graph of model to tensorboard # tensorboard_writer.add_graph(model, images) # write sample images to tensorboard # Deleting image variables to free RAM
| 2.202111
| 2
|
Python/0191_number_of_1_bits.py
|
codingyen/CodeAlone
| 2
|
6627118
|
<filename>Python/0191_number_of_1_bits.py
# Try to use bit manipulation.
class Solution:
def hammingWeight(self, n):
b = bin(n)[2:]
counter = 0
for i in b:
if i == '1':
counter += 1
return counter
if __name__ == "__main__":
n = 11
s = Solution()
print(s.hammingWeight(n))
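The comment asks for bit manipulation, but the solution above counts '1' characters in the binary string. A sketch of the Brian Kernighan variant, which clears the lowest set bit on each iteration; the class name BitSolution is illustrative:

class BitSolution:
    def hammingWeight(self, n):
        count = 0
        while n:
            n &= n - 1  # clear the lowest set bit
            count += 1
        return count

print(BitSolution().hammingWeight(11))  # 3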
|
<filename>Python/0191_number_of_1_bits.py
# Try to use bit manipulation.
class Solution:
def hammingWeight(self, n):
b = bin(n)[2:]
counter = 0
for i in b:
if i == '1':
counter += 1
return counter
if __name__ == "__main__":
n = 11
s = Solution()
print(s.hammingWeight(n))
|
en
| 0.848346
|
# Try to use bit manipulation.
| 3.384423
| 3
|
slope/gaussian_hill_grad.py
|
UP-RS-ESP/GEW-DAP04-WS201819
| 2
|
6627119
|
import numpy as np
from matplotlib import pyplot as pl
shape = (14, 20)
width = 0.15
xstart = -1.5
ystart = -1.1
xb = np.arange(xstart, xstart+(shape[1]+1) * width, width)
yb = np.arange(ystart, ystart+(shape[0]+1) * width, width)
xc = xb[:-1] + width/2
yc = yb[:-1] + width/2
X, Y = np.meshgrid(xc, yc)
dem = np.exp(-Y*Y-X*X)
dR = -2 * np.sqrt(X*X+Y*Y) * np.exp(-Y*Y-X*X)
dx = np.nan * np.ones(shape)
dy = np.nan * np.ones(shape)
dx[:, :-1] = (dem[:, 1:] - dem[:, :-1]) / width
dy[:-1, :] = (dem[1:, :] - dem[:-1, :]) / width
dx = np.ma.masked_invalid(dx)
dy = np.ma.masked_invalid(dy)
gr = np.sqrt(dx*dx+dy*dy)
fg, ax = pl.subplots(ncols = 2)
im = ax[0].pcolormesh(xb, yb, gr)
cb = fg.colorbar(im, ax = ax[0],
orientation = 'horizontal')
cb.set_label('Gradient')
ax[0].set_aspect('equal')
im = ax[1].pcolormesh(xb, yb, dR, cmap = pl.cm.viridis_r)
cb = fg.colorbar(im, ax = ax[1],
orientation = 'horizontal')
cb.set_label('dz / dr')
ax[1].set_aspect('equal')
pl.show()
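As a cross-check on the forward differences above, np.gradient applies centered differences on the same grid; away from the boundary it should stay close both to gr and to the analytic |dR|. A sketch, reusing dem, width, gr and dR from the script:

gy, gx = np.gradient(dem, width)          # derivatives along rows (y) and columns (x)
gr_centered = np.sqrt(gx * gx + gy * gy)
print('max |forward - centered| :', np.abs(gr - gr_centered).max())
print('max |centered - analytic|:', np.abs(gr_centered - np.abs(dR)).max())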
|
import numpy as np
from matplotlib import pyplot as pl
shape = (14, 20)
width = 0.15
xstart = -1.5
ystart = -1.1
xb = np.arange(xstart, xstart+(shape[1]+1) * width, width)
yb = np.arange(ystart, ystart+(shape[0]+1) * width, width)
xc = xb[:-1] + width/2
yc = yb[:-1] + width/2
X, Y = np.meshgrid(xc, yc)
dem = np.exp(-Y*Y-X*X)
dR = -2 * np.sqrt(X*X+Y*Y) * np.exp(-Y*Y-X*X)
dx = np.nan * np.ones(shape)
dy = np.nan * np.ones(shape)
dx[:, :-1] = (dem[:, 1:] - dem[:, :-1]) / width
dy[:-1, :] = (dem[1:, :] - dem[:-1, :]) / width
dx = np.ma.masked_invalid(dx)
dy = np.ma.masked_invalid(dy)
gr = np.sqrt(dx*dx+dy*dy)
fg, ax = pl.subplots(ncols = 2)
im = ax[0].pcolormesh(xb, yb, gr)
cb = fg.colorbar(im, ax = ax[0],
orientation = 'horizontal')
cb.set_label('Gradient')
ax[0].set_aspect('equal')
im = ax[1].pcolormesh(xb, yb, dR, cmap = pl.cm.viridis_r)
cb = fg.colorbar(im, ax = ax[1],
orientation = 'horizontal')
cb.set_label('dz / dr')
ax[1].set_aspect('equal')
pl.show()
|
none
| 1
| 2.260411
| 2
|
|
imagespace/server/imageprefix_rest.py
|
amirhosf/image_space
| 0
|
6627120
|
<filename>imagespace/server/imageprefix_rest.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
from .settings import ImageSpaceSetting
class ImagePrefix(Resource):
def __init__(self):
self.resourceName = 'imageprefix'
self.route('GET', (), self.getImagePrefix)
@access.public
def getImagePrefix(self, params):
setting = ImageSpaceSetting()
return {
'prefix': setting.get('IMAGE_SPACE_PREFIX'),
'solrPrefix': setting.get('IMAGE_SPACE_SOLR_PREFIX'),
'stolenCameraPrefix': setting.get('IMAGE_SPACE_STOLEN_CAMERA') or 'http://www.stolencamerafinder.com/search',
'facetviewAdsUrl': setting.get('IMAGE_SPACE_FACETVIEW_ADS_URL'),
'localBasicAuth': setting.get('IMAGE_SPACE_LOCAL_BASIC_AUTH'),
'defaultSimilaritySearch': setting.get('IMAGE_SPACE_DEFAULT_SIMILARITY_SEARCH')
}
getImagePrefix.description = Description('Returns image URL prefix')
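A sketch of querying the endpoint from a client; the host, port and /api/v1 mount point are deployment assumptions, not defined by this module:

import requests

resp = requests.get('http://localhost:8080/api/v1/imageprefix')
resp.raise_for_status()
print(resp.json().get('prefix'))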
|
<filename>imagespace/server/imageprefix_rest.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
from girder.api import access
from girder.api.describe import Description
from girder.api.rest import Resource
from .settings import ImageSpaceSetting
class ImagePrefix(Resource):
def __init__(self):
self.resourceName = 'imageprefix'
self.route('GET', (), self.getImagePrefix)
@access.public
def getImagePrefix(self, params):
setting = ImageSpaceSetting()
return {
'prefix': setting.get('IMAGE_SPACE_PREFIX'),
'solrPrefix': setting.get('IMAGE_SPACE_SOLR_PREFIX'),
'stolenCameraPrefix': setting.get('IMAGE_SPACE_STOLEN_CAMERA') or 'http://www.stolencamerafinder.com/search',
'facetviewAdsUrl': setting.get('IMAGE_SPACE_FACETVIEW_ADS_URL'),
'localBasicAuth': setting.get('IMAGE_SPACE_LOCAL_BASIC_AUTH'),
'defaultSimilaritySearch': setting.get('IMAGE_SPACE_DEFAULT_SIMILARITY_SEARCH')
}
getImagePrefix.description = Description('Returns image URL prefix')
|
en
| 0.565926
|
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ###############################################################################
| 2.019131
| 2
|
galaxy/api/views/views.py
|
bmclaughlin/galaxy
| 904
|
6627121
|
<gh_stars>100-1000
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import logging
from collections import OrderedDict
from allauth.socialaccount.models import SocialToken
from django.conf import settings
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.urls import reverse
from django.db.models import Count, Max
from django.http import Http404, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
# TODO move all github interactions to githubapi
# Github
from github import Github
from github.GithubException import GithubException
# rest framework stuff
from rest_framework import status
from rest_framework.authentication import (
TokenAuthentication, SessionAuthentication
)
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework import filters as drf_filters
from galaxy import constants
from galaxy.accounts.models import CustomUser as User
from galaxy.api.permissions import ModelAccessPermission
from galaxy.api import filters as galaxy_filters
from galaxy.api import serializers
from galaxy.api import tasks
from galaxy.api.views import base_views
from galaxy.main.celerytasks import tasks as celerytasks
from galaxy.main import models
from galaxy.common import version, sanitize_content_name
logger = logging.getLogger(__name__)
__all__ = [
'ActiveProviderDetail',
'ActiveProviderList',
'ApiRootView',
'ApiV1ReposView',
'ApiV1RootView',
'CloudPlatformDetail',
'CloudPlatformList',
'ImportTaskDetail',
'ImportTaskLatestList',
'ImportTaskList',
'ImportTaskNotificationList',
'PlatformDetail',
'PlatformList',
'ProviderRootView',
'RefreshUserRepos',
'RemoveRole',
'RoleDependenciesList',
'RoleDownloads',
'RoleImportTaskList',
'RoleImportTaskList',
'RoleTypes',
'RoleUsersList',
'RoleVersionList',
'SubscriptionDetail',
'SubscriptionList',
'TagDetail',
'TagList',
'TopContributorsList',
]
# -----------------------------------------------------------------------------
# Helper functions
def filter_user_queryset(qs):
return qs.filter(is_active=True)
def filter_role_queryset(qs):
return qs.filter(active=True, is_valid=True)
def filter_rating_queryset(qs):
return qs.filter(
active=True,
role__active=True,
role__is_valid=True,
owner__is_active=True,
)
# -----------------------------------------------------------------------------
class ApiRootView(base_views.APIView):
permission_classes = (AllowAny,)
view_name = 'REST API'
def get(self, request, format=None):
# list supported API versions
data = dict(
description='GALAXY REST API',
current_version='v1',
available_versions=dict(
v1="v1/",
v2="v2/",
),
server_version=version.get_package_version('galaxy'),
version_name=version.get_version_name(),
team_members=version.get_team_members(),
)
return Response(data)
class ApiV1RootView(base_views.APIView):
permission_classes = (AllowAny,)
view_name = 'Version 1'
def get(self, request, format=None):
# list top level resources
data = OrderedDict()
data['cloud_platforms'] = reverse('api:cloud_platform_list')
data['content'] = reverse('api:content_list')
data['content_blocks'] = reverse('api:content_block_list')
data['content_types'] = reverse('api:content_type_list')
data['imports'] = reverse('api:import_task_list')
data['latest_imports'] = reverse('api:import_task_latest_list')
data['me'] = reverse('api:active_user_view')
data['namespaces'] = reverse('api:namespace_list')
data['notifications'] = reverse('api:notification_list')
data['platforms'] = reverse('api:platform_list')
data['provider_namespaces'] = reverse('api:provider_namespace_list')
data['providers'] = reverse('api:provider_root_view')
data['repositories'] = reverse('api:repository_list')
data['role_types'] = reverse('api:role_types')
data['roles'] = reverse('api:role_list')
data['search'] = reverse('api:search_view')
data['tags'] = reverse('api:tag_list')
data['users'] = reverse('api:user_list')
data['emails'] = reverse('api:email_list')
return Response(data)
class ProviderRootView(base_views.APIView):
""" Provider resources """
permission_classes = (AllowAny,)
def get(self, request, format=None):
data = OrderedDict()
data['active'] = reverse('api:active_provider_list')
data['sources'] = reverse('api:provider_source_list')
return Response(data)
class ActiveProviderList(base_views.ListAPIView):
""" Active providers """
model = models.Provider
serializer_class = serializers.ProviderSerializer
permission_classes = (AllowAny,)
def get_queryset(self):
return self.model.objects.filter(active=True)
class ActiveProviderDetail(base_views.RetrieveAPIView):
""" Active providers """
model = models.Provider
serializer_class = serializers.ProviderSerializer
permission_classes = (AllowAny,)
def get_queryset(self):
return self.model.objects.filter(active=True)
class RoleTypes(base_views.APIView):
permission_classes = (AllowAny,)
view_name = 'Role Types'
def get(self, request, format=None):
roles = [role for role in constants.RoleType.choices()
if role[0] in settings.ROLE_TYPES_ENABLED]
return Response(roles)
class ApiV1ReposView(base_views.APIView):
permission_classes = (AllowAny,)
view_name = 'Repos'
def get(self, request, *args, **kwargs):
data = OrderedDict()
data['list'] = reverse('api:repository_list')
data['refresh'] = reverse('api:refresh_user_repos')
data['subscriptions'] = reverse('api:subscription_list')
return Response(data)
class TagList(base_views.ListAPIView):
model = models.Tag
serializer_class = serializers.TagSerializer
def get_queryset(self):
return self.model.objects.filter(active=True)
class TagDetail(base_views.RetrieveAPIView):
model = models.Tag
serializer_class = serializers.TagSerializer
class PlatformList(base_views.ListAPIView):
model = models.Platform
serializer_class = serializers.PlatformSerializer
paginate_by = None
class PlatformDetail(base_views.RetrieveAPIView):
model = models.Platform
serializer_class = serializers.PlatformSerializer
class CloudPlatformList(base_views.ListAPIView):
model = models.CloudPlatform
serializer_class = serializers.CloudPlatformSerializer
paginate_by = None
class CloudPlatformDetail(base_views.RetrieveAPIView):
model = models.CloudPlatform
serializer_class = serializers.CloudPlatformSerializer
class RoleDependenciesList(base_views.SubListAPIView):
model = models.Content
serializer_class = serializers.RoleDetailSerializer
parent_model = models.Content
relationship = 'dependencies'
def get_queryset(self):
qs = super().get_queryset()
return filter_role_queryset(qs)
class RoleUsersList(base_views.SubListAPIView):
model = User
serializer_class = serializers.UserSerializer
parent_model = models.Content
relationship = 'created_by'
def get_queryset(self):
qs = super().get_queryset()
return filter_user_queryset(qs)
class RoleImportTaskList(base_views.ListAPIView):
model = models.ImportTask
serializer_class = serializers.ImportTaskSerializer
def list(self, request, *args, **kwargs):
id = kwargs.pop('pk')
try:
content = models.Content.objects.get(pk=id)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
qs = content.repository.import_tasks.select_related(
'owner',
'repository',
'repository__provider_namespace',
'repository__provider_namespace__provider',
'repository__provider_namespace__namespace',
).all()
qs = self.filter_queryset(qs)
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
class RoleDownloads(base_views.APIView):
def post(self, request, pk):
obj = get_object_or_404(models.Content, pk=pk)
obj.download_count += 1
obj.save()
return Response(status=status.HTTP_201_CREATED)
class RoleVersionList(base_views.ListAPIView):
model = models.RepositoryVersion
serializer_class = serializers.RoleVersionSerializer
def list(self, request, *args, **kwargs):
id = kwargs.pop('pk')
try:
content = models.Content.objects.get(pk=id)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
qs = content.repository.versions.all()
qs = self.filter_queryset(qs)
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
class ImportTaskList(base_views.ListCreateAPIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (ModelAccessPermission,)
model = models.ImportTask
serializer_class = serializers.ImportTaskSerializer
filter_backends = (
galaxy_filters.ActiveOnlyBackend,
galaxy_filters._FieldLookupBackend,
drf_filters.SearchFilter,
galaxy_filters.OrderByBackend,
)
def get_queryset(self):
qs = models.ImportTask.objects.select_related(
'owner',
'repository',
'repository__provider_namespace',
'repository__provider_namespace__provider',
'repository__provider_namespace__namespace',
)
return qs
def get_serializer_class(self):
# NOTE(cutwater): This is for compatibility with ansible-galaxy client.
if 'id' in self.request.GET:
return serializers.ImportTaskDetailSerializer
return super().get_serializer_class()
def list(self, request, *args, **kwargs):
github_user = request.GET.get('github_user')
github_repo = request.GET.get('github_repo')
qs = self.get_queryset()
if github_user and github_repo:
# Support ansible-galaxy <= 2.6
qs = qs.filter(
repository__provider_namespace__name=github_user,
repository__original_name=github_repo)
else:
qs = self.filter_queryset(qs)
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
def post(self, request, *args, **kwargs):
github_user = request.data.get('github_user')
github_repo = request.data.get('github_repo')
github_reference = request.data.get('github_reference', '')
repository_id = request.data.get('repository_id')
if not repository_id:
# request received from old client
if not github_user or not github_repo:
raise ValidationError({
'detail': "Invalid request. "
"Expecting github_user and github_repo."
})
namespace = models.ProviderNamespace.objects.get(
provider__name=constants.PROVIDER_GITHUB,
name=github_user
)
if not request.user.is_staff and \
not namespace.namespace.owners.filter(
username=request.user.get_username()):
                # User is not an owner of the Namespace
raise PermissionDenied(
"You are not an owner of {0}"
.format(namespace.namespace.name)
)
try:
repository = models.Repository.objects.get(
provider_namespace=namespace,
original_name=github_repo
)
except ObjectDoesNotExist:
repository, created = models.Repository.objects.get_or_create(
provider_namespace=namespace,
name=sanitize_content_name(github_repo),
defaults={
'is_enabled': False,
'original_name': github_repo,
'is_new': True
}
)
else:
try:
repository = models.Repository.objects.get(pk=repository_id)
except ObjectDoesNotExist:
raise ValidationError({
'detail': "Repository {0} not found, or you do not "
"have access".format(repository_id)
})
if not request.user.is_staff and \
not repository.provider_namespace.namespace.owners.filter(
username=request.user.get_username()):
                # User is not an owner of the Namespace
raise PermissionDenied(
"You are not an owner of {0}".format(repository.name)
)
task = tasks.create_import_task(
repository, request.user,
import_branch=github_reference, user_initiated=True)
serializer = self.get_serializer(instance=task)
response = {'results': [serializer.data]}
return Response(response,
status=status.HTTP_201_CREATED,
headers=self.get_success_headers(response))
class ImportTaskDetail(base_views.RetrieveAPIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (ModelAccessPermission,)
model = models.ImportTask
serializer_class = serializers.ImportTaskDetailSerializer
def get_object(self, qs=None):
obj = super().get_object()
if not obj.active:
raise Http404()
return obj
class ImportTaskNotificationList(base_views.SubListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
parent_model = models.ImportTask
relationship = 'notifications'
class ImportTaskLatestList(base_views.ListAPIView):
"""Return the most recent import for each of the user's repositories."""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
model = models.ImportTask
serializer_class = serializers.ImportTaskLatestSerializer
def list(self, request, *args, **kwargs):
qs = models.ImportTask.objects.filter(
repository__provider_namespace__namespace__isnull=False
).values(
'repository__provider_namespace__namespace__name',
'repository__name',
'repository__id',
).order_by(
'repository__provider_namespace__namespace__name',
'repository__name'
).annotate(last_id=Max('id'))
qs = self.filter_queryset(qs)
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
class SubscriptionList(base_views.ListCreateAPIView):
model = models.Subscription
serializer_class = serializers.SubscriptionSerializer
def post(self, request, *args, **kwargs):
github_user = request.data.get('github_user', None)
github_repo = request.data.get('github_repo', None)
if not github_user or not github_repo:
raise ValidationError({
'detail': "Invalid request. "
"Missing one or more required values."
})
try:
token = SocialToken.objects.get(
account__user=request.user,
account__provider='github'
)
except Exception:
msg = (
"Failed to connect to GitHub account for Galaxy user {}. "
"You must first authenticate with Github."
.format(request.user.username)
)
raise ValidationError(dict(detail=msg))
gh_api = Github(token.token)
try:
gh_repo = gh_api.get_repo(github_user + '/' + github_repo)
except GithubException as e:
msg = (
"GitHub API failed to return repo for {}/{}. {} - {}"
.format(github_user, github_repo, e.data, e.status)
)
raise ValidationError(dict(detail=msg))
try:
gh_user = gh_api.get_user()
except GithubException as e:
msg = (
"GitHub API failed to return authorized user. {} - {}"
.format(e.data, e.status)
)
raise ValidationError(dict(detail=msg))
try:
gh_user.add_to_subscriptions(gh_repo)
except GithubException:
msg = (
"GitHub API failed to subscribe user {} to for {}/{}"
.format(request.user.username, github_user, github_repo)
)
raise ValidationError(dict(detail=msg))
new_sub, created = models.Subscription.objects.get_or_create(
owner=request.user,
github_user=github_user,
github_repo=github_repo,
defaults={
'owner': request.user,
'github_user': github_user,
'github_repo': github_repo
})
sub_count = 0
for s in gh_repo.get_subscribers():
sub_count += 1 # only way to get subscriber count via pygithub
repo = models.Repository.objects.get(github_user=github_user,
github_repo=github_repo)
repo.watchers_count = sub_count
repo.save()
return Response(dict(
result=dict(
id=new_sub.id,
github_user=new_sub.github_user,
github_repo=new_sub.github_repo,
watchers_count=sub_count
)
), status=status.HTTP_201_CREATED)
class SubscriptionDetail(base_views.RetrieveUpdateDestroyAPIView):
model = models.Subscription
serializer_class = serializers.SubscriptionSerializer
def destroy(self, request, *args, **kwargs):
obj = super().get_object()
try:
token = SocialToken.objects.get(
account__user=request.user, account__provider='github'
)
except Exception:
msg = (
"Failed to access GitHub account for Galaxy user {}. "
"You must first authenticate with GitHub."
.format(request.user.username)
)
raise ValidationError(dict(detail=msg))
gh_api = Github(token.token)
try:
gh_repo = gh_api.get_repo(obj.github_user + '/' + obj.github_repo)
except GithubException as e:
msg = (
"GitHub API failed to return repo for {}/{}. {} - {}"
.format(obj.github_user, obj.github_repo, e.data, e.status)
)
raise ValidationError(dict(detail=msg))
try:
gh_user = gh_api.get_user()
except GithubException as e:
msg = (
"GitHub API failed to return authorized user. {} - {}"
.format(e.data, e.status)
)
raise ValidationError(dict(detail=msg))
try:
gh_user.remove_from_subscriptions(gh_repo)
except GithubException as e:
msg = (
"GitHub API failed to unsubscribe {} from {}/{}. {} - {}"
.format(request.user.username, obj.github_user,
obj.github_repo, e.data, e.status)
)
raise ValidationError(dict(detail=msg))
obj.delete()
sub_count = 0
for sub in gh_repo.get_subscribers():
sub_count += 1 # only way to get subscriber count via pygithub
repo = models.Repository.objects.get(github_user=obj.github_user,
github_repo=obj.github_repo)
repo.watchers_count = sub_count
repo.save()
result = (
"unsubscribed {} from {}/{}."
.format(request.user.username, obj.github_user, obj.github_repo)
)
return Response(dict(detail=result), status=status.HTTP_202_ACCEPTED)
class TopContributorsList(base_views.ListAPIView):
model = models.Content
serializer_class = serializers.TopContributorsSerializer
def list(self, request, *args, **kwargs):
qs = (models.Content.objects.values('namespace')
.annotate(count=Count('id'))
.order_by('-count', 'namespace'))
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
class RemoveRole(base_views.APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
def delete(self, request, *args, **kwargs):
gh_user = request.query_params.get('github_user', None)
gh_repo = request.query_params.get('github_repo', None)
if not gh_user or not gh_repo:
raise ValidationError(dict(detail="Invalid request."))
if not request.user.is_staff:
# Verify via GitHub API that user has access to requested role
try:
token = SocialToken.objects.get(
account__user=request.user, account__provider='github'
)
except Exception:
msg = (
"Failed to get Github account for Galaxy user {}. "
"You must first authenticate with Github."
.format(request.user.username)
)
raise ValidationError({'detail': msg})
gh_api = Github(token.token)
try:
ghu = gh_api.get_user()
except Exception:
raise ValidationError(
{'detail': "Failed to get Github authorized user."}
)
allowed = False
repo_full_name = "{}/{}".format(gh_user, gh_repo)
for r in ghu.get_repos():
if r.full_name == repo_full_name:
allowed = True
continue
if not allowed:
msg = (
"Galaxy user {0} does not have access to repo {1}"
.format(request.user.username, repo_full_name)
)
raise ValidationError(dict(detail=msg))
# User has access. Delete requested role and associated bits.
response = OrderedDict([
('deleted_roles', []),
('status', '')
])
roles = models.Content.objects.filter(
repository__provider_namespace__name=gh_user,
repository__original_name=gh_repo)
cnt = len(roles)
if cnt == 0:
response['status'] = (
"Role {}.{} not found. Maybe it was deleted previously?"
.format(gh_user, gh_repo)
)
return Response(response)
elif cnt == 1:
response['status'] = "Role {}.{} deleted".format(gh_user, gh_repo)
else:
response['status'] = (
"Deleted {:d} roles associated with {}/{}"
.format(len(roles), gh_user, gh_repo)
)
for role in roles:
response['deleted_roles'].append({
"id": role.id,
"namespace": role.namespace.name,
"name": role.name,
"github_user": role.github_user,
"github_repo": role.github_repo
})
repo = models.Repository.objects.get(
provider_namespace__name=gh_user,
original_name=gh_repo)
models.Notification.objects.filter(repository=repo).delete()
models.Content.objects.filter(repository=repo).delete()
models.ImportTask.objects.filter(repository=repo).delete()
repo.delete()
return Response(response)
class RefreshUserRepos(base_views.APIView):
"""
Return user GitHub repos directly from GitHub.
Use to refresh cache for the authenticated user.
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request, *args, **kwargs):
        # Return the list of the user's repositories directly from GitHub
try:
token = SocialToken.objects.get(
account__user=request.user, account__provider='github'
)
except Exception:
msg = (
"Failed to connect to GitHub account for Galaxy user {} "
"You must first authenticate with GitHub."
.format(request.user.username)
)
logger.error(msg)
return HttpResponseBadRequest({'detail': msg})
gh_api = Github(token.token)
try:
ghu = gh_api.get_user()
except Exception:
msg = "Failed to get GitHub authorized user."
logger.error(msg)
return HttpResponseBadRequest({'detail': msg})
try:
user_repos = ghu.get_repos()
except Exception:
msg = "Failed to get user repositories from GitHub."
logger.error(msg)
return HttpResponseBadRequest({'detail': msg})
try:
celerytasks.refresh_existing_user_repos(token.token, ghu)
except Exception as exc:
logger.error("Error: refresh_user_repos - {0}".format(exc))
raise
try:
celerytasks.update_user_repos(user_repos, request.user)
except Exception as exc:
logger.error("Error: update_user_repos - {0}".format(exc))
raise
qs = request.user.repositories.all()
serializer = serializers.RepositorySerializer(qs, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
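A sketch of exercising these views from a client; the /api prefix, hostname, token and exact route paths are assumptions about how the URL patterns are mounted, not part of this module:

import requests

BASE = 'https://galaxy.example.com/api'
session = requests.Session()
session.headers['Authorization'] = 'Token <api-token>'      # placeholder token

# ApiRootView: lists supported API versions
print(session.get(BASE + '/').json()['current_version'])    # 'v1'

# ImportTaskList.post still accepts github_user/github_repo for older clients
resp = session.post(BASE + '/v1/imports/', json={
    'github_user': 'some-namespace',
    'github_repo': 'some-role',
})
print(resp.status_code)                                      # 201 on success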
|
# (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
import logging
from collections import OrderedDict
from allauth.socialaccount.models import SocialToken
from django.conf import settings
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.urls import reverse
from django.db.models import Count, Max
from django.http import Http404, HttpResponseBadRequest
from django.shortcuts import get_object_or_404
# TODO move all github interactions to githubapi
# Github
from github import Github
from github.GithubException import GithubException
# rest framework stuff
from rest_framework import status
from rest_framework.authentication import (
TokenAuthentication, SessionAuthentication
)
from rest_framework.exceptions import ValidationError
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework import filters as drf_filters
from galaxy import constants
from galaxy.accounts.models import CustomUser as User
from galaxy.api.permissions import ModelAccessPermission
from galaxy.api import filters as galaxy_filters
from galaxy.api import serializers
from galaxy.api import tasks
from galaxy.api.views import base_views
from galaxy.main.celerytasks import tasks as celerytasks
from galaxy.main import models
from galaxy.common import version, sanitize_content_name
logger = logging.getLogger(__name__)
__all__ = [
'ActiveProviderDetail',
'ActiveProviderList',
'ApiRootView',
'ApiV1ReposView',
'ApiV1RootView',
'CloudPlatformDetail',
'CloudPlatformList',
'ImportTaskDetail',
'ImportTaskLatestList',
'ImportTaskList',
'ImportTaskNotificationList',
'PlatformDetail',
'PlatformList',
'ProviderRootView',
'RefreshUserRepos',
'RemoveRole',
'RoleDependenciesList',
'RoleDownloads',
'RoleImportTaskList',
'RoleImportTaskList',
'RoleTypes',
'RoleUsersList',
'RoleVersionList',
'SubscriptionDetail',
'SubscriptionList',
'TagDetail',
'TagList',
'TopContributorsList',
]
# -----------------------------------------------------------------------------
# Helper functions
def filter_user_queryset(qs):
return qs.filter(is_active=True)
def filter_role_queryset(qs):
return qs.filter(active=True, is_valid=True)
def filter_rating_queryset(qs):
return qs.filter(
active=True,
role__active=True,
role__is_valid=True,
owner__is_active=True,
)
# -----------------------------------------------------------------------------
class ApiRootView(base_views.APIView):
permission_classes = (AllowAny,)
view_name = 'REST API'
def get(self, request, format=None):
# list supported API versions
data = dict(
description='GALAXY REST API',
current_version='v1',
available_versions=dict(
v1="v1/",
v2="v2/",
),
server_version=version.get_package_version('galaxy'),
version_name=version.get_version_name(),
team_members=version.get_team_members(),
)
return Response(data)
class ApiV1RootView(base_views.APIView):
permission_classes = (AllowAny,)
view_name = 'Version 1'
def get(self, request, format=None):
# list top level resources
data = OrderedDict()
data['cloud_platforms'] = reverse('api:cloud_platform_list')
data['content'] = reverse('api:content_list')
data['content_blocks'] = reverse('api:content_block_list')
data['content_types'] = reverse('api:content_type_list')
data['imports'] = reverse('api:import_task_list')
data['latest_imports'] = reverse('api:import_task_latest_list')
data['me'] = reverse('api:active_user_view')
data['namespaces'] = reverse('api:namespace_list')
data['notifications'] = reverse('api:notification_list')
data['platforms'] = reverse('api:platform_list')
data['provider_namespaces'] = reverse('api:provider_namespace_list')
data['providers'] = reverse('api:provider_root_view')
data['repositories'] = reverse('api:repository_list')
data['role_types'] = reverse('api:role_types')
data['roles'] = reverse('api:role_list')
data['search'] = reverse('api:search_view')
data['tags'] = reverse('api:tag_list')
data['users'] = reverse('api:user_list')
data['emails'] = reverse('api:email_list')
return Response(data)
class ProviderRootView(base_views.APIView):
""" Provider resources """
permission_classes = (AllowAny,)
def get(self, request, format=None):
data = OrderedDict()
data['active'] = reverse('api:active_provider_list')
data['sources'] = reverse('api:provider_source_list')
return Response(data)
class ActiveProviderList(base_views.ListAPIView):
""" Active providers """
model = models.Provider
serializer_class = serializers.ProviderSerializer
permission_classes = (AllowAny,)
def get_queryset(self):
return self.model.objects.filter(active=True)
class ActiveProviderDetail(base_views.RetrieveAPIView):
""" Active providers """
model = models.Provider
serializer_class = serializers.ProviderSerializer
permission_classes = (AllowAny,)
def get_queryset(self):
return self.model.objects.filter(active=True)
class RoleTypes(base_views.APIView):
permission_classes = (AllowAny,)
view_name = 'Role Types'
def get(self, request, format=None):
roles = [role for role in constants.RoleType.choices()
if role[0] in settings.ROLE_TYPES_ENABLED]
return Response(roles)
class ApiV1ReposView(base_views.APIView):
permission_classes = (AllowAny,)
view_name = 'Repos'
def get(self, request, *args, **kwargs):
data = OrderedDict()
data['list'] = reverse('api:repository_list')
data['refresh'] = reverse('api:refresh_user_repos')
data['subscriptions'] = reverse('api:subscription_list')
return Response(data)
class TagList(base_views.ListAPIView):
model = models.Tag
serializer_class = serializers.TagSerializer
def get_queryset(self):
return self.model.objects.filter(active=True)
class TagDetail(base_views.RetrieveAPIView):
model = models.Tag
serializer_class = serializers.TagSerializer
class PlatformList(base_views.ListAPIView):
model = models.Platform
serializer_class = serializers.PlatformSerializer
paginate_by = None
class PlatformDetail(base_views.RetrieveAPIView):
model = models.Platform
serializer_class = serializers.PlatformSerializer
class CloudPlatformList(base_views.ListAPIView):
model = models.CloudPlatform
serializer_class = serializers.CloudPlatformSerializer
paginate_by = None
class CloudPlatformDetail(base_views.RetrieveAPIView):
model = models.CloudPlatform
serializer_class = serializers.CloudPlatformSerializer
class RoleDependenciesList(base_views.SubListAPIView):
model = models.Content
serializer_class = serializers.RoleDetailSerializer
parent_model = models.Content
relationship = 'dependencies'
def get_queryset(self):
qs = super().get_queryset()
return filter_role_queryset(qs)
class RoleUsersList(base_views.SubListAPIView):
model = User
serializer_class = serializers.UserSerializer
parent_model = models.Content
relationship = 'created_by'
def get_queryset(self):
qs = super().get_queryset()
return filter_user_queryset(qs)
class RoleImportTaskList(base_views.ListAPIView):
model = models.ImportTask
serializer_class = serializers.ImportTaskSerializer
def list(self, request, *args, **kwargs):
id = kwargs.pop('pk')
try:
content = models.Content.objects.get(pk=id)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
qs = content.repository.import_tasks.select_related(
'owner',
'repository',
'repository__provider_namespace',
'repository__provider_namespace__provider',
'repository__provider_namespace__namespace',
).all()
qs = self.filter_queryset(qs)
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
class RoleDownloads(base_views.APIView):
def post(self, request, pk):
obj = get_object_or_404(models.Content, pk=pk)
obj.download_count += 1
obj.save()
return Response(status=status.HTTP_201_CREATED)
class RoleVersionList(base_views.ListAPIView):
model = models.RepositoryVersion
serializer_class = serializers.RoleVersionSerializer
def list(self, request, *args, **kwargs):
id = kwargs.pop('pk')
try:
content = models.Content.objects.get(pk=id)
except ObjectDoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
qs = content.repository.versions.all()
qs = self.filter_queryset(qs)
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
class ImportTaskList(base_views.ListCreateAPIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (ModelAccessPermission,)
model = models.ImportTask
serializer_class = serializers.ImportTaskSerializer
filter_backends = (
galaxy_filters.ActiveOnlyBackend,
galaxy_filters._FieldLookupBackend,
drf_filters.SearchFilter,
galaxy_filters.OrderByBackend,
)
def get_queryset(self):
qs = models.ImportTask.objects.select_related(
'owner',
'repository',
'repository__provider_namespace',
'repository__provider_namespace__provider',
'repository__provider_namespace__namespace',
)
return qs
def get_serializer_class(self):
# NOTE(cutwater): This is for compatibility with ansible-galaxy client.
if 'id' in self.request.GET:
return serializers.ImportTaskDetailSerializer
return super().get_serializer_class()
def list(self, request, *args, **kwargs):
github_user = request.GET.get('github_user')
github_repo = request.GET.get('github_repo')
qs = self.get_queryset()
if github_user and github_repo:
# Support ansible-galaxy <= 2.6
qs = qs.filter(
repository__provider_namespace__name=github_user,
repository__original_name=github_repo)
else:
qs = self.filter_queryset(qs)
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
def post(self, request, *args, **kwargs):
github_user = request.data.get('github_user')
github_repo = request.data.get('github_repo')
github_reference = request.data.get('github_reference', '')
repository_id = request.data.get('repository_id')
if not repository_id:
# request received from old client
if not github_user or not github_repo:
raise ValidationError({
'detail': "Invalid request. "
"Expecting github_user and github_repo."
})
namespace = models.ProviderNamespace.objects.get(
provider__name=constants.PROVIDER_GITHUB,
name=github_user
)
if not request.user.is_staff and \
not namespace.namespace.owners.filter(
username=request.user.get_username()):
# User is not an owner of the Namespace
raise PermissionDenied(
"You are not an owner of {0}"
.format(namespace.namespace.name)
)
try:
repository = models.Repository.objects.get(
provider_namespace=namespace,
original_name=github_repo
)
except ObjectDoesNotExist:
repository, created = models.Repository.objects.get_or_create(
provider_namespace=namespace,
name=sanitize_content_name(github_repo),
defaults={
'is_enabled': False,
'original_name': github_repo,
'is_new': True
}
)
else:
try:
repository = models.Repository.objects.get(pk=repository_id)
except ObjectDoesNotExist:
raise ValidationError({
'detail': "Repository {0} not found, or you do not "
"have access".format(repository_id)
})
if not request.user.is_staff and \
not repository.provider_namespace.namespace.owners.filter(
username=request.user.get_username()):
# User is not an owner of the Namespace
raise PermissionDenied(
"You are not an owner of {0}".format(repository.name)
)
task = tasks.create_import_task(
repository, request.user,
import_branch=github_reference, user_initiated=True)
serializer = self.get_serializer(instance=task)
response = {'results': [serializer.data]}
return Response(response,
status=status.HTTP_201_CREATED,
headers=self.get_success_headers(response))
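# Illustrative note (not part of the original module): the POST handler above
# accepts two request shapes -- current clients send a repository primary key,
# while ansible-galaxy <= 2.6 sends GitHub coordinates instead. A hypothetical
# client call might look like this (host, path and token are placeholders):
#
#   import requests
#   requests.post('https://galaxy.example.com/api/v1/imports/',
#                 json={'repository_id': 42},                 # new-style clients
#                 headers={'Authorization': 'Token <api-key>'})
#   requests.post('https://galaxy.example.com/api/v1/imports/',
#                 json={'github_user': 'alice',               # legacy clients
#                       'github_repo': 'ansible-role-nginx'},
#                 headers={'Authorization': 'Token <api-key>'})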
class ImportTaskDetail(base_views.RetrieveAPIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (ModelAccessPermission,)
model = models.ImportTask
serializer_class = serializers.ImportTaskDetailSerializer
def get_object(self, qs=None):
obj = super().get_object()
if not obj.active:
raise Http404()
return obj
class ImportTaskNotificationList(base_views.SubListAPIView):
model = models.Notification
serializer_class = serializers.NotificationSerializer
parent_model = models.ImportTask
relationship = 'notifications'
class ImportTaskLatestList(base_views.ListAPIView):
"""Return the most recent import for each of the user's repositories."""
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
model = models.ImportTask
serializer_class = serializers.ImportTaskLatestSerializer
def list(self, request, *args, **kwargs):
qs = models.ImportTask.objects.filter(
repository__provider_namespace__namespace__isnull=False
).values(
'repository__provider_namespace__namespace__name',
'repository__name',
'repository__id',
).order_by(
'repository__provider_namespace__namespace__name',
'repository__name'
).annotate(last_id=Max('id'))
qs = self.filter_queryset(qs)
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
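# Illustrative note (not part of the original module): the values()/annotate()
# query above collapses the import history to one row per repository, e.g.
#   {'repository__provider_namespace__namespace__name': 'alice',
#    'repository__name': 'nginx', 'repository__id': 42, 'last_id': 917}
# where last_id is the primary key of the most recent ImportTask for that repo.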
class SubscriptionList(base_views.ListCreateAPIView):
model = models.Subscription
serializer_class = serializers.SubscriptionSerializer
def post(self, request, *args, **kwargs):
github_user = request.data.get('github_user', None)
github_repo = request.data.get('github_repo', None)
if not github_user or not github_repo:
raise ValidationError({
'detail': "Invalid request. "
"Missing one or more required values."
})
try:
token = SocialToken.objects.get(
account__user=request.user,
account__provider='github'
)
except Exception:
msg = (
"Failed to connect to GitHub account for Galaxy user {}. "
"You must first authenticate with Github."
.format(request.user.username)
)
raise ValidationError(dict(detail=msg))
gh_api = Github(token.token)
try:
gh_repo = gh_api.get_repo(github_user + '/' + github_repo)
except GithubException as e:
msg = (
"GitHub API failed to return repo for {}/{}. {} - {}"
.format(github_user, github_repo, e.data, e.status)
)
raise ValidationError(dict(detail=msg))
try:
gh_user = gh_api.get_user()
except GithubException as e:
msg = (
"GitHub API failed to return authorized user. {} - {}"
.format(e.data, e.status)
)
raise ValidationError(dict(detail=msg))
try:
gh_user.add_to_subscriptions(gh_repo)
except GithubException:
msg = (
"GitHub API failed to subscribe user {} to for {}/{}"
.format(request.user.username, github_user, github_repo)
)
raise ValidationError(dict(detail=msg))
new_sub, created = models.Subscription.objects.get_or_create(
owner=request.user,
github_user=github_user,
github_repo=github_repo,
defaults={
'owner': request.user,
'github_user': github_user,
'github_repo': github_repo
})
sub_count = 0
for s in gh_repo.get_subscribers():
sub_count += 1 # only way to get subscriber count via pygithub
repo = models.Repository.objects.get(github_user=github_user,
github_repo=github_repo)
repo.watchers_count = sub_count
repo.save()
return Response(dict(
result=dict(
id=new_sub.id,
github_user=new_sub.github_user,
github_repo=new_sub.github_repo,
watchers_count=sub_count
)
), status=status.HTTP_201_CREATED)
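# Illustrative note (not part of the original module): a hypothetical request
# body for the POST handler above; both fields are required and the caller must
# already have a linked GitHub social account:
#   {"github_user": "alice", "github_repo": "ansible-role-nginx"}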
class SubscriptionDetail(base_views.RetrieveUpdateDestroyAPIView):
model = models.Subscription
serializer_class = serializers.SubscriptionSerializer
def destroy(self, request, *args, **kwargs):
obj = super().get_object()
try:
token = SocialToken.objects.get(
account__user=request.user, account__provider='github'
)
except Exception:
msg = (
"Failed to access GitHub account for Galaxy user {}. "
"You must first authenticate with GitHub."
.format(request.user.username)
)
raise ValidationError(dict(detail=msg))
gh_api = Github(token.token)
try:
gh_repo = gh_api.get_repo(obj.github_user + '/' + obj.github_repo)
except GithubException as e:
msg = (
"GitHub API failed to return repo for {}/{}. {} - {}"
.format(obj.github_user, obj.github_repo, e.data, e.status)
)
raise ValidationError(dict(detail=msg))
try:
gh_user = gh_api.get_user()
except GithubException as e:
msg = (
"GitHub API failed to return authorized user. {} - {}"
.format(e.data, e.status)
)
raise ValidationError(dict(detail=msg))
try:
gh_user.remove_from_subscriptions(gh_repo)
except GithubException as e:
msg = (
"GitHub API failed to unsubscribe {} from {}/{}. {} - {}"
.format(request.user.username, obj.github_user,
obj.github_repo, e.data, e.status)
)
raise ValidationError(dict(detail=msg))
obj.delete()
sub_count = 0
for sub in gh_repo.get_subscribers():
sub_count += 1 # only way to get subscriber count via pygithub
repo = models.Repository.objects.get(github_user=obj.github_user,
github_repo=obj.github_repo)
repo.watchers_count = sub_count
repo.save()
result = (
"unsubscribed {} from {}/{}."
.format(request.user.username, obj.github_user, obj.github_repo)
)
return Response(dict(detail=result), status=status.HTTP_202_ACCEPTED)
class TopContributorsList(base_views.ListAPIView):
model = models.Content
serializer_class = serializers.TopContributorsSerializer
def list(self, request, *args, **kwargs):
qs = (models.Content.objects.values('namespace')
.annotate(count=Count('id'))
.order_by('-count', 'namespace'))
page = self.paginate_queryset(qs)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(qs, many=True)
return Response(serializer.data)
class RemoveRole(base_views.APIView):
authentication_classes = (TokenAuthentication, SessionAuthentication)
permission_classes = (IsAuthenticated,)
def delete(self, request, *args, **kwargs):
gh_user = request.query_params.get('github_user', None)
gh_repo = request.query_params.get('github_repo', None)
if not gh_user or not gh_repo:
raise ValidationError(dict(detail="Invalid request."))
if not request.user.is_staff:
# Verify via GitHub API that user has access to requested role
try:
token = SocialToken.objects.get(
account__user=request.user, account__provider='github'
)
except Exception:
msg = (
"Failed to get Github account for Galaxy user {}. "
"You must first authenticate with Github."
.format(request.user.username)
)
raise ValidationError({'detail': msg})
gh_api = Github(token.token)
try:
ghu = gh_api.get_user()
except Exception:
raise ValidationError(
{'detail': "Failed to get Github authorized user."}
)
allowed = False
repo_full_name = "{}/{}".format(gh_user, gh_repo)
for r in ghu.get_repos():
if r.full_name == repo_full_name:
allowed = True
break  # repo found; no need to scan the rest
if not allowed:
msg = (
"Galaxy user {0} does not have access to repo {1}"
.format(request.user.username, repo_full_name)
)
raise ValidationError(dict(detail=msg))
# User has access. Delete requested role and associated bits.
response = OrderedDict([
('deleted_roles', []),
('status', '')
])
roles = models.Content.objects.filter(
repository__provider_namespace__name=gh_user,
repository__original_name=gh_repo)
cnt = len(roles)
if cnt == 0:
response['status'] = (
"Role {}.{} not found. Maybe it was deleted previously?"
.format(gh_user, gh_repo)
)
return Response(response)
elif cnt == 1:
response['status'] = "Role {}.{} deleted".format(gh_user, gh_repo)
else:
response['status'] = (
"Deleted {:d} roles associated with {}/{}"
.format(len(roles), gh_user, gh_repo)
)
for role in roles:
response['deleted_roles'].append({
"id": role.id,
"namespace": role.namespace.name,
"name": role.name,
"github_user": role.github_user,
"github_repo": role.github_repo
})
repo = models.Repository.objects.get(
provider_namespace__name=gh_user,
original_name=gh_repo)
models.Notification.objects.filter(repository=repo).delete()
models.Content.objects.filter(repository=repo).delete()
models.ImportTask.objects.filter(repository=repo).delete()
repo.delete()
return Response(response)
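# Illustrative note (not part of the original module): the DELETE handler above
# is driven by query parameters rather than a request body, e.g.
#   DELETE /api/v1/removerole/?github_user=alice&github_repo=ansible-role-nginx
# (the path is a placeholder; the real route is defined in the project's urls).
# Non-staff callers must have access to the repository on GitHub for the delete
# to proceed.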
class RefreshUserRepos(base_views.APIView):
"""
Return user GitHub repos directly from GitHub.
Use to refresh cache for the authenticated user.
"""
authentication_classes = (SessionAuthentication,)
permission_classes = (IsAuthenticated,)
def get(self, request, *args, **kwargs):
# Return the list of the user's repositories directly from GitHub
try:
token = SocialToken.objects.get(
account__user=request.user, account__provider='github'
)
except Exception:
msg = (
"Failed to connect to GitHub account for Galaxy user {} "
"You must first authenticate with GitHub."
.format(request.user.username)
)
logger.error(msg)
return HttpResponseBadRequest({'detail': msg})
gh_api = Github(token.token)
try:
ghu = gh_api.get_user()
except Exception:
msg = "Failed to get GitHub authorized user."
logger.error(msg)
return HttpResponseBadRequest({'detail': msg})
try:
user_repos = ghu.get_repos()
except Exception:
msg = "Failed to get user repositories from GitHub."
logger.error(msg)
return HttpResponseBadRequest({'detail': msg})
try:
celerytasks.refresh_existing_user_repos(token.token, ghu)
except Exception as exc:
logger.error("Error: refresh_user_repos - {0}".format(exc))
raise
try:
celerytasks.update_user_repos(user_repos, request.user)
except Exception as exc:
logger.error("Error: update_user_repos - {0}".format(exc))
raise
qs = request.user.repositories.all()
serializer = serializers.RepositorySerializer(qs, many=True)
return Response(serializer.data, status=status.HTTP_200_OK)
|
en
| 0.801559
|
# (c) 2012-2018, Ansible by Red Hat # # This file is part of Ansible Galaxy # # Ansible Galaxy is free software: you can redistribute it and/or modify # it under the terms of the Apache License as published by # the Apache Software Foundation, either version 2 of the License, or # (at your option) any later version. # # Ansible Galaxy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Apache License for more details. # # You should have received a copy of the Apache License # along with Galaxy. If not, see <http://www.apache.org/licenses/>. # TODO move all github interactions to githubapi # Github # rest framework stuff # ----------------------------------------------------------------------------- # Helper functions # ----------------------------------------------------------------------------- # list supported API versions # list top level resources Provider resources Active providers Active providers # NOTE(cutwater): This is for compatibility with ansible-galaxy client. # Support ansible-galaxy <= 2.6 # request received from old client # User is not an onwer of the Namespace # User is not an onwer of the Namespace Return the most recent import for each of the user's repositories. # only way to get subscriber count via pygithub # only way to get subscriber count via pygithub # Verify via GitHub API that user has access to requested role # User has access. Delete requested role and associated bits. Return user GitHub repos directly from GitHub. Use to refresh cache for the authenticated user. # Return a the list of user's repositories directly from GitHub
| 1.45225
| 1
|
yt_dlp/extractor/arte.py
|
ouwou/yt-dlp
| 2
|
6627122
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
parse_qs,
qualities,
try_get,
unified_strdate,
url_or_none,
)
class ArteTVBaseIE(InfoExtractor):
_ARTE_LANGUAGES = 'fr|de|en|es|it|pl'
_API_BASE = 'https://api.arte.tv/api/player/v1'
class ArteTVIE(ArteTVBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
(?:www\.)?arte\.tv/(?P<lang>%(langs)s)/videos|
api\.arte\.tv/api/player/v\d+/config/(?P<lang_2>%(langs)s)
)
/(?P<id>\d{6}-\d{3}-[AF])
''' % {'langs': ArteTVBaseIE._ARTE_LANGUAGES}
_TESTS = [{
'url': 'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/',
'info_dict': {
'id': '088501-000-A',
'ext': 'mp4',
'title': 'Mexico: Stealing Petrol to Survive',
'upload_date': '20190628',
},
}, {
'url': 'https://www.arte.tv/pl/videos/100103-000-A/usa-dyskryminacja-na-porodowce/',
'only_matching': True,
}, {
'url': 'https://api.arte.tv/api/player/v2/config/de/100605-013-A',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
lang = mobj.group('lang') or mobj.group('lang_2')
info = self._download_json(
'%s/config/%s/%s' % (self._API_BASE, lang, video_id), video_id)
player_info = info['videoJsonPlayer']
vsr = try_get(player_info, lambda x: x['VSR'], dict)
if not vsr:
error = None
if try_get(player_info, lambda x: x['custom_msg']['type']) == 'error':
error = try_get(
player_info, lambda x: x['custom_msg']['msg'], compat_str)
if not error:
error = 'Video %s is not available' % (player_info.get('VID') or video_id)
raise ExtractorError(error, expected=True)
upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
title = (player_info.get('VTI') or player_info['VID']).strip()
subtitle = player_info.get('VSU', '').strip()
if subtitle:
title += ' - %s' % subtitle
qfunc = qualities(['MQ', 'HQ', 'EQ', 'SQ'])
LANGS = {
'fr': 'F',
'de': 'A',
'en': 'E[ANG]',
'es': 'E[ESP]',
'it': 'E[ITA]',
'pl': 'E[POL]',
}
langcode = LANGS.get(lang, lang)
formats = []
for format_id, format_dict in vsr.items():
f = dict(format_dict)
format_url = url_or_none(f.get('url'))
streamer = f.get('streamer')
if not format_url and not streamer:
continue
versionCode = f.get('versionCode')
l = re.escape(langcode)
# Language preference from most to least priority
# Reference: section 6.8 of
# https://www.arte.tv/sites/en/corporate/files/complete-technical-guidelines-arte-geie-v1-07-1.pdf
PREFERENCES = (
# original version in requested language, without subtitles
r'VO{0}$'.format(l),
# original version in requested language, with partial subtitles in requested language
r'VO{0}-ST{0}$'.format(l),
# original version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
r'VO{0}-STM{0}$'.format(l),
# non-original (dubbed) version in requested language, without subtitles
r'V{0}$'.format(l),
# non-original (dubbed) version in requested language, with partial subtitles in requested language
r'V{0}-ST{0}$'.format(l),
# non-original (dubbed) version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
r'V{0}-STM{0}$'.format(l),
# original version in requested language, with partial subtitles in different language
r'VO{0}-ST(?!{0}).+?$'.format(l),
# original version in requested language, with subtitles for the deaf and hard-of-hearing in different language
r'VO{0}-STM(?!{0}).+?$'.format(l),
# original version in different language, with partial subtitles in requested language
r'VO(?:(?!{0}).+?)?-ST{0}$'.format(l),
# original version in different language, with subtitles for the deaf and hard-of-hearing in requested language
r'VO(?:(?!{0}).+?)?-STM{0}$'.format(l),
# original version in different language, without subtitles
r'VO(?:(?!{0}))?$'.format(l),
# original version in different language, with partial subtitles in different language
r'VO(?:(?!{0}).+?)?-ST(?!{0}).+?$'.format(l),
# original version in different language, with subtitles for the deaf and hard-of-hearing in different language
r'VO(?:(?!{0}).+?)?-STM(?!{0}).+?$'.format(l),
)
for pref, p in enumerate(PREFERENCES):
if re.match(p, versionCode):
lang_pref = len(PREFERENCES) - pref
break
else:
lang_pref = -1
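# Worked example (illustrative, not from the original source): with lang='fr'
# the langcode is 'F' and len(PREFERENCES) is 13, so a format whose versionCode
# is 'VOF' matches the first pattern (lang_pref = 13) and 'VOF-STF' matches the
# second (lang_pref = 12), while a versionCode matching none of the patterns
# ends up with lang_pref = -1.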
media_type = f.get('mediaType')
if media_type == 'hls':
m3u8_formats = self._extract_m3u8_formats(
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False)
for m3u8_format in m3u8_formats:
m3u8_format['language_preference'] = lang_pref
formats.extend(m3u8_formats)
continue
format = {
'format_id': format_id,
'language_preference': lang_pref,
'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'tbr': int_or_none(f.get('bitrate')),
'quality': qfunc(f.get('quality')),
}
if media_type == 'rtmp':
format['url'] = f['streamer']
format['play_path'] = 'mp4:' + f['url']
format['ext'] = 'flv'
else:
format['url'] = f['url']
formats.append(format)
# For this extractor, quality only represents the relative quality
# with respect to other formats with the same resolution
self._sort_formats(formats, ('res', 'quality'))
return {
'id': player_info.get('VID') or video_id,
'title': title,
'description': player_info.get('VDE'),
'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
'formats': formats,
}
class ArteTVEmbedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+'
_TESTS = [{
'url': 'https://www.arte.tv/player/v5/index.php?json_url=https%3A%2F%2Fapi.arte.tv%2Fapi%2Fplayer%2Fv2%2Fconfig%2Fde%2F100605-013-A&lang=de&autoplay=true&mute=0100605-013-A',
'info_dict': {
'id': '100605-013-A',
'ext': 'mp4',
'title': 'United we Stream November Lockdown Edition #13',
'description': 'md5:be40b667f45189632b78c1425c7c2ce1',
'upload_date': '20201116',
},
}, {
'url': 'https://www.arte.tv/player/v3/index.php?json_url=https://api.arte.tv/api/player/v2/config/de/100605-013-A',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [url for _, url in re.findall(
r'<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+?)\1',
webpage)]
def _real_extract(self, url):
qs = parse_qs(url)
json_url = qs['json_url'][0]
video_id = ArteTVIE._match_id(json_url)
return self.url_result(
json_url, ie=ArteTVIE.ie_key(), video_id=video_id)
class ArteTVPlaylistIE(ArteTVBaseIE):
_VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>%s)/videos/(?P<id>RC-\d{6})' % ArteTVBaseIE._ARTE_LANGUAGES
_TESTS = [{
'url': 'https://www.arte.tv/en/videos/RC-016954/earn-a-living/',
'info_dict': {
'id': 'RC-016954',
'title': 'Earn a Living',
'description': 'md5:d322c55011514b3a7241f7fb80d494c2',
},
'playlist_mincount': 6,
}, {
'url': 'https://www.arte.tv/pl/videos/RC-014123/arte-reportage/',
'only_matching': True,
}]
def _real_extract(self, url):
lang, playlist_id = self._match_valid_url(url).groups()
collection = self._download_json(
'%s/collectionData/%s/%s?source=videos'
% (self._API_BASE, lang, playlist_id), playlist_id)
entries = []
for video in collection['videos']:
if not isinstance(video, dict):
continue
video_url = url_or_none(video.get('url')) or url_or_none(video.get('jsonUrl'))
if not video_url:
continue
video_id = video.get('programId')
entries.append({
'_type': 'url_transparent',
'url': video_url,
'id': video_id,
'title': video.get('title'),
'alt_title': video.get('subtitle'),
'thumbnail': url_or_none(try_get(video, lambda x: x['mainImage']['url'], compat_str)),
'duration': int_or_none(video.get('durationSeconds')),
'view_count': int_or_none(video.get('views')),
'ie_key': ArteTVIE.ie_key(),
})
title = collection.get('title')
description = collection.get('shortDescription') or collection.get('teaserText')
return self.playlist_result(entries, playlist_id, title, description)
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_str,
)
from ..utils import (
ExtractorError,
int_or_none,
parse_qs,
qualities,
try_get,
unified_strdate,
url_or_none,
)
class ArteTVBaseIE(InfoExtractor):
_ARTE_LANGUAGES = 'fr|de|en|es|it|pl'
_API_BASE = 'https://api.arte.tv/api/player/v1'
class ArteTVIE(ArteTVBaseIE):
_VALID_URL = r'''(?x)
https?://
(?:
(?:www\.)?arte\.tv/(?P<lang>%(langs)s)/videos|
api\.arte\.tv/api/player/v\d+/config/(?P<lang_2>%(langs)s)
)
/(?P<id>\d{6}-\d{3}-[AF])
''' % {'langs': ArteTVBaseIE._ARTE_LANGUAGES}
_TESTS = [{
'url': 'https://www.arte.tv/en/videos/088501-000-A/mexico-stealing-petrol-to-survive/',
'info_dict': {
'id': '088501-000-A',
'ext': 'mp4',
'title': 'Mexico: Stealing Petrol to Survive',
'upload_date': '20190628',
},
}, {
'url': 'https://www.arte.tv/pl/videos/100103-000-A/usa-dyskryminacja-na-porodowce/',
'only_matching': True,
}, {
'url': 'https://api.arte.tv/api/player/v2/config/de/100605-013-A',
'only_matching': True,
}]
def _real_extract(self, url):
mobj = self._match_valid_url(url)
video_id = mobj.group('id')
lang = mobj.group('lang') or mobj.group('lang_2')
info = self._download_json(
'%s/config/%s/%s' % (self._API_BASE, lang, video_id), video_id)
player_info = info['videoJsonPlayer']
vsr = try_get(player_info, lambda x: x['VSR'], dict)
if not vsr:
error = None
if try_get(player_info, lambda x: x['custom_msg']['type']) == 'error':
error = try_get(
player_info, lambda x: x['custom_msg']['msg'], compat_str)
if not error:
error = 'Video %s is not available' % (player_info.get('VID') or video_id)
raise ExtractorError(error, expected=True)
upload_date_str = player_info.get('shootingDate')
if not upload_date_str:
upload_date_str = (player_info.get('VRA') or player_info.get('VDA') or '').split(' ')[0]
title = (player_info.get('VTI') or player_info['VID']).strip()
subtitle = player_info.get('VSU', '').strip()
if subtitle:
title += ' - %s' % subtitle
qfunc = qualities(['MQ', 'HQ', 'EQ', 'SQ'])
LANGS = {
'fr': 'F',
'de': 'A',
'en': 'E[ANG]',
'es': 'E[ESP]',
'it': 'E[ITA]',
'pl': 'E[POL]',
}
langcode = LANGS.get(lang, lang)
formats = []
for format_id, format_dict in vsr.items():
f = dict(format_dict)
format_url = url_or_none(f.get('url'))
streamer = f.get('streamer')
if not format_url and not streamer:
continue
versionCode = f.get('versionCode')
l = re.escape(langcode)
# Language preference from most to least priority
# Reference: section 6.8 of
# https://www.arte.tv/sites/en/corporate/files/complete-technical-guidelines-arte-geie-v1-07-1.pdf
PREFERENCES = (
# original version in requested language, without subtitles
r'VO{0}$'.format(l),
# original version in requested language, with partial subtitles in requested language
r'VO{0}-ST{0}$'.format(l),
# original version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
r'VO{0}-STM{0}$'.format(l),
# non-original (dubbed) version in requested language, without subtitles
r'V{0}$'.format(l),
# non-original (dubbed) version in requested language, with partial subtitles in requested language
r'V{0}-ST{0}$'.format(l),
# non-original (dubbed) version in requested language, with subtitles for the deaf and hard-of-hearing in requested language
r'V{0}-STM{0}$'.format(l),
# original version in requested language, with partial subtitles in different language
r'VO{0}-ST(?!{0}).+?$'.format(l),
# original version in requested language, with subtitles for the deaf and hard-of-hearing in different language
r'VO{0}-STM(?!{0}).+?$'.format(l),
# original version in different language, with partial subtitles in requested language
r'VO(?:(?!{0}).+?)?-ST{0}$'.format(l),
# original version in different language, with subtitles for the deaf and hard-of-hearing in requested language
r'VO(?:(?!{0}).+?)?-STM{0}$'.format(l),
# original version in different language, without subtitles
r'VO(?:(?!{0}))?$'.format(l),
# original version in different language, with partial subtitles in different language
r'VO(?:(?!{0}).+?)?-ST(?!{0}).+?$'.format(l),
# original version in different language, with subtitles for the deaf and hard-of-hearing in different language
r'VO(?:(?!{0}).+?)?-STM(?!{0}).+?$'.format(l),
)
for pref, p in enumerate(PREFERENCES):
if re.match(p, versionCode):
lang_pref = len(PREFERENCES) - pref
break
else:
lang_pref = -1
media_type = f.get('mediaType')
if media_type == 'hls':
m3u8_formats = self._extract_m3u8_formats(
format_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id=format_id, fatal=False)
for m3u8_format in m3u8_formats:
m3u8_format['language_preference'] = lang_pref
formats.extend(m3u8_formats)
continue
format = {
'format_id': format_id,
'language_preference': lang_pref,
'format_note': '%s, %s' % (f.get('versionCode'), f.get('versionLibelle')),
'width': int_or_none(f.get('width')),
'height': int_or_none(f.get('height')),
'tbr': int_or_none(f.get('bitrate')),
'quality': qfunc(f.get('quality')),
}
if media_type == 'rtmp':
format['url'] = f['streamer']
format['play_path'] = 'mp4:' + f['url']
format['ext'] = 'flv'
else:
format['url'] = f['url']
formats.append(format)
# For this extractor, quality only represents the relative quality
# with respect to other formats with the same resolution
self._sort_formats(formats, ('res', 'quality'))
return {
'id': player_info.get('VID') or video_id,
'title': title,
'description': player_info.get('VDE'),
'upload_date': unified_strdate(upload_date_str),
'thumbnail': player_info.get('programImage') or player_info.get('VTU', {}).get('IUR'),
'formats': formats,
}
class ArteTVEmbedIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+'
_TESTS = [{
'url': 'https://www.arte.tv/player/v5/index.php?json_url=https%3A%2F%2Fapi.arte.tv%2Fapi%2Fplayer%2Fv2%2Fconfig%2Fde%2F100605-013-A&lang=de&autoplay=true&mute=0100605-013-A',
'info_dict': {
'id': '100605-013-A',
'ext': 'mp4',
'title': 'United we Stream November Lockdown Edition #13',
'description': 'md5:be40b667f45189632b78c1425c7c2ce1',
'upload_date': '20201116',
},
}, {
'url': 'https://www.arte.tv/player/v3/index.php?json_url=https://api.arte.tv/api/player/v2/config/de/100605-013-A',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return [url for _, url in re.findall(
r'<(?:iframe|script)[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?arte\.tv/player/v\d+/index\.php\?.*?\bjson_url=.+?)\1',
webpage)]
def _real_extract(self, url):
qs = parse_qs(url)
json_url = qs['json_url'][0]
video_id = ArteTVIE._match_id(json_url)
return self.url_result(
json_url, ie=ArteTVIE.ie_key(), video_id=video_id)
class ArteTVPlaylistIE(ArteTVBaseIE):
_VALID_URL = r'https?://(?:www\.)?arte\.tv/(?P<lang>%s)/videos/(?P<id>RC-\d{6})' % ArteTVBaseIE._ARTE_LANGUAGES
_TESTS = [{
'url': 'https://www.arte.tv/en/videos/RC-016954/earn-a-living/',
'info_dict': {
'id': 'RC-016954',
'title': 'Earn a Living',
'description': 'md5:d322c55011514b3a7241f7fb80d494c2',
},
'playlist_mincount': 6,
}, {
'url': 'https://www.arte.tv/pl/videos/RC-014123/arte-reportage/',
'only_matching': True,
}]
def _real_extract(self, url):
lang, playlist_id = self._match_valid_url(url).groups()
collection = self._download_json(
'%s/collectionData/%s/%s?source=videos'
% (self._API_BASE, lang, playlist_id), playlist_id)
entries = []
for video in collection['videos']:
if not isinstance(video, dict):
continue
video_url = url_or_none(video.get('url')) or url_or_none(video.get('jsonUrl'))
if not video_url:
continue
video_id = video.get('programId')
entries.append({
'_type': 'url_transparent',
'url': video_url,
'id': video_id,
'title': video.get('title'),
'alt_title': video.get('subtitle'),
'thumbnail': url_or_none(try_get(video, lambda x: x['mainImage']['url'], compat_str)),
'duration': int_or_none(video.get('durationSeconds')),
'view_count': int_or_none(video.get('views')),
'ie_key': ArteTVIE.ie_key(),
})
title = collection.get('title')
description = collection.get('shortDescription') or collection.get('teaserText')
return self.playlist_result(entries, playlist_id, title, description)
|
en
| 0.816777
|
# coding: utf-8 (?x) https?:// (?: (?:www\.)?arte\.tv/(?P<lang>%(langs)s)/videos| api\.arte\.tv/api/player/v\d+/config/(?P<lang_2>%(langs)s) ) /(?P<id>\d{6}-\d{3}-[AF]) # Language preference from most to least priority # Reference: section 6.8 of # https://www.arte.tv/sites/en/corporate/files/complete-technical-guidelines-arte-geie-v1-07-1.pdf # original version in requested language, without subtitles # original version in requested language, with partial subtitles in requested language # original version in requested language, with subtitles for the deaf and hard-of-hearing in requested language # non-original (dubbed) version in requested language, without subtitles # non-original (dubbed) version in requested language, with subtitles partial subtitles in requested language # non-original (dubbed) version in requested language, with subtitles for the deaf and hard-of-hearing in requested language # original version in requested language, with partial subtitles in different language # original version in requested language, with subtitles for the deaf and hard-of-hearing in different language # original version in different language, with partial subtitles in requested language # original version in different language, with subtitles for the deaf and hard-of-hearing in requested language # original version in different language, without subtitles # original version in different language, with partial subtitles in different language # original version in different language, with subtitles for the deaf and hard-of-hearing in different language # For this extractor, quality only represents the relative quality # with respect to other formats with the same resolution #13',
| 2.012004
| 2
|
evaluate.py
|
nihalsid/texture_fields
| 78
|
6627123
|
<reponame>nihalsid/texture_fields
import argparse
import pandas as pd
import os
import glob
from mesh2tex import config
from mesh2tex.eval import evaluate_generated_images
categories = {'02958343': 'cars', '03001627': 'chairs',
'02691156': 'airplanes', '04379243': 'tables',
'02828884': 'benches', '02933112': 'cabinets',
'04256520': 'sofa', '03636649': 'lamps',
'04530566': 'vessels'}
parser = argparse.ArgumentParser(
description='Generate Color for given mesh.'
)
parser.add_argument('config', type=str, help='Path to config file.')
args = parser.parse_args()
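# Illustrative usage (not part of the original script; the config path is a
# placeholder): python evaluate.py configs/singleclass/car/eval.yaml
# The config must define test.vis_dir and data.shapes_multiclass as used below.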
cfg = config.load_config(args.config, 'configs/default.yaml')
base_path = cfg['test']['vis_dir']
if cfg['data']['shapes_multiclass']:
category_paths = glob.glob(os.path.join(base_path, '*'))
else:
category_paths = [base_path]
for category_path in category_paths:
cat_id = os.path.basename(category_path)
category = categories.get(cat_id, cat_id)
path1 = os.path.join(category_path, 'fake/')
path2 = os.path.join(category_path, 'real/')
print('Evaluating %s (%s)' % (category, category_path))
evaluation = evaluate_generated_images('all', path1, path2)
name = base_path
df = pd.DataFrame(evaluation, index=[category])
df.to_pickle(os.path.join(category_path, 'eval.pkl'))
df.to_csv(os.path.join(category_path, 'eval.csv'))
print('Evaluation finished')
|
import argparse
import pandas as pd
import os
import glob
from mesh2tex import config
from mesh2tex.eval import evaluate_generated_images
categories = {'02958343': 'cars', '03001627': 'chairs',
'02691156': 'airplanes', '04379243': 'tables',
'02828884': 'benches', '02933112': 'cabinets',
'04256520': 'sofa', '03636649': 'lamps',
'04530566': 'vessels'}
parser = argparse.ArgumentParser(
description='Generate Color for given mesh.'
)
parser.add_argument('config', type=str, help='Path to config file.')
args = parser.parse_args()
cfg = config.load_config(args.config, 'configs/default.yaml')
base_path = cfg['test']['vis_dir']
if cfg['data']['shapes_multiclass']:
category_paths = glob.glob(os.path.join(base_path, '*'))
else:
category_paths = [base_path]
for category_path in category_paths:
cat_id = os.path.basename(category_path)
category = categories.get(cat_id, cat_id)
path1 = os.path.join(category_path, 'fake/')
path2 = os.path.join(category_path, 'real/')
print('Evaluating %s (%s)' % (category, category_path))
evaluation = evaluate_generated_images('all', path1, path2)
name = base_path
df = pd.DataFrame(evaluation, index=[category])
df.to_pickle(os.path.join(category_path, 'eval.pkl'))
df.to_csv(os.path.join(category_path, 'eval.csv'))
print('Evaluation finished')
|
none
| 1
| 2.513184
| 3
|
|
step1/taptap.py
|
karoyqiu/xbmc-kodi-private-china-addons
| 420
|
6627124
|
<reponame>karoyqiu/xbmc-kodi-private-china-addons
# Editors' choice video list
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/webapiv2/video/v1/refresh?type=editors_choice&from=1&limit=10&X-UA=V%3D1%26PN%3DWebApp%26LANG%3Dzh_CN%26VN%3D0.1.0%26LOC%3DCN%26PLT%3DPC'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
j = json.loads(rec.text)
#print(j['data'])
for index in range(len(j['data']['list'])):
imgurl = j['data']['list'][index]['image']['url']
id = j['data']['list'][index]['id']
print(j['data']['list'][index]['title'])
print('http' + imgurl[5:])
print('https://taptap.com/video/' + str(id))
# "Recommended for you" video list
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/webapiv2/video/v1/refresh?type=recommend&from=0&limit=30&X-UA=V%3D1%26PN%3DWebApp%26LANG%3Dzh_CN%26VN%3D0.1.0%26LOC%3DCN%26PLT%3DPC'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
j = json.loads(rec.text)
#print(j['data'])
for index in range(len(j['data']['list'])):
imgurl = j['data']['list'][index]['data']['image']['url']
id = j['data']['list'][index]['data']['id']
print(j['data']['list'][index]['data']['title'])
print('http' + imgurl[5:])
print('https://taptap.com/video/' + str(id))
# Extract the video m3u8 playlist from a video page URL
import json
import requests
from bs4 import BeautifulSoup
import re
url = 'https://www.taptap.com/video/1310782'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
rectext = rec.text
str1 = rectext.find('{url:B,url_h265:')
str2 = rectext.find(',url_expires:C}')
#print(rectext[str1+17:str2-1])
mainm3u8 = rectext[str1+17:str2-1]
#print(type(mainm3u8))
mainm3u8 = mainm3u8.replace(r'\u002F','/')
#print(mainm3u8)
#j = json.loads(rec.text)
#print(j['data'])
rec = requests.get(mainm3u8,headers=headers)
rectext = rec.text
#print(rectext)
prule = re.compile(r'\d+[p|k]\d?\d?') # find quality labels (numbers followed by p/k)
pname = prule.findall(rectext)
urlrule = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') # find URLs
m3u8url = urlrule.findall(rectext)
cuttext = rectext
#print(m3u8url)
for index in range(len(m3u8url)):
print(pname[index])
print(m3u8url[index])
print('-----------'*30)
# Ranking list (top downloads)
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/top/download'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
soup = BeautifulSoup(rec.text, 'html.parser')
rankitem = soup.find_all('div',class_='taptap-top-card')
for index in range(len(rankitem)):
data = rankitem[index].find('a',class_='card-left-image')
img = data.find('img')
print(img['alt'])
print('http'+img['src'][5:])
print(data['href'])
print('------'*30)
# Game categories
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/category/recommend'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
soup = BeautifulSoup(rec.text, 'html.parser')
appitem = soup.find_all('div',class_='taptap-app-item swiper-slide')
for index in range(len(rankitem)):
data = appitem[index].find('a',class_='app-item-image taptap-link')
img = data.find('img')
print(img['alt'])
print('http'+img['data-src'][5:])
print(data['href'])
print('------'*30)
# Get videos from an app detail page
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/app/39186/video?type=not_official'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
soup = BeautifulSoup(rec.text, 'html.parser')
if soup.find_all('div',class_='no-content'):
print('No videos')
else:
#print('cunzai')
videoitem = soup.find_all('div',class_='video-item')
for index in range(len(videoitem)):
img = videoitem[index].find('div',class_='video-thumb-box')
img = img['style']
cutimg = img.split("'")
img = cutimg[1]
data = videoitem[index].find('div',class_='video-content')
print(data.a.text)
print('http' + img[5:])
print(data.a['href'])
|
# Editors' choice video list
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/webapiv2/video/v1/refresh?type=editors_choice&from=1&limit=10&X-UA=V%3D1%26PN%3DWebApp%26LANG%3Dzh_CN%26VN%3D0.1.0%26LOC%3DCN%26PLT%3DPC'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
j = json.loads(rec.text)
#print(j['data'])
for index in range(len(j['data']['list'])):
imgurl = j['data']['list'][index]['image']['url']
id = j['data']['list'][index]['id']
print(j['data']['list'][index]['title'])
print('http' + imgurl[5:])
print('https://taptap.com/video/' + str(id))
# "Recommended for you" video list
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/webapiv2/video/v1/refresh?type=recommend&from=0&limit=30&X-UA=V%3D1%26PN%3DWebApp%26LANG%3Dzh_CN%26VN%3D0.1.0%26LOC%3DCN%26PLT%3DPC'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
j = json.loads(rec.text)
#print(j['data'])
for index in range(len(j['data']['list'])):
imgurl = j['data']['list'][index]['data']['image']['url']
id = j['data']['list'][index]['data']['id']
print(j['data']['list'][index]['data']['title'])
print('http' + imgurl[5:])
print('https://taptap.com/video/' + str(id))
# Extract the video m3u8 playlist from a video page URL
import json
import requests
from bs4 import BeautifulSoup
import re
url = 'https://www.taptap.com/video/1310782'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
rectext = rec.text
str1 = rectext.find('{url:B,url_h265:')
str2 = rectext.find(',url_expires:C}')
#print(rectext[str1+17:str2-1])
mainm3u8 = rectext[str1+17:str2-1]
#print(type(mainm3u8))
mainm3u8 = mainm3u8.replace(r'\u002F','/')
#print(mainm3u8)
#j = json.loads(rec.text)
#print(j['data'])
rec = requests.get(mainm3u8,headers=headers)
rectext = rec.text
#print(rectext)
prule = re.compile(r'\d+[p|k]\d?\d?') # find quality labels (numbers followed by p/k)
pname = prule.findall(rectext)
urlrule = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+') # find URLs
m3u8url = urlrule.findall(rectext)
cuttext = rectext
#print(m3u8url)
for index in range(len(m3u8url)):
print(pname[index])
print(m3u8url[index])
print('-----------'*30)
# Ranking list (top downloads)
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/top/download'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
soup = BeautifulSoup(rec.text, 'html.parser')
rankitem = soup.find_all('div',class_='taptap-top-card')
for index in range(len(rankitem)):
data = rankitem[index].find('a',class_='card-left-image')
img = data.find('img')
print(img['alt'])
print('http'+img['src'][5:])
print(data['href'])
print('------'*30)
# Game categories
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/category/recommend'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
soup = BeautifulSoup(rec.text, 'html.parser')
appitem = soup.find_all('div',class_='taptap-app-item swiper-slide')
for index in range(len(rankitem)):
data = appitem[index].find('a',class_='app-item-image taptap-link')
img = data.find('img')
print(img['alt'])
print('http'+img['data-src'][5:])
print(data['href'])
print('------'*30)
# Get videos from an app detail page
import json
import requests
from bs4 import BeautifulSoup
url = 'https://www.taptap.com/app/39186/video?type=not_official'
headers = {'user-agent' : 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.122 Safari/537.36'}
rec = requests.get(url,headers=headers)
#print(rec.text)
soup = BeautifulSoup(rec.text, 'html.parser')
if soup.find_all('div',class_='no-content'):
print('No videos')
else:
#print('cunzai')
videoitem = soup.find_all('div',class_='video-item')
for index in range(len(videoitem)):
img = videoitem[index].find('div',class_='video-thumb-box')
img = img['style']
cutimg = img.split("'")
img = cutimg[1]
data = videoitem[index].find('div',class_='video-content')
print(data.a.text)
print('http' + img[5:])
print(data.a['href'])
|
zh
| 0.150416
|
# Editors' choice video list #print(rec.text) #print(j['data']) # "Recommended for you" video list #print(rec.text) #print(j['data']) # Extract the video m3u8 playlist from a video page URL #print(rectext[str1+17:str2-1]) #print(type(mainm3u8)) #print(mainm3u8) #j = json.loads(rec.text) #print(j['data']) #print(rectext) # find quality labels (numbers followed by p/k) # find URLs #print(m3u8url) # Ranking list (top downloads) #print(rec.text) # Game categories #print(rec.text) # Get videos from an app detail page #print(rec.text) #print('cunzai')
| 2.994557
| 3
|
utils/loss/dice_loss.py
|
bhklab/ptl-oar-segmentation
| 3
|
6627125
|
"""
get_tp_fp_fn, SoftDiceLoss, and DC_and_CE/TopK_loss are from https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/training/loss_functions
"""
import torch
from .ND_Crossentropy import CrossentropyND, TopKLoss, WeightedCrossEntropyLoss
from torch import nn
from torch.autograd import Variable
from torch import einsum
import numpy as np
import monai.metrics as monmet
import monai.transforms as montran
def softmax_helper(x):
# copy from: https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/nd_softmax.py
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
def sum_tensor(inp, axes, keepdim=False):
# copy from: https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/tensor_utilities.py
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
def get_tp_fp_fn(net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z))
:param net_output:
:param gt:
:param axes:
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
return tp, fp, fn
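# Shape example (illustrative, not from the original file): for net_output of
# shape (b, c, x, y, z) and gt of shape (b, 1, x, y, z), the default
# axes = (2, 3, 4) sum over the spatial dimensions, so tp, fp and fn each come
# back with shape (b, c) -- one count per sample and per class.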
class GDiceLoss(nn.Module):
def __init__(self, apply_nonlin=None, smooth=1e-5):
"""
Generalized Dice;
Copy from: https://github.com/LIVIAETS/surface-loss/blob/108bd9892adca476e6cdf424124bc6268707498e/losses.py#L29
paper: https://arxiv.org/pdf/1707.03237.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279
"""
super(GDiceLoss, self).__init__()
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, net_output, gt):
shp_x = net_output.shape # (batch size,class_num,x,y,z)
shp_y = gt.shape # (batch size,1,x,y,z)
# one hot code for gt
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
if self.apply_nonlin is not None:
softmax_output = self.apply_nonlin(net_output)
# copy from https://github.com/LIVIAETS/surface-loss/blob/108bd9892adca476e6cdf424124bc6268707498e/losses.py#L29
w: torch.Tensor = 1 / (einsum("bcxyz->bc", y_onehot).type(torch.float32) + 1e-10)**2
intersection: torch.Tensor = w * einsum("bcxyz, bcxyz->bc", softmax_output, y_onehot)
union: torch.Tensor = w * (einsum("bcxyz->bc", softmax_output) + einsum("bcxyz->bc", y_onehot))
divided: torch.Tensor = 1 - 2 * (einsum("bc->b", intersection) + self.smooth) / (einsum("bc->b", union) + self.smooth)
gdc = divided.mean()
return gdc
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order).contiguous()
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.view(C, -1)
class GDiceLossV2(nn.Module):
def __init__(self, apply_nonlin=None, smooth=1e-5):
"""
Generalized Dice;
Copy from: https://github.com/wolny/pytorch-3dunet/blob/6e5a24b6438f8c631289c10638a17dea14d42051/unet3d/losses.py#L75
paper: https://arxiv.org/pdf/1707.03237.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279
"""
super(GDiceLossV2, self).__init__()
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, net_output, gt):
shp_x = net_output.shape # (batch size,class_num,x,y,z)
shp_y = gt.shape # (batch size,1,x,y,z)
# one hot code for gt
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
if self.apply_nonlin is not None:
softmax_output = self.apply_nonlin(net_output)
input = flatten(softmax_output)
target = flatten(y_onehot)
target = target.float()
target_sum = target.sum(-1)
class_weights = Variable(1. / (target_sum * target_sum).clamp(min=self.smooth), requires_grad=False)
intersect = (input * target).sum(-1) * class_weights
intersect = intersect.sum()
denominator = ((input + target).sum(-1) * class_weights).sum()
return 1. - 2. * intersect / denominator.clamp(min=self.smooth)
class SSLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False):
"""
Sensitivity-Specificity loss
paper: http://www.rogertam.ca/Brosch_MICCAI_2015.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/df0f86733357fdc92bbc191c8fec0dcf49aa5499/niftynet/layer/loss_segmentation.py#L392
"""
super(SSLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.r = 0.1 # weight parameter in SS paper
def forward(self, net_output, gt, loss_mask=None):
shp_x = net_output.shape
shp_y = gt.shape
# class_num = shp_x[1]
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
softmax_output = self.apply_nonlin(net_output)
# no object value
bg_onehot = 1 - y_onehot
squared_error = (y_onehot - softmax_output)**2
specificity_part = sum_tensor(squared_error*y_onehot, axes)/(sum_tensor(y_onehot, axes)+self.smooth)
sensitivity_part = sum_tensor(squared_error*bg_onehot, axes)/(sum_tensor(bg_onehot, axes)+self.smooth)
ss = self.r * specificity_part + (1-self.r) * sensitivity_part
if not self.do_bg:
if self.batch_dice:
ss = ss[1:]
else:
ss = ss[:, 1:]
ss = ss.mean()
return ss
class SoftDiceLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False, weight=None):
"""
paper: https://arxiv.org/pdf/1606.04797.pdf
"""
super(SoftDiceLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.weight=weight
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)
dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)
# class imbalance...
if self.weight is not None:
for i, val in enumerate(self.weight):
dc[:,i]*=val
if not self.do_bg:
# multiple by weights (class imbalance...)
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return -dc
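# Illustrative usage sketch (not part of the original file), assuming raw
# network logits of shape (b, c, x, y, z) and an integer label map of shape
# (b, 1, x, y, z):
#   dice_loss = SoftDiceLoss(apply_nonlin=softmax_helper, batch_dice=False,
#                            do_bg=False, smooth=1.)
#   loss = dice_loss(logits, target)   # scalar in [-1, 0]; minimised at -1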
class IoULoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False, weight=None):
"""
paper: https://link.springer.com/chapter/10.1007/978-3-319-50835-1_22
"""
super(IoULoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.weight = weight
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)
iou = (tp + self.smooth) / (tp + fp + fn + self.smooth)
if self.weight is not None:
for i, val in enumerate(self.weight):
iou[:,i] *= val
if not self.do_bg:
if self.batch_dice:
iou = iou[1:]
else:
iou = iou[:, 1:]
iou = iou.mean()
return -iou
class TverskyLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=True, do_bg=True , smooth=1.,
square=False, weight=None):
"""
paper: https://arxiv.org/pdf/1706.05721.pdf
"""
super(TverskyLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.alpha = 0.3
self.beta = 0.7
self.weight = weight
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)
tversky = (tp + self.smooth) / (tp + self.alpha*fp + self.beta*fn + self.smooth)
if self.weight is not None:
for i, val in enumerate(self.weight):
tversky[:,i] *= val
if not self.do_bg:
if self.batch_dice:
tversky = tversky[1:]
else:
tversky = tversky[:, 1:]
tversky = tversky.mean()
return -tversky
class FocalTversky_loss(nn.Module):
"""
paper: https://arxiv.org/pdf/1810.07842.pdf
author code: https://github.com/nabsabraham/focal-tversky-unet/blob/347d39117c24540400dfe80d106d2fb06d2b99e1/losses.py#L65
"""
def __init__(self, tversky_kwargs, gamma=0.75):
super(FocalTversky_loss, self).__init__()
self.gamma = gamma
self.tversky = TverskyLoss(apply_nonlin=softmax_helper, **tversky_kwargs)
def forward(self, net_output, target):
tversky_loss = 1 + self.tversky(net_output, target) # = 1-tversky(net_output, target)
focal_tversky = torch.pow(tversky_loss, self.gamma)
return focal_tversky
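# --- Added usage sketch (not part of the original file). ---
# Because TverskyLoss returns -TI, the expression `1 + self.tversky(...)` above
# equals (1 - TI), and the focal term is (1 - TI) ** gamma. The kwargs below are
# illustrative assumptions.
def _focal_tversky_usage_example():
    import torch
    logits = torch.randn(2, 3, 8, 8, 8)
    labels = torch.randint(0, 3, (2, 1, 8, 8, 8))
    ft = FocalTversky_loss(tversky_kwargs={"batch_dice": True, "smooth": 1.}, gamma=0.75)
    return ft(logits, labels)  # >= 0, smaller is better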
# this is the function to one-hot-encode data
def onehot(outs, targ, argmax=True):
# Metrics requiring one hot encoded targets, pass through sigmoid or softmax
# convert to one hot encoded target...
shape = targ.size()
batch = shape[0]
# calculate argmax...
if argmax is True:
outs = torch.softmax(outs, dim=1)
outs = torch.argmax(outs, dim=1)
class_ = int(torch.max(targ).item())  # highest label id; assumes labels are 0..class_
if len(shape) == 4:
sh = (batch, class_ + 1, shape[1], shape[2], shape[3])
targets_dice = torch.zeros(sh, dtype=torch.float)
targets_out = torch.zeros(sh, dtype=torch.float)
for i in range( class_ + 1):
targets_dice[:, i][targ == i] = 1
targets_out[:, i][outs == i] = 1
return targets_out, targets_dice
class HD_Loss3D(nn.Module):
def __init__(self, percentile=95, gamma=0.75):
super(HD_Loss3D, self).__init__()
self.gamma = gamma
self.percentile = percentile
def forward(self, net_output, target):
# one_hot_encode, pass through loss...
net_output, target = onehot(net_output, target)
out = monmet.compute_hausdorff_distance(net_output, target, percentile=self.percentile,
include_background=True)
hd = torch.pow(out, .25) # exponent is a hyperparameter and can vary...
hd = hd[~torch.isnan(hd)]
hd_max = hd.max()*.25
# can also implement topK HD loss...
return hd.mean() , hd_max
class FocalTversky_and_topk_loss(nn.Module):
def __init__(self, tversky_kwargs, ce_kwargs, aggregate="sumcorrect", gamma=0.75):
super(FocalTversky_and_topk_loss, self).__init__()
self.aggregate = aggregate
self.ce = TopKLoss(**ce_kwargs)
self.ft = FocalTversky_loss(tversky_kwargs, gamma=gamma)
self.hd = HD_Loss3D(gamma=gamma)
# self.ad = montran.AsDiscrete(argmax)
def forward(self, net_output, target):
# the per-sample loop that used to sit here was dead code: its running average
# was immediately overwritten by the batched call, so only the batched focal
# Tversky loss is kept
ft_loss = self.ft(net_output, target)
ce_loss = self.ce(net_output, target)
# use this instead of ft_loss in second round of finetuning...
hd_loss, hd_max = self.hd(net_output, target)
# this was used for fold 0 (added hd_loss ONLY) ...changed parameters (version_2617993)
# fold 1 no hd_loss added (turing test completed with this...) ...used for turing... (version_2784634)
# fold 2 (added hd_loss and corrected with hd_max term...) (version_2770034)
# fold 3 same as fold 0 ... (version_2771355)
# fold 4 same as fold 2 (version_2784520)
# if hd_loss > 10.:
# hd_loss = 10.
# we need to use a switch here, after convergence in dice, change to HD
# minimization.
if self.aggregate == "sum":
result = ce_loss + ft_loss + hd_loss
elif self.aggregate == "sumcorrect":
result = ce_loss + ft_loss + hd_loss + (hd_max/hd_loss)
else:
raise NotImplementedError("nah son") # reserved for other stuff (later?)
return result
class AsymLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False):
"""
paper: https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8573779
"""
super(AsymLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.beta = 1.5
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)# shape: (batch size, class num)
weight = (self.beta**2)/(1+self.beta**2)
asym = (tp + self.smooth) / (tp + weight*fn + (1-weight)*fp + self.smooth)
if not self.do_bg:
if self.batch_dice:
asym = asym[1:]
else:
asym = asym[:, 1:]
asym = asym.mean()
return -asym
class DC_and_CE_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum"):
super(DC_and_CE_loss, self).__init__()
self.aggregate = aggregate
self.ce = CrossentropyND(**ce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
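# --- Added usage sketch (not part of the original file). ---
# DC_and_CE_loss simply sums SoftDiceLoss and CrossentropyND; the empty ce_kwargs
# and the dice kwargs below are illustrative assumptions.
def _dc_and_ce_usage_example():
    import torch
    logits = torch.randn(2, 3, 8, 8, 8)
    labels = torch.randint(0, 3, (2, 1, 8, 8, 8))
    loss_fn = DC_and_CE_loss(soft_dice_kwargs={"batch_dice": True, "smooth": 1.}, ce_kwargs={})
    return loss_fn(logits, labels)  # ce_loss + (negative soft Dice)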
class PenaltyGDiceLoss(nn.Module):
"""
paper: https://openreview.net/forum?id=H1lTh8unKN
"""
def __init__(self, gdice_kwargs):
super(PenaltyGDiceLoss, self).__init__()
self.k = 2.5
self.gdc = GDiceLoss(apply_nonlin=softmax_helper, **gdice_kwargs)
def forward(self, net_output, target):
gdc_loss = self.gdc(net_output, target)
penalty_gdc = gdc_loss / (1 + self.k * (1 - gdc_loss))
return penalty_gdc
class DC_and_topk_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum"):
super(DC_and_topk_loss, self).__init__()
self.aggregate = aggregate
self.ce = TopKLoss(**ce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later?)
return result
class ExpLog_loss(nn.Module):
"""
paper: 3D Segmentation with Exponential Logarithmic Loss for Highly Unbalanced Object Sizes
https://arxiv.org/pdf/1809.00076.pdf
"""
def __init__(self, soft_dice_kwargs, wce_kwargs, gamma=0.3):
super(ExpLog_loss, self).__init__()
self.wce = WeightedCrossEntropyLoss(**wce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
self.gamma = gamma
def forward(self, net_output, target):
dc_loss = -self.dc(net_output, target) # weight=0.8
wce_loss = self.wce(net_output, target) # weight=0.2
# with torch.no_grad():
# print('dc loss:', dc_loss.cpu().numpy(), 'ce loss:', ce_loss.cpu().numpy())
# a = torch.pow(-torch.log(torch.clamp(dc_loss, 1e-6)), self.gamma)
# b = torch.pow(-torch.log(torch.clamp(ce_loss, 1e-6)), self.gamma)
# print('ExpLog dc loss:', a.cpu().numpy(), 'ExpLogce loss:', b.cpu().numpy())
# print('*'*20)
explog_loss = 0.8*torch.pow(-torch.log(torch.clamp(dc_loss, 1e-6)), self.gamma) + \
0.2*wce_loss
return explog_loss
|
"""
get_tp_fp_fn, SoftDiceLoss, and DC_and_CE/TopK_loss are from https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/training/loss_functions
"""
import torch
from .ND_Crossentropy import CrossentropyND, TopKLoss, WeightedCrossEntropyLoss
from torch import nn
from torch.autograd import Variable
from torch import einsum
import numpy as np
import monai.metrics as monmet
import monai.transforms as montran
def softmax_helper(x):
# copy from: https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/nd_softmax.py
rpt = [1 for _ in range(len(x.size()))]
rpt[1] = x.size(1)
x_max = x.max(1, keepdim=True)[0].repeat(*rpt)
e_x = torch.exp(x - x_max)
return e_x / e_x.sum(1, keepdim=True).repeat(*rpt)
def sum_tensor(inp, axes, keepdim=False):
# copy from: https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/tensor_utilities.py
axes = np.unique(axes).astype(int)
if keepdim:
for ax in axes:
inp = inp.sum(int(ax), keepdim=True)
else:
for ax in sorted(axes, reverse=True):
inp = inp.sum(int(ax))
return inp
def get_tp_fp_fn(net_output, gt, axes=None, mask=None, square=False):
"""
net_output must be (b, c, x, y(, z))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z))
:param net_output:
:param gt:
:param axes:
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return:
"""
if axes is None:
axes = tuple(range(2, len(net_output.size())))
shp_x = net_output.shape
shp_y = gt.shape
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
tp = net_output * y_onehot
fp = net_output * (1 - y_onehot)
fn = (1 - net_output) * y_onehot
if mask is not None:
tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)
fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)
fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)
if square:
tp = tp ** 2
fp = fp ** 2
fn = fn ** 2
tp = sum_tensor(tp, axes, keepdim=False)
fp = sum_tensor(fp, axes, keepdim=False)
fn = sum_tensor(fn, axes, keepdim=False)
return tp, fp, fn
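# --- Added usage sketch (not part of the original file). ---
# get_tp_fp_fn returns soft per-class true/false positives and false negatives
# summed over the spatial axes; the shapes below are illustrative assumptions.
def _tp_fp_fn_usage_example():
    import torch
    probs = torch.rand(2, 3, 4, 4, 4)              # already-normalised predictions
    labels = torch.randint(0, 3, (2, 1, 4, 4, 4))  # integer label map
    tp, fp, fn = get_tp_fp_fn(probs, labels)       # each has shape (batch, classes) == (2, 3)
    return tp, fp, fn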
class GDiceLoss(nn.Module):
def __init__(self, apply_nonlin=None, smooth=1e-5):
"""
Generalized Dice;
Copy from: https://github.com/LIVIAETS/surface-loss/blob/108bd9892adca476e6cdf424124bc6268707498e/losses.py#L29
paper: https://arxiv.org/pdf/1707.03237.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279
"""
super(GDiceLoss, self).__init__()
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, net_output, gt):
shp_x = net_output.shape # (batch size,class_num,x,y,z)
shp_y = gt.shape # (batch size,1,x,y,z)
# one hot code for gt
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
if self.apply_nonlin is not None:
softmax_output = self.apply_nonlin(net_output)
else:
softmax_output = net_output
# copy from https://github.com/LIVIAETS/surface-loss/blob/108bd9892adca476e6cdf424124bc6268707498e/losses.py#L29
w: torch.Tensor = 1 / (einsum("bcxyz->bc", y_onehot).type(torch.float32) + 1e-10)**2
intersection: torch.Tensor = w * einsum("bcxyz, bcxyz->bc", softmax_output, y_onehot)
union: torch.Tensor = w * (einsum("bcxyz->bc", softmax_output) + einsum("bcxyz->bc", y_onehot))
divided: torch.Tensor = 1 - 2 * (einsum("bc->b", intersection) + self.smooth) / (einsum("bc->b", union) + self.smooth)
gdc = divided.mean()
return gdc
def flatten(tensor):
"""Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W)
"""
C = tensor.size(1)
# new axis order
axis_order = (1, 0) + tuple(range(2, tensor.dim()))
# Transpose: (N, C, D, H, W) -> (C, N, D, H, W)
transposed = tensor.permute(axis_order).contiguous()
# Flatten: (C, N, D, H, W) -> (C, N * D * H * W)
return transposed.view(C, -1)
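# --- Added usage sketch (not part of the original file). ---
# flatten() moves the channel axis first and collapses everything else:
def _flatten_usage_example():
    import torch
    t = torch.zeros(2, 3, 4, 5, 6)    # (N, C, D, H, W)
    return flatten(t).shape           # torch.Size([3, 240]) i.e. (C, N*D*H*W)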
class GDiceLossV2(nn.Module):
def __init__(self, apply_nonlin=None, smooth=1e-5):
"""
Generalized Dice;
Copy from: https://github.com/wolny/pytorch-3dunet/blob/6e5a24b6438f8c631289c10638a17dea14d42051/unet3d/losses.py#L75
paper: https://arxiv.org/pdf/1707.03237.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279
"""
super(GDiceLossV2, self).__init__()
self.apply_nonlin = apply_nonlin
self.smooth = smooth
def forward(self, net_output, gt):
shp_x = net_output.shape # (batch size,class_num,x,y,z)
shp_y = gt.shape # (batch size,1,x,y,z)
# one hot code for gt
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
if self.apply_nonlin is not None:
softmax_output = self.apply_nonlin(net_output)
else:
softmax_output = net_output
input = flatten(softmax_output)
target = flatten(y_onehot)
target = target.float()
target_sum = target.sum(-1)
class_weights = Variable(1. / (target_sum * target_sum).clamp(min=self.smooth), requires_grad=False)
intersect = (input * target).sum(-1) * class_weights
intersect = intersect.sum()
denominator = ((input + target).sum(-1) * class_weights).sum()
return 1. - 2. * intersect / denominator.clamp(min=self.smooth)
class SSLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False):
"""
Sensitivity-Specificity loss
paper: http://www.rogertam.ca/Brosch_MICCAI_2015.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/df0f86733357fdc92bbc191c8fec0dcf49aa5499/niftynet/layer/loss_segmentation.py#L392
"""
super(SSLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.r = 0.1 # weight parameter in SS paper
def forward(self, net_output, gt, loss_mask=None):
shp_x = net_output.shape
shp_y = gt.shape
# class_num = shp_x[1]
with torch.no_grad():
if len(shp_x) != len(shp_y):
gt = gt.view((shp_y[0], 1, *shp_y[1:]))
if all([i == j for i, j in zip(net_output.shape, gt.shape)]):
# if this is the case then gt is probably already a one hot encoding
y_onehot = gt
else:
gt = gt.long()
y_onehot = torch.zeros(shp_x)
if net_output.device.type == "cuda":
y_onehot = y_onehot.cuda(net_output.device.index)
y_onehot.scatter_(1, gt, 1)
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
softmax_output = self.apply_nonlin(net_output)
else:
softmax_output = net_output
# no object value
bg_onehot = 1 - y_onehot
squared_error = (y_onehot - softmax_output)**2
specificity_part = sum_tensor(squared_error*y_onehot, axes)/(sum_tensor(y_onehot, axes)+self.smooth)
sensitivity_part = sum_tensor(squared_error*bg_onehot, axes)/(sum_tensor(bg_onehot, axes)+self.smooth)
ss = self.r * specificity_part + (1-self.r) * sensitivity_part
if not self.do_bg:
if self.batch_dice:
ss = ss[1:]
else:
ss = ss[:, 1:]
ss = ss.mean()
return ss
class SoftDiceLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False, weight=None):
"""
paper: https://arxiv.org/pdf/1606.04797.pdf
"""
super(SoftDiceLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.weight=weight
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)
dc = (2 * tp + self.smooth) / (2 * tp + fp + fn + self.smooth)
# class imbalance...
if self.weight is not None:
for i, val in enumerate(self.weight):
dc[:,i]*=val
if not self.do_bg:
# multiply by weights (class imbalance...)
if self.batch_dice:
dc = dc[1:]
else:
dc = dc[:, 1:]
dc = dc.mean()
return -dc
class IoULoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False, weight=None):
"""
paper: https://link.springer.com/chapter/10.1007/978-3-319-50835-1_22
"""
super(IoULoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.weight = weight
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)
iou = (tp + self.smooth) / (tp + fp + fn + self.smooth)
if self.weight is not None:
for i, val in enumerate(self.weight):
iou[:, i] *= val
if not self.do_bg:
if self.batch_dice:
iou = iou[1:]
else:
iou = iou[:, 1:]
iou = iou.mean()
return -iou
class TverskyLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=True, do_bg=True , smooth=1.,
square=False, weight=None):
"""
paper: https://arxiv.org/pdf/1706.05721.pdf
"""
super(TverskyLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.alpha = 0.3
self.beta = 0.7
self.weight = weight
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)
tversky = (tp + self.smooth) / (tp + self.alpha*fp + self.beta*fn + self.smooth)
if self.weight is not None:
for i, val in enumerate(self.weight):
tversky[:,i] *= val
if not self.do_bg:
if self.batch_dice:
tversky = tversky[1:]
else:
tversky = tversky[:, 1:]
tversky = tversky.mean()
return -tversky
class FocalTversky_loss(nn.Module):
"""
paper: https://arxiv.org/pdf/1810.07842.pdf
author code: https://github.com/nabsabraham/focal-tversky-unet/blob/347d39117c24540400dfe80d106d2fb06d2b99e1/losses.py#L65
"""
def __init__(self, tversky_kwargs, gamma=0.75):
super(FocalTversky_loss, self).__init__()
self.gamma = gamma
self.tversky = TverskyLoss(apply_nonlin=softmax_helper, **tversky_kwargs)
def forward(self, net_output, target):
tversky_loss = 1 + self.tversky(net_output, target) # = 1-tversky(net_output, target)
focal_tversky = torch.pow(tversky_loss, self.gamma)
return focal_tversky
# this is the function to one-hot-encode data
def onehot(outs, targ, argmax=True):
# Metrics requiring one hot encoded targets, pass through sigmoid or softmax
# convert to one hot encoded target...
shape = targ.size()
batch = shape[0]
# calculate argmax...
if argmax is True:
outs = torch.softmax(outs, dim=1)
outs = torch.argmax(outs, dim=1)
class_ = int(torch.max(targ).item())  # highest label id; assumes labels are 0..class_
if len(shape) == 4:
sh = (batch, class_ + 1, shape[1], shape[2], shape[3])
targets_dice = torch.zeros(sh, dtype=torch.float)
targets_out = torch.zeros(sh, dtype=torch.float)
for i in range( class_ + 1):
targets_dice[:, i][targ == i] = 1
targets_out[:, i][outs == i] = 1
return targets_out, targets_dice
class HD_Loss3D(nn.Module):
def __init__(self, percentile=95, gamma=0.75):
super(HD_Loss3D, self).__init__()
self.gamma = gamma
self.percentile = percentile
def forward(self, net_output, target):
# one_hot_encode, pass through loss...
net_output, target = onehot(net_output, target)
out = monmet.compute_hausdorff_distance(net_output, target, percentile=self.percentile,
include_background=True)
hd = torch.pow(out, .25) # exponent is a hyperparameter and can vary...
hd = hd[~torch.isnan(hd)]
hd_max = hd.max()*.25
# can also implement topK HD loss...
return hd.mean() , hd_max
class FocalTversky_and_topk_loss(nn.Module):
def __init__(self, tversky_kwargs, ce_kwargs, aggregate="sumcorrect", gamma=0.75):
super(FocalTversky_and_topk_loss, self).__init__()
self.aggregate = aggregate
self.ce = TopKLoss(**ce_kwargs)
self.ft = FocalTversky_loss(tversky_kwargs, gamma=gamma)
self.hd = HD_Loss3D(gamma=gamma)
# self.ad = montran.AsDiscrete(argmax)
def forward(self, net_output, target):
# the per-sample loop that used to sit here was dead code: its running average
# was immediately overwritten by the batched call, so only the batched focal
# Tversky loss is kept
ft_loss = self.ft(net_output, target)
ce_loss = self.ce(net_output, target)
# use this instead of ft_loss in second round of finetuning...
hd_loss, hd_max = self.hd(net_output, target)
# this was used for fold 0 (added hd_loss ONLY) ...changed parameters (version_2617993)
# fold 1 no hd_loss added (turing test completed with this...) ...used for turing... (version_2784634)
# fold 2 (added hd_loss and corrected with hd_max term...) (version_2770034)
# fold 3 same as fold 0 ... (version_2771355)
# fold 4 same as fold 2 (version_2784520)
# if hd_loss > 10.:
# hd_loss = 10.
# we need to use a switch here, after convergence in dice, change to HD
# minimization.
if self.aggregate == "sum":
result = ce_loss + ft_loss + hd_loss
elif self.aggregate == "sumcorrect":
result = ce_loss + ft_loss + hd_loss + (hd_max/hd_loss)
else:
raise NotImplementedError("nah son") # reserved for other stuff (later?)
return result
class AsymLoss(nn.Module):
def __init__(self, apply_nonlin=None, batch_dice=False, do_bg=True, smooth=1.,
square=False):
"""
paper: https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8573779
"""
super(AsymLoss, self).__init__()
self.square = square
self.do_bg = do_bg
self.batch_dice = batch_dice
self.apply_nonlin = apply_nonlin
self.smooth = smooth
self.beta = 1.5
def forward(self, x, y, loss_mask=None):
shp_x = x.shape
if self.batch_dice:
axes = [0] + list(range(2, len(shp_x)))
else:
axes = list(range(2, len(shp_x)))
if self.apply_nonlin is not None:
x = self.apply_nonlin(x)
tp, fp, fn = get_tp_fp_fn(x, y, axes, loss_mask, self.square)# shape: (batch size, class num)
weight = (self.beta**2)/(1+self.beta**2)
asym = (tp + self.smooth) / (tp + weight*fn + (1-weight)*fp + self.smooth)
if not self.do_bg:
if self.batch_dice:
asym = asym[1:]
else:
asym = asym[:, 1:]
asym = asym.mean()
return -asym
class DC_and_CE_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum"):
super(DC_and_CE_loss, self).__init__()
self.aggregate = aggregate
self.ce = CrossentropyND(**ce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later)
return result
class PenaltyGDiceLoss(nn.Module):
"""
paper: https://openreview.net/forum?id=H1lTh8unKN
"""
def __init__(self, gdice_kwargs):
super(PenaltyGDiceLoss, self).__init__()
self.k = 2.5
self.gdc = GDiceLoss(apply_nonlin=softmax_helper, **gdice_kwargs)
def forward(self, net_output, target):
gdc_loss = self.gdc(net_output, target)
penalty_gdc = gdc_loss / (1 + self.k * (1 - gdc_loss))
return penalty_gdc
class DC_and_topk_loss(nn.Module):
def __init__(self, soft_dice_kwargs, ce_kwargs, aggregate="sum"):
super(DC_and_topk_loss, self).__init__()
self.aggregate = aggregate
self.ce = TopKLoss(**ce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
def forward(self, net_output, target):
dc_loss = self.dc(net_output, target)
ce_loss = self.ce(net_output, target)
if self.aggregate == "sum":
result = ce_loss + dc_loss
else:
raise NotImplementedError("nah son") # reserved for other stuff (later?)
return result
class ExpLog_loss(nn.Module):
"""
paper: 3D Segmentation with Exponential Logarithmic Loss for Highly Unbalanced Object Sizes
https://arxiv.org/pdf/1809.00076.pdf
"""
def __init__(self, soft_dice_kwargs, wce_kwargs, gamma=0.3):
super(ExpLog_loss, self).__init__()
self.wce = WeightedCrossEntropyLoss(**wce_kwargs)
self.dc = SoftDiceLoss(apply_nonlin=softmax_helper, **soft_dice_kwargs)
self.gamma = gamma
def forward(self, net_output, target):
dc_loss = -self.dc(net_output, target) # weight=0.8
wce_loss = self.wce(net_output, target) # weight=0.2
# with torch.no_grad():
# print('dc loss:', dc_loss.cpu().numpy(), 'ce loss:', ce_loss.cpu().numpy())
# a = torch.pow(-torch.log(torch.clamp(dc_loss, 1e-6)), self.gamma)
# b = torch.pow(-torch.log(torch.clamp(ce_loss, 1e-6)), self.gamma)
# print('ExpLog dc loss:', a.cpu().numpy(), 'ExpLogce loss:', b.cpu().numpy())
# print('*'*20)
explog_loss = 0.8*torch.pow(-torch.log(torch.clamp(dc_loss, 1e-6)), self.gamma) + \
0.2*wce_loss
return explog_loss
|
en
| 0.671026
|
get_tp_fp_fn, SoftDiceLoss, and DC_and_CE/TopK_loss are from https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/training/loss_functions # copy from: https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/nd_softmax.py # copy from: https://github.com/MIC-DKFZ/nnUNet/blob/master/nnunet/utilities/tensor_utilities.py net_output must be (b, c, x, y(, z)))
gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))
if mask is provided it must have shape (b, 1, x, y(, z)))
:param net_output:
:param gt:
:param axes:
:param mask: mask must be 1 for valid pixels and 0 for invalid pixels
:param square: if True then fp, tp and fn will be squared before summation
:return: # if this is the case then gt is probably already a one hot encoding Generalized Dice;
Copy from: https://github.com/LIVIAETS/surface-loss/blob/108bd9892adca476e6cdf424124bc6268707498e/losses.py#L29
paper: https://arxiv.org/pdf/1707.03237.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279 # (batch size,class_num,x,y,z) # (batch size,1,x,y,z) # one hot code for gt # if this is the case then gt is probably already a one hot encoding # copy from https://github.com/LIVIAETS/surface-loss/blob/108bd9892adca476e6cdf424124bc6268707498e/losses.py#L29 Flattens a given tensor such that the channel axis is first.
The shapes are transformed as follows:
(N, C, D, H, W) -> (C, N * D * H * W) # new axis order # Transpose: (N, C, D, H, W) -> (C, N, D, H, W) # Flatten: (C, N, D, H, W) -> (C, N * D * H * W) Generalized Dice;
Copy from: https://github.com/wolny/pytorch-3dunet/blob/6e5a24b6438f8c631289c10638a17dea14d42051/unet3d/losses.py#L75
paper: https://arxiv.org/pdf/1707.03237.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/dev/niftynet/layer/loss_segmentation.py#L279 # (batch size,class_num,x,y,z) # (batch size,1,x,y,z) # one hot code for gt # if this is the case then gt is probably already a one hot encoding Sensitivity-Specifity loss
paper: http://www.rogertam.ca/Brosch_MICCAI_2015.pdf
tf code: https://github.com/NifTK/NiftyNet/blob/df0f86733357fdc92bbc191c8fec0dcf49aa5499/niftynet/layer/loss_segmentation.py#L392 # weight parameter in SS paper # class_num = shp_x[1] # if this is the case then gt is probably already a one hot encoding # no object value paper: https://arxiv.org/pdf/1606.04797.pdf # class imbalance... # multiple by weights (class imbalance...) paper: https://link.springer.com/chapter/10.1007/978-3-319-50835-1_22 paper: https://arxiv.org/pdf/1706.05721.pdf paper: https://arxiv.org/pdf/1810.07842.pdf
author code: https://github.com/nabsabraham/focal-tversky-unet/blob/347d39117c24540400dfe80d106d2fb06d2b99e1/losses.py#L65 # = 1-tversky(net_output, target) # this is the function to one-hot-encode data # Metrics requiring one hot encoded targets, pass through sigmoid or softmax # convert to one hot encoded target... # calculate argmax... # one_hot_encode, pass through loss... # hyperparameter, can varry... # can also implement topK HD loss... # self.ad = montran.AsDiscrete(argmax) # use this instead of ft_loss in second round of finetuning... # this was used for fold 0 (added hd_loss ONLY) ...changed parameters (version_2617993) # fold 1 no hd_loss added (turing test completed with this...) ...used for turing... (version_2784634) # fold 2 (added hd_loss and corrected with hd_max term...) (version_2770034) # fold 3 same as fold 0 ... (version_2771355) # fold 4 same as fold 2 (version_2784520) # if hd_loss > 10.: # hd_loss = 10. # we need to use a switch here, after convergence in dice, change to HD # minimization. # reserved for other stuff (later?) paper: https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8573779 # shape: (batch size, class num) # reserved for other stuff (later) paper: https://openreview.net/forum?id=H1lTh8unKN # reserved for other stuff (later?) paper: 3D Segmentation with Exponential Logarithmic Loss for Highly Unbalanced Object Sizes
https://arxiv.org/pdf/1809.00076.pdf # weight=0.8 # weight=0.2 # with torch.no_grad(): # print('dc loss:', dc_loss.cpu().numpy(), 'ce loss:', ce_loss.cpu().numpy()) # a = torch.pow(-torch.log(torch.clamp(dc_loss, 1e-6)), self.gamma) # b = torch.pow(-torch.log(torch.clamp(ce_loss, 1e-6)), self.gamma) # print('ExpLog dc loss:', a.cpu().numpy(), 'ExpLogce loss:', b.cpu().numpy()) # print('*'*20)
| 2.060425
| 2
|
Calibrador.py
|
osmarnds/DeMOLidor
| 1
|
6627126
|
<filename>Calibrador.py
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#######################################################################
##### #####
##### <NAME> #####
##### <EMAIL> #####
##### 09/24/2018 #####
##### LABIO - PUCRS #####
##### Real-time "Color Calibrator" #####
##### #####
#######################################################################
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import os
import pygame
import sys
import itertools as it
from contextlib import contextmanager
import numpy as np
import cv2 as cv
import cv2
import tkinter as tk
import RPi.GPIO as gpio
import matplotlib.pyplot as plt
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
############################ Controller ##############################
cal = 0
pwm = 0
graph = 0
window = 1
def nothing(x):
pass
cv2.namedWindow("Trackbars")
cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
# Disable GPIO warnings
gpio.setwarnings(False)
#Configuring GPIO
gpio.setmode(gpio.BOARD)
gpio.setup(38,gpio.OUT)
gpio.setup(36,gpio.OUT)
gpio.setup(40,gpio.OUT)
LDR_1 = 7
LDR_2 = 11
LDR_3 = 13
#Configure the pwm objects and initialize its value
pwm36 = gpio.PWM(36,100)
pwm36.start(100)
time.sleep(1)
pwm38 = gpio.PWM(38,100)
pwm38.start(0)
time.sleep(1)
pwm40 = gpio.PWM(40,100)
pwm40.start(0)
time.sleep(1)
pwm38.start(100)
time.sleep(1)
pwm40.start(100)
time.sleep(1)
#Create the dutycycle variables
dc36 = 0
dc38 = 0
dc40 = 0
pygame.mixer.init()
pygame.mixer.music.load("/home/pi/Documents/Demolidor/Testes/audio/calibrador_ini.mp3")
pygame.mixer.music.play()
print('initialized')
####################### LDR 1 ################################
def ldr_1(LDR_1):
count = 0
# Drive the pin as an output and pull it low to discharge the capacitor
gpio.setup(LDR_1, gpio.OUT)
gpio.output(LDR_1, gpio.LOW)
time.sleep(0.01)
#Change the pin back to input
gpio.setup(LDR_1, gpio.IN)
while (gpio.input(LDR_1) == gpio.LOW):
count += 1
return count
####################### LDR 2 ################################
def ldr_2(LDR_2):
count = 0
# Drive the pin as an output and pull it low to discharge the capacitor
gpio.setup(LDR_2, gpio.OUT)
gpio.output(LDR_2, gpio.LOW)
time.sleep(0.001)
#Change the pin back to input
gpio.setup(LDR_2, gpio.IN)
while (gpio.input(LDR_2) == gpio.LOW):
count += 1
return count
####################### LDR 3 ################################
def ldr_3(LDR_3):
count = 0
# Drive the pin as an output and pull it low to discharge the capacitor
gpio.setup(LDR_3, gpio.OUT)
gpio.output(LDR_3, gpio.LOW)
time.sleep(0.001)
#Change the pin back to input
gpio.setup(LDR_3, gpio.IN)
while (gpio.input(LDR_3) == gpio.LOW):
count += 1
return count
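# (Added note, not in the original script) ldr_1/ldr_2/ldr_3 use the classic
# RC-timing trick on a single GPIO pin: the pin is first driven low to discharge
# the capacitor, then switched to input, and the loop counts iterations until the
# LDR charges the capacitor past the logic-high threshold. Larger counts mean
# less light; the value is in loop iterations, not seconds.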
####################### LDR ################################
LDR1 = []
LDR2 = []
LDR3 = []
MED = []
####################### MMA ################################
def med(MED):
n = 9
soma = []
somatorio = 0
MMA = 0
for i in range(0, n):
soma.append(ldr_1(LDR_1))
time.sleep(0.0001)
#print ('soma: ',soma)
somatorio = (sum(soma))
#print ('soma todos ele: ',somatorio)
MMA = (somatorio / len(soma))
#print ('MMA: ',int(MMA))
#time.sleep(0.004)
return int(MMA)
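# (Added note, not in the original script) Despite the moving-average naming
# ("MMA"), med() simply takes n=9 fresh ldr_1 readings and returns their plain
# arithmetic mean; it smooths noise but is not a sliding window over past samples.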
####################### MMA ################################
def plotNow():
plt.clf()
plt.ylim(0,3000)
plt.title('LDR Sensor')
plt.grid(True)
plt.ylabel('Value')
plt.xlabel('Time(ms)')
plt.plot(LDR1, 'r-', label='LDR_1')
plt.plot(LDR2, 'b-', label='LDR_2')
plt.plot(LDR3, 'y-', label='LDR_3')
plt.plot(MED, 'g-', label='MMA_9')
plt.legend(loc='upper right')
plt.pause(0.01)
plt.show()
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image = frame.array
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
cal = ldr_1(LDR_1)/1000
#print(cal)
#print(int(cal))
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
#lower_blue = np.array([l_h, l_s, l_v*int(cal)+1])
lower_blue = np.array([l_h, l_s, l_v])
upper_blue = np.array([u_h, u_s, u_v])
#lower_blue = np.array([l_h, l_s, l_v*int(cal)+1])
#upper_blue = np.array([u_h, u_s, u_v])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
result = cv2.bitwise_and(image, image, mask=mask)
#print ('MMA_1: ',med(MED))
if pwm == 1 :
'''
if med(MED) > 2500:
pwm38.ChangeDutyCycle(0)
#print('Intensidade 100')
elif med(MED) < 2500 and med(MED) > 2000:
pwm38.ChangeDutyCycle(30)
#print('Intensidade 70')
elif med(MED) < 2000 and med(MED) > 1500:
pwm38.ChangeDutyCycle(60)
#print('Intensidade 40')
elif med(MED) < 1500 and med(MED) > 1000:
pwm38.ChangeDutyCycle(90)
#print('Intensidade 10')
elif med(MED) < 1000 and med(MED) > 0:
pwm38.ChangeDutyCycle(100)
#print('Intensidade 0')
'''
if ldr_1(LDR_1) > 2500:
pwm38.ChangeDutyCycle(0)
print('Intensidade 100')
elif ldr_1(LDR_1) < 2500 and ldr_1(LDR_1) > 2000:
pwm38.ChangeDutyCycle(30)
print('Intensidade 70')
elif ldr_1(LDR_1) < 2000 and ldr_1(LDR_1) > 1500:
pwm38.ChangeDutyCycle(60)
print('Intensidade 40')
elif ldr_1(LDR_1) < 1500 and ldr_1(LDR_1) > 1000:
pwm38.ChangeDutyCycle(90)
print('Intensidade 10')
elif ldr_1(LDR_1) < 1000 and ldr_1(LDR_1) > 0:
pwm38.ChangeDutyCycle(100)
print('Intensidade 0')
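# (Added note, not in the original script) The ladder above maps the raw LDR
# count (darker scene -> larger count) to the duty cycle on pin 38; duty 0 is
# printed as 'Intensidade 100', so the output appears to be active-low and the
# illumination rises as ambient light drops.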
if graph == 1:
plt.ion()
osvalue = os.popen('vcgencmd measure_temp').readline()  # 'measure_temp' assumed; prints e.g. "temp=47.2'C"
value = (osvalue.replace("temp=", "").replace("'C\n", ""))
LDR1.append(ldr_1(LDR_1))
#LDR2.append(ldr_2(LDR_2))
#LDR3.append(ldr_3(LDR_3))
MED.append(med(MED))
plotNow()
#cv2.putText(image, "LDR_1: "+str(ldr_1(LDR_1)),(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "LDR_2: "+str(ldr_2(LDR_2)),(10,60),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "LDR_3: "+str(ldr_3(LDR_3)),(10,90),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "Fator multiplicador: "+str(cal),(10,120),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "LDR_1 compensado: " +str(l_v*int(cal)),(140,60),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "LDR_1 REAL: " +str(l_v),(140,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "MMA LDR_1: " +str(med(MED)),(140,90),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
if window == 1:
cv2.imshow('Real image', image)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
key = cv2.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# if the `q` key was pressed, break from the loop
if key == ord("q"):
gpio.cleanup()
exit()
if key == ord("t"):
print('ldr_1 : ' , ldr_1(LDR_1))
print('ldr_2 : ' , ldr_2(LDR_2))
print('ldr_3 : ' , ldr_3(LDR_3))
print('MMA LDR_1 : ' , med(MED))
print('\n')
if key == ord("1"):
print('DC 38 ON')
pwm38.ChangeDutyCycle(dc38)
dc38 = 0
if key == ord("2"):
print('DC 38 OFF')
pwm38.start(dc38)
dc38 = 100
if key == ord("3"):
print('DC 40 ON')
pwm40.start(dc40)
dc40 = 0
if key == ord("4"):
print('DC 40 OFF')
pwm40.start(dc40)
dc40 = 100
|
<filename>Calibrador.py
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#######################################################################
##### #####
##### <NAME> #####
##### <EMAIL> #####
##### 09/24/2018 #####
##### LABIO - PUCRS #####
##### Real-time "Color Calibrator" #####
##### #####
#######################################################################
# import the necessary packages
from picamera.array import PiRGBArray
from picamera import PiCamera
import time
import os
import pygame
import sys
import itertools as it
from contextlib import contextmanager
import numpy as np
import cv2 as cv
import cv2
import tkinter as tk
import RPi.GPIO as gpio
import matplotlib.pyplot as plt
# initialize the camera and grab a reference to the raw camera capture
camera = PiCamera()
camera.resolution = (640, 480)
camera.framerate = 32
rawCapture = PiRGBArray(camera, size=(640, 480))
time.sleep(0.1)
############################ Controller ##############################
cal = 0
pwm = 0
graph = 0
window = 1
def nothing(x):
pass
cv2.namedWindow("Trackbars")
cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)
# Disable GPIO warnings
gpio.setwarnings(False)
#Configuring GPIO
gpio.setmode(gpio.BOARD)
gpio.setup(38,gpio.OUT)
gpio.setup(36,gpio.OUT)
gpio.setup(40,gpio.OUT)
LDR_1 = 7
LDR_2 = 11
LDR_3 = 13
#Configure the pwm objects and initialize its value
pwm36 = gpio.PWM(36,100)
pwm36.start(100)
time.sleep(1)
pwm38 = gpio.PWM(38,100)
pwm38.start(0)
time.sleep(1)
pwm40 = gpio.PWM(40,100)
pwm40.start(0)
time.sleep(1)
pwm38.start(100)
time.sleep(1)
pwm40.start(100)
time.sleep(1)
#Create the dutycycle variables
dc36 = 0
dc38 = 0
dc40 = 0
pygame.mixer.init()
pygame.mixer.music.load("/home/pi/Documents/Demolidor/Testes/audio/calibrador_ini.mp3")
pygame.mixer.music.play()
print('initialized')
####################### LDR 1 ################################
def ldr_1(LDR_1):
count = 0
# Drive the pin as an output and pull it low to discharge the capacitor
gpio.setup(LDR_1, gpio.OUT)
gpio.output(LDR_1, gpio.LOW)
time.sleep(0.01)
#Change the pin back to input
gpio.setup(LDR_1, gpio.IN)
while (gpio.input(LDR_1) == gpio.LOW):
count += 1
return count
####################### LDR 2 ################################
def ldr_2(LDR_2):
count = 0
# Drive the pin as an output and pull it low to discharge the capacitor
gpio.setup(LDR_2, gpio.OUT)
gpio.output(LDR_2, gpio.LOW)
time.sleep(0.001)
#Change the pin back to input
gpio.setup(LDR_2, gpio.IN)
while (gpio.input(LDR_2) == gpio.LOW):
count += 1
return count
####################### LDR 3 ################################
def ldr_3(LDR_3):
count = 0
# Drive the pin as an output and pull it low to discharge the capacitor
gpio.setup(LDR_3, gpio.OUT)
gpio.output(LDR_3, gpio.LOW)
time.sleep(0.001)
#Change the pin back to input
gpio.setup(LDR_3, gpio.IN)
while (gpio.input(LDR_3) == gpio.LOW):
count += 1
return count
####################### LDR ################################
LDR1 = []
LDR2 = []
LDR3 = []
MED = []
####################### MMA ################################
def med(MED):
n = 9
soma = []
somatorio = 0
MMA = 0
for i in range(0, n):
soma.append(ldr_1(LDR_1))
time.sleep(0.0001)
#print ('soma: ',soma)
somatorio = (sum(soma))
#print ('soma todos ele: ',somatorio)
MMA = (somatorio / len(soma))
#print ('MMA: ',int(MMA))
#time.sleep(0.004)
return int(MMA)
####################### MMA ################################
def plotNow():
plt.clf()
plt.ylim(0,3000)
plt.title('LDR Sensor')
plt.grid(True)
plt.ylabel('Value')
plt.xlabel('Time(ms)')
plt.plot(LDR1, 'r-', label='LDR_1')
plt.plot(LDR2, 'b-', label='LDR_2')
plt.plot(LDR3, 'y-', label='LDR_3')
plt.plot(MED, 'g-', label='MMA_9')
plt.legend(loc='upper right')
plt.pause(0.01)
plt.show()
# capture frames from the camera
for frame in camera.capture_continuous(rawCapture, format="bgr", use_video_port=True):
image = frame.array
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
cal = ldr_1(LDR_1)/1000
#print(cal)
#print(int(cal))
l_h = cv2.getTrackbarPos("L - H", "Trackbars")
l_s = cv2.getTrackbarPos("L - S", "Trackbars")
l_v = cv2.getTrackbarPos("L - V", "Trackbars")
u_h = cv2.getTrackbarPos("U - H", "Trackbars")
u_s = cv2.getTrackbarPos("U - S", "Trackbars")
u_v = cv2.getTrackbarPos("U - V", "Trackbars")
#lower_blue = np.array([l_h, l_s, l_v*int(cal)+1])
lower_blue = np.array([l_h, l_s, l_v])
upper_blue = np.array([u_h, u_s, u_v])
#lower_blue = np.array([l_h, l_s, l_v*int(cal)+1])
#upper_blue = np.array([u_h, u_s, u_v])
mask = cv2.inRange(hsv, lower_blue, upper_blue)
result = cv2.bitwise_and(image, image, mask=mask)
#print ('MMA_1: ',med(MED))
if pwm == 1 :
'''
if med(MED) > 2500:
pwm38.ChangeDutyCycle(0)
#print('Intensidade 100')
elif med(MED) < 2500 and med(MED) > 2000:
pwm38.ChangeDutyCycle(30)
#print('Intensidade 70')
elif med(MED) < 2000 and med(MED) > 1500:
pwm38.ChangeDutyCycle(60)
#print('Intensidade 40')
elif med(MED) < 1500 and med(MED) > 1000:
pwm38.ChangeDutyCycle(90)
#print('Intensidade 10')
elif med(MED) < 1000 and med(MED) > 0:
pwm38.ChangeDutyCycle(100)
#print('Intensidade 0')
'''
if ldr_1(LDR_1) > 2500:
pwm38.ChangeDutyCycle(0)
print('Intensidade 100')
elif ldr_1(LDR_1) < 2500 and ldr_1(LDR_1) > 2000:
pwm38.ChangeDutyCycle(30)
print('Intensidade 70')
elif ldr_1(LDR_1) < 2000 and ldr_1(LDR_1) > 1500:
pwm38.ChangeDutyCycle(60)
print('Intensidade 40')
elif ldr_1(LDR_1) < 1500 and ldr_1(LDR_1) > 1000:
pwm38.ChangeDutyCycle(90)
print('Intensidade 10')
elif ldr_1(LDR_1) < 1000 and ldr_1(LDR_1) > 0:
pwm38.ChangeDutyCycle(100)
print('Intensidade 0')
if graph == 1:
plt.ion()
osvalue = os.popen('vcgencmd measure_temp').readline()  # 'measure_temp' assumed; prints e.g. "temp=47.2'C"
value = (osvalue.replace("temp=", "").replace("'C\n", ""))
LDR1.append(ldr_1(LDR_1))
#LDR2.append(ldr_2(LDR_2))
#LDR3.append(ldr_3(LDR_3))
MED.append(med(MED))
plotNow()
#cv2.putText(image, "LDR_1: "+str(ldr_1(LDR_1)),(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "LDR_2: "+str(ldr_2(LDR_2)),(10,60),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "LDR_3: "+str(ldr_3(LDR_3)),(10,90),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "Fator multiplicador: "+str(cal),(10,120),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "LDR_1 compensado: " +str(l_v*int(cal)),(140,60),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "LDR_1 REAL: " +str(l_v),(140,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
#cv2.putText(image, "MMA LDR_1: " +str(med(MED)),(140,90),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1)
if window == 1:
cv2.imshow('Real image', image)
cv2.imshow("mask", mask)
cv2.imshow("result", result)
key = cv2.waitKey(1) & 0xFF
# clear the stream in preparation for the next frame
rawCapture.truncate(0)
# if the `q` key was pressed, break from the loop
if key == ord("q"):
gpio.cleanup()
exit()
if key == ord("t"):
print('ldr_1 : ' , ldr_1(LDR_1))
print('ldr_2 : ' , ldr_2(LDR_2))
print('ldr_3 : ' , ldr_3(LDR_3))
print('MMA LDR_1 : ' , med(MED))
print('\n')
if key == ord("1"):
print('DC 38 ON')
pwm38.ChangeDutyCycle(dc38)
dc38 = 0
if key == ord("2"):
print('DC 38 OFF')
pwm38.start(dc38)
dc38 = 100
if key == ord("3"):
print('DC 40 ON')
pwm40.start(dc40)
dc40 = 0
if key == ord("4"):
print('DC 40 OFF')
pwm40.start(dc40)
dc40 = 100
|
en
| 0.204993
|
# -*- coding: utf-8 -*- #!/usr/bin/env python ####################################################################### ##### ##### ##### <NAME> ##### ##### <EMAIL> ##### ##### 09/24/2018 ##### ##### LABIO - PUCRS ##### ##### Real-time "Color Calibrator" ##### ##### ##### ####################################################################### # import the necessary packages # initialize the camera and grab a reference to the raw camera capture ############################ Controller ############################## #Configuring don’t show warnings #Configuring GPIO #Configure the pwm objects and initialize its value #Create the dutycycle variables ####################### LDR 1 ################################ #Output on the pin for #Change the pin back to input ####################### LDR 2 ################################ #Output on the pin for #Change the pin back to input ####################### LDR 3 ################################ #Output on the pin for #Change the pin back to input ####################### LDR ################################ ####################### MMA ################################ #print ('soma: ',soma) #print ('soma todos ele: ',somatorio) #print ('MMA: ',int(MMA)) #time.sleep(0.004) ####################### MMA ################################ # capture frames from the camera #print(cal) #print(int(cal)) #lower_blue = np.array([l_h, l_s, l_v*int(cal)+1]) #lower_blue = np.array([l_h, l_s, l_v*int(cal)+1]) #upper_blue = np.array([u_h, u_s, u_v]) #print ('MMA_1: ',med(MED)) if med(MED) > 2500: pwm38.ChangeDutyCycle(0) #print('Intensidade 100') elif med(MED) < 2500 and med(MED) > 2000: pwm38.ChangeDutyCycle(30) #print('Intensidade 70') elif med(MED) < 2000 and med(MED) > 1500: pwm38.ChangeDutyCycle(60) #print('Intensidade 40') elif med(MED) < 1500 and med(MED) > 1000: pwm38.ChangeDutyCycle(90) #print('Intensidade 10') elif med(MED) < 1000 and med(MED) > 0: pwm38.ChangeDutyCycle(100) #print('Intensidade 0') #LDR2.append(ldr_2(LDR_2)) #LDR3.append(ldr_3(LDR_3)) #cv2.putText(image, "LDR_1: "+str(ldr_1(LDR_1)),(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1) #cv2.putText(image, "LDR_2: "+str(ldr_2(LDR_2)),(10,60),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1) #cv2.putText(image, "LDR_3: "+str(ldr_3(LDR_3)),(10,90),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1) #cv2.putText(image, "Fator multiplicador: "+str(cal),(10,120),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1) #cv2.putText(image, "LDR_1 compensado: " +str(l_v*int(cal)),(140,60),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1) #cv2.putText(image, "LDR_1 REAL: " +str(l_v),(140,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1) #cv2.putText(image, "MMA LDR_1: " +str(med(MED)),(140,90),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255),1) # clear the stream in preparation for the next frame # if the `q` key was pressed, break from the loop
| 2.706042
| 3
|
solution/longestValidParentheses.py
|
ccwwonebyone/leetcode
| 0
|
6627127
|
from typing import List
class Solution:
def longestValidParentheses(self, s: str) -> int:
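# (Added note) Stack-of-indices technique: the stack keeps the index of the last
# unmatched ')' (seeded with -1). For '(' push its index; for ')' pop, then either
# record a new base index (stack emptied) or measure the current valid span as
# i - stack[-1].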
res = 0
stack = [-1]
for i, chars in enumerate(s):
if chars == "(":
stack.append(i)
else:
stack.pop()
if not stack:
stack.append(i)
else:
res = max(res, i - stack[-1])
return res
if __name__ == "__main__":
solution = Solution()
print(solution.longestValidParentheses("))(((())"))
|
from typing import List
class Solution:
def longestValidParentheses(self, s: str) -> int:
res = 0
stack = [-1]
for i, chars in enumerate(s):
if chars == "(":
stack.append(i)
else:
stack.pop()
if not stack:
stack.append(i)
else:
res = max(res, i - stack[-1])
return res
if __name__ == "__main__":
solution = Solution()
print(solution.longestValidParentheses("))(((())"))
|
none
| 1
| 3.454796
| 3
|
|
4/oop16.py
|
ikramulkayes/Python_season2
| 0
|
6627128
|
class Author:
def __init__(self,*args):
if len(args)==0:
self.name = "Default"
self.books = []
elif len(args) == 1:
self.name = args[0]
self.books = []
else:
self.name = args[0]
self.books = args[1::]
self.books = list(self.books)
def addBooks(self,*args):
for elm in args:
self.books.append(elm)
def changeName(self,name):
self.name = name
def printDetails(self):
print("Author Name:",self.name)
print("--------")
print("List of Books:")
for elm in self.books:
print(elm)
auth1 = Author('<NAME>')
auth1.addBooks('Deyal', '<NAME>ari')
auth1.printDetails()
print("===================")
auth2 = Author()
print(auth2.name)
auth2.changeName('<NAME>')
auth2.addBooks('The Godfather', 'Omerta', 'The Sicilian')
print('===================')
auth2.printDetails()
print("===================")
auth3 = Author('<NAME>', 'The Alchemist', 'The Fifth Mountain')
auth3.printDetails()
|
class Author:
def __init__(self,*args):
if len(args)==0:
self.name = "Default"
self.books = []
elif len(args) == 1:
self.name = args[0]
self.books = []
else:
self.name = args[0]
self.books = args[1::]
self.books = list(self.books)
def addBooks(self,*args):
for elm in args:
self.books.append(elm)
def changeName(self,name):
self.name = name
def printDetails(self):
print("Author Name:",self.name)
print("--------")
print("List of Books:")
for elm in self.books:
print(elm)
auth1 = Author('<NAME>')
auth1.addBooks('Deyal', '<NAME>ari')
auth1.printDetails()
print("===================")
auth2 = Author()
print(auth2.name)
auth2.changeName('<NAME>')
auth2.addBooks('The Godfather', 'Omerta', 'The Sicilian')
print('===================')
auth2.printDetails()
print("===================")
auth3 = Author('<NAME>', 'The Alchemist', 'The Fifth Mountain')
auth3.printDetails()
|
none
| 1
| 3.833121
| 4
|
|
projects/bugs/example2.py
|
shreystechtips/pythonbytes
| 2
|
6627129
|
<reponame>shreystechtips/pythonbytes
# Same program as 'example.py' but fewer lines, some minor changes
import turtle
from turtle import Turtle
turtle.bgcolor(.95, .91, .85) # conda Turtle uses floats from 0.0 to 1.0 for rgb values
s, s2, epsilon, close_distance, a_cumulative, nCycles = 390., 390./2., 0.2, 0.21, 0., 0.
def ni(i): return (i+1)%4 # ni is 'next index' in the square of bugs 0, 1, 2, 3
a = [] # a, b, c, d --> a[] we create a list of turtles to simplify the code
for i in range(4): a.append(Turtle()); a[i].up(); a[i].speed(1000)
b = Turtle() # box > b
b.pencolor('black')
a[0].pencolor(1.0, 0.3, 0.3); a[1].pencolor(0.8, 0.9, 0.3); a[2].pencolor(0.3, 1.0, 0.3); a[3].pencolor(0.3, 0.8, 0.9)
a[0].setpos(-s2, s2); a[1].setpos(s2, s2); a[2].setpos(s2, -s2); a[3].setpos(-s2, -s2)
for i in range(4): a[i].down()
while True:
for i in range(4): a[i].setheading(a[i].towards(a[ni(i)]))
for i in range(4): a[i].forward(epsilon)
if int(nCycles) % int(s/(6.*epsilon)) == 0:
for i in range(4):
b.up(); b.setpos(a[i].pos()); b.setheading(b.towards(a[ni(i)]));
b.down(); b.forward(b.distance(a[ni(i)]))
nCycles += 1.
if a[0].distance(a[1]) < close_distance: break
print('Distance', "%.4f" % (nCycles * epsilon / s), 'in units of s')
|
# Same program as 'example.py' but fewer lines, some minor changes
import turtle
from turtle import Turtle
turtle.bgcolor(.95, .91, .85) # conda Turtle uses floats from 0.0 to 1.0 for rgb values
s, s2, epsilon, close_distance, a_cumulative, nCycles = 390., 390./2., 0.2, 0.21, 0., 0.
def ni(i): return (i+1)%4 # ni is 'next index' in the square of bugs 0, 1, 2, 3
a = [] # a, b, c, d --> a[] we create a list of turtles to simplify the code
for i in range(4): a.append(Turtle()); a[i].up(); a[i].speed(1000)
b = Turtle() # box > b
b.pencolor('black')
a[0].pencolor(1.0, 0.3, 0.3); a[1].pencolor(0.8, 0.9, 0.3); a[2].pencolor(0.3, 1.0, 0.3); a[3].pencolor(0.3, 0.8, 0.9)
a[0].setpos(-s2, s2); a[1].setpos(s2, s2); a[2].setpos(s2, -s2); a[3].setpos(-s2, -s2)
for i in range(4): a[i].down()
while True:
for i in range(4): a[i].setheading(a[i].towards(a[ni(i)]))
for i in range(4): a[i].forward(epsilon)
if int(nCycles) % int(s/(6.*epsilon)) == 0:
for i in range(4):
b.up(); b.setpos(a[i].pos()); b.setheading(b.towards(a[ni(i)]));
b.down(); b.forward(b.distance(a[ni(i)]))
nCycles += 1.
if a[0].distance(a[1]) < close_distance: break
print('Distance', "%.4f" % (nCycles * epsilon / s), 'in units of s')
|
en
| 0.744654
|
# Same program as 'example.py' but fewer lines, some minor changes # conda Turtle uses floats from 0.0 to 1.0 for rgb values # ni is 'next index' in the square of bugs 0, 1, 2, 3 # a, b, c, d --> a[] we create a list of turtles to simplify the code # box > b
| 3.448307
| 3
|
lib/django-1.4/django/contrib/gis/db/backends/oracle/models.py
|
MiCHiLU/google_appengine_sdk
| 790
|
6627130
|
"""
The GeometryColumns and SpatialRefSys models for the Oracle spatial
backend.
It should be noted that Oracle Spatial does not have database tables
named according to the OGC standard, so the closest analogs are used.
For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns
model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model.
"""
from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"Maps to the Oracle USER_SDO_GEOM_METADATA table."
table_name = models.CharField(max_length=32)
column_name = models.CharField(max_length=1024)
srid = models.IntegerField(primary_key=True)
# TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY).
class Meta:
db_table = 'USER_SDO_GEOM_METADATA'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store
the feature table name.
"""
return 'table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store
the feature geometry column.
"""
return 'column_name'
def __unicode__(self):
return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"Maps to the Oracle MDSYS.CS_SRS table."
cs_name = models.CharField(max_length=68)
srid = models.IntegerField(primary_key=True)
auth_srid = models.IntegerField()
auth_name = models.CharField(max_length=256)
wktext = models.CharField(max_length=2046)
# Optional geometry representing the bounds of this coordinate
# system. By default, all are NULL in the table.
cs_bounds = models.PolygonField(null=True)
objects = models.GeoManager()
class Meta:
db_table = 'CS_SRS'
managed = False
@property
def wkt(self):
return self.wktext
@classmethod
def wkt_col(cls):
return 'wktext'
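# --- Added usage sketch (not part of Django). ---
# Typical introspection against a configured Oracle GeoDjango connection;
# srid 4326 is only an illustrative value.
#
#   srs = SpatialRefSys.objects.get(srid=4326)
#   print(srs.wkt)                   # WKT taken from the `wktext` column
#   print(SpatialRefSys.wkt_col())   # -> 'wktext'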
|
"""
The GeometryColumns and SpatialRefSys models for the Oracle spatial
backend.
It should be noted that Oracle Spatial does not have database tables
named according to the OGC standard, so the closest analogs are used.
For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns
model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model.
"""
from django.contrib.gis.db import models
from django.contrib.gis.db.backends.base import SpatialRefSysMixin
class GeometryColumns(models.Model):
"Maps to the Oracle USER_SDO_GEOM_METADATA table."
table_name = models.CharField(max_length=32)
column_name = models.CharField(max_length=1024)
srid = models.IntegerField(primary_key=True)
# TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY).
class Meta:
db_table = 'USER_SDO_GEOM_METADATA'
managed = False
@classmethod
def table_name_col(cls):
"""
Returns the name of the metadata column used to store
the feature table name.
"""
return 'table_name'
@classmethod
def geom_col_name(cls):
"""
Returns the name of the metadata column used to store
the feature geometry column.
"""
return 'column_name'
def __unicode__(self):
return '%s - %s (SRID: %s)' % (self.table_name, self.column_name, self.srid)
class SpatialRefSys(models.Model, SpatialRefSysMixin):
"Maps to the Oracle MDSYS.CS_SRS table."
cs_name = models.CharField(max_length=68)
srid = models.IntegerField(primary_key=True)
auth_srid = models.IntegerField()
auth_name = models.CharField(max_length=256)
wktext = models.CharField(max_length=2046)
# Optional geometry representing the bounds of this coordinate
# system. By default, all are NULL in the table.
cs_bounds = models.PolygonField(null=True)
objects = models.GeoManager()
class Meta:
db_table = 'CS_SRS'
managed = False
@property
def wkt(self):
return self.wktext
@classmethod
def wkt_col(cls):
return 'wktext'
|
en
| 0.757492
|
The GeometryColumns and SpatialRefSys models for the Oracle spatial backend. It should be noted that Oracle Spatial does not have database tables named according to the OGC standard, so the closest analogs are used. For example, the `USER_SDO_GEOM_METADATA` is used for the GeometryColumns model and the `SDO_COORD_REF_SYS` is used for the SpatialRefSys model. # TODO: Add support for `diminfo` column (type MDSYS.SDO_DIM_ARRAY). Returns the name of the metadata column used to store the the feature table name. Returns the name of the metadata column used to store the the feature geometry column. # Optional geometry representing the bounds of this coordinate # system. By default, all are NULL in the table.
| 2.398881
| 2
|
filters/oscillatory_hallucination/filter.py
|
raft001/NL-Augmenter
| 0
|
6627131
|
from collections import Counter
from nltk import ngrams
from transformers import BasicTokenizer
from interfaces.SentenceOperation import SentenceAndTargetOperation
from tasks.TaskTypes import TaskType
class OscillatoryHallucinationFilter(SentenceAndTargetOperation):
"""N-gram Count based Heuristic for Detecting Oscillatory Hallucinations
Paper: https://arxiv.org/pdf/2104.06683.pdf
The paper did not explicitly tokenize since IWSLT is available in tokenized form.
Tokenization used here = Whitespace + Punctuation, as in multilingual BERT pre-tokenization.
Finally, one more difference with the paper: Count Threshold is also set along with Difference Threshold.
Thresholds are set to very high values to ensure very High Precision of the Filter, but this can be tweaked.
"""
tasks = [TaskType.TEXT_TO_TEXT_GENERATION]
keywords = ["generation", "translation", "language-agnostic"]
def __init__(
self,
ngram_size=2,
count_threshold=10,
difference_threshold=5,
min_length_threshold=10,
):
super().__init__()
self.tokenizer = BasicTokenizer()
self.ngram_size = ngram_size
self.count_threshold = count_threshold
self.difference_threshold = difference_threshold
self.min_length_threshold = min_length_threshold
def filter(self, source: str = None, output: str = None) -> bool:
src = self.tokenizer.tokenize(source)
tgt = self.tokenizer.tokenize(output)
# Minimum Length Threshold is Not Really Required, Since Count Threshold will usually take care of it
# But still useful in practice, in case count threshold is too low
if len(tgt) < self.min_length_threshold:
return False
src_bigrams = ngrams(src, self.ngram_size)
src_max_bigram_count = Counter(src_bigrams).most_common(1)[0][1]
tgt_bigrams = ngrams(tgt, self.ngram_size)
tgt_max_bigram_count = Counter(tgt_bigrams).most_common(1)[0][1]
if (
tgt_max_bigram_count >= self.count_threshold
and (tgt_max_bigram_count - src_max_bigram_count)
> self.difference_threshold
):
return True
return False
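# A minimal standalone sketch of the same n-gram-count heuristic (plain whitespace
# tokenization instead of BasicTokenizer; the sentences are illustrative only):
from collections import Counter
from nltk import ngrams
def max_ngram_count(tokens, n=2):
    # highest count reached by any single n-gram (0 if the sequence is too short)
    counts = Counter(ngrams(tokens, n))
    return counts.most_common(1)[0][1] if counts else 0
src = "the cat sat on the mat".split()
tgt = "the cat sat on the mat on the mat on the mat on the mat".split()
# A large target-vs-source gap in repeated n-grams is what the filter above flags.
print(max_ngram_count(src), max_ngram_count(tgt))  # 1 4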
|
en
| 0.922208
|
N-gram Count based Heuristic for Detecting Oscillatory Hallucinations Paper: https://arxiv.org/pdf/2104.06683.pdf The paper did not explicitly tokenize since IWSLT is available in tokenized form. Tokenization used here = Whitespace + Punctuation, as in multilingual BERT pre-tokenization. Finally, one more difference with the paper: Count Threshold is also set along with Difference Threshold. Thresholds are set to very high values to ensure very High Precision of the Filter, but this can be tweaked. # Minimum Length Threshold is Not Really Required, Since Count Threshold will usually take care of it # But still useful in practice, in case count threshold is too low
| 2.803286
| 3
|
OutputImageAsPNGs.py
|
w326004741/MNIST-Data-Set-Problem-Sheet
| 0
|
6627132
|
#import the console-output helper (which exposes the loaded MNIST arrays via ReadDataFiles), the Python imaging library and numpy
import OutputImageToConsole as out
import PIL.Image as pil
import numpy as np
#save every image of the chosen dataset split as a PNG named "<split>-<index>-<label>.png"
def outImages(type):
    #handle the test split
    if(type == 'test'):
        #iterate over every test image by index
        for i in range(len(out.ReadDataFiles.test_images)):
            #convert the raw pixel array into a PIL image
            img = pil.fromarray(np.array(out.ReadDataFiles.test_images[i]))
            lab = out.ReadDataFiles.test_labels[i]
            #convert to RGB mode so the image can be saved as a colour PNG
            img = img.convert('RGB')
            #build the output path from the image index and its label
            imgname = '/Gitrepository/Read-Digits-Image-Files-Problem-sheet/test_images/test-'+str(i)+'-'+str(lab)+'.png'
            #write the PNG to disk
            img.save(imgname)
    #handle the training split
    if(type == 'train'):
        #iterate over every training image by index
        for i in range(len(out.ReadDataFiles.train_images)):
            #convert the raw pixel array into a PIL image
            img = pil.fromarray(np.array(out.ReadDataFiles.train_images[i]))
            lab = out.ReadDataFiles.train_labels[i]
            #convert to RGB mode so the image can be saved as a colour PNG
            img = img.convert('RGB')
            #build the output path from the image index and its label
            imgname = '/Gitrepository/Read-Digits-Image-Files-Problem-sheet/train_images/train-'+str(i)+'-'+str(lab)+'.png'
            #write the PNG to disk
            img.save(imgname)
#export both dataset splits: save the test images, then the training images
outImages('test')
outImages('train')
|
en
| 0.582738
|
#import OutputImageToConsole, python imaging library, numpy #defined function #set if loop #using len() return the length of an object(char,list,tuple,etc.) or number of items #Convert PIL image.img is a PIL image #convert RGB mode #save image as RGB mode and output specific file name. #save imgname #set if loop #using len() return the length of an object(char,list,tuple,etc.) or number of items #Convert PIL image.img is a PIL image #convert RGB mode #save image as RGB mode and output specific file name. #save imgname #output images for specific file
| 3.169237
| 3
|
sysinv/sysinv/sysinv/sysinv/puppet/dcdbsync.py
|
albailey/config
| 10
|
6627133
|
#
# Copyright (c) 2019 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from sysinv.common import constants
from sysinv.common import utils
from sysinv.helm import helm
from sysinv.puppet import openstack
class DCDBsyncPuppet(openstack.OpenstackBasePuppet):
"""Class to encapsulate puppet operations for dcdbsync
configuration"""
SERVICE_NAME = 'dcdbsync'
SERVICE_PORT = 8219
SERVICE_PATH = 'v1.0'
IDENTITY_SERVICE_NAME = 'keystone'
IDENTITY_SERVICE_DB = 'keystone'
def get_static_config(self):
dbuser = self._get_database_username(self.IDENTITY_SERVICE_NAME)
return {
'dcdbsync::db::postgresql::user': dbuser,
}
def get_secure_static_config(self):
dbpass = self._get_database_password(self.IDENTITY_SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
# initial bootstrap is bound to localhost
dburl = self._format_database_connection(self.IDENTITY_SERVICE_NAME,
constants.LOCALHOST_HOSTNAME,
database=self.IDENTITY_SERVICE_DB)
return {
'dcdbsync::database_connection': dburl,
'dcdbsync::db::postgresql::password': dbpass,
'dcdbsync::keystone::auth::password': kspass,
'dcdbsync::api::keystone_password': kspass,
}
def get_system_config(self):
ksuser = self._get_service_user_name(self.SERVICE_NAME)
config = {
# The region in which the identity server can be found
'dcdbsync::region_name': self._keystone_region_name(),
'dcdbsync::keystone::auth::public_url': self.get_public_url(),
'dcdbsync::keystone::auth::internal_url': self.get_internal_url(),
'dcdbsync::keystone::auth::admin_url': self.get_admin_url(),
'dcdbsync::keystone::auth::region': self._region_name(),
'dcdbsync::keystone::auth::auth_name': ksuser,
'dcdbsync::keystone::auth::auth_domain':
self._get_service_user_domain_name(),
'dcdbsync::keystone::auth::service_name': self.SERVICE_NAME,
'dcdbsync::keystone::auth::tenant': self._get_service_tenant_name(),
'dcdbsync::api::bind_host': self._get_management_address(),
'dcdbsync::api::keystone_auth_uri': self._keystone_auth_uri(),
'dcdbsync::api::keystone_identity_uri':
self._keystone_identity_uri(),
'dcdbsync::api::keystone_tenant': self._get_service_project_name(),
'dcdbsync::api::keystone_user_domain':
self._get_service_user_domain_name(),
'dcdbsync::api::keystone_project_domain':
self._get_service_project_domain_name(),
'dcdbsync::api::keystone_user': ksuser,
'platform::dcdbsync::params::region_name': self.get_region_name(),
'platform::dcdbsync::params::service_create':
self._to_create_services(),
}
if utils.is_openstack_applied(self.dbapi):
helm_data = helm.HelmOperatorData(self.dbapi)
# The dcdbsync instance for openstack is authenticated with
# pod based keystone.
endpoints_data = helm_data.get_keystone_endpoint_data()
service_config = {
'dcdbsync::openstack_init::region_name':
endpoints_data['region_name'],
'dcdbsync::openstack_api::keystone_auth_uri':
endpoints_data['endpoint_override'],
'dcdbsync::openstack_api::keystone_identity_uri':
endpoints_data['endpoint_override'],
}
config.update(service_config)
return config
def get_secure_system_config(self):
dbpass = self._get_database_password(self.IDENTITY_SERVICE_NAME)
kspass = self._get_service_password(self.SERVICE_NAME)
config = {
'dcdbsync::database_connection':
self._format_database_connection(
self.IDENTITY_SERVICE_NAME,
database=self.IDENTITY_SERVICE_DB),
'dcdbsync::db::postgresql::password': dbpass,
'dcdbsync::keystone::auth::password': kspass,
'dcdbsync::api::keystone_password': kspass,
}
if utils.is_openstack_applied(self.dbapi):
helm_data = helm.HelmOperatorData(self.dbapi)
# The dcdbsync instance for openstack is authenticated with
# pod based keystone.
endpoints_data = helm_data.get_dcdbsync_endpoint_data()
db_data = helm_data.get_keystone_oslo_db_data()
service_auth_config = {
'dcdbsync::openstack_api::keystone_password':
endpoints_data['keystone_password'],
'dcdbsync::openstack_init::database_connection':
db_data['connection'],
}
config.update(service_auth_config)
return config
def get_public_url(self):
return self._format_public_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_internal_url(self):
return self._format_private_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_admin_url(self):
return self._format_admin_endpoint(self.SERVICE_PORT,
path=self.SERVICE_PATH)
def get_region_name(self):
return self._get_service_region_name(self.SERVICE_NAME)
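# For illustration only: with SERVICE_PORT = 8219 and SERVICE_PATH = 'v1.0', the three
# endpoint helpers above produce URLs of the form below (addresses and scheme are
# placeholders resolved from the platform's network and TLS configuration):
#   public:   <scheme>://<external address>:8219/v1.0
#   internal: <scheme>://<management address>:8219/v1.0
#   admin:    <scheme>://<admin address>:8219/v1.0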
|
en
| 0.889716
|
# # Copyright (c) 2019 Wind River Systems, Inc. # # SPDX-License-Identifier: Apache-2.0 # Class to encapsulate puppet operations for dcdbsync configuration # initial bootstrap is bound to localhost # The region in which the identity server can be found # The dcdbsync instance for openstack is authenticated with # pod based keystone. # The dcdbsync instance for openstack is authenticated with # pod based keystone.
| 1.707929
| 2
|
napari/layers/utils/stack_utils.py
|
davidpross/napari
| 1
|
6627134
|
<reponame>davidpross/napari<filename>napari/layers/utils/stack_utils.py
from __future__ import annotations
import itertools
from typing import TYPE_CHECKING, List
import numpy as np
from ...layers import Image
from ...layers.image._image_utils import guess_multiscale
from ...utils.colormaps import CYMRGB, MAGENTA_GREEN, Colormap
from ...utils.misc import ensure_iterable, ensure_sequence_of_iterables
from ...utils.translations import trans
if TYPE_CHECKING:
from ...types import FullLayerData
def slice_from_axis(array, *, axis, element):
"""Take a single index slice from array using slicing.
Equivalent to :func:`np.take`, but using slicing, which ensures that the
output is a view of the original array.
Parameters
----------
array : NumPy or other array
Input array to be sliced.
axis : int
The axis along which to slice.
element : int
The element along that axis to grab.
Returns
-------
sliced : NumPy or other array
The sliced output array, which has one less dimension than the input.
"""
slices = [slice(None) for i in range(array.ndim)]
slices[axis] = element
return array[tuple(slices)]
def split_channels(
data: np.ndarray,
channel_axis: int,
**kwargs,
) -> List[FullLayerData]:
"""Split the data array into separate arrays along an axis.
Keyword arguments will override any parameters altered or set in this
function. Colormap, blending, or multiscale are set as follows if not
overridden by a keyword:
- colormap : (magenta, green) for 2 channels, (CYMRGB) for more than 2
- blending : translucent for first channel, additive for others
- multiscale : determined by layers.image._image_utils.guess_multiscale.
Colormap, blending and multiscale will be set and returned in meta if not in kwargs.
If any other key is not present in kwargs it will not be returned in the meta
dictionary of the returned LayerData tuple. For example, if gamma is not in
kwargs then meta will not have a gamma key.
Parameters
----------
data : array or list of array
channel_axis : int
Axis to split the image along.
**kwargs : dict
Keyword arguments will override the default image meta keys
returned in each layer data tuple.
Returns
-------
List of LayerData tuples: [(data: array, meta: Dict, type: str )]
"""
# Determine if data is a multiscale
multiscale = kwargs.get('multiscale')
if not multiscale:
multiscale, data = guess_multiscale(data)
kwargs['multiscale'] = multiscale
n_channels = (data[0] if multiscale else data).shape[channel_axis]
# Use original blending mode or for multichannel use translucent for first channel then additive
kwargs['blending'] = kwargs.get('blending') or ['translucent'] + [
'additive'
] * (n_channels - 1)
kwargs.setdefault('colormap', None)
# these arguments are *already* iterables in the single-channel case.
iterable_kwargs = {
'scale',
'translate',
'affine',
'contrast_limits',
'metadata',
'plane',
'experimental_clipping_planes',
}
# turn the kwargs dict into a mapping of {key: iterator}
# so that we can use {k: next(v) for k, v in kwargs.items()} below
for key, val in kwargs.items():
if key == 'colormap' and val is None:
if n_channels == 1:
kwargs[key] = iter(['gray'])
elif n_channels == 2:
kwargs[key] = iter(MAGENTA_GREEN)
else:
kwargs[key] = itertools.cycle(CYMRGB)
# make sure that iterable_kwargs are a *sequence* of iterables
# for the multichannel case. For example: if scale == (1, 2) &
# n_channels = 3, then scale should == [(1, 2), (1, 2), (1, 2)]
elif key in iterable_kwargs or (
key == 'colormap' and isinstance(val, Colormap)
):
kwargs[key] = iter(
ensure_sequence_of_iterables(
val,
n_channels,
repeat_empty=True,
allow_none=True,
)
)
else:
kwargs[key] = iter(ensure_iterable(val))
layerdata_list = list()
for i in range(n_channels):
if multiscale:
image = [
slice_from_axis(data[j], axis=channel_axis, element=i)
for j in range(len(data))
]
else:
image = slice_from_axis(data, axis=channel_axis, element=i)
i_kwargs = {}
for key, val in kwargs.items():
try:
i_kwargs[key] = next(val)
except StopIteration:
raise IndexError(
trans._(
"Error adding multichannel image with data shape {data_shape!r}.\nRequested channel_axis ({channel_axis}) had length {n_channels}, but the '{key}' argument only provided {i} values. ",
deferred=True,
data_shape=data.shape,
channel_axis=channel_axis,
n_channels=n_channels,
key=key,
i=i,
)
)
layerdata = (image, i_kwargs, 'image')
layerdata_list.append(layerdata)
return layerdata_list
def stack_to_images(stack: Image, axis: int, **kwargs) -> List[Image]:
"""Splits a single Image layer into a list layers along axis.
Some image layer properties will be changed unless specified as an item in
kwargs. Properties such as colormap and contrast_limits are set on individual
channels. Properties will be changed as follows (unless overridden with a kwarg):
- colormap : (magenta, green) for 2 channels, (CYMRGB) for more than 2
- blending : additive
- contrast_limits : min and max of the image
All other properties, such as scale and translate will be propagated from the
original stack, unless a keyword argument is passed for that property.
Parameters
----------
stack : napari.layers.Image
The image stack to be split into a list of image layers
axis : int
The axis to split along.
Returns
-------
imagelist: list
List of Image objects
"""
data, meta, _ = stack.as_layer_data_tuple()
for key in ("contrast_limits", "colormap", "blending"):
del meta[key]
name = stack.name
num_dim = 3 if stack.rgb else stack.ndim
if num_dim < 3:
raise ValueError(
trans._(
"The image needs more than 2 dimensions for splitting",
deferred=True,
)
)
if axis >= num_dim:
raise ValueError(
trans._(
"Can't split along axis {axis}. The image has {num_dim} dimensions",
deferred=True,
axis=axis,
num_dim=num_dim,
)
)
if kwargs.get("colormap"):
kwargs['colormap'] = itertools.cycle(kwargs['colormap'])
if meta['rgb']:
if axis in [num_dim - 1, -1]:
kwargs['rgb'] = False # split channels as grayscale
else:
kwargs['rgb'] = True # split some other axis, remain rgb
meta['scale'].pop(axis)
meta['translate'].pop(axis)
else:
kwargs['rgb'] = False
meta['scale'].pop(axis)
meta['translate'].pop(axis)
meta['rotate'] = None
meta['shear'] = None
meta['affine'] = None
meta.update(kwargs)
imagelist = []
layerdata_list = split_channels(data, axis, **meta)
for i, tup in enumerate(layerdata_list):
idata, imeta, _ = tup
layer_name = f'{name} layer {i}'
imeta['name'] = layer_name
imagelist.append(Image(idata, **imeta))
return imagelist
def split_rgb(stack: Image, with_alpha=False) -> List[Image]:
"""Variant of stack_to_images that splits an RGB with predefined cmap."""
if not stack.rgb:
raise ValueError(
trans._('Image must be RGB to use split_rgb', deferred=True)
)
images = stack_to_images(stack, -1, colormap=('red', 'green', 'blue'))
return images if with_alpha else images[:3]
def images_to_stack(images: List[Image], axis: int = 0, **kwargs) -> Image:
"""Combines a list of Image layers into one layer stacked along axis
The new image layer will get the meta properties of the first
image layer in the input list unless specified in kwargs
Parameters
----------
images : List
List of Image Layers
axis : int
Index at which to insert the new axis
**kwargs : dict
Dictionary of parameters values to override parameters
from the first image in images list.
Returns
-------
stack : napari.layers.Image
Combined image stack
"""
if not images:
raise IndexError(trans._("images list is empty", deferred=True))
data, meta, _ = images[0].as_layer_data_tuple()
kwargs.setdefault("scale", np.insert(meta['scale'], axis, 1))
kwargs.setdefault("translate", np.insert(meta['translate'], axis, 0))
meta.update(kwargs)
new_data = np.stack([image.data for image in images], axis=axis)
return Image(new_data, **meta)
def merge_rgb(images: List[Image]) -> List[Image]:
"""Variant of images_to_stack that makes an RGB from 3 images."""
if not (len(images) == 3 and all(isinstance(x, Image) for x in images)):
raise ValueError(
trans._("merge_rgb requires 3 images layers", deferred=True)
)
return images_to_stack(images, axis=-1, rgb=True)
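# A hedged usage sketch of the split/stack round trip above (assumes a working napari
# install; the module path mirrors this file's location and layer names follow the
# f'{name} layer {i}' pattern used in stack_to_images):
import numpy as np
from napari.layers import Image
from napari.layers.utils.stack_utils import stack_to_images, images_to_stack
stack = Image(np.random.random((3, 64, 64)), name='multichannel')
channels = stack_to_images(stack, axis=0)      # three single-channel Image layers
print([layer.name for layer in channels])      # ['multichannel layer 0', ...]
restacked = images_to_stack(channels, axis=0)  # back to a single (3, 64, 64) layer
print(restacked.data.shape)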
|
en
| 0.727685
|
Take a single index slice from array using slicing. Equivalent to :func:`np.take`, but using slicing, which ensures that the output is a view of the original array. Parameters ---------- array : NumPy or other array Input array to be sliced. axis : int The axis along which to slice. element : int The element along that axis to grab. Returns ------- sliced : NumPy or other array The sliced output array, which has one less dimension than the input. Split the data array into separate arrays along an axis. Keyword arguments will override any parameters altered or set in this function. Colormap, blending, or multiscale are set as follows if not overridden by a keyword: - colormap : (magenta, green) for 2 channels, (CYMRGB) for more than 2 - blending : translucent for first channel, additive for others - multiscale : determined by layers.image._image_utils.guess_multiscale. Colormap, blending and multiscale will be set and returned in meta if not in kwargs. If any other key is not present in kwargs it will not be returned in the meta dictionary of the returned LaterData tuple. For example, if gamma is not in kwargs then meta will not have a gamma key. Parameters ---------- data : array or list of array channel_axis : int Axis to split the image along. **kwargs : dict Keyword arguments will override the default image meta keys returned in each layer data tuple. Returns ------- List of LayerData tuples: [(data: array, meta: Dict, type: str )] # Determine if data is a multiscale # Use original blending mode or for multichannel use translucent for first channel then additive # these arguments are *already* iterables in the single-channel case. # turn the kwargs dict into a mapping of {key: iterator} # so that we can use {k: next(v) for k, v in kwargs.items()} below # make sure that iterable_kwargs are a *sequence* of iterables # for the multichannel case. For example: if scale == (1, 2) & # n_channels = 3, then scale should == [(1, 2), (1, 2), (1, 2)] Splits a single Image layer into a list layers along axis. Some image layer properties will be changed unless specified as an item in kwargs. Properties such as colormap and contrast_limits are set on individual channels. Properties will be changed as follows (unless overridden with a kwarg): - colormap : (magenta, green) for 2 channels, (CYMRGB) for more than 2 - blending : additive - contrast_limits : min and max of the image All other properties, such as scale and translate will be propagated from the original stack, unless a keyword argument passed for that property. Parameters ---------- stack : napari.layers.Image The image stack to be split into a list of image layers axis : int The axis to split along. Returns ------- imagelist: list List of Image objects # split channels as grayscale # split some other axis, remain rgb Variant of stack_to_images that splits an RGB with predefined cmap. Combines a list of Image layers into one layer stacked along axis The new image layer will get the meta properties of the first image layer in the input list unless specified in kwargs Parameters ---------- images : List List of Image Layers axis : int Index to to insert the new axis **kwargs : dict Dictionary of parameters values to override parameters from the first image in images list. Returns ------- stack : napari.layers.Image Combined image stack Variant of images_to_stack that makes an RGB from 3 images.
| 2.604029
| 3
|
count_classes.py
|
DerekGloudemans/detrac-lbt
| 2
|
6627135
|
<gh_stars>1-10
import argparse
import os,sys,inspect
import numpy as np
import random
import time
import math
import _pickle as pickle
random.seed(0)
import cv2
from PIL import Image
import torch
import matplotlib.pyplot as plt
from config.data_paths import data_paths
# detector_path = os.path.join(os.getcwd(),"models","pytorch_retinanet_detector")
# sys.path.insert(0,detector_path)
detector_path = os.path.join(os.getcwd(),"models","py_ret_det_multigpu")
sys.path.insert(0,detector_path)
detrac_util_path = os.path.join(os.getcwd(),"util_detrac")
sys.path.insert(0,detrac_util_path)
eval_path = os.path.join(os.getcwd(),"util_eval","py_motmetrics")
sys.path.insert(0,eval_path)
from models.py_ret_det_multigpu.retinanet.model import resnet50
from util_detrac.detrac_detection_dataset import class_dict
from util_eval import mot_eval as mot
from tracker_fsld_112 import Localization_Tracker
def get_track_dict(TRAIN):
# get list of all files in directory and corresponding path to track and labels
if TRAIN:
track_dir = data_paths["train_im"]
label_dir = data_paths["train_lab"]
else:
track_dir = data_paths["test_im"]
label_dir = data_paths["test_lab"]
track_list = [os.path.join(track_dir,item) for item in os.listdir(track_dir)]
label_list = [os.path.join(label_dir,item) for item in os.listdir(label_dir)]
track_dict = {}
for item in track_list:
id = int(item.split("MVI_")[-1])
track_dict[id] = {"frames": item,
"labels": None}
for item in label_list:
if not TRAIN:
id = int(item.split("MVI_")[-1].split(".xml")[0])
else:
id = int(item.split("MVI_")[-1].split("_v3.xml")[0])
track_dict[id]['labels'] = item
return track_dict
if __name__ == "__main__":
det_steps = [1,8]
TRAIN = False
confs = [0]
GPU_ID = 0
mode = "iou"
SHOW = False
truncation_count = 0
class_dict = {
'Sedan':0,
'Hatchback':1,
'Suv':2,
'Van':3,
'Police':4,
'Taxi':5,
'Bus':6,
'Truck-Box-Large':7,
'MiniVan':8,
'Truck-Box-Med':9,
'Truck-Util':10,
'Truck-Pickup':11,
'Truck-Flatbed':12
}
class_count = np.zeros([13])
# get track_dict
track_dict = get_track_dict(TRAIN)
tracks = [key for key in track_dict]
tracks.sort()
#tracks.reverse()
#override tracks with a shorter list
#tracks = [39761,40141,40213,40241,40963,40992,63521]
#tracks = [40863,40864,40892,40763,39501,39511,40761,40903]
# for each track and for specified det_step, track and evaluate
running_metrics = {}
count = 0
for id in tracks:
if id in [40712,40774,40773,40772,40771,40711,40792,40775,39361,40901]:
continue
# get ground truth labels
gts,metadata = mot.parse_labels(track_dict[id]["labels"])
for frame in gts:
for det in frame:
cls = det["class_num"]
if det["truncation"] > 0.5:
truncation_count += 1
class_count[cls] += 1
print("Finished {}".format(id))
total_objs = np.sum(class_count)
for key in class_dict:
print("{}: {}".format(key,class_count[class_dict[key]]))
print("{} total objects".format(total_objs))
|
en
| 0.669599
|
# detector_path = os.path.join(os.getcwd(),"models","pytorch_retinanet_detector") # sys.path.insert(0,detector_path) # get list of all files in directory and corresponding path to track and labels # get track_dict #tracks.reverse() #override tracks with a shorter list #tracks = [39761,40141,40213,40241,40963,40992,63521] #tracks = [40863,40864,40892,40763,39501,39511,40761,40903] # for each track and for specified det_step, track and evaluate # get ground truth labels
| 1.990465
| 2
|
Python/Nqueen.py
|
montukv/Coding-problem-solutions
| 0
|
6627136
|
<filename>Python/Nqueen.py
def isSafe(board,x,y,n):
    # check the column above row x for an already placed queen
    for row in range(x):
        if(board[row][y] == 1):
            return False
    # check the upper-left diagonal
    row = x
    col = y
    while(row>=0 and col>=0):
        if(board[row][col] == 1):
            return False
        row -= 1
        col -= 1
    # check the upper-right diagonal
    row = x
    col = y
    while(row>=0 and col<n):
        if(board[row][col] == 1):
            return False
        row -= 1
        col += 1
    return True
def nQueen(board,x,n):
    # all n queens have been placed
    if(x==n):
        return True
    # try each column in row x, recurse, and backtrack on failure
    for col in range(n):
        if(isSafe(board,x,col,n)):
            board[x][col] = 1
            if(nQueen(board,x+1,n)):
                return True
            board[x][col] = 0
    return False
n = int(input())
board = []
for i in range(n):
temp = [0]*n
board.append(temp)
nQueen(board,0,n)
print("\n")
for i in range(n):
print(board[i])
print("\n")
|
none
| 1
| 3.674052
| 4
|
|
PycharmProjects/pythonteste/ex044.py
|
caioalexleme/Curso_Python
| 3
|
6627137
|
<filename>PycharmProjects/pythonteste/ex044.py
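# Reads a product price and a payment method, then prints the final price with the
# matching discount or surcharge (the prompts and messages below are in Portuguese).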
valor = float(input('Qual o valor do produto? R$'))
pagamento = int(input('''Qual a forma de pagamento?
[1]À vista (dinheiro cheque)
[2]À vista no cartão
[3]Em até 2X no cartão
[4]3X ou mais no cartão
Digite aqui a opção: '''))
if pagamento == 1:
print('Você ganhou 10% de desconto o valor é R${:.2f}'.format(valor - (valor * 0.10)))
elif pagamento == 2:
print('Você ganhou 5% de desconto o valor é R${:.2f}'.format(valor - (valor * 0.05)))
elif pagamento == 3:
print('R${:.2f}'.format(valor))
elif pagamento == 4:
print('Seu produto fica no valor de R${:.2f}'.format(valor + (valor * 0.20)))
else:
print('[ERRO] Opção inválida. Tente novamente')
|
pt
| 0.985177
|
Qual a forma de pagamento? [1]À vista (dinheiro cheque) [2]À vista no cartão [3]Em até 2X no cartão [4]3X ou mais no cartão Digite aqui a opção:
| 4.017843
| 4
|
tensorflow_quantum/core/ops/math_ops/inner_product_op.py
|
amogh7joshi/quantum
| 1
|
6627138
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Module to register python op gradient."""
import os
import tensorflow as tf
from tensorflow_quantum.core.ops.load_module import load_module
MATH_OP_MODULE = load_module(os.path.join("math_ops", "_tfq_math_ops.so"))
def inner_product(programs, symbol_names, symbol_values, other_programs):
"""Calculate the inner product between circuits.
Compute (potentially many) inner products between the given circuits and
the symbol free comparison circuits.
Calculates out[i][j] = $ \langle \psi_{\text{programs[i]}} \\
(\text{symbol_values[i]}) | \psi_{\text{other_programs[j]}} \rangle $
>>> symbols = sympy.symbols('alpha beta')
>>> qubits = cirq.GridQubit.rect(1, 2)
>>> reference_circuits = [
... cirq.Circuit((cirq.H**symbols[0]).on_each(qubits)),
... cirq.Circuit(
... cirq.X(qubits[0]) ** symbols[0],
... cirq.Y(qubits[1]) ** symbols[1])
... ]
>>> other_circuits = [
... cirq.Circuit(cirq.X.on_each(qubits)),
... cirq.Circuit((cirq.Y**0.125).on_each(qubits)),
... cirq.Circuit((cirq.X**0.5).on_each(qubits))
... ]
>>> reference_tensor = tfq.convert_to_tensor(reference_circuits)
>>> symbol_tensor = tf.convert_to_tensor([s.name for s in symbols])
>>> values_tensor = tf.convert_to_tensor(np.arange(4).reshape(2, 2))
>>> other_tensor = tfq.convert_to_tensor([other_circuits, other_circuits])
>>> ip = tfq.math.inner_product(reference_tensor, symbol_tensor,
... values_tensor, other_tensor)
>>> ip
tf.Tensor(
[[ 0+0.j, 8.8871640e-01+0.3681184j,
0+0.5j],
[ 0+0.j, 7.3223300e-02-0.17677669j,
0-0.5j]],shape=(2, 3), dtype=complex64)
Note: `other_programs` must not contain any free symbols. These can
be resolved beforehand with `tfq.resolve_parameters`.
Note: Currently this op is not differentiable.
Args:
programs: `tf.Tensor` of strings with shape [batch_size] containing
the string representations of the circuits
symbol_names: `tf.Tensor` of strings with shape [n_params], which
is used to specify the order in which the values in
`symbol_values` should be placed inside of the circuits in
`programs`.
symbol_values: `tf.Tensor` of real numbers with shape
[batch_size, n_params] specifying parameter values to resolve
into the circuits specified by programs, following the ordering
dictated by `symbol_names`.
other_programs: `tf.Tensor` of strings with shape [batch_size, n_others]
containing the string representations of the circuits with which to
compute the overlap with `programs`. Must not contain any free
symbols.
Returns:
`tf.Tensor` with shape [batch_size, n_others] where `out[i][j]` is equal
to the inner product of `programs[i]` with `symbol_values[i]`
resolved in and `other_programs[i][j]`.
"""
return MATH_OP_MODULE.tfq_inner_product(programs, symbol_names,
tf.cast(symbol_values, tf.float32),
other_programs)
|
en
| 0.669814
|
# Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Module to register python op gradient. Calculate the inner product between circuits. Compute (potentially many) inner products between the given circuits and the symbol free comparison circuits. Calculates out[i][j] = $ \langle \psi_{\text{programs[i]}} \\ (\text{symbol_values[i]}) | \psi_{\text{other_programs[j]}} \rangle $ >>> symbols = sympy.symbols('alpha beta') >>> qubits = cirq.GridQubit.rect(1, 2) >>> reference_circuits = [ ... cirq.Circuit((cirq.H**symbols[0]).on_each(qubits)), ... cirq.Circuit( ... cirq.X(qubits[0]) ** symbols[0], ... cirq.Y(qubits[1]) ** symbols[1]) ... ] >>> other_circuits = [ ... cirq.Circuit(cirq.X.on_each(qubits)), ... cirq.Circuit((cirq.Y**0.125).on_each(qubits)), ... cirq.Circuit((cirq.X**0.5).on_each(qubits)) ... ] >>> reference_tensor = tfq.convert_to_tensor(reference_circuits) >>> symbol_tensor = tf.convert_to_tensor([s.name for s in symbols]) >>> values_tensor = tf.convert_to_tensor(np.arange(4).reshape(2, 2)) >>> other_tensor = tfq.convert_to_tensor([other_circuits, other_circuits]) >>> ip = tfq.math.inner_product(reference_tensor, symbol_tensor, ... values_tensor, other_tensor) >>> ip tf.Tensor( [[ 0+0.j, 8.8871640e-01+0.3681184j, 0+0.5j], [ 0+0.j, 7.3223300e-02-0.17677669j, 0-0.5j]],shape=(2, 3), dtype=complex64) Note: `other_programs` must not contain any free symbols. These can be resolved beforehand with `tfq.resolve_parameters`. Note: Currently this op is not differentiable. Args: programs: `tf.Tensor` of strings with shape [batch_size] containing the string representations of the circuits symbol_names: `tf.Tensor` of strings with shape [n_params], which is used to specify the order in which the values in `symbol_values` should be placed inside of the circuits in `programs`. symbol_values: `tf.Tensor` of real numbers with shape [batch_size, n_params] specifying parameter values to resolve into the circuits specificed by programs, following the ordering dictated by `symbol_names`. other_programs: `tf.Tensor` of strings with shape [batch_size, n_others] containing the string representations of the circuits with which to compute the overlap on `programs` with. Must not contain any free symbols. Returns: `tf.Tensor` with shape [batch_size, n_others] where `out[i][j]` is equal to the inner product of `programs[i]` with `symbol_values[i]` resolved in and `other_programs[i][j]`.
| 2.117136
| 2
|
run.py
|
abhilashmnair/weather-webhook
| 1
|
6627139
|
<filename>run.py
from telegram import *
from telegram.ext import *
import requests
import json
token = 'YOUR_BOT_TOKEN'
bot = Bot(token)
updater = Updater(token,use_context=True)
dispatcher : Dispatcher = updater.dispatcher
def getWeatherData(city_name):
key = 'OpenWeatherAPI_KEY'
URL = f'https://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={key}&units=metric'
response = requests.get(url = URL)
payload = response.json()
try:
cur_weather = payload['weather'][0]['main']
cur_temp = payload['main']['temp']
return f'Weather in {city_name}\nCurrent weather : {cur_weather}\nTemperature : {cur_temp}°C'
except KeyError:
return "Sorry, couldn't get the data for given location!"
def sendResponse(update:Update,context:CallbackContext):
if update.message.text == '/start':
bot.sendMessage(chat_id = update.effective_chat.id, text = 'Enter the city name', parse_mode = 'HTML')
else:
weatherData = getWeatherData(update.message.text)
bot.sendMessage(chat_id = update.effective_chat.id,text = weatherData,parse_mode = 'HTML')
dispatcher.add_handler(MessageHandler(Filters.text,sendResponse))
updater.start_polling()
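# For reference, a hedged sketch of the part of the OpenWeatherMap current-weather
# response that getWeatherData() reads (the values below are made up):
sample_payload = {
    'weather': [{'main': 'Clouds', 'description': 'overcast clouds'}],
    'main': {'temp': 24.3, 'humidity': 78},
}
# payload['weather'][0]['main'] -> 'Clouds', payload['main']['temp'] -> 24.3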
|
<filename>run.py
from telegram import *
from telegram.ext import *
import requests
import json
token = 'YOUR_BOT_TOKEN'
bot = Bot(token)
updater = Updater(token,use_context=True)
dispatcher : Dispatcher = updater.dispatcher
def getWeatherData(city_name):
key = 'OpenWeatherAPI_KEY'
URL = f'https://api.openweathermap.org/data/2.5/weather?q={city_name}&appid={key}&units=metric'
response = requests.get(url = URL)
payload = response.json()
try:
cur_weather = payload['weather'][0]['main']
cur_temp = payload['main']['temp']
return f'Weather in {city_name}\nCurrent weather : {cur_weather}\nTemperature : {cur_temp}°C'
except KeyError:
return "Sorry, couldn't get the data for given location!"
def sendResponse(update:Update,context:CallbackContext):
if update.message.text == '/start':
bot.sendMessage(chat_id = update.effective_chat.id, text = 'Enter the city name', parse_mode = 'HTML')
else:
weatherData = getWeatherData(update.message.text)
bot.sendMessage(chat_id = update.effective_chat.id,text = weatherData,parse_mode = 'HTML')
dispatcher.add_handler(MessageHandler(Filters.text,sendResponse))
updater.start_polling()
# Keep the process alive until it receives a stop signal (e.g. Ctrl+C).
updater.idle()
|
none
| 1
| 2.94703
| 3
|
|
Chapter03/chapter_03_example_03.py
|
pesader/hands-on-music-generation-with-magenta
| 0
|
6627140
|
"""
This example shows a polyphonic generation with the performance rnn model.
VERSION: Magenta 1.1.7
"""
import math
import os
import time
import magenta.music as mm
import tensorflow as tf
from magenta.models.performance_rnn import performance_sequence_generator
from note_seq.protobuf.generator_pb2 import GeneratorOptions
from note_seq.protobuf.music_pb2 import NoteSequence
from visual_midi import Plotter
from magenta.models.shared import sequence_generator_bundle
from note_seq.constants import(
DEFAULT_QUARTERS_PER_MINUTE,
)
def generate(bundle_name: str,
sequence_generator,
generator_id: str,
primer_filename: str = None,
qpm: float = DEFAULT_QUARTERS_PER_MINUTE,
notes_per_second: str = None,
pitch_class_histogram: str = None,
total_length_steps: int = 64,
temperature: float = 1.0,
beam_size: int = 1,
branch_factor: int = 1,
steps_per_iteration: int = 1) -> NoteSequence:
"""Generates and returns a new sequence given the sequence generator.
Uses the bundle name to download the bundle in the "bundles" directory if it
doesn't already exist, then uses the sequence generator and the generator id
to get the generator. Parameters can be provided for the generation phase.
The MIDI and plot files are written to disk in the "output" folder, with the
filename pattern "<generator_name>_<generator_id>_<date_time>" with "mid" or
"html" as extension respectively.
:param bundle_name: The bundle name to be downloaded and generated with.
:param sequence_generator: The sequence generator module, which is the
python module in the corresponding models subfolder.
:param generator_id: The id of the generator configuration, this is the
model's configuration.
:param primer_filename: The filename for the primer, which will be taken
  from the "primers" directory. If left empty, an empty note sequence will
be used.
:param qpm: The QPM for the generated sequence. If a primer is provided,
the primer QPM will be used and this parameter ignored.
:param notes_per_second: The approximate number of notes per second in
the generated output. Requires more RNN steps, which makes the generation
longer.
:param pitch_class_histogram:
:param total_length_steps: The total length of the sequence, which contains
the added length of the primer and the generated sequence together. This
  value needs to be larger than the primer length in bars.
:param temperature: The temperature value for the generation algorithm,
  values below 1 are less random (closer to the primer), values above 1 are
  more random
  :param beam_size: The beam size for the generation algorithm, a bigger
  beam size means the generation algorithm will generate more sequences
  each iteration, meaning a less random sequence at the cost of more time.
:param branch_factor: The branch factor for the generation algorithm,
a bigger branch factor means the generation algorithm will keep more
sequence candidates at each iteration, meaning a less random sequence
at the cost of more time.
  :param steps_per_iteration: The number of steps the generation algorithm
  generates at each iteration; a bigger value means fewer iterations in
  total because more steps get generated each time.
  :return: The generated NoteSequence
"""
# Downloads the bundle from the magenta website, a bundle (.mag file) is a
# trained model that is used by magenta
mm.notebook_utils.download_bundle(bundle_name, "bundles")
bundle = sequence_generator_bundle.read_bundle_file(
os.path.join("bundles", bundle_name))
  # Initialize the generator from the generator id, this needs to match the
# bundle we downloaded before, and choose the model's configuration.
generator_map = sequence_generator.get_generator_map()
generator = generator_map[generator_id](checkpoint=None, bundle=bundle)
generator.initialize()
# Gets the primer sequence that is fed into the model for the generator,
# which will generate a sequence based on this one.
# If no primer sequence is given, the primer sequence is initialized
# to an empty note sequence
if primer_filename:
primer_sequence = mm.midi_io.midi_file_to_note_sequence(
os.path.join("primers", primer_filename))
else:
primer_sequence = NoteSequence()
  # Gets the QPM from the primer sequence. If the primer doesn't define a
  # tempo, fall back to the qpm argument, which defaults to Magenta's default.
if primer_sequence.tempos:
if len(primer_sequence.tempos) > 1:
raise Exception("No support for multiple tempos")
qpm = primer_sequence.tempos[0].qpm
# Calculates the seconds per 1 step, which changes depending on the QPM value
# (steps per quarter in generators are mostly 4)
seconds_per_step = 60.0 / qpm / getattr(generator, "steps_per_quarter", 4)
# Calculates the primer sequence length in steps and time by taking the
# total time (which is the end of the last note) and finding the next step
# start time.
primer_sequence_length_steps = math.ceil(primer_sequence.total_time
/ seconds_per_step)
primer_sequence_length_time = primer_sequence_length_steps * seconds_per_step
# Calculates the start and the end of the primer sequence.
# We add a negative delta to the end, because if we don't some generators
# won't start the generation right at the beginning of the bar, they will
# start at the next step, meaning we'll have a small gap between the primer
# and the generated sequence.
primer_end_adjust = (0.00001 if primer_sequence_length_time > 0 else 0)
primer_start_time = 0
primer_end_time = (primer_start_time
+ primer_sequence_length_time
- primer_end_adjust)
  # Calculates the generation time by taking the total time and subtracting
# the primer time. The resulting generation time needs to be bigger than zero.
generation_length_steps = total_length_steps - primer_sequence_length_steps
if generation_length_steps <= 0:
raise Exception("Total length in steps too small "
+ "(" + str(total_length_steps) + ")"
+ ", needs to be at least one bar bigger than primer "
+ "(" + str(primer_sequence_length_steps) + ")")
generation_length_time = generation_length_steps * seconds_per_step
# Calculates the generate start and end time, the start time will contain
# the previously added negative delta from the primer end time.
# We remove the generation end time delta to end the generation
# on the last bar.
generation_start_time = primer_end_time
generation_end_time = (generation_start_time
+ generation_length_time
+ primer_end_adjust)
# Showtime
print(f"Primer time: [{primer_start_time}, {primer_end_time}]")
print(f"Generation time: [{generation_start_time}, {generation_end_time}]")
# Pass the given parameters, the generator options are common for all models,
# except for condition_on_primer and no_inject_primer_during_generation
# which are specific to polyphonic models
generator_options = GeneratorOptions()
generator_options.args['temperature'].float_value = temperature
generator_options.args['beam_size'].int_value = beam_size
generator_options.args['branch_factor'].int_value = branch_factor
generator_options.args['steps_per_iteration'].int_value = steps_per_iteration
if notes_per_second:
generator_options.args['notes_per_second'].string_value = notes_per_second
if pitch_class_histogram:
generator_options.args['pitch_class_histogram'].string_value = (
pitch_class_histogram)
generator_options.generate_sections.add(
start_time=generation_start_time,
end_time=generation_end_time)
  # Generates the sequence, and adds the time signature
  # back to the generated sequence
sequence = generator.generate(primer_sequence, generator_options)
# Writes the resulting midi file to the output directory
date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
generator_name = str(generator.__class__).split(".")[2]
midi_filename = "%s_%s_%s.mid" % (generator_name, generator_id,
date_and_time)
midi_path = os.path.join("output", midi_filename)
mm.midi_io.note_sequence_to_midi_file(sequence, midi_path)
print(f"Generated midi file: {os.path.abspath(midi_path)}")
# Writes the resulting plot file to the output directory
date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
generator_name = str(generator.__class__).split(".")[2]
plot_filename = "%s_%s_%s.html" % (generator_name, generator_id,
date_and_time)
plot_path = os.path.join("output", plot_filename)
pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
plotter = Plotter()
plotter.save(pretty_midi, plot_path)
print(f"Generated plot file: {os.path.abspath(plot_path)}")
return sequence
def app():
# Generates a sequence with expressive timing and variable velocity
generate(
"performance_with_dynamics.mag",
performance_sequence_generator,
"performance_with_dynamics",
primer_filename="Fur_Elisa_Beethoveen_Polyphonic.mid",
)
# Generates a sequence with expressive timing, variable velocity, and
  # fewer notes per second (default is 10)
generate(
"density_conditioned_performance_with_dynamics.mag",
performance_sequence_generator,
"density_conditioned_performance_with_dynamics",
primer_filename="Fur_Elisa_Beethoveen_Polyphonic.mid",
notes_per_second="5"
)
# Generates a sequence with expressive timing, variable velocity, and
# a specific pitch density corresponding to the F major scale
generate(
"pitch_conditioned_performance_with_dynamics.mag",
performance_sequence_generator,
"pitch_conditioned_performance_with_dynamics",
primer_filename="Fur_Elisa_Beethoveen_Polyphonic.mid",
pitch_class_histogram="[1, 0, 1, 0, 1, 2, 0, 1, 0, 1, 0, 1]"
)
return 0
if __name__ == "__main__":
app()
|
"""
This example shows a polyphonic generation with the performance rnn model.
VERSION: Magenta 1.1.7
"""
import math
import os
import time
import magenta.music as mm
import tensorflow as tf
from magenta.models.performance_rnn import performance_sequence_generator
from note_seq.protobuf.generator_pb2 import GeneratorOptions
from note_seq.protobuf.music_pb2 import NoteSequence
from visual_midi import Plotter
from magenta.models.shared import sequence_generator_bundle
from note_seq.constants import(
DEFAULT_QUARTERS_PER_MINUTE,
)
def generate(bundle_name: str,
sequence_generator,
generator_id: str,
primer_filename: str = None,
qpm: float = DEFAULT_QUARTERS_PER_MINUTE,
notes_per_second: str = None,
pitch_class_histogram: str = None,
total_length_steps: int = 64,
temperature: float = 1.0,
beam_size: int = 1,
branch_factor: int = 1,
steps_per_iteration: int = 1) -> NoteSequence:
"""Generates and returns a new sequence given the sequence generator.
Uses the bundle name to download the bundle in the "bundles" directory if it
doesn't already exist, then uses the sequence generator and the generator id
to get the generator. Parameters can be provided for the generation phase.
The MIDI and plot files are written to disk in the "output" folder, with the
filename pattern "<generator_name>_<generator_id>_<date_time>" with "mid" or
"html" as extension respectively.
:param bundle_name: The bundle name to be downloaded and generated with.
:param sequence_generator: The sequence generator module, which is the
python module in the corresponding models subfolder.
:param generator_id: The id of the generator configuration, this is the
model's configuration.
:param primer_filename: The filename for the primer, which will be taken
  from the "primers" directory. If left empty, an empty note sequence will
be used.
:param qpm: The QPM for the generated sequence. If a primer is provided,
the primer QPM will be used and this parameter ignored.
:param notes_per_second: The approximate number of notes per second in
the generated output. Requires more RNN steps, which makes the generation
longer.
:param pitch_class_histogram:
:param total_length_steps: The total length of the sequence, which contains
the added length of the primer and the generated sequence together. This
  value needs to be larger than the primer length in bars.
:param temperature: The temperature value for the generation algorithm,
  values below 1 are less random (closer to the primer), values above 1 are
  more random
  :param beam_size: The beam size for the generation algorithm, a bigger
  beam size means the generation algorithm will generate more sequences
  each iteration, meaning a less random sequence at the cost of more time.
:param branch_factor: The branch factor for the generation algorithm,
a bigger branch factor means the generation algorithm will keep more
sequence candidates at each iteration, meaning a less random sequence
at the cost of more time.
  :param steps_per_iteration: The number of steps the generation algorithm
  generates at each iteration; a bigger value means fewer iterations in
  total because more steps get generated each time.
  :return: The generated NoteSequence
"""
# Downloads the bundle from the magenta website, a bundle (.mag file) is a
# trained model that is used by magenta
mm.notebook_utils.download_bundle(bundle_name, "bundles")
bundle = sequence_generator_bundle.read_bundle_file(
os.path.join("bundles", bundle_name))
  # Initialize the generator from the generator id, this needs to match the
# bundle we downloaded before, and choose the model's configuration.
generator_map = sequence_generator.get_generator_map()
generator = generator_map[generator_id](checkpoint=None, bundle=bundle)
generator.initialize()
# Gets the primer sequence that is fed into the model for the generator,
# which will generate a sequence based on this one.
# If no primer sequence is given, the primer sequence is initialized
# to an empty note sequence
if primer_filename:
primer_sequence = mm.midi_io.midi_file_to_note_sequence(
os.path.join("primers", primer_filename))
else:
primer_sequence = NoteSequence()
  # Gets the QPM from the primer sequence. If the primer doesn't define a
  # tempo, fall back to the qpm argument, which defaults to Magenta's default.
if primer_sequence.tempos:
if len(primer_sequence.tempos) > 1:
raise Exception("No support for multiple tempos")
qpm = primer_sequence.tempos[0].qpm
# Calculates the seconds per 1 step, which changes depending on the QPM value
# (steps per quarter in generators are mostly 4)
seconds_per_step = 60.0 / qpm / getattr(generator, "steps_per_quarter", 4)
# Calculates the primer sequence length in steps and time by taking the
# total time (which is the end of the last note) and finding the next step
# start time.
primer_sequence_length_steps = math.ceil(primer_sequence.total_time
/ seconds_per_step)
primer_sequence_length_time = primer_sequence_length_steps * seconds_per_step
# Calculates the start and the end of the primer sequence.
# We add a negative delta to the end, because if we don't some generators
# won't start the generation right at the beginning of the bar, they will
# start at the next step, meaning we'll have a small gap between the primer
# and the generated sequence.
primer_end_adjust = (0.00001 if primer_sequence_length_time > 0 else 0)
primer_start_time = 0
primer_end_time = (primer_start_time
+ primer_sequence_length_time
- primer_end_adjust)
  # Calculates the generation time by taking the total time and subtracting
# the primer time. The resulting generation time needs to be bigger than zero.
generation_length_steps = total_length_steps - primer_sequence_length_steps
if generation_length_steps <= 0:
raise Exception("Total length in steps too small "
+ "(" + str(total_length_steps) + ")"
+ ", needs to be at least one bar bigger than primer "
+ "(" + str(primer_sequence_length_steps) + ")")
generation_length_time = generation_length_steps * seconds_per_step
# Calculates the generate start and end time, the start time will contain
# the previously added negative delta from the primer end time.
# We remove the generation end time delta to end the generation
# on the last bar.
generation_start_time = primer_end_time
generation_end_time = (generation_start_time
+ generation_length_time
+ primer_end_adjust)
# Showtime
print(f"Primer time: [{primer_start_time}, {primer_end_time}]")
print(f"Generation time: [{generation_start_time}, {generation_end_time}]")
# Pass the given parameters, the generator options are common for all models,
# except for condition_on_primer and no_inject_primer_during_generation
# which are specific to polyphonic models
generator_options = GeneratorOptions()
generator_options.args['temperature'].float_value = temperature
generator_options.args['beam_size'].int_value = beam_size
generator_options.args['branch_factor'].int_value = branch_factor
generator_options.args['steps_per_iteration'].int_value = steps_per_iteration
if notes_per_second:
generator_options.args['notes_per_second'].string_value = notes_per_second
if pitch_class_histogram:
generator_options.args['pitch_class_histogram'].string_value = (
pitch_class_histogram)
generator_options.generate_sections.add(
start_time=generation_start_time,
end_time=generation_end_time)
  # Generates the sequence, and adds the time signature
  # back to the generated sequence
sequence = generator.generate(primer_sequence, generator_options)
# Writes the resulting midi file to the output directory
date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
generator_name = str(generator.__class__).split(".")[2]
midi_filename = "%s_%s_%s.mid" % (generator_name, generator_id,
date_and_time)
midi_path = os.path.join("output", midi_filename)
mm.midi_io.note_sequence_to_midi_file(sequence, midi_path)
print(f"Generated midi file: {os.path.abspath(midi_path)}")
# Writes the resulting plot file to the output directory
date_and_time = time.strftime('%Y-%m-%d_%H%M%S')
generator_name = str(generator.__class__).split(".")[2]
plot_filename = "%s_%s_%s.html" % (generator_name, generator_id,
date_and_time)
plot_path = os.path.join("output", plot_filename)
pretty_midi = mm.midi_io.note_sequence_to_pretty_midi(sequence)
plotter = Plotter()
plotter.save(pretty_midi, plot_path)
print(f"Generated plot file: {os.path.abspath(plot_path)}")
return sequence
def app():
# Generates a sequence with expressive timing and variable velocity
generate(
"performance_with_dynamics.mag",
performance_sequence_generator,
"performance_with_dynamics",
primer_filename="Fur_Elisa_Beethoveen_Polyphonic.mid",
)
# Generates a sequence with expressive timing, variable velocity, and
  # fewer notes per second (default is 10)
generate(
"density_conditioned_performance_with_dynamics.mag",
performance_sequence_generator,
"density_conditioned_performance_with_dynamics",
primer_filename="Fur_Elisa_Beethoveen_Polyphonic.mid",
notes_per_second="5"
)
# Generates a sequence with expressive timing, variable velocity, and
# a specific pitch density corresponding to the F major scale
generate(
"pitch_conditioned_performance_with_dynamics.mag",
performance_sequence_generator,
"pitch_conditioned_performance_with_dynamics",
primer_filename="Fur_Elisa_Beethoveen_Polyphonic.mid",
pitch_class_histogram="[1, 0, 1, 0, 1, 2, 0, 1, 0, 1, 0, 1]"
)
return 0
if __name__ == "__main__":
app()
|
en
| 0.847685
|
This example shows a polyphonic generation with the performance rnn model. VERSION: Magenta 1.1.7 Generates and returns a new sequence given the sequence generator. Uses the bundle name to download the bundle in the "bundles" directory if it doesn't already exist, then uses the sequence generator and the generator id to get the generator. Parameters can be provided for the generation phase. The MIDI and plot files are written to disk in the "output" folder, with the filename pattern "<generator_name>_<generator_id>_<date_time>" with "mid" or "html" as extension respectively. :param bundle_name: The bundle name to be downloaded and generated with. :param sequence_generator: The sequence generator module, which is the python module in the corresponding models subfolder. :param generator_id: The id of the generator configuration, this is the model's configuration. :param primer_filename: The filename for the primer, which will be taken from the "primers" directory. If left empty, and empty note sequence will be used. :param qpm: The QPM for the generated sequence. If a primer is provided, the primer QPM will be used and this parameter ignored. :param notes_per_second: The approximate number of notes per second in the generated output. Requires more RNN steps, which makes the generation longer. :param pitch_class_histogram: :param total_length_steps: The total length of the sequence, which contains the added length of the primer and the generated sequence together. This value need to be bigger than the primer length in bars. :param temperature: The temperature value for the generation algorithm, lesser than 1 is less random (closer to the primer), bigger than 1 is more random :param beam_size: The beam size for the generation algorithm, a bigger branch size means the generation algorithm will generate more sequence each iteration, meaning a less random sequence at the cost of more time. :param branch_factor: The branch factor for the generation algorithm, a bigger branch factor means the generation algorithm will keep more sequence candidates at each iteration, meaning a less random sequence at the cost of more time. :param steps_per_iteration: The number of steps the generation algorithm generates at each iteration, a bigger steps per iteration meaning there are less iterations in total because more steps gets generated each time. :returns The generated NoteSequence # Downloads the bundle from the magenta website, a bundle (.mag file) is a # trained model that is used by magenta # Initialize the generator from the generator id, this need to fit the # bundle we downloaded before, and choose the model's configuration. # Gets the primer sequence that is fed into the model for the generator, # which will generate a sequence based on this one. # If no primer sequence is given, the primer sequence is initialized # to an empty note sequence # Gets the QPM from the primer sequence. If it wasn't provided, take the # parameters that defaults to Magenta's default # Calculates the seconds per 1 step, which changes depending on the QPM value # (steps per quarter in generators are mostly 4) # Calculates the primer sequence length in steps and time by taking the # total time (which is the end of the last note) and finding the next step # start time. # Calculates the start and the end of the primer sequence. 
# We add a negative delta to the end, because if we don't some generators # won't start the generation right at the beginning of the bar, they will # start at the next step, meaning we'll have a small gap between the primer # and the generated sequence. # Calculates the generation time by taking the total time and substracting # the primer time. The resulting generation time needs to be bigger than zero. # Calculates the generate start and end time, the start time will contain # the previously added negative delta from the primer end time. # We remove the generation end time delta to end the generation # on the last bar. # Showtime # Pass the given parameters, the generator options are common for all models, # except for condition_on_primer and no_inject_primer_during_generation # which are specific to polyphonic models # Generates the sequence, add add the time signature # back to the generated sequence # Writes the resulting midi file to the output directory # Writes the resulting plot file to the output directory # Generates a sequence with expressive timing and variable velocity # Generates a sequence with expressive timing, variable velocity, and # less notes per seconds (defaults 10) # Generates a sequence with expressive timing, variable velocity, and # a specific pitch density corresponding to the F major scale
| 2.842292
| 3
|
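The generate() docstring above derives the primer and generation windows from the QPM and step counts. The following sketch works through that arithmetic with assumed values (qpm=120, steps_per_quarter=4, a 2-second primer, total_length_steps=64); it mirrors the formulas used in the function but is not part of the original example.

import math

# Assumed inputs, chosen only to make the calculation concrete.
qpm = 120
steps_per_quarter = 4
primer_total_time = 2.0        # seconds, end of the primer's last note
total_length_steps = 64

seconds_per_step = 60.0 / qpm / steps_per_quarter                        # 0.125 s
primer_length_steps = math.ceil(primer_total_time / seconds_per_step)   # 16 steps
primer_length_time = primer_length_steps * seconds_per_step             # 2.0 s

generation_length_steps = total_length_steps - primer_length_steps      # 48 steps
generation_length_time = generation_length_steps * seconds_per_step     # 6.0 s
print(seconds_per_step, primer_length_steps, generation_length_steps, generation_length_time)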
January/Day6-Car-Pooling.py
|
tayyrov/Daily_Coding_Challenge
| 1
|
6627141
|
"""
Question Source: https://leetcode.com/problems/car-pooling/
Level: Medium
Topic: Sorting, simulation
Solver: Tayyrov
Date: 01.06.2022
"""
from typing import *
def carPooling(trips: List[List[int]], capacity: int) -> bool:
passenger_logistics = []
for nums_pass, add, remove in trips:
        # Negative counts sort before positive ones at the same stop, so after
        # sorting we drop passengers off before picking new ones up.
passenger_logistics.append((add, nums_pass))
passenger_logistics.append((remove, -nums_pass))
passenger_logistics.sort()
# print(passenger_logistics) [[2,1,5],[3,5,7]] => [(1, 2), (5, -2), (5, 3), (7, -3)]
current_people = 0
for action, people in passenger_logistics:
current_people += people
if current_people > capacity:
return False
return True
def check():
assert carPooling([[2, 1, 5], [3, 3, 7]], 4) == False
assert carPooling([[2, 1, 5], [3, 3, 7]], 5) == True
print("All runs are OK")
check()
|
"""
Question Source: https://leetcode.com/problems/car-pooling/
Level: Medium
Topic: Sorting, simulation
Solver: Tayyrov
Date: 01.06.2022
"""
from typing import *
def carPooling(trips: List[List[int]], capacity: int) -> bool:
passenger_logistics = []
for nums_pass, add, remove in trips:
        # Negative counts sort before positive ones at the same stop, so after
        # sorting we drop passengers off before picking new ones up.
passenger_logistics.append((add, nums_pass))
passenger_logistics.append((remove, -nums_pass))
passenger_logistics.sort()
# print(passenger_logistics) [[2,1,5],[3,5,7]] => [(1, 2), (5, -2), (5, 3), (7, -3)]
current_people = 0
for action, people in passenger_logistics:
current_people += people
if current_people > capacity:
return False
return True
def check():
assert carPooling([[2, 1, 5], [3, 3, 7]], 4) == False
assert carPooling([[2, 1, 5], [3, 3, 7]], 5) == True
print("All runs are OK")
check()
|
en
| 0.793115
|
Question Source: https://leetcode.com/problems/car-pooling/
Level: Medium
Topic: Sorting, simulation
Solver: Tayyrov
Date: 01.06.2022 # minus comes before plus, meaning after sorting first we remove then we add. # print(passenger_logistics) [[2,1,5],[3,5,7]] => [(1, 2), (5, -2), (5, 3), (7, -3)]
| 3.831848
| 4
|
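The solution above turns each trip into pick-up and drop-off events, sorts them, and sweeps while tracking the current load. Because the LeetCode constraints bound stop positions, an equivalent difference-array version is possible; the bound of 1000 used below is an assumption taken from those constraints, not from the file above.

from typing import List

def car_pooling_diff_array(trips: List[List[int]], capacity: int) -> bool:
    # Difference array over stop positions: +passengers at pick-up,
    # -passengers at drop-off; a running prefix sum gives the load in the car.
    delta = [0] * 1001
    for num_passengers, start, end in trips:
        delta[start] += num_passengers
        delta[end] -= num_passengers
    load = 0
    for change in delta:
        load += change
        if load > capacity:
            return False
    return True

assert car_pooling_diff_array([[2, 1, 5], [3, 3, 7]], 4) == False
assert car_pooling_diff_array([[2, 1, 5], [3, 3, 7]], 5) == True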
python/pyspark/ml/param/_shared_params_code_gen.py
|
dobashim/spark
| 0
|
6627142
|
<reponame>dobashim/spark
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
header = """#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#"""
# Code generator for shared params (shared.py). Run under this folder with:
# python _shared_params_code_gen.py > shared.py
def _gen_param_code(name, doc, defaultValueStr):
"""
Generates Python code for a shared param class.
:param name: param name
:param doc: param doc
:param defaultValueStr: string representation of the default value
:return: code string
"""
# TODO: How to correctly inherit instance attributes?
template = '''class Has$Name(Params):
"""
Mixin for param $name: $doc.
"""
# a placeholder to make it appear in the generated doc
$name = Param(Params._dummy(), "$name", "$doc")
def __init__(self):
super(Has$Name, self).__init__()
#: param for $doc
self.$name = Param(self, "$name", "$doc")
if $defaultValueStr is not None:
self._setDefault($name=$defaultValueStr)
def set$Name(self, value):
"""
Sets the value of :py:attr:`$name`.
"""
self.paramMap[self.$name] = value
return self
def get$Name(self):
"""
Gets the value of $name or its default value.
"""
return self.getOrDefault(self.$name)'''
Name = name[0].upper() + name[1:]
return template \
.replace("$name", name) \
.replace("$Name", Name) \
.replace("$doc", doc) \
.replace("$defaultValueStr", str(defaultValueStr))
if __name__ == "__main__":
print(header)
print("\n# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.\n")
print("from pyspark.ml.param import Param, Params\n\n")
shared = [
("maxIter", "max number of iterations", None),
("regParam", "regularization constant", None),
("featuresCol", "features column name", "'features'"),
("labelCol", "label column name", "'label'"),
("predictionCol", "prediction column name", "'prediction'"),
("rawPredictionCol", "raw prediction column name", "'rawPrediction'"),
("inputCol", "input column name", None),
("inputCols", "input column names", None),
("outputCol", "output column name", None),
("numFeatures", "number of features", None),
("checkpointInterval", "checkpoint interval (>= 1)", None),
("seed", "random seed", None),
("tol", "the convergence tolerance for iterative algorithms", None),
("stepSize", "Step size to be used for each iteration of optimization.", None)]
code = []
for name, doc, defaultValueStr in shared:
code.append(_gen_param_code(name, doc, defaultValueStr))
print("\n\n\n".join(code))
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
header = """#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#"""
# Code generator for shared params (shared.py). Run under this folder with:
# python _shared_params_code_gen.py > shared.py
def _gen_param_code(name, doc, defaultValueStr):
"""
Generates Python code for a shared param class.
:param name: param name
:param doc: param doc
:param defaultValueStr: string representation of the default value
:return: code string
"""
# TODO: How to correctly inherit instance attributes?
template = '''class Has$Name(Params):
"""
Mixin for param $name: $doc.
"""
# a placeholder to make it appear in the generated doc
$name = Param(Params._dummy(), "$name", "$doc")
def __init__(self):
super(Has$Name, self).__init__()
#: param for $doc
self.$name = Param(self, "$name", "$doc")
if $defaultValueStr is not None:
self._setDefault($name=$defaultValueStr)
def set$Name(self, value):
"""
Sets the value of :py:attr:`$name`.
"""
self.paramMap[self.$name] = value
return self
def get$Name(self):
"""
Gets the value of $name or its default value.
"""
return self.getOrDefault(self.$name)'''
Name = name[0].upper() + name[1:]
return template \
.replace("$name", name) \
.replace("$Name", Name) \
.replace("$doc", doc) \
.replace("$defaultValueStr", str(defaultValueStr))
if __name__ == "__main__":
print(header)
print("\n# DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.\n")
print("from pyspark.ml.param import Param, Params\n\n")
shared = [
("maxIter", "max number of iterations", None),
("regParam", "regularization constant", None),
("featuresCol", "features column name", "'features'"),
("labelCol", "label column name", "'label'"),
("predictionCol", "prediction column name", "'prediction'"),
("rawPredictionCol", "raw prediction column name", "'rawPrediction'"),
("inputCol", "input column name", None),
("inputCols", "input column names", None),
("outputCol", "output column name", None),
("numFeatures", "number of features", None),
("checkpointInterval", "checkpoint interval (>= 1)", None),
("seed", "random seed", None),
("tol", "the convergence tolerance for iterative algorithms", None),
("stepSize", "Step size to be used for each iteration of optimization.", None)]
code = []
for name, doc, defaultValueStr in shared:
code.append(_gen_param_code(name, doc, defaultValueStr))
print("\n\n\n".join(code))
|
en
| 0.744116
|
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Code generator for shared params (shared.py). Run under this folder with: # python _shared_params_code_gen.py > shared.py Generates Python code for a shared param class. :param name: param name :param doc: param doc :param defaultValueStr: string representation of the default value :return: code string # TODO: How to correctly inherit instance attributes? class Has$Name(Params): """ Mixin for param $name: $doc. """ # a placeholder to make it appear in the generated doc $name = Param(Params._dummy(), "$name", "$doc") def __init__(self): super(Has$Name, self).__init__() #: param for $doc self.$name = Param(self, "$name", "$doc") if $defaultValueStr is not None: self._setDefault($name=$defaultValueStr) def set$Name(self, value): """ Sets the value of :py:attr:`$name`. """ self.paramMap[self.$name] = value return self def get$Name(self): """ Gets the value of $name or its default value. """ return self.getOrDefault(self.$name) # DO NOT MODIFY THIS FILE! It was generated by _shared_params_code_gen.py.\n")
| 1.67705
| 2
|
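To make the template substitution above concrete, expanding _gen_param_code by hand for the ("featuresCol", "features column name", "'features'") entry yields roughly the mixin below (reconstructed from the template, not copied from the generated shared.py; it assumes the "from pyspark.ml.param import Param, Params" header the script prints). Note how the default is rendered as a literal comparison: 'features' is not None is always true, while a None default produces "if None is not None:" and skips _setDefault entirely.

class HasFeaturesCol(Params):
    """
    Mixin for param featuresCol: features column name.
    """
    # a placeholder to make it appear in the generated doc
    featuresCol = Param(Params._dummy(), "featuresCol", "features column name")
    def __init__(self):
        super(HasFeaturesCol, self).__init__()
        #: param for features column name
        self.featuresCol = Param(self, "featuresCol", "features column name")
        if 'features' is not None:
            self._setDefault(featuresCol='features')
    def setFeaturesCol(self, value):
        """
        Sets the value of :py:attr:`featuresCol`.
        """
        self.paramMap[self.featuresCol] = value
        return self
    def getFeaturesCol(self):
        """
        Gets the value of featuresCol or its default value.
        """
        return self.getOrDefault(self.featuresCol)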
models/1-Tom/train/kaggle-hubmap-main/src/02_train/run.py
|
navekshasood/HuBMAP---Hacking-the-Kidney
| 0
|
6627143
|
import time
import pandas as pd
import numpy as np
import gc
from os.path import join as opj
import matplotlib.pyplot as plt
import pickle
from tqdm import tqdm
import torchvision
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from dataset import HuBMAPDatasetTrain
from models import build_model
from scheduler import CosineLR
from utils import elapsed_time
from lovasz_loss import lovasz_hinge
from losses import criterion_lovasz_hinge_non_empty
from metrics import dice_sum, dice_sum_2
from get_config import get_config
config = get_config()
output_path = config['OUTPUT_PATH']
fold_list = config['FOLD_LIST']
pretrain_path_list = config['pretrain_path_list']
device = config['device']
def feature_imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.detach().numpy().transpose((1, 2, 0))
# mean = np.array([0.5, 0.5, 0.5])
# std = np.array([0.5, 0.5, 0.5])
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
# inp = STD * inp + MEAN
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
plt.pause(0.001) # pause a bit so that plots are updated
def run(seed, data_df, pseudo_df, trn_idxs_list, val_idxs_list):
log_cols = ['fold', 'epoch', 'lr',
'loss_trn', 'loss_val',
'trn_score', 'val_score',
'elapsed_time']
criterion = nn.BCEWithLogitsLoss().to(device)
criterion_clf = nn.BCEWithLogitsLoss().to(device)
for fold, (trn_idxs, val_idxs) in enumerate(zip(trn_idxs_list, val_idxs_list)):
if fold in fold_list:
pass
else:
continue
print('seed = {}, fold = {}'.format(seed, fold))
log_df = pd.DataFrame(columns=log_cols, dtype=object)
log_counter = 0
#dataset
trn_df = data_df.iloc[trn_idxs].reset_index(drop=True)
val_df = data_df.iloc[val_idxs].reset_index(drop=True)
#add pseudo label
if pseudo_df is not None:
trn_df = pd.concat([trn_df, pseudo_df], axis=0).reset_index(drop=True)
# dataloader
valid_dataset = HuBMAPDatasetTrain(val_df, config, mode='valid')
valid_loader = DataLoader(valid_dataset, batch_size=config['test_batch_size'],
shuffle=False, num_workers=4, pin_memory=True)
#model
model = build_model(model_name=config['model_name'],
resolution=config['resolution'],
deepsupervision=config['deepsupervision'],
clfhead=config['clfhead'],
clf_threshold=config['clf_threshold'],
load_weights=True).to(device, torch.float32)
# if pretrain_path_list is not None:
# model.load_state_dict(torch.load(pretrain_path_list[fold]))
# print("pre-trained models loaded")
# for p in model.parameters():
# p.requires_grad = True
optimizer = optim.Adam(model.parameters(), **config['Adam'])
#optimizer = optim.RMSprop(model.parameters(), **config['RMSprop'])
# Creates a GradScaler once at the beginning of training.
scaler = torch.cuda.amp.GradScaler()
if config['lr_scheduler_name']=='ReduceLROnPlateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, **config['lr_scheduler']['ReduceLROnPlateau'])
elif config['lr_scheduler_name']=='CosineAnnealingLR':
#scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])
scheduler = CosineLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])
elif config['lr_scheduler_name']=='OneCycleLR':
scheduler = optim.lr_scheduler.OneCycleLR(optimizer, steps_per_epoch=len(train_loader),
**config['lr_scheduler']['OneCycleLR'])
#training
val_score_best = -1e+99
val_score_best2 = -1e+99
loss_val_best = 1e+99
epoch_best = 0
counter_ES = 0
trn_score = 0
trn_score_each = 0
start_time = time.time()
for epoch in range(1, config['num_epochs']+1):
if epoch < config['restart_epoch_list'][fold]:
scheduler.step()
continue
# if elapsed_time(start_time) > config['time_limit']:
# print('elapsed_time go beyond {} sec'.format(config['time_limit']))
# break
#print('lr = ', scheduler.get_lr()[0])
print('lr : ', [ group['lr'] for group in optimizer.param_groups ])
#train
trn_df['binned'] = trn_df['binned'].apply(lambda x:config['binned_max'] if x>=config['binned_max'] else x)
n_sample = trn_df['is_masked'].value_counts().min()
trn_df_0 = trn_df[trn_df['is_masked']==False].sample(n_sample, replace=True)
trn_df_1 = trn_df[trn_df['is_masked']==True].sample(n_sample, replace=True)
n_bin = int(trn_df_1['binned'].value_counts().mean())
trn_df_list = []
for bin_size in trn_df_1['binned'].unique():
trn_df_list.append(trn_df_1[trn_df_1['binned']==bin_size].sample(n_bin, replace=True))
trn_df_1 = pd.concat(trn_df_list, axis=0)
trn_df_balanced = pd.concat([trn_df_1, trn_df_0], axis=0).reset_index(drop=True)
train_dataset = HuBMAPDatasetTrain(trn_df_balanced, config, mode='train')
train_loader = DataLoader(train_dataset, batch_size=config['trn_batch_size'],
shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
model.train()
running_loss_trn = 0
trn_score_numer = 0
trn_score_denom = 0
y_preds = []
y_trues = []
counter = 0
tk0 = tqdm(train_loader, total=int(len(train_loader)))
feature_test = []
for i,data in enumerate(tk0):
optimizer.zero_grad()
with torch.cuda.amp.autocast():
batch,c,h,w = data['img'].shape
if config['clfhead']:
y_clf = data['label'].to(device, torch.float32, non_blocking=True)
if config['deepsupervision']:
logits,logits_deeps,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
logits,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
if config['deepsupervision']:
logits,logits_deeps = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
logits = model(data['img'].to(device, torch.float32, non_blocking=True))
y_true = data['mask'].to(device, torch.float32, non_blocking=True)
dice_numer, dice_denom = dice_sum_2((torch.sigmoid(logits)).detach().cpu().numpy(),
y_true.detach().cpu().numpy(),
dice_threshold=config['dice_threshold'])
trn_score_numer += dice_numer
trn_score_denom += dice_denom
y_true = y_true.unsqueeze(1)
# get intermediate data
# print(logits.shape)
# print(y_true.shape)
# print(model.x4.shape)
feature_test.append(model.x4.cpu())#transpose(1,0).cpu()
#out = torchvision.utils.make_grid(feature_test)
#print(out.shape)
#feature_imshow(out)
loss = criterion(logits,y_true)
loss += lovasz_hinge(logits.view(-1,h,w), y_true.view(-1,h,w))
if config['deepsupervision']:
for logits_deep in logits_deeps:
loss += 0.1 * criterion_lovasz_hinge_non_empty(criterion, logits_deep, y_true)
if config['clfhead']:
loss += criterion_clf(logits_clf.squeeze(-1),y_clf)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
#loss.backward()
#optimizer.step()
if config['lr_scheduler_name']=='OneCycleLR':
scheduler.step()
running_loss_trn += loss.item() * batch
counter += 1
tk0.set_postfix(loss=(running_loss_trn / (counter * train_loader.batch_size) ))
epoch_loss_trn = running_loss_trn / len(train_dataset)
trn_score = trn_score_numer / trn_score_denom
feature_merge = np.concatenate(feature_test, axis=0)
print(feature_merge.shape)
pickle.dump(feature_merge, open("featue_test", "wb"))
#release GPU memory cache
del data, loss,logits,y_true
torch.cuda.empty_cache()
gc.collect()
#eval
model.eval()
loss_val = 0
val_score_numer = 0
val_score_denom = 0
y_preds = []
y_trues = []
tk1 = tqdm(valid_loader, total=int(len(valid_loader)))
for i,data in enumerate(tk1):
with torch.no_grad():
batch,c,h,w = data['img'].shape
if config['clfhead']:
y_clf = data['label'].to(device, torch.float32, non_blocking=True)
if config['deepsupervision']:
logits,logits_deeps,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
logits,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
if config['deepsupervision']:
logits,logits_deeps = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
logits = model(data['img'].to(device, torch.float32, non_blocking=True))
y_true = data['mask'].to(device, torch.float32, non_blocking=True)
dice_numer, dice_denom = dice_sum_2((torch.sigmoid(logits)).detach().cpu().numpy(),
y_true.detach().cpu().numpy(),
dice_threshold=config['dice_threshold'])
val_score_numer += dice_numer
val_score_denom += dice_denom
y_true = y_true.unsqueeze(1)
loss_val += criterion(logits,y_true).item() * batch
loss_val += lovasz_hinge(logits.view(-1,h,w), y_true.view(-1,h,w)).item() * batch
if config['deepsupervision']:
for logits_deep in logits_deeps:
loss_val += 0.1 * criterion_lovasz_hinge_non_empty(criterion, logits_deep, y_true).item() * batch
if config['clfhead']:
loss_val += criterion_clf(logits_clf.squeeze(-1), y_clf).item() * batch
#release GPU memory cache
del data,logits,y_true
torch.cuda.empty_cache()
gc.collect()
loss_val /= len(valid_dataset)
val_score = val_score_numer / val_score_denom
#logging
log_df.loc[log_counter,log_cols] = np.array([fold, epoch,
[ group['lr'] for group in optimizer.param_groups ],
epoch_loss_trn, loss_val,
trn_score, val_score,
elapsed_time(start_time)], dtype='object')
log_counter += 1
            #monitoring
print('epoch {:.0f} loss_trn = {:.5f}, loss_val = {:.5f}, trn_score = {:.4f}, val_score = {:.4f}'.format(epoch, epoch_loss_trn, loss_val, trn_score, val_score))
if epoch%10 == 0:
print(' elapsed_time = {:.1f} min'.format((time.time() - start_time)/60))
if config['early_stopping']:
if loss_val < loss_val_best: #val_score > val_score_best:
val_score_best = val_score #update
loss_val_best = loss_val #update
epoch_best = epoch #update
counter_ES = 0 #reset
torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestloss.pth') #save
print('model (best loss) saved')
else:
counter_ES += 1
if counter_ES > config['patience']:
print('early stopping, epoch_best {:.0f}, loss_val_best {:.5f}, val_score_best {:.5f}'.format(epoch_best, loss_val_best, val_score_best))
break
else:
torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestloss.pth') #save
if val_score > val_score_best2:
val_score_best2 = val_score #update
torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestscore.pth') #save
print('model (best score) saved')
if config['lr_scheduler_name']=='ReduceLROnPlateau':
scheduler.step(loss_val)
#scheduler.step(val_score)
elif config['lr_scheduler_name']=='CosineAnnealingLR':
scheduler.step()
#for snapshot ensemble
if config['lr_scheduler_name']=='CosineAnnealingLR':
t0 = config['lr_scheduler']['CosineAnnealingLR']['t0']
if (epoch%(t0+1)==0) or (epoch%(t0)==0) or (epoch%(t0-1)==0):
torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_epoch{epoch}.pth') #save
print(f'model saved epoch{epoch} for snapshot ensemble')
#save result
log_df.to_csv(output_path+f'log_seed{seed}_fold{fold}.csv', index=False)
print('')
#best model
if config['early_stopping']&(counter_ES<=config['patience']):
print('epoch_best {:d}, val_loss_best {:.5f}, val_score_best {:.5f}'.format(epoch_best, loss_val_best, val_score_best))
del model
torch.cuda.empty_cache()
gc.collect()
print('')
|
import time
import pandas as pd
import numpy as np
import gc
from os.path import join as opj
import matplotlib.pyplot as plt
import pickle
from tqdm import tqdm
import torchvision
import torch
from torch import nn, optim
from torch.utils.data import DataLoader
from dataset import HuBMAPDatasetTrain
from models import build_model
from scheduler import CosineLR
from utils import elapsed_time
from lovasz_loss import lovasz_hinge
from losses import criterion_lovasz_hinge_non_empty
from metrics import dice_sum, dice_sum_2
from get_config import get_config
config = get_config()
output_path = config['OUTPUT_PATH']
fold_list = config['FOLD_LIST']
pretrain_path_list = config['pretrain_path_list']
device = config['device']
def feature_imshow(inp, title=None):
"""Imshow for Tensor."""
inp = inp.detach().numpy().transpose((1, 2, 0))
# mean = np.array([0.5, 0.5, 0.5])
# std = np.array([0.5, 0.5, 0.5])
MEAN = np.array([0.485, 0.456, 0.406])
STD = np.array([0.229, 0.224, 0.225])
# inp = STD * inp + MEAN
inp = np.clip(inp, 0, 1)
plt.imshow(inp)
plt.pause(0.001) # pause a bit so that plots are updated
def run(seed, data_df, pseudo_df, trn_idxs_list, val_idxs_list):
log_cols = ['fold', 'epoch', 'lr',
'loss_trn', 'loss_val',
'trn_score', 'val_score',
'elapsed_time']
criterion = nn.BCEWithLogitsLoss().to(device)
criterion_clf = nn.BCEWithLogitsLoss().to(device)
for fold, (trn_idxs, val_idxs) in enumerate(zip(trn_idxs_list, val_idxs_list)):
if fold in fold_list:
pass
else:
continue
print('seed = {}, fold = {}'.format(seed, fold))
log_df = pd.DataFrame(columns=log_cols, dtype=object)
log_counter = 0
#dataset
trn_df = data_df.iloc[trn_idxs].reset_index(drop=True)
val_df = data_df.iloc[val_idxs].reset_index(drop=True)
#add pseudo label
if pseudo_df is not None:
trn_df = pd.concat([trn_df, pseudo_df], axis=0).reset_index(drop=True)
# dataloader
valid_dataset = HuBMAPDatasetTrain(val_df, config, mode='valid')
valid_loader = DataLoader(valid_dataset, batch_size=config['test_batch_size'],
shuffle=False, num_workers=4, pin_memory=True)
#model
model = build_model(model_name=config['model_name'],
resolution=config['resolution'],
deepsupervision=config['deepsupervision'],
clfhead=config['clfhead'],
clf_threshold=config['clf_threshold'],
load_weights=True).to(device, torch.float32)
# if pretrain_path_list is not None:
# model.load_state_dict(torch.load(pretrain_path_list[fold]))
# print("pre-trained models loaded")
# for p in model.parameters():
# p.requires_grad = True
optimizer = optim.Adam(model.parameters(), **config['Adam'])
#optimizer = optim.RMSprop(model.parameters(), **config['RMSprop'])
# Creates a GradScaler once at the beginning of training.
scaler = torch.cuda.amp.GradScaler()
if config['lr_scheduler_name']=='ReduceLROnPlateau':
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, **config['lr_scheduler']['ReduceLROnPlateau'])
elif config['lr_scheduler_name']=='CosineAnnealingLR':
#scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])
scheduler = CosineLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR'])
elif config['lr_scheduler_name']=='OneCycleLR':
scheduler = optim.lr_scheduler.OneCycleLR(optimizer, steps_per_epoch=len(train_loader),
**config['lr_scheduler']['OneCycleLR'])
#training
val_score_best = -1e+99
val_score_best2 = -1e+99
loss_val_best = 1e+99
epoch_best = 0
counter_ES = 0
trn_score = 0
trn_score_each = 0
start_time = time.time()
for epoch in range(1, config['num_epochs']+1):
if epoch < config['restart_epoch_list'][fold]:
scheduler.step()
continue
# if elapsed_time(start_time) > config['time_limit']:
# print('elapsed_time go beyond {} sec'.format(config['time_limit']))
# break
#print('lr = ', scheduler.get_lr()[0])
print('lr : ', [ group['lr'] for group in optimizer.param_groups ])
#train
trn_df['binned'] = trn_df['binned'].apply(lambda x:config['binned_max'] if x>=config['binned_max'] else x)
n_sample = trn_df['is_masked'].value_counts().min()
trn_df_0 = trn_df[trn_df['is_masked']==False].sample(n_sample, replace=True)
trn_df_1 = trn_df[trn_df['is_masked']==True].sample(n_sample, replace=True)
n_bin = int(trn_df_1['binned'].value_counts().mean())
trn_df_list = []
for bin_size in trn_df_1['binned'].unique():
trn_df_list.append(trn_df_1[trn_df_1['binned']==bin_size].sample(n_bin, replace=True))
trn_df_1 = pd.concat(trn_df_list, axis=0)
trn_df_balanced = pd.concat([trn_df_1, trn_df_0], axis=0).reset_index(drop=True)
train_dataset = HuBMAPDatasetTrain(trn_df_balanced, config, mode='train')
train_loader = DataLoader(train_dataset, batch_size=config['trn_batch_size'],
shuffle=True, num_workers=4, pin_memory=True, drop_last=True)
model.train()
running_loss_trn = 0
trn_score_numer = 0
trn_score_denom = 0
y_preds = []
y_trues = []
counter = 0
tk0 = tqdm(train_loader, total=int(len(train_loader)))
feature_test = []
for i,data in enumerate(tk0):
optimizer.zero_grad()
with torch.cuda.amp.autocast():
batch,c,h,w = data['img'].shape
if config['clfhead']:
y_clf = data['label'].to(device, torch.float32, non_blocking=True)
if config['deepsupervision']:
logits,logits_deeps,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
logits,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
if config['deepsupervision']:
logits,logits_deeps = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
logits = model(data['img'].to(device, torch.float32, non_blocking=True))
y_true = data['mask'].to(device, torch.float32, non_blocking=True)
dice_numer, dice_denom = dice_sum_2((torch.sigmoid(logits)).detach().cpu().numpy(),
y_true.detach().cpu().numpy(),
dice_threshold=config['dice_threshold'])
trn_score_numer += dice_numer
trn_score_denom += dice_denom
y_true = y_true.unsqueeze(1)
# get intermediate data
# print(logits.shape)
# print(y_true.shape)
# print(model.x4.shape)
feature_test.append(model.x4.cpu())#transpose(1,0).cpu()
#out = torchvision.utils.make_grid(feature_test)
#print(out.shape)
#feature_imshow(out)
loss = criterion(logits,y_true)
loss += lovasz_hinge(logits.view(-1,h,w), y_true.view(-1,h,w))
if config['deepsupervision']:
for logits_deep in logits_deeps:
loss += 0.1 * criterion_lovasz_hinge_non_empty(criterion, logits_deep, y_true)
if config['clfhead']:
loss += criterion_clf(logits_clf.squeeze(-1),y_clf)
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
#loss.backward()
#optimizer.step()
if config['lr_scheduler_name']=='OneCycleLR':
scheduler.step()
running_loss_trn += loss.item() * batch
counter += 1
tk0.set_postfix(loss=(running_loss_trn / (counter * train_loader.batch_size) ))
epoch_loss_trn = running_loss_trn / len(train_dataset)
trn_score = trn_score_numer / trn_score_denom
feature_merge = np.concatenate(feature_test, axis=0)
print(feature_merge.shape)
pickle.dump(feature_merge, open("featue_test", "wb"))
#release GPU memory cache
del data, loss,logits,y_true
torch.cuda.empty_cache()
gc.collect()
#eval
model.eval()
loss_val = 0
val_score_numer = 0
val_score_denom = 0
y_preds = []
y_trues = []
tk1 = tqdm(valid_loader, total=int(len(valid_loader)))
for i,data in enumerate(tk1):
with torch.no_grad():
batch,c,h,w = data['img'].shape
if config['clfhead']:
y_clf = data['label'].to(device, torch.float32, non_blocking=True)
if config['deepsupervision']:
logits,logits_deeps,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
logits,logits_clf = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
if config['deepsupervision']:
logits,logits_deeps = model(data['img'].to(device, torch.float32, non_blocking=True))
else:
logits = model(data['img'].to(device, torch.float32, non_blocking=True))
y_true = data['mask'].to(device, torch.float32, non_blocking=True)
dice_numer, dice_denom = dice_sum_2((torch.sigmoid(logits)).detach().cpu().numpy(),
y_true.detach().cpu().numpy(),
dice_threshold=config['dice_threshold'])
val_score_numer += dice_numer
val_score_denom += dice_denom
y_true = y_true.unsqueeze(1)
loss_val += criterion(logits,y_true).item() * batch
loss_val += lovasz_hinge(logits.view(-1,h,w), y_true.view(-1,h,w)).item() * batch
if config['deepsupervision']:
for logits_deep in logits_deeps:
loss_val += 0.1 * criterion_lovasz_hinge_non_empty(criterion, logits_deep, y_true).item() * batch
if config['clfhead']:
loss_val += criterion_clf(logits_clf.squeeze(-1), y_clf).item() * batch
#release GPU memory cache
del data,logits,y_true
torch.cuda.empty_cache()
gc.collect()
loss_val /= len(valid_dataset)
val_score = val_score_numer / val_score_denom
#logging
log_df.loc[log_counter,log_cols] = np.array([fold, epoch,
[ group['lr'] for group in optimizer.param_groups ],
epoch_loss_trn, loss_val,
trn_score, val_score,
elapsed_time(start_time)], dtype='object')
log_counter += 1
            #monitoring
print('epoch {:.0f} loss_trn = {:.5f}, loss_val = {:.5f}, trn_score = {:.4f}, val_score = {:.4f}'.format(epoch, epoch_loss_trn, loss_val, trn_score, val_score))
if epoch%10 == 0:
print(' elapsed_time = {:.1f} min'.format((time.time() - start_time)/60))
if config['early_stopping']:
if loss_val < loss_val_best: #val_score > val_score_best:
val_score_best = val_score #update
loss_val_best = loss_val #update
epoch_best = epoch #update
counter_ES = 0 #reset
torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestloss.pth') #save
print('model (best loss) saved')
else:
counter_ES += 1
if counter_ES > config['patience']:
print('early stopping, epoch_best {:.0f}, loss_val_best {:.5f}, val_score_best {:.5f}'.format(epoch_best, loss_val_best, val_score_best))
break
else:
torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestloss.pth') #save
if val_score > val_score_best2:
val_score_best2 = val_score #update
torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_bestscore.pth') #save
print('model (best score) saved')
if config['lr_scheduler_name']=='ReduceLROnPlateau':
scheduler.step(loss_val)
#scheduler.step(val_score)
elif config['lr_scheduler_name']=='CosineAnnealingLR':
scheduler.step()
#for snapshot ensemble
if config['lr_scheduler_name']=='CosineAnnealingLR':
t0 = config['lr_scheduler']['CosineAnnealingLR']['t0']
if (epoch%(t0+1)==0) or (epoch%(t0)==0) or (epoch%(t0-1)==0):
torch.save(model.state_dict(), output_path+f'model_seed{seed}_fold{fold}_epoch{epoch}.pth') #save
print(f'model saved epoch{epoch} for snapshot ensemble')
#save result
log_df.to_csv(output_path+f'log_seed{seed}_fold{fold}.csv', index=False)
print('')
#best model
    if config['early_stopping'] and (counter_ES <= config['patience']):
print('epoch_best {:d}, val_loss_best {:.5f}, val_score_best {:.5f}'.format(epoch_best, loss_val_best, val_score_best))
del model
torch.cuda.empty_cache()
gc.collect()
print('')
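
# Hedged illustration, not part of the original training script: the loop above
# accumulates a dice numerator/denominator from dice_sum_2() and divides only
# once per epoch.  dice_sum_2 itself is defined elsewhere in this project; the
# helper below is a minimal sketch of that accumulation style, assuming binary
# masks and a probability threshold.
import numpy as np

def dice_sum_2_sketch(probs, y_true, dice_threshold=0.5):
    """Return (numerator, denominator) so that dice = numerator / denominator."""
    preds = (probs > dice_threshold).astype(np.float32)
    y_true = y_true.astype(np.float32)
    numer = 2.0 * (preds * y_true).sum()       # 2 * overlap(pred, true)
    denom = preds.sum() + y_true.sum() + 1e-7  # |pred| + |true|, eps avoids 0/0
    return numer, denom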
|
en
| 0.356694
|
Imshow for Tensor. # mean = np.array([0.5, 0.5, 0.5]) # std = np.array([0.5, 0.5, 0.5]) # inp = STD * inp + MEAN # pause a bit so that plots are updated #dataset #add pseudo label # dataloader #model # if pretrain_path_list is not None: # model.load_state_dict(torch.load(pretrain_path_list[fold])) # print("pre-trained models loaded") # for p in model.parameters(): # p.requires_grad = True #optimizer = optim.RMSprop(model.parameters(), **config['RMSprop']) # Creates a GradScaler once at the beginning of training. #scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, **config['lr_scheduler']['CosineAnnealingLR']) #training # if elapsed_time(start_time) > config['time_limit']: # print('elapsed_time go beyond {} sec'.format(config['time_limit'])) # break #print('lr = ', scheduler.get_lr()[0]) #train # get intermediate data # print(logits.shape) # print(y_true.shape) # print(model.x4.shape) #transpose(1,0).cpu() #out = torchvision.utils.make_grid(feature_test) #print(out.shape) #feature_imshow(out) #loss.backward() #optimizer.step() #release GPU memory cache #eval #release GPU memory cache #logging #monitering #val_score > val_score_best: #update #update #update #reset #save #save #update #save #scheduler.step(val_score) #for snapshot ensemble #save #save result #best model
| 1.894844
| 2
|
tests/components/hue/conftest.py
|
bazwilliams/home-assistant
| 1
|
6627144
|
"""Test helpers for Hue."""
from collections import deque
import logging
from unittest.mock import AsyncMock, Mock, patch
from aiohue.groups import Groups
from aiohue.lights import Lights
from aiohue.scenes import Scenes
from aiohue.sensors import Sensors
import pytest
from homeassistant.components import hue
from homeassistant.components.hue import sensor_base as hue_sensor_base
from tests.common import MockConfigEntry
from tests.components.light.conftest import mock_light_profiles # noqa: F401
@pytest.fixture(autouse=True)
def no_request_delay():
"""Make the request refresh delay 0 for instant tests."""
with patch("homeassistant.components.hue.light.REQUEST_REFRESH_DELAY", 0):
yield
def create_mock_bridge(hass):
"""Create a mock Hue bridge."""
bridge = Mock(
hass=hass,
available=True,
authorized=True,
allow_unreachable=False,
allow_groups=False,
api=create_mock_api(hass),
reset_jobs=[],
spec=hue.HueBridge,
)
bridge.sensor_manager = hue_sensor_base.SensorManager(bridge)
bridge.mock_requests = bridge.api.mock_requests
bridge.mock_light_responses = bridge.api.mock_light_responses
bridge.mock_group_responses = bridge.api.mock_group_responses
bridge.mock_sensor_responses = bridge.api.mock_sensor_responses
async def async_request_call(task):
await task()
bridge.async_request_call = async_request_call
return bridge
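# Hedged usage sketch (illustrative only; not a fixture in this conftest):
# tests queue canned responses on the deques, and the bridge's
# async_request_call simply awaits whatever coroutine it is handed.
async def _example_bridge_usage(hass):
    bridge = create_mock_bridge(hass)
    bridge.mock_light_responses.append({"1": {"state": {"on": True}}})
    async def noop_task():
        return None
    await bridge.async_request_call(noop_task)
    # the queued response is only consumed once the integration requests "lights"
    assert len(bridge.mock_light_responses) == 1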
@pytest.fixture
def mock_api(hass):
"""Mock the Hue api."""
return create_mock_api(hass)
def create_mock_api(hass):
"""Create a mock API."""
api = Mock(initialize=AsyncMock())
api.mock_requests = []
api.mock_light_responses = deque()
api.mock_group_responses = deque()
api.mock_sensor_responses = deque()
api.mock_scene_responses = deque()
async def mock_request(method, path, **kwargs):
kwargs["method"] = method
kwargs["path"] = path
api.mock_requests.append(kwargs)
if path == "lights":
return api.mock_light_responses.popleft()
if path == "groups":
return api.mock_group_responses.popleft()
if path == "sensors":
return api.mock_sensor_responses.popleft()
if path == "scenes":
return api.mock_scene_responses.popleft()
return None
logger = logging.getLogger(__name__)
api.config.apiversion = "9.9.9"
api.lights = Lights(logger, {}, mock_request)
api.groups = Groups(logger, {}, mock_request)
api.sensors = Sensors(logger, {}, mock_request)
api.scenes = Scenes(logger, {}, mock_request)
return api
@pytest.fixture
def mock_bridge(hass):
"""Mock a Hue bridge."""
return create_mock_bridge(hass)
async def setup_bridge_for_sensors(hass, mock_bridge, hostname=None):
"""Load the Hue platform with the provided bridge for sensor-related platforms."""
if hostname is None:
hostname = "mock-host"
hass.config.components.add(hue.DOMAIN)
config_entry = MockConfigEntry(
domain=hue.DOMAIN,
title="Mock Title",
data={"host": hostname},
system_options={},
)
mock_bridge.config_entry = config_entry
hass.data[hue.DOMAIN] = {config_entry.entry_id: mock_bridge}
await hass.config_entries.async_forward_entry_setup(config_entry, "binary_sensor")
await hass.config_entries.async_forward_entry_setup(config_entry, "sensor")
# simulate a full setup by manually adding the bridge config entry
config_entry.add_to_hass(hass)
# and make sure it completes before going further
await hass.async_block_till_done()
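# Hedged example (not an existing test in this module): a sensor test built on
# the helpers above would typically look like this, assuming the sensor
# platforms issue a single initial "sensors" request during setup.
async def _example_no_sensors_test(hass, mock_bridge):
    """With an empty sensor response, no sensor entities should be created."""
    mock_bridge.mock_sensor_responses.append({})
    await setup_bridge_for_sensors(hass, mock_bridge)
    assert len(mock_bridge.mock_requests) == 1
    assert len(hass.states.async_all()) == 0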
|
en
| 0.86199
|
Test helpers for Hue. # noqa: F401 Make the request refresh delay 0 for instant tests. Create a mock Hue bridge. Mock the Hue api. Create a mock API. Mock a Hue bridge. Load the Hue platform with the provided bridge for sensor-related platforms. # simulate a full setup by manually adding the bridge config entry # and make sure it completes before going further
| 2.352766
| 2
|
mlir/test/python/dialects/linalg/opdsl/emit_structured_generic.py
|
acidburn0zzz/llvm-project
| 2,338
|
6627145
|
# RUN: %PYTHON %s | FileCheck %s
from mlir.ir import *
from mlir.dialects import builtin
from mlir.dialects import linalg
from mlir.dialects import std
from mlir.dialects.linalg.opdsl.lang import *
T1 = TV.T1
T2 = TV.T2
@linalg_structured_op
def matmul_mono(
A=TensorDef(T, S.M, S.K),
B=TensorDef(T, S.K, S.N),
C=TensorDef(T, S.M, S.N, output=True)):
domain(D.m, D.n, D.k)
C[D.m, D.n] += A[D.m, D.k] * B[D.k, D.n]
@linalg_structured_op
def matmul_poly(
A=TensorDef(T1, S.M, S.K),
B=TensorDef(T2, S.K, S.N),
C=TensorDef(U, S.M, S.N, output=True)):
domain(D.m, D.n, D.k)
C[D.m, D.n] += cast(U, A[D.m, D.k]) * cast(U, B[D.k, D.n])
@linalg_structured_op
def conv_poly(
I=TensorDef(T1, S.N, S.IH, S.IW, S.C),
K=TensorDef(T2, S.KH, S.KW, S.C),
O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
strides=AttributeDef(S.SH, S.SW),
dilations=AttributeDef(S.DH, S.DW)):
domain(D.n, D.oh, D.ow, D.kh, D.kw, D.c)
O[D.n, D.oh, D.ow, D.c] += cast(
U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
D.c]) * cast(U, K[D.kh, D.kw, D.c])
@linalg_structured_op
def pooling_max_poly(
I=TensorDef(T1, S.N, S.H, S.W, S.C),
K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
strides=AttributeDef(S.SH, S.SW),
dilations=AttributeDef(S.DH, S.DW)):
domain(D.n, D.oh, D.ow, D.kh, D.kw, D.c)
O[D.n, D.oh, D.ow, D.c] = ReduceFn.max(D.kh, D.kw)(
cast(U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
D.c]))
@linalg_structured_op
def pooling_min_poly(
I=TensorDef(T1, S.N, S.H, S.W, S.C),
K=TensorDef(T2, S.KH, S.KW, index_dims=[D.kh, D.kw]),
O=TensorDef(U, S.N, S.OH, S.OW, S.C, output=True),
strides=AttributeDef(S.SH, S.SW),
dilations=AttributeDef(S.DH, S.DW)):
domain(D.n, D.oh, D.ow, D.kh, D.kw, D.c)
O[D.n, D.oh, D.ow, D.c] = ReduceFn.min(D.kh, D.kw)(
cast(U, I[D.n, D.oh * S.SH + D.kh * S.DH, D.ow * S.SW + D.kw * S.DW,
D.c]))
@linalg_structured_op
def fill_rng_poly(
min=ScalarDef(F64),
max=ScalarDef(F64),
seed=ScalarDef(I32),
O=TensorDef(T, S.M, S.N, output=True)):
multiplier = cast(I32, const(1103515245))
increment = cast(I32, const(12345))
rand1 = (cast(I32, index(D.m)) + seed) * multiplier + increment
rand2 = (cast(I32, index(D.n)) + rand1) * multiplier + increment
inv_range = cast(F64, const(2.3283064e-10))
offset = cast(F64, const(2147483647))
scaling = (max - min) * inv_range
O[D.m, D.n] = cast(T, (offset + cast(F64, rand2)) * scaling + min)
@linalg_structured_op
def soft_plus_poly(
I=TensorDef(T, S.M, S.N), O=TensorDef(U, S.M, S.N, output=True)):
O[D.m, D.n] = \
PrimFn.log(cast(U, const(1.0)) + cast(U, PrimFn.exp(I[D.m, D.n])))
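# Hedged illustration only: the same opdsl constructs used above can describe a
# plain elementwise addition.  This op is merely defined and never emitted, so
# the FileCheck output below is unaffected.
@linalg_structured_op
def elemwise_add_poly_sketch(
    A=TensorDef(T1, S.M, S.N),
    B=TensorDef(T2, S.M, S.N),
    C=TensorDef(U, S.M, S.N, output=True)):
  C[D.m, D.n] = cast(U, A[D.m, D.n]) + cast(U, B[D.m, D.n])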
with Context() as ctx, Location.unknown():
module = Module.create()
f16 = F16Type.get()
f32 = F32Type.get()
f64 = F64Type.get()
i8 = IntegerType.get_signless(8)
i16 = IntegerType.get_signless(16)
i32 = IntegerType.get_signless(32)
with InsertionPoint(module.body):
# Multiplication indexing maps. We verify only the indexing maps of the
# first multiplication and then do additional tests on casting and body
# generation behavior.
# CHECK: #[[$MUL_MAP_A:.+]] = affine_map<(d0, d1, d2) -> (d0, d2)>
# CHECK: #[[$MUL_MAP_B:.+]] = affine_map<(d0, d1, d2) -> (d2, d1)>
# CHECK: #[[$MUL_MAP_C:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)>
# Convolution indexing maps.
# CHECK: #[[$CONV_MAP_I:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1 * 2 + d3, d2 * 4 + d4 * 2, d5)>
# CHECK: #[[$CONV_MAP_K:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)>
# CHECK: #[[$CONV_MAP_O:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d5)>
# Pooling indexing maps.
# CHECK: #[[$POOL_MAP_K:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)>
# CHECK-LABEL: func @test_matmul_mono
# CHECK-SAME: %[[A:.+]]: tensor<4x16xf32>
# CHECK-SAME: %[[B:.+]]: tensor<16x8xf32>
# CHECK: %[[INITC:.+]] = linalg.init_tensor [4, 8] : tensor<4x8xf32>
# CHECK: linalg.generic
# CHECK-SAME: indexing_maps = [#[[$MUL_MAP_A]], #[[$MUL_MAP_B]], #[[$MUL_MAP_C]]]
# CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"]
# CHECK-SAME: ins(%[[A]], %[[B]]
# CHECK-SAME: outs(%[[INITC]]
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f32), RankedTensorType.get((16, 8), f32))
def test_matmul_mono(lhs, rhs):
init_result = linalg.InitTensorOp([4, 8], f32)
return matmul_mono(lhs, rhs, outs=[init_result.result])
# CHECK-LABEL: @test_i8i8i32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: i8, %[[B_ARG:.+]]: i8, %[[C_ARG:.+]]: i32)
# CHECK-NEXT: %[[A_CAST:.+]] = sexti %[[A_ARG]] : i8 to i32
# CHECK-NEXT: %[[B_CAST:.+]] = sexti %[[B_ARG]] : i8 to i32
# CHECK-NEXT: %[[MUL:.+]] = muli %[[A_CAST]], %[[B_CAST]] : i32
# CHECK-NEXT: %[[ADD:.+]] = addi %[[C_ARG]], %[[MUL]] : i32
# CHECK-NEXT: linalg.yield %[[ADD]] : i32
# CHECK-NEXT: -> tensor<4x8xi32>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), i8), RankedTensorType.get((16, 8), i8),
RankedTensorType.get((4, 8), i32))
def test_i8i8i32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_i8i16i32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: i8, %[[B_ARG:.+]]: i16, %[[C_ARG:.+]]: i32)
# CHECK-NEXT: %[[A_CAST:.+]] = sexti %[[A_ARG]] : i8 to i32
# CHECK-NEXT: %[[B_CAST:.+]] = sexti %[[B_ARG]] : i16 to i32
# CHECK-NEXT: %[[MUL:.+]] = muli %[[A_CAST]], %[[B_CAST]] : i32
# CHECK-NEXT: %[[ADD:.+]] = addi %[[C_ARG]], %[[MUL]] : i32
# CHECK-NEXT: linalg.yield %[[ADD]] : i32
# CHECK-NEXT: -> tensor<4x8xi32>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), i8), RankedTensorType.get((16, 8), i16),
RankedTensorType.get((4, 8), i32))
def test_i8i16i32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_i32i32i16_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: i32, %[[B_ARG:.+]]: i32, %[[C_ARG:.+]]: i16)
# CHECK-NEXT: %[[A_CAST:.+]] = trunci %[[A_ARG]] : i32 to i16
# CHECK-NEXT: %[[B_CAST:.+]] = trunci %[[B_ARG]] : i32 to i16
# CHECK-NEXT: %[[MUL:.+]] = muli %[[A_CAST]], %[[B_CAST]] : i16
# CHECK-NEXT: %[[ADD:.+]] = addi %[[C_ARG]], %[[MUL]] : i16
# CHECK-NEXT: linalg.yield %[[ADD]] : i16
# CHECK-NEXT: -> tensor<4x8xi16>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), i32), RankedTensorType.get((16, 8), i32),
RankedTensorType.get((4, 8), i16))
def test_i32i32i16_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_i8i8f32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: i8, %[[B_ARG:.+]]: i8, %[[C_ARG:.+]]: f32)
# CHECK-NEXT: %[[A_CAST:.+]] = sitofp %[[A_ARG]] : i8 to f32
# CHECK-NEXT: %[[B_CAST:.+]] = sitofp %[[B_ARG]] : i8 to f32
# CHECK-NEXT: %[[MUL:.+]] = mulf %[[A_CAST]], %[[B_CAST]] : f32
# CHECK-NEXT: %[[ADD:.+]] = addf %[[C_ARG]], %[[MUL]] : f32
# CHECK-NEXT: linalg.yield %[[ADD]] : f32
# CHECK-NEXT: -> tensor<4x8xf32>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), i8), RankedTensorType.get((16, 8), i8),
RankedTensorType.get((4, 8), f32))
def test_i8i8f32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_f16f16f32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: f16, %[[B_ARG:.+]]: f16, %[[C_ARG:.+]]: f32)
# CHECK-NEXT: %[[A_CAST:.+]] = fpext %[[A_ARG]] : f16 to f32
# CHECK-NEXT: %[[B_CAST:.+]] = fpext %[[B_ARG]] : f16 to f32
# CHECK-NEXT: %[[MUL:.+]] = mulf %[[A_CAST]], %[[B_CAST]] : f32
# CHECK-NEXT: %[[ADD:.+]] = addf %[[C_ARG]], %[[MUL]] : f32
# CHECK-NEXT: linalg.yield %[[ADD]] : f32
# CHECK-NEXT: -> tensor<4x8xf32>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f16), RankedTensorType.get((16, 8), f16),
RankedTensorType.get((4, 8), f32))
def test_f16f16f32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_f64f64f32_matmul
# CHECK: ^{{.*}}(%[[A_ARG:.+]]: f64, %[[B_ARG:.+]]: f64, %[[C_ARG:.+]]: f32)
# CHECK-NEXT: %[[A_CAST:.+]] = fptrunc %[[A_ARG]] : f64 to f32
# CHECK-NEXT: %[[B_CAST:.+]] = fptrunc %[[B_ARG]] : f64 to f32
# CHECK-NEXT: %[[MUL:.+]] = mulf %[[A_CAST]], %[[B_CAST]] : f32
# CHECK-NEXT: %[[ADD:.+]] = addf %[[C_ARG]], %[[MUL]] : f32
# CHECK-NEXT: linalg.yield %[[ADD]] : f32
# CHECK-NEXT: -> tensor<4x8xf32>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f64), RankedTensorType.get((16, 8), f64),
RankedTensorType.get((4, 8), f32))
def test_f64f64f32_matmul(lhs, rhs, init_result):
return matmul_poly(lhs, rhs, outs=[init_result])
# CHECK-LABEL: @test_f32i32_conv
# CHECK: linalg.generic
# CHECK-SAME: indexing_maps = [#[[$CONV_MAP_I]], #[[$CONV_MAP_K]], #[[$CONV_MAP_O]]]
# CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction", "reduction", "parallel"]
# CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[FILTER:.+]]: f32, %[[OUT:.+]]: i32)
# CHECK-NEXT: %[[IN_CAST:.+]] = fptosi %[[IN:.+]] : f32 to i32
# CHECK-NEXT: %[[FILTER_CAST:.+]] = fptosi %[[FILTER:.+]] : f32 to i32
# CHECK-NEXT: %[[PROD:.+]] = muli %[[IN_CAST]], %[[FILTER_CAST]] : i32
# CHECK-NEXT: %[[SUM:.+]] = addi %[[OUT]], %[[PROD]] : i32
# CHECK-NEXT: linalg.yield %[[SUM]] : i32
# CHECK-NEXT: -> tensor<2x4xi32>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f32), RankedTensorType.get((2, 2, 1),
f32),
RankedTensorType.get((2, 4), i32))
def test_f32i32_conv(input, filter, init_result):
return conv_poly(
input, filter, outs=[init_result], strides=[2, 4], dilations=[1, 2])
# CHECK-LABEL: @test_f32i32_max_pooling
# CHECK: linalg.generic
# CHECK-SAME: indexing_maps = [#[[$CONV_MAP_I]], #[[$POOL_MAP_K]], #[[$CONV_MAP_O]]]
# CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction", "reduction", "parallel"]
# CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[SHAPE:.+]]: f32, %[[OUT:.+]]: i32)
# CHECK-NEXT: %[[IN_CAST:.+]] = fptosi %[[IN:.+]] : f32 to i32
# CHECK-NEXT: %[[COND:.+]] = cmpi sgt, %[[OUT]], %[[IN_CAST:.+]] : i32
# CHECK-NEXT: %[[MAX:.+]] = select %[[COND]], %[[OUT]], %[[IN_CAST:.+]] : i32
# CHECK-NEXT: linalg.yield %[[MAX]] : i32
# CHECK-NEXT: -> tensor<2x4xi32>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f32), RankedTensorType.get((2, 2), f32),
RankedTensorType.get((2, 4), i32))
def test_f32i32_max_pooling(input, shape, init_result):
return pooling_max_poly(
input, shape, outs=[init_result], strides=[2, 4], dilations=[1, 2])
# CHECK-LABEL: @test_f32f32_max_pooling
# CHECK: linalg.generic
# CHECK-SAME: indexing_maps = [#[[$CONV_MAP_I]], #[[$POOL_MAP_K]], #[[$CONV_MAP_O]]]
# CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction", "reduction", "parallel"]
# CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[SHAPE:.+]]: f32, %[[OUT:.+]]: f32)
# CHECK-NEXT: %[[COND:.+]] = cmpf ogt, %[[OUT]], %[[IN:.+]] : f32
# CHECK-NEXT: %[[MAX:.+]] = select %[[COND]], %[[OUT]], %[[IN:.+]] : f32
# CHECK-NEXT: linalg.yield %[[MAX]] : f32
# CHECK-NEXT: -> tensor<2x4xf32>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f32), RankedTensorType.get((2, 2), f32),
RankedTensorType.get((2, 4), f32))
def test_f32f32_max_pooling(input, shape, init_result):
return pooling_max_poly(
input, shape, outs=[init_result], strides=[2, 4], dilations=[1, 2])
# CHECK-LABEL: @test_f32i32_min_pooling
# CHECK: = cmpi slt,
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f32), RankedTensorType.get((2, 2), f32),
RankedTensorType.get((2, 4), i32))
def test_f32i32_min_pooling(input, shape, init_result):
return pooling_min_poly(
input, shape, outs=[init_result], strides=[2, 4], dilations=[1, 2])
# CHECK-LABEL: @test_f32f32_min_pooling
# CHECK: = cmpf olt,
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f32), RankedTensorType.get((2, 2), f32),
RankedTensorType.get((2, 4), f32))
def test_f32f32_min_pooling(input, shape, init_result):
return pooling_min_poly(
input, shape, outs=[init_result], strides=[2, 4], dilations=[1, 2])
# CHECK-LABEL: @test_i32_fill_rng
# CHECK: ^{{.*}}(%[[MIN:.+]]: f64, %[[MAX:.+]]: f64, %[[SEED:.+]]: i32, %{{.*}}
# CHECK-DAG: %[[IDX0:.+]] = linalg.index 0 : index
# CHECK-DAG: %[[IDX0_CAST:.+]] = index_cast %[[IDX0]] : index to i32
# CHECK-DAG: %[[RND0:.+]] = addi %[[IDX0_CAST]], %[[SEED]] : i32
# CHECK-DAG: %[[CST0:.+]] = constant 1103515245 : i64
# CHECK-DAG: %[[CST0_CAST:.+]] = trunci %[[CST0]] : i64 to i32
# Skip the remaining random number computation and match the scaling logic.
# CHECK-DAG: %[[DIFF:.+]] = subf %[[MAX]], %[[MIN]] : f64
# CHECK-DAG: %[[CST3:.+]] = constant 2.3283063999999999E-10 : f64
# CHECK-DAG: %[[FACT:.+]] = mulf %[[DIFF]], %[[CST3]] : f64
# CHECK-DAG: %[[RND4:.+]] = mulf %{{.+}}, %[[FACT]] : f64
# CHECK-DAG: %[[RND5:.+]] = addf %[[RND4]], %[[MIN]] : f64
# CHECK-DAG: %{{.*}} = fptosi %[[RND5]] : f64 to i32
@builtin.FuncOp.from_py_func(f64, f64, i32,
RankedTensorType.get((4, 16), i32))
def test_i32_fill_rng(min, max, seed, init_result):
return fill_rng_poly(min, max, seed, outs=[init_result])
# CHECK-LABEL: @test_f32_soft_plus
# CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[OUT:.+]]: f32)
# CHECK-NEXT: %[[C1:.+]] = constant 1.000000e+00 : f64
# CHECK-NEXT: %[[C1_CAST:.+]] = fptrunc %[[C1]] : f64 to f32
# CHECK-NEXT: %[[EXP:.+]] = math.exp %[[IN]] : f32
# CHECK-NEXT: %[[SUM:.+]] = addf %[[C1_CAST]], %[[EXP]] : f32
# CHECK-NEXT: %[[LOG:.+]] = math.log %[[SUM]] : f32
# CHECK-NEXT: linalg.yield %[[LOG]] : f32
# CHECK-NEXT: -> tensor<4x16xf32>
@builtin.FuncOp.from_py_func(
RankedTensorType.get((4, 16), f32), RankedTensorType.get((4, 16), f32))
def test_f32_soft_plus(input, init_result):
return soft_plus_poly(input, outs=[init_result])
print(module)
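# Hedged sketch (not covered by the FileCheck patterns above): emitting one of
# the ops defined earlier into a separate module follows the same recipe as the
# test functions, i.e. build tensor types, wrap a python function, and call the
# structured op with `outs`.  Nothing from this module is printed, so the
# checked output stays unchanged.
with Context(), Location.unknown():
  sketch_module = Module.create()
  sketch_f32 = F32Type.get()
  with InsertionPoint(sketch_module.body):

    @builtin.FuncOp.from_py_func(
        RankedTensorType.get((4, 16), sketch_f32),
        RankedTensorType.get((16, 8), sketch_f32))
    def sketch_matmul_mono(lhs, rhs):
      init_result = linalg.InitTensorOp([4, 8], sketch_f32)
      return matmul_mono(lhs, rhs, outs=[init_result.result])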
|
en
| 0.30148
|
# RUN: %PYTHON %s | FileCheck %s # Multiplication indexing maps. We verify only the indexing maps of the # first multiplication and then do additional tests on casting and body # generation behavior. # CHECK: #[[$MUL_MAP_A:.+]] = affine_map<(d0, d1, d2) -> (d0, d2)> # CHECK: #[[$MUL_MAP_B:.+]] = affine_map<(d0, d1, d2) -> (d2, d1)> # CHECK: #[[$MUL_MAP_C:.+]] = affine_map<(d0, d1, d2) -> (d0, d1)> # Convolution indexing maps. # CHECK: #[[$CONV_MAP_I:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1 * 2 + d3, d2 * 4 + d4 * 2, d5)> # CHECK: #[[$CONV_MAP_K:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4, d5)> # CHECK: #[[$CONV_MAP_O:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d0, d1, d2, d5)> # Pooling indexing maps. # CHECK: #[[$POOL_MAP_K:.+]] = affine_map<(d0, d1, d2, d3, d4, d5) -> (d3, d4)> # CHECK-LABEL: func @test_matmul_mono # CHECK-SAME: %[[A:.+]]: tensor<4x16xf32> # CHECK-SAME: %[[B:.+]]: tensor<16x8xf32> # CHECK: %[[INITC:.+]] = linalg.init_tensor [4, 8] : tensor<4x8xf32> # CHECK: linalg.generic # CHECK-SAME: indexing_maps = [#[[$MUL_MAP_A]], #[[$MUL_MAP_B]], #[[$MUL_MAP_C]]] # CHECK-SAME: iterator_types = ["parallel", "parallel", "reduction"] # CHECK-SAME: ins(%[[A]], %[[B]] # CHECK-SAME: outs(%[[INITC]] # CHECK-LABEL: @test_i8i8i32_matmul # CHECK: ^{{.*}}(%[[A_ARG:.+]]: i8, %[[B_ARG:.+]]: i8, %[[C_ARG:.+]]: i32) # CHECK-NEXT: %[[A_CAST:.+]] = sexti %[[A_ARG]] : i8 to i32 # CHECK-NEXT: %[[B_CAST:.+]] = sexti %[[B_ARG]] : i8 to i32 # CHECK-NEXT: %[[MUL:.+]] = muli %[[A_CAST]], %[[B_CAST]] : i32 # CHECK-NEXT: %[[ADD:.+]] = addi %[[C_ARG]], %[[MUL]] : i32 # CHECK-NEXT: linalg.yield %[[ADD]] : i32 # CHECK-NEXT: -> tensor<4x8xi32> # CHECK-LABEL: @test_i8i16i32_matmul # CHECK: ^{{.*}}(%[[A_ARG:.+]]: i8, %[[B_ARG:.+]]: i16, %[[C_ARG:.+]]: i32) # CHECK-NEXT: %[[A_CAST:.+]] = sexti %[[A_ARG]] : i8 to i32 # CHECK-NEXT: %[[B_CAST:.+]] = sexti %[[B_ARG]] : i16 to i32 # CHECK-NEXT: %[[MUL:.+]] = muli %[[A_CAST]], %[[B_CAST]] : i32 # CHECK-NEXT: %[[ADD:.+]] = addi %[[C_ARG]], %[[MUL]] : i32 # CHECK-NEXT: linalg.yield %[[ADD]] : i32 # CHECK-NEXT: -> tensor<4x8xi32> # CHECK-LABEL: @test_i32i32i16_matmul # CHECK: ^{{.*}}(%[[A_ARG:.+]]: i32, %[[B_ARG:.+]]: i32, %[[C_ARG:.+]]: i16) # CHECK-NEXT: %[[A_CAST:.+]] = trunci %[[A_ARG]] : i32 to i16 # CHECK-NEXT: %[[B_CAST:.+]] = trunci %[[B_ARG]] : i32 to i16 # CHECK-NEXT: %[[MUL:.+]] = muli %[[A_CAST]], %[[B_CAST]] : i16 # CHECK-NEXT: %[[ADD:.+]] = addi %[[C_ARG]], %[[MUL]] : i16 # CHECK-NEXT: linalg.yield %[[ADD]] : i16 # CHECK-NEXT: -> tensor<4x8xi16> # CHECK-LABEL: @test_i8i8f32_matmul # CHECK: ^{{.*}}(%[[A_ARG:.+]]: i8, %[[B_ARG:.+]]: i8, %[[C_ARG:.+]]: f32) # CHECK-NEXT: %[[A_CAST:.+]] = sitofp %[[A_ARG]] : i8 to f32 # CHECK-NEXT: %[[B_CAST:.+]] = sitofp %[[B_ARG]] : i8 to f32 # CHECK-NEXT: %[[MUL:.+]] = mulf %[[A_CAST]], %[[B_CAST]] : f32 # CHECK-NEXT: %[[ADD:.+]] = addf %[[C_ARG]], %[[MUL]] : f32 # CHECK-NEXT: linalg.yield %[[ADD]] : f32 # CHECK-NEXT: -> tensor<4x8xf32> # CHECK-LABEL: @test_f16f16f32_matmul # CHECK: ^{{.*}}(%[[A_ARG:.+]]: f16, %[[B_ARG:.+]]: f16, %[[C_ARG:.+]]: f32) # CHECK-NEXT: %[[A_CAST:.+]] = fpext %[[A_ARG]] : f16 to f32 # CHECK-NEXT: %[[B_CAST:.+]] = fpext %[[B_ARG]] : f16 to f32 # CHECK-NEXT: %[[MUL:.+]] = mulf %[[A_CAST]], %[[B_CAST]] : f32 # CHECK-NEXT: %[[ADD:.+]] = addf %[[C_ARG]], %[[MUL]] : f32 # CHECK-NEXT: linalg.yield %[[ADD]] : f32 # CHECK-NEXT: -> tensor<4x8xf32> # CHECK-LABEL: @test_f64f64f32_matmul # CHECK: ^{{.*}}(%[[A_ARG:.+]]: f64, %[[B_ARG:.+]]: f64, %[[C_ARG:.+]]: f32) # CHECK-NEXT: 
%[[A_CAST:.+]] = fptrunc %[[A_ARG]] : f64 to f32 # CHECK-NEXT: %[[B_CAST:.+]] = fptrunc %[[B_ARG]] : f64 to f32 # CHECK-NEXT: %[[MUL:.+]] = mulf %[[A_CAST]], %[[B_CAST]] : f32 # CHECK-NEXT: %[[ADD:.+]] = addf %[[C_ARG]], %[[MUL]] : f32 # CHECK-NEXT: linalg.yield %[[ADD]] : f32 # CHECK-NEXT: -> tensor<4x8xf32> # CHECK-LABEL: @test_f32i32_conv # CHECK: linalg.generic # CHECK-SAME: indexing_maps = [#[[$CONV_MAP_I]], #[[$CONV_MAP_K]], #[[$CONV_MAP_O]]] # CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction", "reduction", "parallel"] # CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[FILTER:.+]]: f32, %[[OUT:.+]]: i32) # CHECK-NEXT: %[[IN_CAST:.+]] = fptosi %[[IN:.+]] : f32 to i32 # CHECK-NEXT: %[[FILTER_CAST:.+]] = fptosi %[[FILTER:.+]] : f32 to i32 # CHECK-NEXT: %[[PROD:.+]] = muli %[[IN_CAST]], %[[FILTER_CAST]] : i32 # CHECK-NEXT: %[[SUM:.+]] = addi %[[OUT]], %[[PROD]] : i32 # CHECK-NEXT: linalg.yield %[[SUM]] : i32 # CHECK-NEXT: -> tensor<2x4xi32> # CHECK-LABEL: @test_f32i32_max_pooling # CHECK: linalg.generic # CHECK-SAME: indexing_maps = [#[[$CONV_MAP_I]], #[[$POOL_MAP_K]], #[[$CONV_MAP_O]]] # CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction", "reduction", "parallel"] # CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[SHAPE:.+]]: f32, %[[OUT:.+]]: i32) # CHECK-NEXT: %[[IN_CAST:.+]] = fptosi %[[IN:.+]] : f32 to i32 # CHECK-NEXT: %[[COND:.+]] = cmpi sgt, %[[OUT]], %[[IN_CAST:.+]] : i32 # CHECK-NEXT: %[[MAX:.+]] = select %[[COND]], %[[OUT]], %[[IN_CAST:.+]] : i32 # CHECK-NEXT: linalg.yield %[[MAX]] : i32 # CHECK-NEXT: -> tensor<2x4xi32> # CHECK-LABEL: @test_f32f32_max_pooling # CHECK: linalg.generic # CHECK-SAME: indexing_maps = [#[[$CONV_MAP_I]], #[[$POOL_MAP_K]], #[[$CONV_MAP_O]]] # CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction", "reduction", "parallel"] # CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[SHAPE:.+]]: f32, %[[OUT:.+]]: f32) # CHECK-NEXT: %[[COND:.+]] = cmpf ogt, %[[OUT]], %[[IN:.+]] : f32 # CHECK-NEXT: %[[MAX:.+]] = select %[[COND]], %[[OUT]], %[[IN:.+]] : f32 # CHECK-NEXT: linalg.yield %[[MAX]] : f32 # CHECK-NEXT: -> tensor<2x4xf32> # CHECK-LABEL: @test_f32i32_min_pooling # CHECK: = cmpi slt, # CHECK-LABEL: @test_f32f32_min_pooling # CHECK: = cmpf olt, # CHECK-LABEL: @test_i32_fill_rng # CHECK: ^{{.*}}(%[[MIN:.+]]: f64, %[[MAX:.+]]: f64, %[[SEED:.+]]: i32, %{{.*}} # CHECK-DAG: %[[IDX0:.+]] = linalg.index 0 : index # CHECK-DAG: %[[IDX0_CAST:.+]] = index_cast %[[IDX0]] : index to i32 # CHECK-DAG: %[[RND0:.+]] = addi %[[IDX0_CAST]], %[[SEED]] : i32 # CHECK-DAG: %[[CST0:.+]] = constant 1103515245 : i64 # CHECK-DAG: %[[CST0_CAST:.+]] = trunci %[[CST0]] : i64 to i32 # Skip the remaining random number computation and match the scaling logic. # CHECK-DAG: %[[DIFF:.+]] = subf %[[MAX]], %[[MIN]] : f64 # CHECK-DAG: %[[CST3:.+]] = constant 2.3283063999999999E-10 : f64 # CHECK-DAG: %[[FACT:.+]] = mulf %[[DIFF]], %[[CST3]] : f64 # CHECK-DAG: %[[RND4:.+]] = mulf %{{.+}}, %[[FACT]] : f64 # CHECK-DAG: %[[RND5:.+]] = addf %[[RND4]], %[[MIN]] : f64 # CHECK-DAG: %{{.*}} = fptosi %[[RND5]] : f64 to i32 # CHECK-LABEL: @test_f32_soft_plus # CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[OUT:.+]]: f32) # CHECK-NEXT: %[[C1:.+]] = constant 1.000000e+00 : f64 # CHECK-NEXT: %[[C1_CAST:.+]] = fptrunc %[[C1]] : f64 to f32 # CHECK-NEXT: %[[EXP:.+]] = math.exp %[[IN]] : f32 # CHECK-NEXT: %[[SUM:.+]] = addf %[[C1_CAST]], %[[EXP]] : f32 # CHECK-NEXT: %[[LOG:.+]] = math.log %[[SUM]] : f32 # CHECK-NEXT: linalg.yield %[[LOG]] : f32 # CHECK-NEXT: -> tensor<4x16xf32>
| 2.016758
| 2
|
dlfairness/original_code/Balanced-Datasets-Are-Not-Enough/object_multilabel/adv/ae_adv_model.py
|
lin-tan/fairness-variance
| 0
|
6627146
|
import torch
import torch.nn as nn
import functools
import torch.nn.functional as F
import torchvision.models as models
import torch.nn.utils
from torch.autograd import Function
import copy
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = None
else:
        raise NotImplementedError('normalization layer [%s] is not supported' % norm_type)
return norm_layer
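# Hedged usage sketch: get_norm_layer returns a layer constructor (or None for
# 'none'), which the caller then instantiates with a channel count.
def _norm_layer_usage_sketch():
    norm_ctor = get_norm_layer('instance')
    norm = norm_ctor(64)  # nn.InstanceNorm2d(64, affine=False, track_running_stats=False)
    return norm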
# with skip connection and pixel connection and smoothed
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
        use_bias = True  # note: overrides the value computed above, so the convs always keep a bias
# construct unet structure
self.downsample_0 = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.downRelu_1 = nn.LeakyReLU(0.2, True)
self.downSample_1 = nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.downNorm_1 = norm_layer(ngf * 2)
self.downRelu_2 = nn.LeakyReLU(0.2, True)
self.downSample_2 = nn.Conv2d(ngf * 2, ngf * 4, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.downNorm_2 = norm_layer(ngf * 4)
self.downRelu_3 = nn.LeakyReLU(0.2, True)
self.downSample_3 = nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.downNorm_3 = norm_layer(ngf * 8)
self.innerLeakyRelu = nn.LeakyReLU(0.2, True)
self.innerDownSample = nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.innerRelu = nn.ReLU(True)
innerUpSample = []
innerUpSample.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
innerUpSample.append(nn.ReflectionPad2d((2, 1, 2, 1)))
innerUpSample.append(nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.innerUpSample = nn.Sequential(*innerUpSample)
self.innerNorm = norm_layer(ngf * 8)
self.upRelu_3 = nn.ReLU(True)
upSample_3 = []
upSample_3.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
upSample_3.append(nn.ReflectionPad2d((2, 1, 2, 1)))
upSample_3.append(nn.Conv2d(ngf * 16, ngf * 4, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.upSample_3 = nn.Sequential(*upSample_3)
self.upNorm_3 = norm_layer(ngf * 4)
self.upRelu_2 = nn.ReLU(True)
upSample_2 = []
upSample_2.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
upSample_2.append(nn.ReflectionPad2d((2, 1, 2, 1)))
upSample_2.append(nn.Conv2d(ngf * 8, ngf * 2, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.upSample_2 = nn.Sequential(*upSample_2)
self.upNorm_2 = norm_layer(ngf * 2)
self.upRelu_1 = nn.ReLU(True)
upSample_1 = []
upSample_1.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
upSample_1.append(nn.ReflectionPad2d((2, 1, 2, 1)))
upSample_1.append(nn.Conv2d(ngf * 4, ngf, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.upSample_1 = nn.Sequential(*upSample_1)
self.upNorm_1 = norm_layer(ngf)
self.upRelu_0 = nn.ReLU(True)
upSample_0 = []
upSample_0.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
upSample_0.append(nn.ReflectionPad2d((2, 1, 2, 1)))
upSample_0.append(nn.Conv2d(ngf * 2, 1, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.upSample_0 = nn.Sequential(*upSample_0)
## initialize bias
nn.init.normal_(self.upSample_0[-1].bias, mean=3, std=1)
self.activation = nn.Sigmoid()
def forward(self, input):
# assume input image size = 224
x_down_0 = self.downsample_0(input) # (ngf, 112, 112)
x_down_1 = self.downNorm_1(self.downSample_1(self.downRelu_1(x_down_0))) # (ngf*2, 56, 56)
x_down_2 = self.downNorm_2(self.downSample_2(self.downRelu_2(x_down_1))) # (ngf*4, 28, 28)
x_down_3 = self.downNorm_3(self.downSample_3(self.downRelu_3(x_down_2))) # (ngf*8, 14, 14)
latent = self.innerDownSample(self.innerLeakyRelu(x_down_3)) # (ngf*8, 7, 7)
x = self.innerNorm(self.innerUpSample(self.innerRelu(latent))) # (ngf*8, 14, 14)
x_up_3 = self.upNorm_3(self.upSample_3(self.upRelu_3(torch.cat([x, x_down_3], 1)))) # (ngf*4, 28, 28)
x_up_2 = self.upNorm_2(self.upSample_2(self.upRelu_2(torch.cat([x_up_3, x_down_2], 1)))) # (ngf*2, 56, 56)
x_up_1 = self.upNorm_1(self.upSample_1(self.upRelu_1(torch.cat([x_up_2, x_down_1], 1)))) # (ngf, 112, 112)
encoded_image = self.activation(self.upSample_0(self.upRelu_0(torch.cat([x_up_1, x_down_0], 1)))) # (3, 224, 224)
return torch.mul(input, encoded_image), latent
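# Hedged shape-check sketch (illustrative; never called in this file).  Assuming
# a 224x224 input as the forward() comments state, the generator returns the
# mask-multiplied image plus an (ngf*8, 7, 7) latent.
def _unet_shape_check_sketch():
    gen = UnetGenerator(3, 3, 5, ngf=64, norm_layer=get_norm_layer('batch'))
    img = torch.randn(1, 3, 224, 224)
    masked, latent = gen(img)
    assert masked.shape == (1, 3, 224, 224)   # same size as the input image
    assert latent.shape == (1, 64 * 8, 7, 7)  # bottleneck feature map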
class ObjectMultiLabelAdv(nn.Module):
def __init__(self, args, num_object, hid_size, dropout, adv_lambda):
super(ObjectMultiLabelAdv, self).__init__()
print("Build a ObjectMultiLabelAdv Model[{}]".format(args.layer))
self.num_object = num_object
self.args = args
self.base_network = models.resnet50(pretrained = True)
self.adv_lambda = adv_lambda
print('Load weights from Resnet18/50 done')
norm_layer = 'batch'
use_dropout = False
norm_layer = get_norm_layer(norm_type=norm_layer)
self.autoencoder = UnetGenerator(3, 3, 5, 64, \
norm_layer=norm_layer, use_dropout=use_dropout)
output_size = self.num_object
self.finalLayer = nn.Linear(self.base_network.fc.in_features, output_size)
if not args.autoencoder_finetune:
for param in self.autoencoder.parameters():
param.requires_grad = False
if not args.finetune:
for param in self.base_network.parameters():
param.requires_grad = False
for param in self.finalLayer.parameters():
param.requires_grad = False
assert args.layer == 'generated_image'
self.adv_component = GenderClassification(args)
pretrained_gender_classifier_path = './model_best_object_balanced.pth.tar'
        gender_classifier_checkpoint = torch.load(pretrained_gender_classifier_path)
        self.adv_component.load_state_dict(gender_classifier_checkpoint['state_dict'])
print("Loaded pretrained gender classifier from {}".format(pretrained_gender_classifier_path))
if not args.finetune:
for param in self.adv_component.parameters():
param.requires_grad = False
def forward(self, image):
autoencoded_image, latent = self.autoencoder(image)
x = self.base_network.conv1(autoencoded_image)
x = self.base_network.bn1(x)
x = self.base_network.relu(x)
conv1_feature = self.base_network.maxpool(x)
layer1_feature = self.base_network.layer1(conv1_feature)
layer2_feature = self.base_network.layer2(layer1_feature)
layer3_feature = self.base_network.layer3(layer2_feature)
layer4_feature = self.base_network.layer4(layer3_feature)
final_feature = self.base_network.avgpool(layer4_feature)
final_feature = final_feature.view(final_feature.size(0), -1)
preds = self.finalLayer(final_feature)
adv_feature = ReverseLayerF.apply(autoencoded_image, self.adv_lambda)
adv_preds = self.adv_component(adv_feature)
return preds, adv_preds, autoencoded_image
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg() * ctx.alpha, None
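# Hedged illustration of the gradient reversal above (not used elsewhere in
# this file): the forward pass is the identity, while the backward pass flips
# the gradient's sign and scales it by alpha.
def _reverse_layer_sketch():
    x = torch.ones(3, requires_grad=True)
    y = ReverseLayerF.apply(x, 0.5)
    y.sum().backward()
    assert torch.allclose(x.grad, torch.full((3,), -0.5))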
class GenderClassification(nn.Module):
def __init__(self, args):
super(GenderClassification, self).__init__()
print("Build a GenderClassification Model")
self.base_network = models.resnet18(pretrained = True)
print('Load weights from Resnet18 done')
self.finalLayer = nn.Linear(self.base_network.fc.in_features, 2)
def forward(self, image):
x = self.base_network.conv1(image)
x = self.base_network.bn1(x)
x = self.base_network.relu(x)
x = self.base_network.maxpool(x)
x = self.base_network.layer1(x)
x = self.base_network.layer2(x)
x = self.base_network.layer3(x)
x = self.base_network.layer4(x)
x = self.base_network.avgpool(x)
image_features = x.view(x.size(0), -1)
preds = self.finalLayer(image_features)
return preds
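# Hedged usage sketch (illustrative only; instantiating this downloads the
# torchvision resnet18 weights): the adversary takes an image-sized input
# because ObjectMultiLabelAdv feeds it the generated image rather than a
# feature vector, and it outputs two gender logits per sample.
def _gender_classifier_shape_sketch():
    class _Args:  # minimal stand-in for the argparse namespace (unused here)
        pass
    clf = GenderClassification(_Args())
    logits = clf(torch.randn(2, 3, 224, 224))
    assert logits.shape == (2, 2)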
|
import torch
import torch.nn as nn
import functools
import torch.nn.functional as F
import torchvision.models as models
import torch.nn.utils
from torch.autograd import Function
import copy
def get_norm_layer(norm_type='instance'):
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
norm_layer = None
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
# with skip connection and pixel connection and smoothed
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
use_bias = True
# construct unet structure
self.downsample_0 = nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.downRelu_1 = nn.LeakyReLU(0.2, True)
self.downSample_1 = nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.downNorm_1 = norm_layer(ngf * 2)
self.downRelu_2 = nn.LeakyReLU(0.2, True)
self.downSample_2 = nn.Conv2d(ngf * 2, ngf * 4, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.downNorm_2 = norm_layer(ngf * 4)
self.downRelu_3 = nn.LeakyReLU(0.2, True)
self.downSample_3 = nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.downNorm_3 = norm_layer(ngf * 8)
self.innerLeakyRelu = nn.LeakyReLU(0.2, True)
self.innerDownSample = nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1, bias=use_bias)
self.innerRelu = nn.ReLU(True)
innerUpSample = []
innerUpSample.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
innerUpSample.append(nn.ReflectionPad2d((2, 1, 2, 1)))
innerUpSample.append(nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.innerUpSample = nn.Sequential(*innerUpSample)
self.innerNorm = norm_layer(ngf * 8)
self.upRelu_3 = nn.ReLU(True)
upSample_3 = []
upSample_3.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
upSample_3.append(nn.ReflectionPad2d((2, 1, 2, 1)))
upSample_3.append(nn.Conv2d(ngf * 16, ngf * 4, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.upSample_3 = nn.Sequential(*upSample_3)
self.upNorm_3 = norm_layer(ngf * 4)
self.upRelu_2 = nn.ReLU(True)
upSample_2 = []
upSample_2.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
upSample_2.append(nn.ReflectionPad2d((2, 1, 2, 1)))
upSample_2.append(nn.Conv2d(ngf * 8, ngf * 2, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.upSample_2 = nn.Sequential(*upSample_2)
self.upNorm_2 = norm_layer(ngf * 2)
self.upRelu_1 = nn.ReLU(True)
upSample_1 = []
upSample_1.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
upSample_1.append(nn.ReflectionPad2d((2, 1, 2, 1)))
upSample_1.append(nn.Conv2d(ngf * 4, ngf, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.upSample_1 = nn.Sequential(*upSample_1)
self.upNorm_1 = norm_layer(ngf)
self.upRelu_0 = nn.ReLU(True)
upSample_0 = []
upSample_0.append(nn.Upsample(scale_factor = 2, mode='bilinear'))
upSample_0.append(nn.ReflectionPad2d((2, 1, 2, 1)))
upSample_0.append(nn.Conv2d(ngf * 2, 1, kernel_size=4, stride=1, padding=0, bias=use_bias))
self.upSample_0 = nn.Sequential(*upSample_0)
## initialize bias
nn.init.normal_(self.upSample_0[-1].bias, mean=3, std=1)
self.activation = nn.Sigmoid()
def forward(self, input):
# assume input image size = 224
x_down_0 = self.downsample_0(input) # (ngf, 112, 112)
x_down_1 = self.downNorm_1(self.downSample_1(self.downRelu_1(x_down_0))) # (ngf*2, 56, 56)
x_down_2 = self.downNorm_2(self.downSample_2(self.downRelu_2(x_down_1))) # (ngf*4, 28, 28)
x_down_3 = self.downNorm_3(self.downSample_3(self.downRelu_3(x_down_2))) # (ngf*8, 14, 14)
latent = self.innerDownSample(self.innerLeakyRelu(x_down_3)) # (ngf*8, 7, 7)
x = self.innerNorm(self.innerUpSample(self.innerRelu(latent))) # (ngf*8, 14, 14)
x_up_3 = self.upNorm_3(self.upSample_3(self.upRelu_3(torch.cat([x, x_down_3], 1)))) # (ngf*4, 28, 28)
x_up_2 = self.upNorm_2(self.upSample_2(self.upRelu_2(torch.cat([x_up_3, x_down_2], 1)))) # (ngf*2, 56, 56)
x_up_1 = self.upNorm_1(self.upSample_1(self.upRelu_1(torch.cat([x_up_2, x_down_1], 1)))) # (ngf, 112, 112)
encoded_image = self.activation(self.upSample_0(self.upRelu_0(torch.cat([x_up_1, x_down_0], 1)))) # (3, 224, 224)
return torch.mul(input, encoded_image), latent
class ObjectMultiLabelAdv(nn.Module):
def __init__(self, args, num_object, hid_size, dropout, adv_lambda):
super(ObjectMultiLabelAdv, self).__init__()
print("Build a ObjectMultiLabelAdv Model[{}]".format(args.layer))
self.num_object = num_object
self.args = args
self.base_network = models.resnet50(pretrained = True)
self.adv_lambda = adv_lambda
print('Load weights from Resnet18/50 done')
norm_layer = 'batch'
use_dropout = False
norm_layer = get_norm_layer(norm_type=norm_layer)
self.autoencoder = UnetGenerator(3, 3, 5, 64, \
norm_layer=norm_layer, use_dropout=use_dropout)
output_size = self.num_object
self.finalLayer = nn.Linear(self.base_network.fc.in_features, output_size)
if not args.autoencoder_finetune:
for param in self.autoencoder.parameters():
param.requires_grad = False
if not args.finetune:
for param in self.base_network.parameters():
param.requires_grad = False
for param in self.finalLayer.parameters():
param.requires_grad = False
assert args.layer == 'generated_image'
self.adv_component = GenderClassification(args)
pretrained_gender_classifier_path = './model_best_object_balanced.pth.tar'
        gender_classifier_checkpoint = torch.load(pretrained_gender_classifier_path)
        self.adv_component.load_state_dict(gender_classifier_checkpoint['state_dict'])
print("Loaded pretrained gender classifier from {}".format(pretrained_gender_classifier_path))
if not args.finetune:
for param in self.adv_component.parameters():
param.requires_grad = False
def forward(self, image):
autoencoded_image, latent = self.autoencoder(image)
x = self.base_network.conv1(autoencoded_image)
x = self.base_network.bn1(x)
x = self.base_network.relu(x)
conv1_feature = self.base_network.maxpool(x)
layer1_feature = self.base_network.layer1(conv1_feature)
layer2_feature = self.base_network.layer2(layer1_feature)
layer3_feature = self.base_network.layer3(layer2_feature)
layer4_feature = self.base_network.layer4(layer3_feature)
final_feature = self.base_network.avgpool(layer4_feature)
final_feature = final_feature.view(final_feature.size(0), -1)
preds = self.finalLayer(final_feature)
adv_feature = ReverseLayerF.apply(autoencoded_image, self.adv_lambda)
adv_preds = self.adv_component(adv_feature)
return preds, adv_preds, autoencoded_image
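    # Loss-wiring sketch (an assumption, not taken from the original training code): the
    # three outputs of forward() would typically feed a multi-label object loss and an
    # adversarial gender loss. The gradient reversal applied inside forward() already flips
    # the sign for the encoder, so the two terms can simply be summed:
    #   object_loss = nn.BCEWithLogitsLoss()(preds, object_targets)
    #   gender_loss = nn.CrossEntropyLoss()(adv_preds, gender_targets)
    #   total_loss = object_loss + gender_loss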
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg() * ctx.alpha, None
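    # Gradient-reversal sketch (illustrative): the forward pass is an identity, while the
    # backward pass negates the incoming gradient and scales it by alpha, so the adversary
    # is trained normally while everything upstream is pushed to hurt the adversary.
    #   x = torch.ones(3, requires_grad=True)
    #   ReverseLayerF.apply(x, 0.5).sum().backward()
    #   x.grad  # -> tensor([-0.5000, -0.5000, -0.5000])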
class GenderClassification(nn.Module):
def __init__(self, args):
super(GenderClassification, self).__init__()
print("Build a GenderClassification Model")
self.base_network = models.resnet18(pretrained = True)
        print('Loaded pretrained ResNet-18 weights')
self.finalLayer = nn.Linear(self.base_network.fc.in_features, 2)
def forward(self, image):
x = self.base_network.conv1(image)
x = self.base_network.bn1(x)
x = self.base_network.relu(x)
x = self.base_network.maxpool(x)
x = self.base_network.layer1(x)
x = self.base_network.layer2(x)
x = self.base_network.layer3(x)
x = self.base_network.layer4(x)
x = self.base_network.avgpool(x)
image_features = x.view(x.size(0), -1)
preds = self.finalLayer(image_features)
return preds
|
en
| 0.524742
|
| 2.478694
| 2
|
tools/accuracy_checker/setup.py
|
jkamelin/open_model_zoo
| 2
|
6627147
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import os
import re
import sys
import warnings
import platform
import subprocess # nosec - disable B404:import-subprocess check
from distutils.version import LooseVersion
from pathlib import Path
from setuptools import find_packages, setup # pylint:disable=W9902
from setuptools.command.test import test as test_command # pylint:disable=W9902
from setuptools.command.install import install as install_command # pylint:disable=W9902
here = Path(__file__).parent
class PyTest(test_command):
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
test_command.initialize_options(self)
self.pytest_args = ''
def run_tests(self):
import shlex # pylint:disable=C0415
        # imported here because the test dependencies (eggs) aren't loaded outside this method
import pytest # pylint:disable=C0415
error_code = pytest.main(shlex.split(self.pytest_args))
sys.exit(error_code)
def read(*path):
input_file = os.path.join(here, *path)
with open(str(input_file), encoding='utf-8') as file:
return file.read()
def check_and_update_numpy(min_acceptable='1.15'):
try:
import numpy as np # pylint:disable=C0415
update_required = LooseVersion(np.__version__) < LooseVersion(min_acceptable)
except ImportError:
update_required = True
if update_required:
subprocess.call([sys.executable, '-m', 'pip', 'install', 'numpy>={}'.format(min_acceptable)])
def install_dependencies_with_pip(dependencies):
for dep in dependencies:
if dep.startswith('#'):
continue
subprocess.call([sys.executable, '-m', 'pip', 'install', str(dep)])
class CoreInstall(install_command):
pass
def find_version(*path):
version_file = read(*path)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
is_arm = platform.processor() == 'aarch64'
long_description = read("README.md")
version = find_version("openvino/tools/accuracy_checker", "__init__.py")
def prepare_requirements():
requirements_core = read('requirements-core.in').split('\n')
if 'install_core' in sys.argv:
return requirements_core
requirements = read("requirements.in").split('\n')
return requirements_core + requirements
_requirements = prepare_requirements()
try:
importlib.import_module('cv2')
except ImportError as opencv_import_error:
if platform.processor() != 'aarch64':
warnings.warn(
"Problem with cv2 import: \n{}\n opencv-python will be added to requirements".format(opencv_import_error)
)
_requirements.append('opencv-python')
else:
warnings.warn(
"Problem with cv2 import: \n{}".format(opencv_import_error)
+ "\n Probably due to unsuitable numpy version, will be updated")
check_and_update_numpy()
if is_arm:
install_dependencies_with_pip(_requirements)
setup(
name="accuracy_checker",
description="Deep Learning Accuracy validation framework",
version=version,
long_description=long_description,
packages=find_packages(),
entry_points={
"console_scripts": [
"accuracy_check=openvino.tools.accuracy_checker.main:main",
"convert_annotation=openvino.tools.accuracy_checker.annotation_converters.convert:main"]},
zip_safe=False,
python_requires='>=3.5',
install_requires=_requirements if not is_arm else '',
tests_require=[read("requirements-test.in")],
cmdclass={'test': PyTest, 'install_core': CoreInstall},
extras_require={'extra': ['pycocotools>=2.0.2', 'torch>=0.4.0', 'torchvision>=0.2.1', 'lpips',
'kenlm @ git+https://github.com/kpu/kenlm.git#egg=kenlm']}
)
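# Usage sketch (not part of the original file): with the PyTest and CoreInstall command
# classes registered above, typical invocations would be
#   python setup.py install        # full install; requirements-core.in plus requirements.in
#   python setup.py install_core   # core-only install; prepare_requirements() detects this in sys.argv
#   python setup.py test --pytest-args="-k smoke"
# The pytest arguments shown are illustrative.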
|
"""
Copyright (c) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import importlib
import os
import re
import sys
import warnings
import platform
import subprocess # nosec - disable B404:import-subprocess check
from distutils.version import LooseVersion
from pathlib import Path
from setuptools import find_packages, setup # pylint:disable=W9902
from setuptools.command.test import test as test_command # pylint:disable=W9902
from setuptools.command.install import install as install_command # pylint:disable=W9902
here = Path(__file__).parent
class PyTest(test_command):
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
def initialize_options(self):
test_command.initialize_options(self)
self.pytest_args = ''
def run_tests(self):
import shlex # pylint:disable=C0415
# import here, cause outside the eggs aren't loaded
import pytest # pylint:disable=C0415
error_code = pytest.main(shlex.split(self.pytest_args))
sys.exit(error_code)
def read(*path):
input_file = os.path.join(here, *path)
with open(str(input_file), encoding='utf-8') as file:
return file.read()
def check_and_update_numpy(min_acceptable='1.15'):
try:
import numpy as np # pylint:disable=C0415
update_required = LooseVersion(np.__version__) < LooseVersion(min_acceptable)
except ImportError:
update_required = True
if update_required:
subprocess.call([sys.executable, '-m', 'pip', 'install', 'numpy>={}'.format(min_acceptable)])
def install_dependencies_with_pip(dependencies):
for dep in dependencies:
if dep.startswith('#'):
continue
subprocess.call([sys.executable, '-m', 'pip', 'install', str(dep)])
class CoreInstall(install_command):
pass
def find_version(*path):
version_file = read(*path)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
is_arm = platform.processor() == 'aarch64'
long_description = read("README.md")
version = find_version("openvino/tools/accuracy_checker", "__init__.py")
def prepare_requirements():
requirements_core = read('requirements-core.in').split('\n')
if 'install_core' in sys.argv:
return requirements_core
requirements = read("requirements.in").split('\n')
return requirements_core + requirements
_requirements = prepare_requirements()
try:
importlib.import_module('cv2')
except ImportError as opencv_import_error:
if platform.processor() != 'aarch64':
warnings.warn(
"Problem with cv2 import: \n{}\n opencv-python will be added to requirements".format(opencv_import_error)
)
_requirements.append('opencv-python')
else:
warnings.warn(
"Problem with cv2 import: \n{}".format(opencv_import_error)
+ "\n Probably due to unsuitable numpy version, will be updated")
check_and_update_numpy()
if is_arm:
install_dependencies_with_pip(_requirements)
setup(
name="accuracy_checker",
description="Deep Learning Accuracy validation framework",
version=version,
long_description=long_description,
packages=find_packages(),
entry_points={
"console_scripts": [
"accuracy_check=openvino.tools.accuracy_checker.main:main",
"convert_annotation=openvino.tools.accuracy_checker.annotation_converters.convert:main"]},
zip_safe=False,
python_requires='>=3.5',
install_requires=_requirements if not is_arm else '',
tests_require=[read("requirements-test.in")],
cmdclass={'test': PyTest, 'install_core': CoreInstall},
extras_require={'extra': ['pycocotools>=2.0.2', 'torch>=0.4.0', 'torchvision>=0.2.1', 'lpips',
'kenlm @ git+https://github.com/kpu/kenlm.git#egg=kenlm']}
)
|
en
| 0.742924
|
| 1.672402
| 2
|
protobuf-master/python/google/protobuf/internal/text_format_test.py
|
Fresher-Chen/tarsim
| 0
|
6627148
|
<reponame>Fresher-Chen/tarsim
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = '<EMAIL> (<NAME>)'
import math
import re
import six
import string
try:
import unittest2 as unittest # PY26, pylint: disable=g-import-not-at-top
except ImportError:
import unittest # pylint: disable=g-import-not-at-top
from google.protobuf.internal import _parameterized
from google.protobuf import any_test_pb2
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import any_test_pb2 as test_extend_any
from google.protobuf.internal import message_set_extensions_pb2
from google.protobuf.internal import test_util
from google.protobuf import descriptor_pool
from google.protobuf import text_format
# Low-level nuts-n-bolts tests.
class SimpleTextFormatTests(unittest.TestCase):
# The members of _QUOTES are formatted into a regexp template that
# expects single characters. Therefore it's an error (in addition to being
  # nonsensical in the first place) to try to specify a "quote mark" that is
# more than one character.
def testQuoteMarksAreSingleChars(self):
for quote in text_format._QUOTES:
self.assertEqual(1, len(quote))
# Base class with some common functionality.
class TextFormatBase(unittest.TestCase):
def ReadGolden(self, golden_filename):
with test_util.GoldenFile(golden_filename) as f:
return (f.readlines() if str is bytes else # PY3
[golden_line.decode('utf-8') for golden_line in f])
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.assertMultiLineEqual(text, ''.join(golden_lines))
def CompareToGoldenText(self, text, golden_text):
self.assertEqual(text, golden_text)
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
# Floating point fields are printed with .0 suffix even if they are
    # actually integer numbers.
text = re.compile(r'\.0$', re.MULTILINE).sub('', text)
return text
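  # A worked example of the normalization above (illustrative input, not a golden file):
  #   RemoveRedundantZeros('optional_double: 1.23e+005\noptional_float: 5.0\n')
  #   -> 'optional_double: 1.23e+5\noptional_float: 5\n'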
@_parameterized.Parameters((unittest_pb2), (unittest_proto3_arena_pb2))
class TextFormatTest(TextFormatBase):
def testPrintExotic(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string:'
' "\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintExoticUnicodeSubclass(self, message_module):
class UnicodeSub(six.text_type):
pass
message = message_module.TestAllTypes()
message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f'))
self.CompareToGoldenText(
text_format.MessageToString(message),
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self, message_module):
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append('Google')
message.repeated_string.append('Zurich')
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.optional_string = 'a\nnew\nline'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(message,
as_one_line=True,
as_utf8=False)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(message,
as_one_line=True,
as_utf8=True)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintRawUtf8String(self, message_module):
message = message_module.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = message_module.TestAllTypes()
text_format.Parse(text, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintFloatFormat(self, message_module):
# Check that float_format argument is passed to sub-message formatting.
message = message_module.NestedTestAllTypes()
# We use 1.25 as it is a round number in binary. The proto 32-bit float
# will not gain additional imprecise digits as a 64-bit Python float and
# show up in its str. 32-bit 1.2 is noisy when extended to 64-bit:
# >>> struct.unpack('f', struct.pack('f', 1.2))[0]
# 1.2000000476837158
# >>> struct.unpack('f', struct.pack('f', 1.25))[0]
# 1.25
message.payload.optional_float = 1.25
# Check rounding at 15 significant digits
message.payload.optional_double = -.000003456789012345678
# Check no decimal point.
message.payload.repeated_float.append(-5642)
# Check no trailing zeros.
message.payload.repeated_double.append(.000078900)
formatted_fields = ['optional_float: 1.25',
'optional_double: -3.45678901234568e-6',
'repeated_float: -5642', 'repeated_double: 7.89e-5']
text_message = text_format.MessageToString(message, float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{\n {0}\n {1}\n {2}\n {3}\n}}\n'.format(
*formatted_fields))
# as_one_line=True is a separate code branch where float_format is passed.
text_message = text_format.MessageToString(message,
as_one_line=True,
float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{ {0} {1} {2} {3} }}'.format(*formatted_fields))
def testMessageToString(self, message_module):
message = message_module.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def testPrintField(self, message_module):
message = message_module.TestAllTypes()
field = message.DESCRIPTOR.fields_by_name['optional_float']
value = message.optional_float
out = text_format.TextWriter(False)
text_format.PrintField(field, value, out)
self.assertEqual('optional_float: 0.0\n', out.getvalue())
out.close()
# Test Printer
out = text_format.TextWriter(False)
printer = text_format._Printer(out)
printer.PrintField(field, value)
self.assertEqual('optional_float: 0.0\n', out.getvalue())
out.close()
def testPrintFieldValue(self, message_module):
message = message_module.TestAllTypes()
field = message.DESCRIPTOR.fields_by_name['optional_float']
value = message.optional_float
out = text_format.TextWriter(False)
text_format.PrintFieldValue(field, value, out)
self.assertEqual('0.0', out.getvalue())
out.close()
# Test Printer
out = text_format.TextWriter(False)
printer = text_format._Printer(out)
printer.PrintFieldValue(field, value)
self.assertEqual('0.0', out.getvalue())
out.close()
def testParseAllFields(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
def testParseAndMergeUtf8(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
ascii_text = ascii_text.encode('utf-8')
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
parsed_message.Clear()
text_format.Merge(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
if six.PY2:
msg2 = message_module.TestAllTypes()
text = (u'optional_string: "café"')
text_format.Merge(text, msg2)
self.assertEqual(msg2.optional_string, u'café')
msg2.Clear()
text_format.Parse(text, msg2)
self.assertEqual(msg2.optional_string, u'café')
def testParseExotic(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Parse(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual('\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testParseTrailingCommas(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: 100;\n'
'repeated_int64: 200;\n'
'repeated_int64: 300,\n'
'repeated_string: "one",\n'
'repeated_string: "two";\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedScalarShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: [100, 200];\n'
'repeated_int64: 300,\n'
'repeated_string: ["one", "two"];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedMessageShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_nested_message: [{bb: 100}, {bb: 200}],\n'
'repeated_nested_message: {bb: 300}\n'
'repeated_nested_message [{bb: 400}];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_nested_message[0].bb)
self.assertEqual(200, message.repeated_nested_message[1].bb)
self.assertEqual(300, message.repeated_nested_message[2].bb)
self.assertEqual(400, message.repeated_nested_message[3].bb)
def testParseEmptyText(self, message_module):
message = message_module.TestAllTypes()
text = ''
text_format.Parse(text, message)
self.assertEqual(message_module.TestAllTypes(), message)
def testParseInvalidUtf8(self, message_module):
message = message_module.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
with self.assertRaises(text_format.ParseError) as e:
text_format.Parse(text, message)
self.assertEqual(e.exception.GetLine(), 1)
self.assertEqual(e.exception.GetColumn(), 28)
def testParseSingleWord(self, message_module):
message = message_module.TestAllTypes()
text = 'foo'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"foo".'), text_format.Parse, text, message)
def testParseUnknownField(self, message_module):
message = message_module.TestAllTypes()
text = 'unknown_field: 8\n'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"unknown_field".'), text_format.Parse, text, message)
def testParseBadEnumValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_nested_enum: BARR'
six.assertRaisesRegex(self, text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value named BARR.'), text_format.Parse,
text, message)
def testParseBadIntValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_int32: bork'
six.assertRaisesRegex(self, text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Parse, text, message)
def testParseStringFieldUnescape(self, message_module):
message = message_module.TestAllTypes()
text = r'''repeated_string: "\xf\x62"
repeated_string: "\\xf\\x62"
repeated_string: "\\\xf\\\x62"
repeated_string: "\\\\xf\\\\x62"
repeated_string: "\\\\\xf\\\\\x62"
repeated_string: "\x5cx20"'''
text_format.Parse(text, message)
SLASH = '\\'
self.assertEqual('\x0fb', message.repeated_string[0])
self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
message.repeated_string[3])
self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
message.repeated_string[4])
self.assertEqual(SLASH + 'x20', message.repeated_string[5])
def testMergeDuplicateScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_int32: 42 ' 'optional_int32: 67')
r = text_format.Merge(text, message)
self.assertIs(r, message)
self.assertEqual(67, message.optional_int32)
def testMergeDuplicateNestedMessageScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
r = text_format.Merge(text, message)
self.assertTrue(r is message)
self.assertEqual(2, message.optional_nested_message.bb)
def testParseOneof(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
text_format.Parse(text_format.MessageToString(m), m2)
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
def testMergeMultipleOneof(self, message_module):
m_string = '\n'.join(['oneof_uint32: 11', 'oneof_string: "foo"'])
m2 = message_module.TestAllTypes()
text_format.Merge(m_string, m2)
self.assertEqual('oneof_string', m2.WhichOneof('oneof_field'))
def testParseMultipleOneof(self, message_module):
m_string = '\n'.join(['oneof_uint32: 11', 'oneof_string: "foo"'])
m2 = message_module.TestAllTypes()
with self.assertRaisesRegexp(text_format.ParseError,
' is specified along with field '):
text_format.Parse(m_string, m2)
# These are tests that aren't fundamentally specific to proto2, but are at
# the moment because of differences between the proto2 and proto3 test schemas.
# Ideally the schemas would be made more similar so these tests could pass.
class OnlyWorksWithProto2RightNowTests(TextFormatBase):
def testPrintAllFieldsPointy(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_data_pointy_oneof.txt')
def testParseGolden(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_data_oneof_implemented.txt'))
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.Parse(golden_text, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data_oneof_implemented.txt')
def testPrintInIndexOrder(self):
message = unittest_pb2.TestFieldOrderings()
message.my_string = '115'
message.my_int = 101
message.my_float = 111
message.optional_nested_message.oo = 0
message.optional_nested_message.bb = 1
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, use_index_order=True)),
'my_string: \"115\"\nmy_int: 101\nmy_float: 111\n'
'optional_nested_message {\n oo: 0\n bb: 1\n}\n')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'my_int: 101\nmy_string: \"115\"\nmy_float: 111\n'
'optional_nested_message {\n bb: 1\n oo: 0\n}\n')
def testMergeLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data_oneof_implemented.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.MergeLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testParseLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data_oneof_implemented.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.ParseLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintMap(self):
message = map_unittest_pb2.TestMap()
message.map_int32_int32[-123] = -456
message.map_int64_int64[-2**33] = -2**34
message.map_uint32_uint32[123] = 456
message.map_uint64_uint64[2**33] = 2**34
message.map_string_string['abc'] = '123'
message.map_int32_foreign_message[111].c = 5
# Maps are serialized to text format using their underlying repeated
# representation.
self.CompareToGoldenText(
text_format.MessageToString(message), 'map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
def testMapOrderEnforcement(self):
message = map_unittest_pb2.TestMap()
for letter in string.ascii_uppercase[13:26]:
message.map_string_string[letter] = 'dummy'
for letter in reversed(string.ascii_uppercase[0:13]):
message.map_string_string[letter] = 'dummy'
golden = ''.join(('map_string_string {\n key: "%c"\n value: "dummy"\n}\n'
% (letter,) for letter in string.ascii_uppercase))
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# TODO(teboring): In c/137553523, not serializing default value for map entry
# message has been fixed. This test needs to be disabled in order to submit
# that cl. Add this back when c/137553523 has been submitted.
# def testMapOrderSemantics(self):
# golden_lines = self.ReadGolden('map_test_data.txt')
# message = map_unittest_pb2.TestMap()
# text_format.ParseLines(golden_lines, message)
# candidate = text_format.MessageToString(message)
# # The Python implementation emits "1.0" for the double value that the C++
# # implementation emits as "1".
# candidate = candidate.replace('1.0', '1', 2)
# candidate = candidate.replace('0.0', '0', 2)
# self.assertMultiLineEqual(candidate, ''.join(golden_lines))
# Tests of proto2-only features (MessageSet, extensions, etc.).
class Proto2Tests(TextFormatBase):
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message), 'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
message = message_set_extensions_pb2.TestMessageSet()
ext = message_set_extensions_pb2.message_set_extension3
message.Extensions[ext].text = 'bar'
self.CompareToGoldenText(
text_format.MessageToString(message),
'[google.protobuf.internal.TestMessageSetExtension3] {\n'
' text: \"bar\"\n'
'}\n')
def testPrintMessageSetByFieldNumber(self):
out = text_format.TextWriter(False)
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
text_format.PrintMessage(message, out, use_field_number=True)
self.CompareToGoldenText(out.getvalue(), '1 {\n'
' 1545008 {\n'
' 15: 23\n'
' }\n'
' 1547769 {\n'
' 25: \"foo\"\n'
' }\n'
'}\n')
out.close()
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testParseMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n' 'repeated_uint64: 2\n')
text_format.Parse(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testExtensionInsideAnyMessage(self):
message = test_extend_any.TestAny()
text = ('value {\n'
' [type.googleapis.com/google.protobuf.internal.TestAny] {\n'
' [google.protobuf.internal.TestAnyExtension1.extension1] {\n'
' i: 10\n'
' }\n'
' }\n'
'}\n')
text_format.Merge(text, message, descriptor_pool=descriptor_pool.Default())
self.CompareToGoldenText(
text_format.MessageToString(
message, descriptor_pool=descriptor_pool.Default()),
text)
def testParseMessageByFieldNumber(self):
message = unittest_pb2.TestAllTypes()
text = ('34: 1\n' 'repeated_uint64: 2\n')
text_format.Parse(text, message, allow_field_number=True)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('1 {\n'
' 1545008 {\n'
' 15: 23\n'
' }\n'
' 1547769 {\n'
' 25: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_field_number=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
    # Can't parse a field number without setting allow_field_number=True.
message = unittest_pb2.TestAllTypes()
text = '34:1\n'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"34".'), text_format.Parse, text, message)
# Can't parse if field number is not found.
text = '1234:1\n'
six.assertRaisesRegex(
self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"1234".'),
text_format.Parse,
text,
message,
allow_field_number=True)
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintAllExtensionsPointy(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_extensions_data_pointy.txt')
def testParseGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEqual(message, parsed_message)
def testParseAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testParseAllowedUnknownExtension(self):
# Skip over unknown extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [unknown_extension] {\n'
' i: 23\n'
' bin: "\xe0"'
' [nested_unknown_ext]: {\n'
' i: 23\n'
' x: x\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' multiline_str: "abc"\n'
' "def"\n'
' "xyz."\n'
' [nested_unknown_ext.ext]: <\n'
' i: 23\n'
' i: 24\n'
' pointfloat: .3\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' long_string: "test" "test2" \n'
' >\n'
' }\n'
' }\n'
' [unknown_extension]: 5\n'
' [unknown_extension_with_number_field] {\n'
' 1: "some_field"\n'
' 2: -0.451\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
golden = 'message_set {\n}\n'
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# Catch parse errors in unknown extension.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' i:\n' # Missing value.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: }',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed string\n' # Missing closing quote.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed\n multiline\n string\n'
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [malformed_extension] <\n'
' i: -5\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'5:1 : Expected ">".',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
# Don't allow unknown fields with allow_unknown_extension=True.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' unknown_field: true\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
('2:3 : Message type '
'"proto2_wireformat_unittest.TestMessageSet" has no'
' field named "unknown_field".'),
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
# Parse known extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testParseBadIdentifier(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { "bb": 1 }')
with self.assertRaises(text_format.ParseError) as e:
text_format.Parse(text, message)
self.assertEqual(str(e.exception),
'1:27 : Expected identifier or number, got "bb".')
def testParseBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
six.assertRaisesRegex(self, text_format.ParseError,
'1:2 : Extension "unknown_extension" not registered.',
text_format.Parse, text, message)
message = unittest_pb2.TestAllTypes()
six.assertRaisesRegex(self, text_format.ParseError, (
'1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'), text_format.Parse, text, message)
def testParseNumericUnknownEnum(self):
message = unittest_pb2.TestAllTypes()
text = 'optional_nested_enum: 100'
six.assertRaisesRegex(self, text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value with number 100.'), text_format.Parse,
text, message)
def testMergeDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
text_format.Merge(text, message)
self.assertEqual(67,
message.Extensions[unittest_pb2.optional_int32_extension])
def testParseDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:96 : Message type "protobuf_unittest.TestAllExtensions" '
'should not have multiple '
'"protobuf_unittest.optional_int32_extension" extensions.'),
text_format.Parse, text, message)
def testParseDuplicateNestedMessageScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:65 : Message type "protobuf_unittest.TestAllTypes.NestedMessage" '
'should not have multiple "bb" fields.'), text_format.Parse, text,
message)
def testParseDuplicateScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_int32: 42 ' 'optional_int32: 67')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:36 : Message type "protobuf_unittest.TestAllTypes" should not '
'have multiple "optional_int32" fields.'), text_format.Parse, text,
message)
def testParseGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
six.assertRaisesRegex(self, text_format.ParseError, '1:16 : Expected ">".',
text_format.Parse, text, message)
text = 'RepeatedGroup: {'
six.assertRaisesRegex(self, text_format.ParseError, '1:16 : Expected "}".',
text_format.Parse, text, message)
def testParseEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
# Maps aren't really proto2-only, but our test schema only has maps for
# proto2.
def testParseMap(self):
text = ('map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
message = map_unittest_pb2.TestMap()
text_format.Parse(text, message)
self.assertEqual(-456, message.map_int32_int32[-123])
self.assertEqual(-2**34, message.map_int64_int64[-2**33])
self.assertEqual(456, message.map_uint32_uint32[123])
self.assertEqual(2**34, message.map_uint64_uint64[2**33])
self.assertEqual('123', message.map_string_string['abc'])
self.assertEqual(5, message.map_int32_foreign_message[111].c)
class Proto3Tests(unittest.TestCase):
def testPrintMessageExpandAny(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
descriptor_pool=descriptor_pool.Default()),
'any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
def testPrintMessageExpandAnyRepeated(self):
packed_message = unittest_pb2.OneString()
message = any_test_pb2.TestAny()
packed_message.data = 'string0'
message.repeated_any_value.add().Pack(packed_message)
packed_message.data = 'string1'
message.repeated_any_value.add().Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message),
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string0"\n'
' }\n'
'}\n'
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string1"\n'
' }\n'
'}\n')
def testPrintMessageExpandAnyDescriptorPoolMissingType(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
empty_pool = descriptor_pool.DescriptorPool()
self.assertEqual(
text_format.MessageToString(message, descriptor_pool=empty_pool),
'any_value {\n'
' type_url: "type.googleapis.com/protobuf_unittest.OneString"\n'
' value: "\\n\\006string"\n'
'}\n')
def testPrintMessageExpandAnyPointyBrackets(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
pointy_brackets=True),
'any_value <\n'
' [type.googleapis.com/protobuf_unittest.OneString] <\n'
' data: "string"\n'
' >\n'
'>\n')
def testPrintMessageExpandAnyAsOneLine(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
as_one_line=True),
'any_value {'
' [type.googleapis.com/protobuf_unittest.OneString]'
' { data: "string" } '
'}')
def testPrintMessageExpandAnyAsOneLinePointyBrackets(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
as_one_line=True,
pointy_brackets=True,
descriptor_pool=descriptor_pool.Default()),
'any_value <'
' [type.googleapis.com/protobuf_unittest.OneString]'
' < data: "string" > '
'>')
def testUnknownEnums(self):
message = unittest_proto3_arena_pb2.TestAllTypes()
message2 = unittest_proto3_arena_pb2.TestAllTypes()
message.optional_nested_enum = 999
text_string = text_format.MessageToString(message)
text_format.Parse(text_string, message2)
self.assertEqual(999, message2.optional_nested_enum)
def testMergeExpandedAny(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
message.Clear()
text_format.Parse(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeExpandedAnyRepeated(self):
message = any_test_pb2.TestAny()
text = ('repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string0"\n'
' }\n'
'}\n'
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string1"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.repeated_any_value[0].Unpack(packed_message)
self.assertEqual('string0', packed_message.data)
message.repeated_any_value[1].Unpack(packed_message)
self.assertEqual('string1', packed_message.data)
def testMergeExpandedAnyPointyBrackets(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] <\n'
' data: "string"\n'
' >\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeAlternativeUrl(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.otherapi.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
self.assertEqual('type.otherapi.com/protobuf_unittest.OneString',
message.any_value.type_url)
def testMergeExpandedAnyDescriptorPoolMissingType(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
with self.assertRaises(text_format.ParseError) as e:
empty_pool = descriptor_pool.DescriptorPool()
text_format.Merge(text, message, descriptor_pool=empty_pool)
self.assertEqual(
str(e.exception),
'Type protobuf_unittest.OneString not found in descriptor pool')
def testMergeUnexpandedAny(self):
text = ('any_value {\n'
' type_url: "type.googleapis.com/protobuf_unittest.OneString"\n'
' value: "\\n\\006string"\n'
'}\n')
message = any_test_pb2.TestAny()
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeMissingAnyEndToken(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n')
with self.assertRaises(text_format.ParseError) as e:
text_format.Merge(text, message)
self.assertEqual(str(e.exception), '3:11 : Expected "}".')
class TokenizerTest(unittest.TestCase):
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f '
'False_bool: False True_bool: True X:iNf Y:-inF Z:nAN')
tokenizer = text_format.Tokenizer(text.splitlines())
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'), ':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'), ':',
(tokenizer.ConsumeInteger, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'), ':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'), ':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'), ':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'), ':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'), ':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'), ':', '{',
(tokenizer.ConsumeIdentifier, 'A'), ':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'), ':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'), ':',
(tokenizer.ConsumeBool, False), '}',
(tokenizer.ConsumeIdentifier, 'ID9'), ':',
(tokenizer.ConsumeInteger, 22),
(tokenizer.ConsumeIdentifier, 'ID10'), ':',
(tokenizer.ConsumeInteger, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'), ':',
(tokenizer.ConsumeInteger, -22),
(tokenizer.ConsumeIdentifier, 'ID12'), ':',
(tokenizer.ConsumeInteger, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'), ':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'), ':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'False_bool'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'True_bool'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'X'), ':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'Y'), ':',
(tokenizer.ConsumeFloat, float('-inf')),
(tokenizer.ConsumeIdentifier, 'Z'), ':',
(tokenizer.ConsumeFloat, float('nan'))]
i = 0
while not tokenizer.AtEnd():
m = methods[i]
if isinstance(m, str):
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
elif isinstance(m[1], float) and math.isnan(m[1]):
self.assertTrue(math.isnan(m[0]()))
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeAbstractIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(-1, tokenizer.ConsumeInteger())
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInteger())
self.assertEqual(int64_max + 1, tokenizer.ConsumeInteger())
self.assertTrue(tokenizer.AtEnd())
text = '-0 0 0 1.2'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(0, tokenizer.ConsumeInteger())
self.assertEqual(0, tokenizer.ConsumeInteger())
self.assertEqual(True, tokenizer.TryConsumeInteger())
self.assertEqual(False, tokenizer.TryConsumeInteger())
with self.assertRaises(text_format.ParseError):
tokenizer.ConsumeInteger()
self.assertEqual(1.2, tokenizer.ConsumeFloat())
self.assertTrue(tokenizer.AtEnd())
def testConsumeIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint32, tokenizer)
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint64, tokenizer)
self.assertEqual(-1, text_format._ConsumeInt32(tokenizer))
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint32, tokenizer)
self.assertRaises(text_format.ParseError,
text_format._ConsumeInt32, tokenizer)
self.assertEqual(uint32_max + 1, text_format._ConsumeInt64(tokenizer))
self.assertRaises(text_format.ParseError,
text_format._ConsumeInt64, tokenizer)
self.assertEqual(int64_max + 1, text_format._ConsumeUint64(tokenizer))
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
self.assertTrue(tokenizer.AtEnd())
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
def testSkipComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines())
self.assertTrue(tokenizer.AtEnd())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
def testConsumeComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines(),
skip_comments=False)
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTwoComments(self):
text = '# some comment\n# another comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# another comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTrailingComment(self):
text = 'some_number: 4\n# some comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeLineComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines(),
skip_comments=False)
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((False, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTwoLineComments(self):
text = '# some comment\n# another comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual((False, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((False, '# another comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeAndCheckTrailingComment(self):
text = 'some_number: 4 # some comment' # trailing comment on the same line
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertRaises(text_format.ParseError,
tokenizer.ConsumeCommentOrTrailingComment)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertRaises(text_format.ParseError,
tokenizer.ConsumeCommentOrTrailingComment)
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((True, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testHashinComment(self):
text = 'some_number: 4 # some comment # not a new comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertEqual((True, '# some comment # not a new comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
# Tests for pretty printer functionality.
@_parameterized.Parameters((unittest_pb2), (unittest_proto3_arena_pb2))
class PrettyPrinterTest(TextFormatBase):
def testPrettyPrintNoMatch(self, message_module):
def printer(message, indent, as_one_line):
del message, indent, as_one_line
return None
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'repeated_nested_message { bb: 42 }')
def testPrettyPrintOneLine(self, message_module):
def printer(m, indent, as_one_line):
del indent, as_one_line
if m.DESCRIPTOR == message_module.TestAllTypes.NestedMessage.DESCRIPTOR:
return 'My lucky number is %s' % m.bb
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'repeated_nested_message { My lucky number is 42 }')
def testPrettyPrintMultiLine(self, message_module):
def printer(m, indent, as_one_line):
if m.DESCRIPTOR == message_module.TestAllTypes.NestedMessage.DESCRIPTOR:
        line_delimiter = (' ' if as_one_line else '\n') + ' ' * indent
        return 'My lucky number is:%s%s' % (line_delimiter, m.bb)
return None
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'repeated_nested_message { My lucky number is: 42 }')
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=False, message_formatter=printer),
'repeated_nested_message {\n My lucky number is:\n 42\n}\n')
def testPrettyPrintEntireMessage(self, message_module):
def printer(m, indent, as_one_line):
del indent, as_one_line
if m.DESCRIPTOR == message_module.TestAllTypes.DESCRIPTOR:
        return 'This is the message!'
return None
message = message_module.TestAllTypes()
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=False, message_formatter=printer),
        'This is the message!\n')
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
        'This is the message!')
def testPrettyPrintMultipleParts(self, message_module):
def printer(m, indent, as_one_line):
del indent, as_one_line
if m.DESCRIPTOR == message_module.TestAllTypes.NestedMessage.DESCRIPTOR:
return 'My lucky number is %s' % m.bb
return None
message = message_module.TestAllTypes()
message.optional_int32 = 61
msg = message.repeated_nested_message.add()
msg.bb = 42
msg = message.repeated_nested_message.add()
msg.bb = 99
msg = message.optional_nested_message
msg.bb = 1
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
('optional_int32: 61 '
'optional_nested_message { My lucky number is 1 } '
'repeated_nested_message { My lucky number is 42 } '
'repeated_nested_message { My lucky number is 99 }'))
if __name__ == '__main__':
unittest.main()
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test for google.protobuf.text_format."""
__author__ = '<EMAIL> (<NAME>)'
import math
import re
import six
import string
try:
import unittest2 as unittest # PY26, pylint: disable=g-import-not-at-top
except ImportError:
import unittest # pylint: disable=g-import-not-at-top
from google.protobuf.internal import _parameterized
from google.protobuf import any_test_pb2
from google.protobuf import map_unittest_pb2
from google.protobuf import unittest_mset_pb2
from google.protobuf import unittest_pb2
from google.protobuf import unittest_proto3_arena_pb2
from google.protobuf.internal import api_implementation
from google.protobuf.internal import any_test_pb2 as test_extend_any
from google.protobuf.internal import message_set_extensions_pb2
from google.protobuf.internal import test_util
from google.protobuf import descriptor_pool
from google.protobuf import text_format
# Low-level nuts-n-bolts tests.
class SimpleTextFormatTests(unittest.TestCase):
# The members of _QUOTES are formatted into a regexp template that
# expects single characters. Therefore it's an error (in addition to being
# non-sensical in the first place) to try to specify a "quote mark" that is
# more than one character.
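  # Illustrative sketch (the exact template lives in text_format): with a
  # _QUOTES value such as ('"', "'"), a pattern built as '[%s]' % ''.join(_QUOTES)
  # yields the character class ["'], which by construction matches exactly one
  # character, so a multi-character "quote mark" would silently change its meaning.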
def testQuoteMarksAreSingleChars(self):
for quote in text_format._QUOTES:
self.assertEqual(1, len(quote))
# Base class with some common functionality.
class TextFormatBase(unittest.TestCase):
def ReadGolden(self, golden_filename):
with test_util.GoldenFile(golden_filename) as f:
return (f.readlines() if str is bytes else # PY3
[golden_line.decode('utf-8') for golden_line in f])
def CompareToGoldenFile(self, text, golden_filename):
golden_lines = self.ReadGolden(golden_filename)
self.assertMultiLineEqual(text, ''.join(golden_lines))
def CompareToGoldenText(self, text, golden_text):
self.assertEqual(text, golden_text)
def RemoveRedundantZeros(self, text):
# Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove
# these zeros in order to match the golden file.
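    # Purely illustrative examples of the normalization performed below:
    #   '1.23e+022'               -> '1.23e+22'                (exponent zero-padding removed)
    #   'repeated_float: -5642.0' -> 'repeated_float: -5642'   (trailing .0 stripped)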
text = text.replace('e+0','e+').replace('e+0','e+') \
.replace('e-0','e-').replace('e-0','e-')
    # Floating point fields are printed with a .0 suffix even if they are
    # actually integer numbers.
text = re.compile(r'\.0$', re.MULTILINE).sub('', text)
return text
@_parameterized.Parameters((unittest_pb2), (unittest_proto3_arena_pb2))
class TextFormatTest(TextFormatBase):
def testPrintExotic(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string:'
' "\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintExoticUnicodeSubclass(self, message_module):
class UnicodeSub(six.text_type):
pass
message = message_module.TestAllTypes()
message.repeated_string.append(UnicodeSub(u'\u00fc\ua71f'))
self.CompareToGoldenText(
text_format.MessageToString(message),
'repeated_string: "\\303\\274\\352\\234\\237"\n')
def testPrintNestedMessageAsOneLine(self, message_module):
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_nested_message { bb: 42 }')
def testPrintRepeatedFieldsAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int32.append(1)
message.repeated_int32.append(1)
message.repeated_int32.append(3)
message.repeated_string.append('Google')
message.repeated_string.append('Zurich')
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'repeated_int32: 1 repeated_int32: 1 repeated_int32: 3 '
'repeated_string: "Google" repeated_string: "Zurich"')
def testPrintNestedNewLineInStringAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.optional_string = 'a\nnew\nline'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'optional_string: "a\\nnew\\nline"')
def testPrintExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, as_one_line=True)),
'repeated_int64: -9223372036854775808'
' repeated_uint64: 18446744073709551615'
' repeated_double: 123.456'
' repeated_double: 1.23e+22'
' repeated_double: 1.23e-18'
' repeated_string: '
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""'
' repeated_string: "\\303\\274\\352\\234\\237"')
def testRoundTripExoticAsOneLine(self, message_module):
message = message_module.TestAllTypes()
message.repeated_int64.append(-9223372036854775808)
message.repeated_uint64.append(18446744073709551615)
message.repeated_double.append(123.456)
message.repeated_double.append(1.23e22)
message.repeated_double.append(1.23e-18)
message.repeated_string.append('\000\001\a\b\f\n\r\t\v\\\'"')
message.repeated_string.append(u'\u00fc\ua71f')
# Test as_utf8 = False.
wire_text = text_format.MessageToString(message,
as_one_line=True,
as_utf8=False)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message)
# Test as_utf8 = True.
wire_text = text_format.MessageToString(message,
as_one_line=True,
as_utf8=True)
parsed_message = message_module.TestAllTypes()
r = text_format.Parse(wire_text, parsed_message)
self.assertIs(r, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintRawUtf8String(self, message_module):
message = message_module.TestAllTypes()
message.repeated_string.append(u'\u00fc\ua71f')
text = text_format.MessageToString(message, as_utf8=True)
self.CompareToGoldenText(text, 'repeated_string: "\303\274\352\234\237"\n')
parsed_message = message_module.TestAllTypes()
text_format.Parse(text, parsed_message)
self.assertEqual(message, parsed_message,
'\n%s != %s' % (message, parsed_message))
def testPrintFloatFormat(self, message_module):
# Check that float_format argument is passed to sub-message formatting.
message = message_module.NestedTestAllTypes()
# We use 1.25 as it is a round number in binary. The proto 32-bit float
# will not gain additional imprecise digits as a 64-bit Python float and
# show up in its str. 32-bit 1.2 is noisy when extended to 64-bit:
# >>> struct.unpack('f', struct.pack('f', 1.2))[0]
# 1.2000000476837158
# >>> struct.unpack('f', struct.pack('f', 1.25))[0]
# 1.25
message.payload.optional_float = 1.25
# Check rounding at 15 significant digits
message.payload.optional_double = -.000003456789012345678
# Check no decimal point.
message.payload.repeated_float.append(-5642)
# Check no trailing zeros.
message.payload.repeated_double.append(.000078900)
formatted_fields = ['optional_float: 1.25',
'optional_double: -3.45678901234568e-6',
'repeated_float: -5642', 'repeated_double: 7.89e-5']
text_message = text_format.MessageToString(message, float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{\n {0}\n {1}\n {2}\n {3}\n}}\n'.format(
*formatted_fields))
# as_one_line=True is a separate code branch where float_format is passed.
text_message = text_format.MessageToString(message,
as_one_line=True,
float_format='.15g')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_message),
'payload {{ {0} {1} {2} {3} }}'.format(*formatted_fields))
def testMessageToString(self, message_module):
message = message_module.ForeignMessage()
message.c = 123
self.assertEqual('c: 123\n', str(message))
def testPrintField(self, message_module):
message = message_module.TestAllTypes()
field = message.DESCRIPTOR.fields_by_name['optional_float']
value = message.optional_float
out = text_format.TextWriter(False)
text_format.PrintField(field, value, out)
self.assertEqual('optional_float: 0.0\n', out.getvalue())
out.close()
# Test Printer
out = text_format.TextWriter(False)
printer = text_format._Printer(out)
printer.PrintField(field, value)
self.assertEqual('optional_float: 0.0\n', out.getvalue())
out.close()
def testPrintFieldValue(self, message_module):
message = message_module.TestAllTypes()
field = message.DESCRIPTOR.fields_by_name['optional_float']
value = message.optional_float
out = text_format.TextWriter(False)
text_format.PrintFieldValue(field, value, out)
self.assertEqual('0.0', out.getvalue())
out.close()
# Test Printer
out = text_format.TextWriter(False)
printer = text_format._Printer(out)
printer.PrintFieldValue(field, value)
self.assertEqual('0.0', out.getvalue())
out.close()
def testParseAllFields(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
def testParseAndMergeUtf8(self, message_module):
message = message_module.TestAllTypes()
test_util.SetAllFields(message)
ascii_text = text_format.MessageToString(message)
ascii_text = ascii_text.encode('utf-8')
parsed_message = message_module.TestAllTypes()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
parsed_message.Clear()
text_format.Merge(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
if message_module is unittest_pb2:
test_util.ExpectAllFieldsSet(self, message)
if six.PY2:
msg2 = message_module.TestAllTypes()
text = (u'optional_string: "café"')
text_format.Merge(text, msg2)
self.assertEqual(msg2.optional_string, u'café')
msg2.Clear()
text_format.Parse(text, msg2)
self.assertEqual(msg2.optional_string, u'café')
def testParseExotic(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: -9223372036854775808\n'
'repeated_uint64: 18446744073709551615\n'
'repeated_double: 123.456\n'
'repeated_double: 1.23e+22\n'
'repeated_double: 1.23e-18\n'
'repeated_string: \n'
'"\\000\\001\\007\\010\\014\\n\\r\\t\\013\\\\\\\'\\""\n'
'repeated_string: "foo" \'corge\' "grault"\n'
'repeated_string: "\\303\\274\\352\\234\\237"\n'
'repeated_string: "\\xc3\\xbc"\n'
'repeated_string: "\xc3\xbc"\n')
text_format.Parse(text, message)
self.assertEqual(-9223372036854775808, message.repeated_int64[0])
self.assertEqual(18446744073709551615, message.repeated_uint64[0])
self.assertEqual(123.456, message.repeated_double[0])
self.assertEqual(1.23e22, message.repeated_double[1])
self.assertEqual(1.23e-18, message.repeated_double[2])
self.assertEqual('\000\001\a\b\f\n\r\t\v\\\'"', message.repeated_string[0])
self.assertEqual('foocorgegrault', message.repeated_string[1])
self.assertEqual(u'\u00fc\ua71f', message.repeated_string[2])
self.assertEqual(u'\u00fc', message.repeated_string[3])
def testParseTrailingCommas(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: 100;\n'
'repeated_int64: 200;\n'
'repeated_int64: 300,\n'
'repeated_string: "one",\n'
'repeated_string: "two";\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedScalarShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_int64: [100, 200];\n'
'repeated_int64: 300,\n'
'repeated_string: ["one", "two"];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_int64[0])
self.assertEqual(200, message.repeated_int64[1])
self.assertEqual(300, message.repeated_int64[2])
self.assertEqual(u'one', message.repeated_string[0])
self.assertEqual(u'two', message.repeated_string[1])
def testParseRepeatedMessageShortFormat(self, message_module):
message = message_module.TestAllTypes()
text = ('repeated_nested_message: [{bb: 100}, {bb: 200}],\n'
'repeated_nested_message: {bb: 300}\n'
'repeated_nested_message [{bb: 400}];\n')
text_format.Parse(text, message)
self.assertEqual(100, message.repeated_nested_message[0].bb)
self.assertEqual(200, message.repeated_nested_message[1].bb)
self.assertEqual(300, message.repeated_nested_message[2].bb)
self.assertEqual(400, message.repeated_nested_message[3].bb)
def testParseEmptyText(self, message_module):
message = message_module.TestAllTypes()
text = ''
text_format.Parse(text, message)
self.assertEqual(message_module.TestAllTypes(), message)
def testParseInvalidUtf8(self, message_module):
message = message_module.TestAllTypes()
text = 'repeated_string: "\\xc3\\xc3"'
with self.assertRaises(text_format.ParseError) as e:
text_format.Parse(text, message)
self.assertEqual(e.exception.GetLine(), 1)
self.assertEqual(e.exception.GetColumn(), 28)
def testParseSingleWord(self, message_module):
message = message_module.TestAllTypes()
text = 'foo'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"foo".'), text_format.Parse, text, message)
def testParseUnknownField(self, message_module):
message = message_module.TestAllTypes()
text = 'unknown_field: 8\n'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"unknown_field".'), text_format.Parse, text, message)
def testParseBadEnumValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_nested_enum: BARR'
six.assertRaisesRegex(self, text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value named BARR.'), text_format.Parse,
text, message)
def testParseBadIntValue(self, message_module):
message = message_module.TestAllTypes()
text = 'optional_int32: bork'
six.assertRaisesRegex(self, text_format.ParseError,
('1:17 : Couldn\'t parse integer: bork'),
text_format.Parse, text, message)
def testParseStringFieldUnescape(self, message_module):
message = message_module.TestAllTypes()
text = r'''repeated_string: "\xf\x62"
repeated_string: "\\xf\\x62"
repeated_string: "\\\xf\\\x62"
repeated_string: "\\\\xf\\\\x62"
repeated_string: "\\\\\xf\\\\\x62"
repeated_string: "\x5cx20"'''
text_format.Parse(text, message)
SLASH = '\\'
self.assertEqual('\x0fb', message.repeated_string[0])
self.assertEqual(SLASH + 'xf' + SLASH + 'x62', message.repeated_string[1])
self.assertEqual(SLASH + '\x0f' + SLASH + 'b', message.repeated_string[2])
self.assertEqual(SLASH + SLASH + 'xf' + SLASH + SLASH + 'x62',
message.repeated_string[3])
self.assertEqual(SLASH + SLASH + '\x0f' + SLASH + SLASH + 'b',
message.repeated_string[4])
self.assertEqual(SLASH + 'x20', message.repeated_string[5])
def testMergeDuplicateScalars(self, message_module):
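    # Merge lets a later value overwrite an earlier scalar; Parse treats the
    # same input as an error (see testParseDuplicateScalars in the
    # proto2-only tests further down).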
message = message_module.TestAllTypes()
text = ('optional_int32: 42 ' 'optional_int32: 67')
r = text_format.Merge(text, message)
self.assertIs(r, message)
self.assertEqual(67, message.optional_int32)
def testMergeDuplicateNestedMessageScalars(self, message_module):
message = message_module.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
r = text_format.Merge(text, message)
self.assertTrue(r is message)
self.assertEqual(2, message.optional_nested_message.bb)
def testParseOneof(self, message_module):
m = message_module.TestAllTypes()
m.oneof_uint32 = 11
m2 = message_module.TestAllTypes()
text_format.Parse(text_format.MessageToString(m), m2)
self.assertEqual('oneof_uint32', m2.WhichOneof('oneof_field'))
def testMergeMultipleOneof(self, message_module):
m_string = '\n'.join(['oneof_uint32: 11', 'oneof_string: "foo"'])
m2 = message_module.TestAllTypes()
text_format.Merge(m_string, m2)
self.assertEqual('oneof_string', m2.WhichOneof('oneof_field'))
def testParseMultipleOneof(self, message_module):
m_string = '\n'.join(['oneof_uint32: 11', 'oneof_string: "foo"'])
m2 = message_module.TestAllTypes()
with self.assertRaisesRegexp(text_format.ParseError,
' is specified along with field '):
text_format.Parse(m_string, m2)
# These are tests that aren't fundamentally specific to proto2, but are at
# the moment because of differences between the proto2 and proto3 test schemas.
# Ideally the schemas would be made more similar so these tests could pass.
class OnlyWorksWithProto2RightNowTests(TextFormatBase):
def testPrintAllFieldsPointy(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_data_pointy_oneof.txt')
def testParseGolden(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_data_oneof_implemented.txt'))
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.Parse(golden_text, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintAllFields(self):
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_data_oneof_implemented.txt')
def testPrintInIndexOrder(self):
message = unittest_pb2.TestFieldOrderings()
message.my_string = '115'
message.my_int = 101
message.my_float = 111
message.optional_nested_message.oo = 0
message.optional_nested_message.bb = 1
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(
message, use_index_order=True)),
'my_string: \"115\"\nmy_int: 101\nmy_float: 111\n'
'optional_nested_message {\n oo: 0\n bb: 1\n}\n')
self.CompareToGoldenText(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'my_int: 101\nmy_string: \"115\"\nmy_float: 111\n'
'optional_nested_message {\n bb: 1\n oo: 0\n}\n')
def testMergeLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data_oneof_implemented.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.MergeLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testParseLinesGolden(self):
opened = self.ReadGolden('text_format_unittest_data_oneof_implemented.txt')
parsed_message = unittest_pb2.TestAllTypes()
r = text_format.ParseLines(opened, parsed_message)
self.assertIs(r, parsed_message)
message = unittest_pb2.TestAllTypes()
test_util.SetAllFields(message)
self.assertEqual(message, parsed_message)
def testPrintMap(self):
message = map_unittest_pb2.TestMap()
message.map_int32_int32[-123] = -456
message.map_int64_int64[-2**33] = -2**34
message.map_uint32_uint32[123] = 456
message.map_uint64_uint64[2**33] = 2**34
message.map_string_string['abc'] = '123'
message.map_int32_foreign_message[111].c = 5
# Maps are serialized to text format using their underlying repeated
# representation.
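    # (Background: a protobuf map<K, V> field is modeled as a repeated entry
    # submessage with fields key (number 1) and value (number 2), which is
    # why every map entry below prints as a key/value block.)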
self.CompareToGoldenText(
text_format.MessageToString(message), 'map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
def testMapOrderEnforcement(self):
message = map_unittest_pb2.TestMap()
for letter in string.ascii_uppercase[13:26]:
message.map_string_string[letter] = 'dummy'
for letter in reversed(string.ascii_uppercase[0:13]):
message.map_string_string[letter] = 'dummy'
golden = ''.join(('map_string_string {\n key: "%c"\n value: "dummy"\n}\n'
% (letter,) for letter in string.ascii_uppercase))
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# TODO(teboring): In c/137553523, not serializing default value for map entry
# message has been fixed. This test needs to be disabled in order to submit
# that cl. Add this back when c/137553523 has been submitted.
# def testMapOrderSemantics(self):
# golden_lines = self.ReadGolden('map_test_data.txt')
# message = map_unittest_pb2.TestMap()
# text_format.ParseLines(golden_lines, message)
# candidate = text_format.MessageToString(message)
# # The Python implementation emits "1.0" for the double value that the C++
# # implementation emits as "1".
# candidate = candidate.replace('1.0', '1', 2)
# candidate = candidate.replace('0.0', '0', 2)
# self.assertMultiLineEqual(candidate, ''.join(golden_lines))
# Tests of proto2-only features (MessageSet, extensions, etc.).
class Proto2Tests(TextFormatBase):
def testPrintMessageSet(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message), 'message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
message = message_set_extensions_pb2.TestMessageSet()
ext = message_set_extensions_pb2.message_set_extension3
message.Extensions[ext].text = 'bar'
self.CompareToGoldenText(
text_format.MessageToString(message),
'[google.protobuf.internal.TestMessageSetExtension3] {\n'
' text: \"bar\"\n'
'}\n')
def testPrintMessageSetByFieldNumber(self):
out = text_format.TextWriter(False)
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
text_format.PrintMessage(message, out, use_field_number=True)
self.CompareToGoldenText(out.getvalue(), '1 {\n'
' 1545008 {\n'
' 15: 23\n'
' }\n'
' 1547769 {\n'
' 25: \"foo\"\n'
' }\n'
'}\n')
out.close()
def testPrintMessageSetAsOneLine(self):
message = unittest_mset_pb2.TestMessageSetContainer()
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
message.message_set.Extensions[ext1].i = 23
message.message_set.Extensions[ext2].str = 'foo'
self.CompareToGoldenText(
text_format.MessageToString(message, as_one_line=True),
'message_set {'
' [protobuf_unittest.TestMessageSetExtension1] {'
' i: 23'
' }'
' [protobuf_unittest.TestMessageSetExtension2] {'
' str: \"foo\"'
' }'
' }')
def testParseMessageSet(self):
message = unittest_pb2.TestAllTypes()
text = ('repeated_uint64: 1\n' 'repeated_uint64: 2\n')
text_format.Parse(text, message)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testExtensionInsideAnyMessage(self):
message = test_extend_any.TestAny()
text = ('value {\n'
' [type.googleapis.com/google.protobuf.internal.TestAny] {\n'
' [google.protobuf.internal.TestAnyExtension1.extension1] {\n'
' i: 10\n'
' }\n'
' }\n'
'}\n')
text_format.Merge(text, message, descriptor_pool=descriptor_pool.Default())
self.CompareToGoldenText(
text_format.MessageToString(
message, descriptor_pool=descriptor_pool.Default()),
text)
def testParseMessageByFieldNumber(self):
message = unittest_pb2.TestAllTypes()
text = ('34: 1\n' 'repeated_uint64: 2\n')
text_format.Parse(text, message, allow_field_number=True)
self.assertEqual(1, message.repeated_uint64[0])
self.assertEqual(2, message.repeated_uint64[1])
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('1 {\n'
' 1545008 {\n'
' 15: 23\n'
' }\n'
' 1547769 {\n'
' 25: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_field_number=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
    # Can't parse a field number unless allow_field_number=True is set.
message = unittest_pb2.TestAllTypes()
text = '34:1\n'
six.assertRaisesRegex(self, text_format.ParseError, (
r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"34".'), text_format.Parse, text, message)
# Can't parse if field number is not found.
text = '1234:1\n'
six.assertRaisesRegex(
self,
text_format.ParseError,
(r'1:1 : Message type "\w+.TestAllTypes" has no field named '
r'"1234".'),
text_format.Parse,
text,
message,
allow_field_number=True)
def testPrintAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(message)),
'text_format_unittest_extensions_data.txt')
def testPrintAllExtensionsPointy(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.CompareToGoldenFile(
self.RemoveRedundantZeros(text_format.MessageToString(
message, pointy_brackets=True)),
'text_format_unittest_extensions_data_pointy.txt')
def testParseGoldenExtensions(self):
golden_text = '\n'.join(self.ReadGolden(
'text_format_unittest_extensions_data.txt'))
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(golden_text, parsed_message)
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
self.assertEqual(message, parsed_message)
def testParseAllExtensions(self):
message = unittest_pb2.TestAllExtensions()
test_util.SetAllExtensions(message)
ascii_text = text_format.MessageToString(message)
parsed_message = unittest_pb2.TestAllExtensions()
text_format.Parse(ascii_text, parsed_message)
self.assertEqual(message, parsed_message)
def testParseAllowedUnknownExtension(self):
# Skip over unknown extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [unknown_extension] {\n'
' i: 23\n'
' bin: "\xe0"'
' [nested_unknown_ext]: {\n'
' i: 23\n'
' x: x\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' multiline_str: "abc"\n'
' "def"\n'
' "xyz."\n'
' [nested_unknown_ext.ext]: <\n'
' i: 23\n'
' i: 24\n'
' pointfloat: .3\n'
' test: "test_string"\n'
' floaty_float: -0.315\n'
' num: -inf\n'
' long_string: "test" "test2" \n'
' >\n'
' }\n'
' }\n'
' [unknown_extension]: 5\n'
' [unknown_extension_with_number_field] {\n'
' 1: "some_field"\n'
' 2: -0.451\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
golden = 'message_set {\n}\n'
self.CompareToGoldenText(text_format.MessageToString(message), golden)
# Catch parse errors in unknown extension.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' i:\n' # Missing value.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: }',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed string\n' # Missing closing quote.
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [unknown_extension] {\n'
' str: "malformed\n multiline\n string\n'
' }\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'Invalid field value: "',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' [malformed_extension] <\n'
' i: -5\n'
' \n' # Missing '>' here.
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
'5:1 : Expected ">".',
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
# Don't allow unknown fields with allow_unknown_extension=True.
message = unittest_mset_pb2.TestMessageSetContainer()
malformed = ('message_set {\n'
' unknown_field: true\n'
'}\n')
six.assertRaisesRegex(self,
text_format.ParseError,
('2:3 : Message type '
'"proto2_wireformat_unittest.TestMessageSet" has no'
' field named "unknown_field".'),
text_format.Parse,
malformed,
message,
allow_unknown_extension=True)
# Parse known extension correctly.
message = unittest_mset_pb2.TestMessageSetContainer()
text = ('message_set {\n'
' [protobuf_unittest.TestMessageSetExtension1] {\n'
' i: 23\n'
' }\n'
' [protobuf_unittest.TestMessageSetExtension2] {\n'
' str: \"foo\"\n'
' }\n'
'}\n')
text_format.Parse(text, message, allow_unknown_extension=True)
ext1 = unittest_mset_pb2.TestMessageSetExtension1.message_set_extension
ext2 = unittest_mset_pb2.TestMessageSetExtension2.message_set_extension
self.assertEqual(23, message.message_set.Extensions[ext1].i)
self.assertEqual('foo', message.message_set.Extensions[ext2].str)
def testParseBadIdentifier(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { "bb": 1 }')
with self.assertRaises(text_format.ParseError) as e:
text_format.Parse(text, message)
self.assertEqual(str(e.exception),
'1:27 : Expected identifier or number, got "bb".')
def testParseBadExtension(self):
message = unittest_pb2.TestAllExtensions()
text = '[unknown_extension]: 8\n'
six.assertRaisesRegex(self, text_format.ParseError,
'1:2 : Extension "unknown_extension" not registered.',
text_format.Parse, text, message)
message = unittest_pb2.TestAllTypes()
six.assertRaisesRegex(self, text_format.ParseError, (
'1:2 : Message type "protobuf_unittest.TestAllTypes" does not have '
'extensions.'), text_format.Parse, text, message)
def testParseNumericUnknownEnum(self):
message = unittest_pb2.TestAllTypes()
text = 'optional_nested_enum: 100'
six.assertRaisesRegex(self, text_format.ParseError,
(r'1:23 : Enum type "\w+.TestAllTypes.NestedEnum" '
r'has no value with number 100.'), text_format.Parse,
text, message)
def testMergeDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
text_format.Merge(text, message)
self.assertEqual(67,
message.Extensions[unittest_pb2.optional_int32_extension])
def testParseDuplicateExtensionScalars(self):
message = unittest_pb2.TestAllExtensions()
text = ('[protobuf_unittest.optional_int32_extension]: 42 '
'[protobuf_unittest.optional_int32_extension]: 67')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:96 : Message type "protobuf_unittest.TestAllExtensions" '
'should not have multiple '
'"protobuf_unittest.optional_int32_extension" extensions.'),
text_format.Parse, text, message)
def testParseDuplicateNestedMessageScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_nested_message { bb: 1 } '
'optional_nested_message { bb: 2 }')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:65 : Message type "protobuf_unittest.TestAllTypes.NestedMessage" '
'should not have multiple "bb" fields.'), text_format.Parse, text,
message)
def testParseDuplicateScalars(self):
message = unittest_pb2.TestAllTypes()
text = ('optional_int32: 42 ' 'optional_int32: 67')
six.assertRaisesRegex(self, text_format.ParseError, (
'1:36 : Message type "protobuf_unittest.TestAllTypes" should not '
'have multiple "optional_int32" fields.'), text_format.Parse, text,
message)
def testParseGroupNotClosed(self):
message = unittest_pb2.TestAllTypes()
text = 'RepeatedGroup: <'
six.assertRaisesRegex(self, text_format.ParseError, '1:16 : Expected ">".',
text_format.Parse, text, message)
text = 'RepeatedGroup: {'
six.assertRaisesRegex(self, text_format.ParseError, '1:16 : Expected "}".',
text_format.Parse, text, message)
def testParseEmptyGroup(self):
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: {}'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
message.Clear()
message = unittest_pb2.TestAllTypes()
text = 'OptionalGroup: <>'
text_format.Parse(text, message)
self.assertTrue(message.HasField('optionalgroup'))
# Maps aren't really proto2-only, but our test schema only has maps for
# proto2.
def testParseMap(self):
text = ('map_int32_int32 {\n'
' key: -123\n'
' value: -456\n'
'}\n'
'map_int64_int64 {\n'
' key: -8589934592\n'
' value: -17179869184\n'
'}\n'
'map_uint32_uint32 {\n'
' key: 123\n'
' value: 456\n'
'}\n'
'map_uint64_uint64 {\n'
' key: 8589934592\n'
' value: 17179869184\n'
'}\n'
'map_string_string {\n'
' key: "abc"\n'
' value: "123"\n'
'}\n'
'map_int32_foreign_message {\n'
' key: 111\n'
' value {\n'
' c: 5\n'
' }\n'
'}\n')
message = map_unittest_pb2.TestMap()
text_format.Parse(text, message)
self.assertEqual(-456, message.map_int32_int32[-123])
self.assertEqual(-2**34, message.map_int64_int64[-2**33])
self.assertEqual(456, message.map_uint32_uint32[123])
self.assertEqual(2**34, message.map_uint64_uint64[2**33])
self.assertEqual('123', message.map_string_string['abc'])
self.assertEqual(5, message.map_int32_foreign_message[111].c)
class Proto3Tests(unittest.TestCase):
def testPrintMessageExpandAny(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
descriptor_pool=descriptor_pool.Default()),
'any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
def testPrintMessageExpandAnyRepeated(self):
packed_message = unittest_pb2.OneString()
message = any_test_pb2.TestAny()
packed_message.data = 'string0'
message.repeated_any_value.add().Pack(packed_message)
packed_message.data = 'string1'
message.repeated_any_value.add().Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message),
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string0"\n'
' }\n'
'}\n'
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string1"\n'
' }\n'
'}\n')
def testPrintMessageExpandAnyDescriptorPoolMissingType(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
empty_pool = descriptor_pool.DescriptorPool()
self.assertEqual(
text_format.MessageToString(message, descriptor_pool=empty_pool),
'any_value {\n'
' type_url: "type.googleapis.com/protobuf_unittest.OneString"\n'
' value: "\\n\\006string"\n'
'}\n')
def testPrintMessageExpandAnyPointyBrackets(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
pointy_brackets=True),
'any_value <\n'
' [type.googleapis.com/protobuf_unittest.OneString] <\n'
' data: "string"\n'
' >\n'
'>\n')
def testPrintMessageExpandAnyAsOneLine(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
as_one_line=True),
'any_value {'
' [type.googleapis.com/protobuf_unittest.OneString]'
' { data: "string" } '
'}')
def testPrintMessageExpandAnyAsOneLinePointyBrackets(self):
packed_message = unittest_pb2.OneString()
packed_message.data = 'string'
message = any_test_pb2.TestAny()
message.any_value.Pack(packed_message)
self.assertEqual(
text_format.MessageToString(message,
as_one_line=True,
pointy_brackets=True,
descriptor_pool=descriptor_pool.Default()),
'any_value <'
' [type.googleapis.com/protobuf_unittest.OneString]'
' < data: "string" > '
'>')
def testUnknownEnums(self):
message = unittest_proto3_arena_pb2.TestAllTypes()
message2 = unittest_proto3_arena_pb2.TestAllTypes()
message.optional_nested_enum = 999
text_string = text_format.MessageToString(message)
text_format.Parse(text_string, message2)
self.assertEqual(999, message2.optional_nested_enum)
def testMergeExpandedAny(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
message.Clear()
text_format.Parse(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeExpandedAnyRepeated(self):
message = any_test_pb2.TestAny()
text = ('repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string0"\n'
' }\n'
'}\n'
'repeated_any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string1"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.repeated_any_value[0].Unpack(packed_message)
self.assertEqual('string0', packed_message.data)
message.repeated_any_value[1].Unpack(packed_message)
self.assertEqual('string1', packed_message.data)
def testMergeExpandedAnyPointyBrackets(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] <\n'
' data: "string"\n'
' >\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeAlternativeUrl(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.otherapi.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
self.assertEqual('type.otherapi.com/protobuf_unittest.OneString',
message.any_value.type_url)
def testMergeExpandedAnyDescriptorPoolMissingType(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n'
' }\n'
'}\n')
with self.assertRaises(text_format.ParseError) as e:
empty_pool = descriptor_pool.DescriptorPool()
text_format.Merge(text, message, descriptor_pool=empty_pool)
self.assertEqual(
str(e.exception),
'Type protobuf_unittest.OneString not found in descriptor pool')
def testMergeUnexpandedAny(self):
text = ('any_value {\n'
' type_url: "type.googleapis.com/protobuf_unittest.OneString"\n'
' value: "\\n\\006string"\n'
'}\n')
message = any_test_pb2.TestAny()
text_format.Merge(text, message)
packed_message = unittest_pb2.OneString()
message.any_value.Unpack(packed_message)
self.assertEqual('string', packed_message.data)
def testMergeMissingAnyEndToken(self):
message = any_test_pb2.TestAny()
text = ('any_value {\n'
' [type.googleapis.com/protobuf_unittest.OneString] {\n'
' data: "string"\n')
with self.assertRaises(text_format.ParseError) as e:
text_format.Merge(text, message)
self.assertEqual(str(e.exception), '3:11 : Expected "}".')
class TokenizerTest(unittest.TestCase):
def testSimpleTokenCases(self):
text = ('identifier1:"string1"\n \n\n'
'identifier2 : \n \n123 \n identifier3 :\'string\'\n'
'identifiER_4 : 1.1e+2 ID5:-0.23 ID6:\'aaaa\\\'bbbb\'\n'
'ID7 : "aa\\"bb"\n\n\n\n ID8: {A:inf B:-inf C:true D:false}\n'
'ID9: 22 ID10: -111111111111111111 ID11: -22\n'
'ID12: 2222222222222222222 ID13: 1.23456f ID14: 1.2e+2f '
'false_bool: 0 true_BOOL:t \n true_bool1: 1 false_BOOL1:f '
'False_bool: False True_bool: True X:iNf Y:-inF Z:nAN')
tokenizer = text_format.Tokenizer(text.splitlines())
methods = [(tokenizer.ConsumeIdentifier, 'identifier1'), ':',
(tokenizer.ConsumeString, 'string1'),
(tokenizer.ConsumeIdentifier, 'identifier2'), ':',
(tokenizer.ConsumeInteger, 123),
(tokenizer.ConsumeIdentifier, 'identifier3'), ':',
(tokenizer.ConsumeString, 'string'),
(tokenizer.ConsumeIdentifier, 'identifiER_4'), ':',
(tokenizer.ConsumeFloat, 1.1e+2),
(tokenizer.ConsumeIdentifier, 'ID5'), ':',
(tokenizer.ConsumeFloat, -0.23),
(tokenizer.ConsumeIdentifier, 'ID6'), ':',
(tokenizer.ConsumeString, 'aaaa\'bbbb'),
(tokenizer.ConsumeIdentifier, 'ID7'), ':',
(tokenizer.ConsumeString, 'aa\"bb'),
(tokenizer.ConsumeIdentifier, 'ID8'), ':', '{',
(tokenizer.ConsumeIdentifier, 'A'), ':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'B'), ':',
(tokenizer.ConsumeFloat, -float('inf')),
(tokenizer.ConsumeIdentifier, 'C'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'D'), ':',
(tokenizer.ConsumeBool, False), '}',
(tokenizer.ConsumeIdentifier, 'ID9'), ':',
(tokenizer.ConsumeInteger, 22),
(tokenizer.ConsumeIdentifier, 'ID10'), ':',
(tokenizer.ConsumeInteger, -111111111111111111),
(tokenizer.ConsumeIdentifier, 'ID11'), ':',
(tokenizer.ConsumeInteger, -22),
(tokenizer.ConsumeIdentifier, 'ID12'), ':',
(tokenizer.ConsumeInteger, 2222222222222222222),
(tokenizer.ConsumeIdentifier, 'ID13'), ':',
(tokenizer.ConsumeFloat, 1.23456),
(tokenizer.ConsumeIdentifier, 'ID14'), ':',
(tokenizer.ConsumeFloat, 1.2e+2),
(tokenizer.ConsumeIdentifier, 'false_bool'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'true_BOOL'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'true_bool1'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'false_BOOL1'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'False_bool'), ':',
(tokenizer.ConsumeBool, False),
(tokenizer.ConsumeIdentifier, 'True_bool'), ':',
(tokenizer.ConsumeBool, True),
(tokenizer.ConsumeIdentifier, 'X'), ':',
(tokenizer.ConsumeFloat, float('inf')),
(tokenizer.ConsumeIdentifier, 'Y'), ':',
(tokenizer.ConsumeFloat, float('-inf')),
(tokenizer.ConsumeIdentifier, 'Z'), ':',
(tokenizer.ConsumeFloat, float('nan'))]
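    # Each entry above is either a bare punctuation token (checked against
    # tokenizer.token and skipped with NextToken) or a
    # (consume_method, expected_value) pair; NaN expectations are verified
    # with math.isnan below, since NaN != NaN.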
i = 0
while not tokenizer.AtEnd():
m = methods[i]
if isinstance(m, str):
token = tokenizer.token
self.assertEqual(token, m)
tokenizer.NextToken()
elif isinstance(m[1], float) and math.isnan(m[1]):
self.assertTrue(math.isnan(m[0]()))
else:
self.assertEqual(m[1], m[0]())
i += 1
def testConsumeAbstractIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(-1, tokenizer.ConsumeInteger())
self.assertEqual(uint32_max + 1, tokenizer.ConsumeInteger())
self.assertEqual(int64_max + 1, tokenizer.ConsumeInteger())
self.assertTrue(tokenizer.AtEnd())
text = '-0 0 0 1.2'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(0, tokenizer.ConsumeInteger())
self.assertEqual(0, tokenizer.ConsumeInteger())
self.assertEqual(True, tokenizer.TryConsumeInteger())
self.assertEqual(False, tokenizer.TryConsumeInteger())
with self.assertRaises(text_format.ParseError):
tokenizer.ConsumeInteger()
self.assertEqual(1.2, tokenizer.ConsumeFloat())
self.assertTrue(tokenizer.AtEnd())
def testConsumeIntegers(self):
# This test only tests the failures in the integer parsing methods as well
# as the '0' special cases.
int64_max = (1 << 63) - 1
uint32_max = (1 << 32) - 1
text = '-1 %d %d' % (uint32_max + 1, int64_max + 1)
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint32, tokenizer)
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint64, tokenizer)
self.assertEqual(-1, text_format._ConsumeInt32(tokenizer))
self.assertRaises(text_format.ParseError,
text_format._ConsumeUint32, tokenizer)
self.assertRaises(text_format.ParseError,
text_format._ConsumeInt32, tokenizer)
self.assertEqual(uint32_max + 1, text_format._ConsumeInt64(tokenizer))
self.assertRaises(text_format.ParseError,
text_format._ConsumeInt64, tokenizer)
self.assertEqual(int64_max + 1, text_format._ConsumeUint64(tokenizer))
self.assertTrue(tokenizer.AtEnd())
text = '-0 -0 0 0'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
self.assertEqual(0, text_format._ConsumeUint32(tokenizer))
self.assertEqual(0, text_format._ConsumeUint64(tokenizer))
self.assertTrue(tokenizer.AtEnd())
def testConsumeByteString(self):
text = '"string1\''
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = 'string1"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\xt"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
text = '\n"\\x"'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeByteString)
def testConsumeBool(self):
text = 'not-a-bool'
tokenizer = text_format.Tokenizer(text.splitlines())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeBool)
def testSkipComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines())
self.assertTrue(tokenizer.AtEnd())
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
def testConsumeComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines(),
skip_comments=False)
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTwoComments(self):
text = '# some comment\n# another comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# another comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTrailingComment(self):
text = 'some_number: 4\n# some comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertRaises(text_format.ParseError, tokenizer.ConsumeComment)
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual('# some comment', tokenizer.ConsumeComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeLineComment(self):
tokenizer = text_format.Tokenizer('# some comment'.splitlines(),
skip_comments=False)
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((False, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeTwoLineComments(self):
text = '# some comment\n# another comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual((False, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((False, '# another comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testConsumeAndCheckTrailingComment(self):
text = 'some_number: 4 # some comment' # trailing comment on the same line
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertRaises(text_format.ParseError,
tokenizer.ConsumeCommentOrTrailingComment)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertRaises(text_format.ParseError,
tokenizer.ConsumeCommentOrTrailingComment)
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertFalse(tokenizer.AtEnd())
self.assertEqual((True, '# some comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
def testHashinComment(self):
text = 'some_number: 4 # some comment # not a new comment'
tokenizer = text_format.Tokenizer(text.splitlines(), skip_comments=False)
self.assertEqual('some_number', tokenizer.ConsumeIdentifier())
self.assertEqual(tokenizer.token, ':')
tokenizer.NextToken()
self.assertEqual(4, tokenizer.ConsumeInteger())
self.assertEqual((True, '# some comment # not a new comment'),
tokenizer.ConsumeCommentOrTrailingComment())
self.assertTrue(tokenizer.AtEnd())
# Tests for pretty printer functionality.
@_parameterized.Parameters((unittest_pb2), (unittest_proto3_arena_pb2))
class PrettyPrinterTest(TextFormatBase):
def testPrettyPrintNoMatch(self, message_module):
def printer(message, indent, as_one_line):
del message, indent, as_one_line
return None
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'repeated_nested_message { bb: 42 }')
def testPrettyPrintOneLine(self, message_module):
def printer(m, indent, as_one_line):
del indent, as_one_line
if m.DESCRIPTOR == message_module.TestAllTypes.NestedMessage.DESCRIPTOR:
return 'My lucky number is %s' % m.bb
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'repeated_nested_message { My lucky number is 42 }')
def testPrettyPrintMultiLine(self, message_module):
def printer(m, indent, as_one_line):
if m.DESCRIPTOR == message_module.TestAllTypes.NestedMessage.DESCRIPTOR:
line_deliminator = (' ' if as_one_line else '\n') + ' ' * indent
return 'My lucky number is:%s%s' % (line_deliminator, m.bb)
return None
message = message_module.TestAllTypes()
msg = message.repeated_nested_message.add()
msg.bb = 42
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'repeated_nested_message { My lucky number is: 42 }')
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=False, message_formatter=printer),
'repeated_nested_message {\n My lucky number is:\n 42\n}\n')
def testPrettyPrintEntireMessage(self, message_module):
def printer(m, indent, as_one_line):
del indent, as_one_line
if m.DESCRIPTOR == message_module.TestAllTypes.DESCRIPTOR:
return 'The is the message!'
return None
message = message_module.TestAllTypes()
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=False, message_formatter=printer),
'The is the message!\n')
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
'The is the message!')
def testPrettyPrintMultipleParts(self, message_module):
def printer(m, indent, as_one_line):
del indent, as_one_line
if m.DESCRIPTOR == message_module.TestAllTypes.NestedMessage.DESCRIPTOR:
return 'My lucky number is %s' % m.bb
return None
message = message_module.TestAllTypes()
message.optional_int32 = 61
msg = message.repeated_nested_message.add()
msg.bb = 42
msg = message.repeated_nested_message.add()
msg.bb = 99
msg = message.optional_nested_message
msg.bb = 1
self.CompareToGoldenText(
text_format.MessageToString(
message, as_one_line=True, message_formatter=printer),
('optional_int32: 61 '
'optional_nested_message { My lucky number is 1 } '
'repeated_nested_message { My lucky number is 42 } '
'repeated_nested_message { My lucky number is 99 }'))
if __name__ == '__main__':
unittest.main()
|
en
| 0.781157
|
#! /usr/bin/env python # -*- coding: utf-8 -*- # # Protocol Buffers - Google's data interchange format # Copyright 2008 Google Inc. All rights reserved. # https://developers.google.com/protocol-buffers/ # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Test for google.protobuf.text_format. # PY26, pylint: disable=g-import-not-at-top # pylint: disable=g-import-not-at-top # Low-level nuts-n-bolts tests. # The members of _QUOTES are formatted into a regexp template that # expects single characters. Therefore it's an error (in addition to being # non-sensical in the first place) to try to specify a "quote mark" that is # more than one character. # Base class with some common functionality. # PY3 # Some platforms print 1e+5 as 1e+005. This is fine, but we need to remove # these zeros in order to match the golden file. # Floating point fields are printed with .0 suffix even if they are # actualy integer numbers. # Test as_utf8 = False. # Test as_utf8 = True. # Check that float_format argument is passed to sub-message formatting. # We use 1.25 as it is a round number in binary. The proto 32-bit float # will not gain additional imprecise digits as a 64-bit Python float and # show up in its str. 32-bit 1.2 is noisy when extended to 64-bit: # >>> struct.unpack('f', struct.pack('f', 1.2))[0] # 1.2000000476837158 # >>> struct.unpack('f', struct.pack('f', 1.25))[0] # 1.25 # Check rounding at 15 significant digits # Check no decimal point. # Check no trailing zeros. # as_one_line=True is a separate code branch where float_format is passed. # Test Printer # Test Printer repeated_string: "\xf\x62" repeated_string: "\\xf\\x62" repeated_string: "\\\xf\\\x62" repeated_string: "\\\\xf\\\\x62" repeated_string: "\\\\\xf\\\\\x62" repeated_string: "\x5cx20" # These are tests that aren't fundamentally specific to proto2, but are at # the moment because of differences between the proto2 and proto3 test schemas. # Ideally the schemas would be made more similar so these tests could pass. # Maps are serialized to text format using their underlying repeated # representation. 
# TODO(teboring): In c/137553523, not serializing default value for map entry # message has been fixed. This test needs to be disabled in order to submit # that cl. Add this back when c/137553523 has been submitted. # def testMapOrderSemantics(self): # golden_lines = self.ReadGolden('map_test_data.txt') # message = map_unittest_pb2.TestMap() # text_format.ParseLines(golden_lines, message) # candidate = text_format.MessageToString(message) # # The Python implementation emits "1.0" for the double value that the C++ # # implementation emits as "1". # candidate = candidate.replace('1.0', '1', 2) # candidate = candidate.replace('0.0', '0', 2) # self.assertMultiLineEqual(candidate, ''.join(golden_lines)) # Tests of proto2-only features (MessageSet, extensions, etc.). # Can't parse field number without set allow_field_number=True. # Can't parse if field number is not found. # Skip over unknown extension correctly. # Catch parse errors in unknown extension. # Missing value. # Missing closing quote. # Missing '>' here. # Don't allow unknown fields with allow_unknown_extension=True. # Parse known extension correctly. # Maps aren't really proto2-only, but our test schema only has maps for # proto2. # This test only tests the failures in the integer parsing methods as well # as the '0' special cases. # This test only tests the failures in the integer parsing methods as well # as the '0' special cases. # another comment' # some comment' # another comment' # some comment' # trailing comment on the same line # some comment # not a new comment' # not a new comment'), # Tests for pretty printer functionality.
| 1.620763
| 2
|
ambari-server/src/main/resources/stacks/ADH/1.5/services/HIVE/package/scripts/hive_server_interactive.py
|
kuhella/ambari
| 0
|
6627149
|
<gh_stars>0
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import subprocess
import os
import re
import time
import shutil
from datetime import datetime
import json
# Ambari Commons & Resource Management imports
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.core.source import InlineTemplate
from resource_management.core.resources.system import Execute, Directory
# Imports needed for Rolling/Express Upgrade
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.core import shell
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from ambari_commons import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
# Local Imports
from setup_ranger_hive import setup_ranger_hive
from hive_service_interactive import hive_service_interactive
from hive_interactive import hive_interactive
from hive_server import HiveServerDefault
from setup_ranger_hive_interactive import setup_ranger_hive_interactive
import traceback
class HiveServerInteractive(Script):
pass
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HiveServerInteractiveDefault(HiveServerInteractive):
def get_component_name(self):
return "hive-server2-hive2"
def install(self, env):
import params
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
hive_interactive(name='hiveserver2')
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Hive Server Interactive Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
stack_select.select("hive-server2-hive2", params.version)
conf_select.select(params.stack_name, "hive2", params.version)
# Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
resource_created = copy_to_hdfs(
"hive2",
params.user_group,
params.hdfs_user,
skip=params.sysprep_skip_copy_tarballs_hdfs)
resource_created = copy_to_hdfs(
"tez_hive2",
params.user_group,
params.hdfs_user,
skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
if resource_created:
params.HdfsResource(None, action="execute")
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
if params.security_enabled:
# Do the security setup, internally calls do_kinit()
self.setup_security()
    # TODO : We need to have conditional [re]start of LLAP once "status check command" for LLAP is ready.
# Check status and based on that decide on [re]starting.
# Start LLAP before Hive Server Interactive start.
status = self._llap_start(env)
if not status:
raise Fail("Skipping START of Hive Server Interactive since LLAP app couldn't be STARTED.")
# TODO : test the workability of Ranger and Hive2 during upgrade
setup_ranger_hive_interactive(upgrade_type=upgrade_type)
hive_service_interactive('hiveserver2', action='start', upgrade_type=upgrade_type)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.security_enabled:
self.do_kinit()
# Stop Hive Interactive Server first
hive_service_interactive('hiveserver2', action='stop')
if not params.is_restart_command:
self._llap_stop(env)
else:
Logger.info("LLAP stop is skipped as its a restart command")
def status(self, env):
import status_params
env.set_params(status_params)
    # The 'llap' status check is not done here as part of the 'HSI' status check,
    # because it is a heavyweight operation.
pid_file = format("{hive_pid_dir}/{hive_interactive_pid}")
    # Check the HiveServer2 Interactive pid file
check_process_status(pid_file)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
props_value_check = {"hive.server2.authentication": "KERBEROS",
"hive.metastore.sasl.enabled": "true",
"hive.security.authorization.enabled": "true"}
props_empty_check = ["hive.server2.authentication.kerberos.keytab",
"hive.server2.authentication.kerberos.principal",
"hive.server2.authentication.spnego.principal",
"hive.server2.authentication.spnego.keytab"]
props_read_check = ["hive.server2.authentication.kerberos.keytab",
"hive.server2.authentication.spnego.keytab"]
hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
props_read_check)
hive_expectations ={}
hive_expectations.update(hive_site_props)
security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir,
{'hive-site.xml': FILE_TYPE_XML})
result_issues = validate_security_config_properties(security_params, hive_expectations)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if 'hive-site' not in security_params \
or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \
or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.hive_user,
security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
status_params.hostname,
status_params.tmp_dir)
cached_kinit_executor(status_params.kinit_path_local,
status_params.hive_user,
security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
security_params['hive-site']['hive.server2.authentication.spnego.principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
def restart_llap(self, env):
"""
Custom command to Restart LLAP
"""
Logger.info("Custom Command to retart LLAP")
import params
env.set_params(params)
if params.security_enabled:
self.do_kinit()
self._llap_stop(env)
self._llap_start(env)
def _llap_stop(self, env):
import params
Logger.info("Stopping LLAP")
stop_cmd = ["slider", "stop", params.llap_app_name]
code, output, error = shell.call(stop_cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True)
if code == 0:
Logger.info(format("Stopped {params.llap_app_name} application on Slider successfully"))
elif code == 69 and output is not None and "Unknown application instance" in output:
Logger.info(format("Application {params.llap_app_name} was already stopped on Slider"))
else:
raise Fail(format("Could not stop application {params.llap_app_name} on Slider. {error}\n{output}"))
# Will exit with code 4 if need to run with "--force" to delete directories and registries.
Execute(('slider', 'destroy', params.llap_app_name, "--force"),
user=params.hive_user,
timeout=30,
ignore_failures=True,
)
"""
Controls the start of LLAP.
"""
def _llap_start(self, env, cleanup=False):
import params
env.set_params(params)
if params.hive_server_interactive_ha:
"""
Check llap app state
"""
Logger.info("HSI HA is enabled. Checking if LLAP is already running ...")
if params.stack_supports_hive_interactive_ga:
status = self.check_llap_app_status_in_llap_ga(params.llap_app_name, 2, params.hive_server_interactive_ha)
else:
status = self.check_llap_app_status_in_llap_tp(params.llap_app_name, 2, params.hive_server_interactive_ha)
if status:
Logger.info("LLAP app '{0}' is already running.".format(params.llap_app_name))
return True
else:
Logger.info("LLAP app '{0}' is not running. llap will be started.".format(params.llap_app_name))
pass
# Call for cleaning up the earlier run(s) LLAP package folders.
self._cleanup_past_llap_package_dirs()
Logger.info("Starting LLAP")
LLAP_PACKAGE_CREATION_PATH = Script.get_tmp_dir()
unique_name = "llap-slider%s" % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
cmd = format("/usr/lib/hive/bin/hive --service llap --slider-am-container-mb {params.slider_am_container_mb} "
"--size 3072m --cache 1024m --xmx {params.llap_heap_size}m "
"--loglevel {params.llap_log_level} {params.llap_extra_slider_opts} --output {LLAP_PACKAGE_CREATION_PATH}/{unique_name}")
# Append params that are supported from Hive llap GA version.
if params.stack_supports_hive_interactive_ga:
# Figure out the Slider Anti-affinity to be used.
      # YARN does not support anti-affinity, and therefore Slider implements AA by means of exclusion lists, i.e., it
# starts containers one by one and excludes the nodes it gets (adding a delay of ~2sec./machine). When the LLAP
# container memory size configuration is more than half of YARN node memory, AA is implicit and should be avoided.
slider_placement = 4
if long(params.llap_daemon_container_size) > (0.5 * long(params.yarn_nm_mem)):
slider_placement = 0
Logger.info("Setting slider_placement : 0, as llap_daemon_container_size : {0} > 0.5 * "
"YARN NodeManager Memory({1})".format(params.llap_daemon_container_size, params.yarn_nm_mem))
else:
Logger.info("Setting slider_placement: 4, as llap_daemon_container_size : {0} <= 0.5 * "
"YARN NodeManager Memory({1})".format(params.llap_daemon_container_size, params.yarn_nm_mem))
cmd += format(" --slider-placement {slider_placement} --skiphadoopversion --skiphbasecp --instances {params.num_llap_daemon_running_nodes}")
# Setup the logger for the ga version only
cmd += format(" --logger {params.llap_logger}")
else:
cmd += format(" --instances {params.num_llap_nodes}")
if params.security_enabled:
llap_keytab_splits = params.hive_llap_keytab_file.split("/")
Logger.debug("llap_keytab_splits : {0}".format(llap_keytab_splits))
cmd += format(" --slider-keytab-dir .slider/keytabs/{params.hive_user}/ --slider-keytab "
"{llap_keytab_splits[4]} --slider-principal {params.hive_llap_principal}")
    # Add the aux jars if they are specified. If empty, we don't need to add this param.
if params.hive_aux_jars:
cmd+= format(" --auxjars {params.hive_aux_jars}")
# Append args.
llap_java_args = InlineTemplate(params.llap_app_java_opts).get_content()
cmd += format(" --args \" {llap_java_args}\"")
# Append metaspace size to args.
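    # Note: cmd currently ends with the closing double quote of the --args
    # value; the slice below strips that quote, appends -XX:MetaspaceSize
    # inside the quoted JVM args, and then re-appends the closing quote.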
if params.java_version > 7 and params.llap_daemon_container_size > 4096:
if params.llap_daemon_container_size <= 32768:
metaspaceSize = "256m"
else:
metaspaceSize = "1024m"
cmd = cmd[:-1] + " -XX:MetaspaceSize="+metaspaceSize+ "\""
run_file_path = None
try:
Logger.info(format("LLAP start command: {cmd}"))
code, output, error = shell.checked_call(cmd, user=params.hive_user, quiet = True, stderr=subprocess.PIPE, logoutput=True)
if code != 0 or output is None:
raise Fail("Command failed with either non-zero return code or no output.")
# E.g., output:
# Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider
exp = r".*Prepared (.*?run.sh) for running LLAP"
run_file_path = None
out_splits = output.split("\n")
for line in out_splits:
line = line.strip()
m = re.match(exp, line, re.I)
if m and len(m.groups()) == 1:
run_file_name = m.group(1)
#run_file_path = os.path.join(params.hive_user_home_dir, run_file_name)
run_file_path = run_file_name
break
if not run_file_path:
raise Fail("Did not find run.sh file in output: " + str(output))
Logger.info(format("Run file path: {run_file_path}"))
Execute(run_file_path, user=params.hive_user, logoutput=True)
Logger.info("Submitted LLAP app name : {0}".format(params.llap_app_name))
      # We need to check the status of the LLAP app to figure out whether it got
      # launched properly and is in a running state, before going ahead with the Hive Server Interactive start.
if params.stack_supports_hive_interactive_ga:
status = self.check_llap_app_status_in_llap_ga(params.llap_app_name, params.num_retries_for_checking_llap_status)
else:
status = self.check_llap_app_status_in_llap_tp(params.llap_app_name, params.num_retries_for_checking_llap_status)
if status:
Logger.info("LLAP app '{0}' deployed successfully.".format(params.llap_app_name))
return True
else:
Logger.error("LLAP app '{0}' deployment unsuccessful.".format(params.llap_app_name))
return False
except:
# Attempt to clean up the packaged application, or potentially rename it with a .bak
if run_file_path is not None and cleanup:
parent_dir = os.path.dirname(run_file_path)
Directory(parent_dir,
action = "delete",
ignore_failures = True,
)
# throw the original exception
raise
"""
  Checks for and deletes 'LLAP package' folders from previous runs, keeping the three latest packages.
  The last three are retained for debugging/reference purposes.
  Helps keep the disk space used in check.
"""
def _cleanup_past_llap_package_dirs(self):
try:
import params
Logger.info("Determining previous run 'LLAP package' folder(s) to be deleted ....")
llap_package_folder_name_prefix = "llap-slider" # Package name is like : llap-sliderYYYY-MM-DD-HH:MM:SS
num_folders_to_retain = 3 # Hardcoding it as of now, as no considerable use was found to provide an env param.
file_names = [dir_name for dir_name in os.listdir(Script.get_tmp_dir())
if dir_name.startswith(llap_package_folder_name_prefix)]
file_names.sort()
del file_names[-num_folders_to_retain:] # Ignore 'num_folders_to_retain' latest package folders.
Logger.info("Previous run 'LLAP package' folder(s) to be deleted = {0}".format(file_names))
if file_names:
for path in file_names:
abs_path = Script.get_tmp_dir()+"/"+path
Directory(abs_path,
action = "delete",
ignore_failures = True
)
else:
Logger.info("No '{0}*' folder deleted.".format(llap_package_folder_name_prefix))
except:
Logger.exception("Exception while doing cleanup for past 'LLAP package(s)':")
"""
Does kinit and copies keytab for Hive/LLAP to HDFS.
"""
def setup_security(self):
import params
self.do_kinit()
# Copy params.hive_llap_keytab_file to hdfs://<host>:<port>/user/<hive_user>/.slider/keytabs/<hive_user> , required by LLAP
slider_keytab_install_cmd = format("slider install-keytab --keytab {params.hive_llap_keytab_file} --folder {params.hive_user} --overwrite")
Execute(slider_keytab_install_cmd, user=params.hive_user)
def do_kinit(self):
import params
hive_interactive_kinit_cmd = format("{kinit_path_local} -kt {params.hive_server2_keytab} {params.hive_principal}; ")
Execute(hive_interactive_kinit_cmd, user=params.hive_user)
llap_kinit_cmd = format("{kinit_path_local} -kt {params.hive_llap_keytab_file} {params.hive_llap_principal}; ")
Execute(llap_kinit_cmd, user=params.hive_user)
"""
Get llap app status data for LLAP Tech Preview code base.
"""
def _get_llap_app_status_info_in_llap_tp(self, app_name):
import status_params
LLAP_APP_STATUS_CMD_TIMEOUT = 0
llap_status_cmd = format("/usr/lib/hive/bin/hive --service llapstatus --name {app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
code, output, error = shell.checked_call(llap_status_cmd, user=status_params.hive_user, stderr=subprocess.PIPE,
logoutput=False)
Logger.info("Received 'llapstatus' command 'output' : {0}".format(output))
if code == 0:
return self._make_valid_json(output)
else:
Logger.info("'LLAP status command' output : ", output)
Logger.info("'LLAP status command' error : ", error)
Logger.info("'LLAP status command' exit code : ", code)
raise Fail("Error getting LLAP app status. ")
"""
Get llap app status data for LLAP GA code base.
Parameters: 'percent_desired_instances_to_be_up' : A value b/w 0.0 and 1.0.
'total_timeout' : Total wait time while checking the status via llapstatus command
'refresh_rate' : Frequency of polling for llapstatus.
"""
def _get_llap_app_status_info_in_llap_ga(self, percent_desired_instances_to_be_up, total_timeout, refresh_rate):
import status_params
    # llapstatus command : llapstatus -w -r <percent containers to wait for to be Up> -i <refresh_rate> -t <total timeout for this command>
# -w : Watch mode waits until all LLAP daemons are running or subset of the nodes are running (threshold can be specified via -r option) (Default wait until all nodes are running)
# -r : When watch mode is enabled (-w), wait until the specified threshold of nodes are running (Default 1.0 which means 100% nodes are running)
# -i : Amount of time in seconds to wait until subsequent status checks in watch mode (Default: 1sec)
# -t : Exit watch mode if the desired state is not attained until the specified timeout (Default: 300sec)
#
# example : llapstatus -w -r 0.8 -i 2 -t 150
llap_status_cmd = format("/usr/lib/hive/bin/hive --service llapstatus -w -r {percent_desired_instances_to_be_up} -i {refresh_rate} -t {total_timeout}")
Logger.info("\n\n\n\n\n");
Logger.info("LLAP status command : {0}".format(llap_status_cmd))
code, output, error = shell.checked_call(llap_status_cmd, user=status_params.hive_user, quiet=True, stderr=subprocess.PIPE,
logoutput=True)
if code == 0:
return self._make_valid_json(output)
else:
Logger.info("'LLAP status command' output : ", output)
Logger.info("'LLAP status command' error : ", error)
Logger.info("'LLAP status command' exit code : ", code)
raise Fail("Error getting LLAP app status. ")
"""
Remove extra lines from 'llapstatus' status output (eg: because of MOTD logging) so as to have a valid JSON data to be passed in
to JSON converter.
"""
def _make_valid_json(self, output):
'''
Note: It is assumed right now that extra lines will be only at the start and not at the end.
Sample expected JSON to be passed for 'loads' is either of the form :
Case 'A':
{
"amInfo" : {
"appName" : "llap0",
"appType" : "org-apache-slider",
"appId" : "APP1",
"containerId" : "container_1466036628595_0010_01_000001",
"hostname" : "hostName",
"amWebUrl" : "http://hostName:port/"
},
"state" : "LAUNCHING",
....
"desiredInstances" : 1,
"liveInstances" : 0,
....
....
}
or
Case 'B':
{
"state" : "APP_NOT_FOUND"
}
'''
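    # Illustrative (hypothetical) example: if the output were
    #   Some MOTD banner line
    #   {
    #     "amInfo" : {
    #     ...
    #   }
    # the scan below locates the first '{' whose next line starts the
    # "amInfo" block (Case 'A') and drops everything before it, so that the
    # remainder can be handed to json.loads.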
splits = output.split("\n")
len_splits = len(splits)
if (len_splits < 3):
raise Fail ("Malformed JSON data received from 'llapstatus' command. Exiting ....")
marker_idx = None # To detect where from to start reading for JSON data
for idx, split in enumerate(splits):
curr_elem = split.strip()
if idx+2 > len_splits:
raise Fail("Iterated over the received 'llapstatus' comamnd. Couldn't validate the received output for JSON parsing.")
next_elem = (splits[(idx + 1)]).strip()
if curr_elem == "{":
if next_elem == "\"amInfo\" : {" and (splits[len_splits-1]).strip() == '}':
# For Case 'A'
marker_idx = idx
break;
elif idx+3 == len_splits and next_elem.startswith('"state" : ') and (splits[idx + 2]).strip() == '}':
# For Case 'B'
marker_idx = idx
break;
# Remove extra logging from possible JSON output
if marker_idx is None:
raise Fail("Couldn't validate the received output for JSON parsing.")
else:
if marker_idx != 0:
del splits[0:marker_idx]
scanned_output = '\n'.join(splits)
llap_app_info = json.loads(scanned_output)
return llap_app_info
"""
Checks llap app status. The states can be : 'COMPLETE', 'APP_NOT_FOUND', 'RUNNING_PARTIAL', 'RUNNING_ALL' & 'LAUNCHING'.
  If the app is in 'APP_NOT_FOUND', 'RUNNING_PARTIAL' or 'LAUNCHING' state:
     we retry up to 'num_retries' times, waiting for the app to reach (1) 'RUNNING_ALL' or (2) 'RUNNING_PARTIAL'
  state with 80% or more of 'desiredInstances' running, and return True;
  otherwise:
     return False.
Parameters: llap_app_name : deployed llap app name.
num_retries : Number of retries to check the LLAP app status.
"""
def check_llap_app_status_in_llap_tp(self, llap_app_name, num_retries, return_immediately_if_stopped=False):
curr_time = time.time()
num_retries = int(num_retries)
if num_retries <= 0:
Logger.info("Read 'num_retries' as : {0}. Setting it to : {1}".format(num_retries, 2))
num_retries = 2
if num_retries > 20000000000:
Logger.info("Read 'num_retries' as : {0}. Setting it to : {1}".format(num_retries, 1000))
num_retries = 1000
@retry(times=num_retries, sleep_time=2, err_class=Fail)
def do_retries():
llap_app_info = self._get_llap_app_status_info_in_llap_tp(llap_app_name)
return self._verify_llap_app_status(llap_app_info, llap_app_name, return_immediately_if_stopped, curr_time)
try:
status = do_retries()
return status
except Exception, e:
Logger.info("LLAP app '{0}' did not come up after a wait of {1} seconds.".format(llap_app_name,
time.time() - curr_time))
traceback.print_exc()
return False
def check_llap_app_status_in_llap_ga(self, llap_app_name, num_retries, return_immediately_if_stopped=False):
curr_time = time.time()
total_timeout = int(num_retries) * 20; # Total wait time while checking the status via llapstatus command
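    # Each configured retry is treated as roughly 20 seconds of wall-clock
    # budget here (an assumption about the intent of the factor); the resulting
    # total_timeout is passed to 'llapstatus -t' via
    # _get_llap_app_status_info_in_llap_ga below.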
Logger.debug("Calculated 'total_timeout' : {0} using config 'num_retries_for_checking_llap_status' : {1}".format(total_timeout, num_retries))
refresh_rate = 2 # Frequency of checking the llapstatus
percent_desired_instances_to_be_up = 80 # Out of 100.
llap_app_info = self._get_llap_app_status_info_in_llap_ga(percent_desired_instances_to_be_up/100.0, total_timeout, refresh_rate)
try:
return self._verify_llap_app_status(llap_app_info, llap_app_name, return_immediately_if_stopped, curr_time)
except Exception as e:
Logger.info(e.message)
return False
def get_log_folder(self):
import params
return params.hive_log_dir
def get_user(self):
import params
return params.hive_user
def _verify_llap_app_status(self, llap_app_info, llap_app_name, return_immediately_if_stopped, curr_time):
if llap_app_info is None or 'state' not in llap_app_info:
Logger.error("Malformed JSON data received for LLAP app. Exiting ....")
return False
# counters based on various states.
live_instances = 0
desired_instances = 0
percent_desired_instances_to_be_up = 80 # Used in 'RUNNING_PARTIAL' state.
if return_immediately_if_stopped and (llap_app_info['state'].upper() in ('APP_NOT_FOUND', 'COMPLETE')):
return False
if llap_app_info['state'].upper() == 'RUNNING_ALL':
Logger.info(
"LLAP app '{0}' in '{1}' state.".format(llap_app_name, llap_app_info['state']))
return True
elif llap_app_info['state'].upper() == 'RUNNING_PARTIAL':
# Check how many instances were up.
if 'liveInstances' in llap_app_info and 'desiredInstances' in llap_app_info:
live_instances = llap_app_info['liveInstances']
desired_instances = llap_app_info['desiredInstances']
else:
Logger.info(
"LLAP app '{0}' is in '{1}' state, but 'instances' information not available in JSON received. " \
"Exiting ....".format(llap_app_name, llap_app_info['state']))
Logger.info(llap_app_info)
return False
if desired_instances == 0:
Logger.info("LLAP app '{0}' desired instance are set to 0. Exiting ....".format(llap_app_name))
return False
percentInstancesUp = 0
if live_instances > 0:
percentInstancesUp = float(live_instances) / desired_instances * 100
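      # e.g. 4 live out of 5 desired daemons -> 80.0, which meets the
      # default 80% threshold checked below.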
if percentInstancesUp >= percent_desired_instances_to_be_up:
Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}' >= {3}% of Desired Instances : " \
"'{4}'.".format(llap_app_name, llap_app_info['state'],
llap_app_info['liveInstances'],
percent_desired_instances_to_be_up,
llap_app_info['desiredInstances']))
return True
else:
Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}'. Desired Instances : " \
"'{3}' after {4} secs.".format(llap_app_name, llap_app_info['state'],
llap_app_info['liveInstances'],
llap_app_info['desiredInstances'],
time.time() - curr_time))
raise Fail("App state is RUNNING_PARTIAL. Live Instances : '{0}', Desired Instance : '{1}'".format(llap_app_info['liveInstances'],
llap_app_info['desiredInstances']))
elif llap_app_info['state'].upper() in ['APP_NOT_FOUND', 'LAUNCHING', 'COMPLETE']:
status_str = format("LLAP app '{0}' current state is {1}.".format(llap_app_name, llap_app_info['state']))
Logger.info(status_str)
raise Fail(status_str)
else: # Covers any unknown that we get.
Logger.info(
"LLAP app '{0}' current state is '{1}'. Expected : 'RUNNING'.".format(llap_app_name, llap_app_info['state']))
return False
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HiveServerInteractiveWindows(HiveServerInteractive):
def status(self, env):
pass
if __name__ == "__main__":
HiveServerInteractive().execute()
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Python Imports
import subprocess
import os
import re
import time
import shutil
from datetime import datetime
import json
# Ambari Commons & Resource Management imports
from resource_management.libraries.script.script import Script
from resource_management.libraries.functions import format
from resource_management.libraries.functions.check_process_status import check_process_status
from resource_management.core.source import InlineTemplate
from resource_management.core.resources.system import Execute, Directory
# Imports needed for Rolling/Express Upgrade
from resource_management.libraries.functions import StackFeature
from resource_management.libraries.functions.stack_features import check_stack_feature
from resource_management.libraries.functions import conf_select
from resource_management.libraries.functions import stack_select
from resource_management.libraries.functions.copy_tarball import copy_to_hdfs
from resource_management.core import shell
from resource_management.core.exceptions import Fail
from resource_management.core.logger import Logger
from ambari_commons import OSCheck, OSConst
from ambari_commons.os_family_impl import OsFamilyImpl
from resource_management.core.exceptions import ComponentIsNotRunning
from resource_management.libraries.functions.decorator import retry
from resource_management.libraries.functions.security_commons import build_expectations, \
cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \
FILE_TYPE_XML
# Local Imports
from setup_ranger_hive import setup_ranger_hive
from hive_service_interactive import hive_service_interactive
from hive_interactive import hive_interactive
from hive_server import HiveServerDefault
from setup_ranger_hive_interactive import setup_ranger_hive_interactive
import traceback
class HiveServerInteractive(Script):
pass
@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT)
class HiveServerInteractiveDefault(HiveServerInteractive):
def get_component_name(self):
return "hive-server2-hive2"
def install(self, env):
import params
self.install_packages(env)
def configure(self, env):
import params
env.set_params(params)
hive_interactive(name='hiveserver2')
def pre_upgrade_restart(self, env, upgrade_type=None):
Logger.info("Executing Hive Server Interactive Stack Upgrade pre-restart")
import params
env.set_params(params)
if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version):
stack_select.select("hive-server2-hive2", params.version)
conf_select.select(params.stack_name, "hive2", params.version)
# Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS
resource_created = copy_to_hdfs(
"hive2",
params.user_group,
params.hdfs_user,
skip=params.sysprep_skip_copy_tarballs_hdfs)
resource_created = copy_to_hdfs(
"tez_hive2",
params.user_group,
params.hdfs_user,
skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created
if resource_created:
params.HdfsResource(None, action="execute")
def start(self, env, upgrade_type=None):
import params
env.set_params(params)
self.configure(env)
if params.security_enabled:
# Do the security setup, internally calls do_kinit()
self.setup_security()
    # TODO : We need to have conditional [re]start of LLAP once "status check command" for LLAP is ready.
# Check status and based on that decide on [re]starting.
# Start LLAP before Hive Server Interactive start.
status = self._llap_start(env)
if not status:
raise Fail("Skipping START of Hive Server Interactive since LLAP app couldn't be STARTED.")
# TODO : test the workability of Ranger and Hive2 during upgrade
setup_ranger_hive_interactive(upgrade_type=upgrade_type)
hive_service_interactive('hiveserver2', action='start', upgrade_type=upgrade_type)
def stop(self, env, upgrade_type=None):
import params
env.set_params(params)
if params.security_enabled:
self.do_kinit()
# Stop Hive Interactive Server first
hive_service_interactive('hiveserver2', action='stop')
if not params.is_restart_command:
self._llap_stop(env)
else:
Logger.info("LLAP stop is skipped as its a restart command")
def status(self, env):
import status_params
env.set_params(status_params)
    # The 'llap' status check is not done here as part of the 'HSI' status check,
    # because it is a heavyweight operation.
pid_file = format("{hive_pid_dir}/{hive_interactive_pid}")
    # Check the HiveServer2 Interactive pid file
check_process_status(pid_file)
def security_status(self, env):
import status_params
env.set_params(status_params)
if status_params.security_enabled:
props_value_check = {"hive.server2.authentication": "KERBEROS",
"hive.metastore.sasl.enabled": "true",
"hive.security.authorization.enabled": "true"}
props_empty_check = ["hive.server2.authentication.kerberos.keytab",
"hive.server2.authentication.kerberos.principal",
"hive.server2.authentication.spnego.principal",
"hive.server2.authentication.spnego.keytab"]
props_read_check = ["hive.server2.authentication.kerberos.keytab",
"hive.server2.authentication.spnego.keytab"]
hive_site_props = build_expectations('hive-site', props_value_check, props_empty_check,
props_read_check)
hive_expectations ={}
hive_expectations.update(hive_site_props)
security_params = get_params_from_filesystem(status_params.hive_server_interactive_conf_dir,
{'hive-site.xml': FILE_TYPE_XML})
result_issues = validate_security_config_properties(security_params, hive_expectations)
if not result_issues: # If all validations passed successfully
try:
# Double check the dict before calling execute
if 'hive-site' not in security_params \
or 'hive.server2.authentication.kerberos.keytab' not in security_params['hive-site'] \
or 'hive.server2.authentication.kerberos.principal' not in security_params['hive-site'] \
or 'hive.server2.authentication.spnego.keytab' not in security_params['hive-site'] \
or 'hive.server2.authentication.spnego.principal' not in security_params['hive-site']:
self.put_structured_out({"securityState": "UNSECURED"})
self.put_structured_out({"securityIssuesFound": "Keytab file or principal are not set property."})
return
cached_kinit_executor(status_params.kinit_path_local,
status_params.hive_user,
security_params['hive-site']['hive.server2.authentication.kerberos.keytab'],
security_params['hive-site']['hive.server2.authentication.kerberos.principal'],
status_params.hostname,
status_params.tmp_dir)
cached_kinit_executor(status_params.kinit_path_local,
status_params.hive_user,
security_params['hive-site']['hive.server2.authentication.spnego.keytab'],
security_params['hive-site']['hive.server2.authentication.spnego.principal'],
status_params.hostname,
status_params.tmp_dir)
self.put_structured_out({"securityState": "SECURED_KERBEROS"})
except Exception as e:
self.put_structured_out({"securityState": "ERROR"})
self.put_structured_out({"securityStateErrorInfo": str(e)})
else:
issues = []
for cf in result_issues:
issues.append("Configuration file %s did not pass the validation. Reason: %s" % (cf, result_issues[cf]))
self.put_structured_out({"securityIssuesFound": ". ".join(issues)})
self.put_structured_out({"securityState": "UNSECURED"})
else:
self.put_structured_out({"securityState": "UNSECURED"})
def restart_llap(self, env):
"""
Custom command to Restart LLAP
"""
Logger.info("Custom Command to retart LLAP")
import params
env.set_params(params)
if params.security_enabled:
self.do_kinit()
self._llap_stop(env)
self._llap_start(env)
def _llap_stop(self, env):
import params
Logger.info("Stopping LLAP")
stop_cmd = ["slider", "stop", params.llap_app_name]
code, output, error = shell.call(stop_cmd, user=params.hive_user, stderr=subprocess.PIPE, logoutput=True)
if code == 0:
Logger.info(format("Stopped {params.llap_app_name} application on Slider successfully"))
elif code == 69 and output is not None and "Unknown application instance" in output:
Logger.info(format("Application {params.llap_app_name} was already stopped on Slider"))
else:
raise Fail(format("Could not stop application {params.llap_app_name} on Slider. {error}\n{output}"))
# Will exit with code 4 if need to run with "--force" to delete directories and registries.
Execute(('slider', 'destroy', params.llap_app_name, "--force"),
user=params.hive_user,
timeout=30,
ignore_failures=True,
)
"""
Controls the start of LLAP.
"""
def _llap_start(self, env, cleanup=False):
import params
env.set_params(params)
if params.hive_server_interactive_ha:
"""
Check llap app state
"""
Logger.info("HSI HA is enabled. Checking if LLAP is already running ...")
if params.stack_supports_hive_interactive_ga:
status = self.check_llap_app_status_in_llap_ga(params.llap_app_name, 2, params.hive_server_interactive_ha)
else:
status = self.check_llap_app_status_in_llap_tp(params.llap_app_name, 2, params.hive_server_interactive_ha)
if status:
Logger.info("LLAP app '{0}' is already running.".format(params.llap_app_name))
return True
else:
Logger.info("LLAP app '{0}' is not running. llap will be started.".format(params.llap_app_name))
pass
# Call for cleaning up the earlier run(s) LLAP package folders.
self._cleanup_past_llap_package_dirs()
Logger.info("Starting LLAP")
LLAP_PACKAGE_CREATION_PATH = Script.get_tmp_dir()
unique_name = "llap-slider%s" % datetime.utcnow().strftime('%Y-%m-%d_%H-%M-%S')
cmd = format("/usr/lib/hive/bin/hive --service llap --slider-am-container-mb {params.slider_am_container_mb} "
"--size 3072m --cache 1024m --xmx {params.llap_heap_size}m "
"--loglevel {params.llap_log_level} {params.llap_extra_slider_opts} --output {LLAP_PACKAGE_CREATION_PATH}/{unique_name}")
# Append params that are supported from Hive llap GA version.
if params.stack_supports_hive_interactive_ga:
# Figure out the Slider Anti-affinity to be used.
      # YARN does not support anti-affinity, and therefore Slider implements AA by means of exclusion lists, i.e., it
# starts containers one by one and excludes the nodes it gets (adding a delay of ~2sec./machine). When the LLAP
# container memory size configuration is more than half of YARN node memory, AA is implicit and should be avoided.
slider_placement = 4
if long(params.llap_daemon_container_size) > (0.5 * long(params.yarn_nm_mem)):
slider_placement = 0
Logger.info("Setting slider_placement : 0, as llap_daemon_container_size : {0} > 0.5 * "
"YARN NodeManager Memory({1})".format(params.llap_daemon_container_size, params.yarn_nm_mem))
else:
Logger.info("Setting slider_placement: 4, as llap_daemon_container_size : {0} <= 0.5 * "
"YARN NodeManager Memory({1})".format(params.llap_daemon_container_size, params.yarn_nm_mem))
cmd += format(" --slider-placement {slider_placement} --skiphadoopversion --skiphbasecp --instances {params.num_llap_daemon_running_nodes}")
# Setup the logger for the ga version only
cmd += format(" --logger {params.llap_logger}")
else:
cmd += format(" --instances {params.num_llap_nodes}")
if params.security_enabled:
llap_keytab_splits = params.hive_llap_keytab_file.split("/")
Logger.debug("llap_keytab_splits : {0}".format(llap_keytab_splits))
cmd += format(" --slider-keytab-dir .slider/keytabs/{params.hive_user}/ --slider-keytab "
"{llap_keytab_splits[4]} --slider-principal {params.hive_llap_principal}")
    # Add the aux jars if they are specified. If empty, we don't need to add this param.
if params.hive_aux_jars:
cmd+= format(" --auxjars {params.hive_aux_jars}")
# Append args.
llap_java_args = InlineTemplate(params.llap_app_java_opts).get_content()
cmd += format(" --args \" {llap_java_args}\"")
# Append metaspace size to args.
if params.java_version > 7 and params.llap_daemon_container_size > 4096:
if params.llap_daemon_container_size <= 32768:
metaspaceSize = "256m"
else:
metaspaceSize = "1024m"
cmd = cmd[:-1] + " -XX:MetaspaceSize="+metaspaceSize+ "\""
run_file_path = None
try:
Logger.info(format("LLAP start command: {cmd}"))
code, output, error = shell.checked_call(cmd, user=params.hive_user, quiet = True, stderr=subprocess.PIPE, logoutput=True)
if code != 0 or output is None:
raise Fail("Command failed with either non-zero return code or no output.")
# E.g., output:
# Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider
exp = r".*Prepared (.*?run.sh) for running LLAP"
run_file_path = None
out_splits = output.split("\n")
for line in out_splits:
line = line.strip()
m = re.match(exp, line, re.I)
if m and len(m.groups()) == 1:
run_file_name = m.group(1)
#run_file_path = os.path.join(params.hive_user_home_dir, run_file_name)
run_file_path = run_file_name
break
if not run_file_path:
raise Fail("Did not find run.sh file in output: " + str(output))
Logger.info(format("Run file path: {run_file_path}"))
Execute(run_file_path, user=params.hive_user, logoutput=True)
Logger.info("Submitted LLAP app name : {0}".format(params.llap_app_name))
      # We need to check the status of the LLAP app to figure out whether it got
      # launched properly and is in a running state, before going ahead with the Hive Server Interactive start.
if params.stack_supports_hive_interactive_ga:
status = self.check_llap_app_status_in_llap_ga(params.llap_app_name, params.num_retries_for_checking_llap_status)
else:
status = self.check_llap_app_status_in_llap_tp(params.llap_app_name, params.num_retries_for_checking_llap_status)
if status:
Logger.info("LLAP app '{0}' deployed successfully.".format(params.llap_app_name))
return True
else:
Logger.error("LLAP app '{0}' deployment unsuccessful.".format(params.llap_app_name))
return False
except:
# Attempt to clean up the packaged application, or potentially rename it with a .bak
if run_file_path is not None and cleanup:
parent_dir = os.path.dirname(run_file_path)
Directory(parent_dir,
action = "delete",
ignore_failures = True,
)
# throw the original exception
raise
"""
  Checks for and deletes 'LLAP package' folders from previous runs, keeping the three latest packages.
  The last three are retained for debugging/reference purposes.
  Helps keep the disk space used in check.
"""
def _cleanup_past_llap_package_dirs(self):
try:
import params
Logger.info("Determining previous run 'LLAP package' folder(s) to be deleted ....")
llap_package_folder_name_prefix = "llap-slider" # Package name is like : llap-sliderYYYY-MM-DD-HH:MM:SS
num_folders_to_retain = 3 # Hardcoding it as of now, as no considerable use was found to provide an env param.
file_names = [dir_name for dir_name in os.listdir(Script.get_tmp_dir())
if dir_name.startswith(llap_package_folder_name_prefix)]
file_names.sort()
del file_names[-num_folders_to_retain:] # Ignore 'num_folders_to_retain' latest package folders.
Logger.info("Previous run 'LLAP package' folder(s) to be deleted = {0}".format(file_names))
if file_names:
for path in file_names:
abs_path = Script.get_tmp_dir()+"/"+path
Directory(abs_path,
action = "delete",
ignore_failures = True
)
else:
Logger.info("No '{0}*' folder deleted.".format(llap_package_folder_name_prefix))
except:
Logger.exception("Exception while doing cleanup for past 'LLAP package(s)':")
"""
Does kinit and copies keytab for Hive/LLAP to HDFS.
"""
def setup_security(self):
import params
self.do_kinit()
# Copy params.hive_llap_keytab_file to hdfs://<host>:<port>/user/<hive_user>/.slider/keytabs/<hive_user> , required by LLAP
slider_keytab_install_cmd = format("slider install-keytab --keytab {params.hive_llap_keytab_file} --folder {params.hive_user} --overwrite")
Execute(slider_keytab_install_cmd, user=params.hive_user)
def do_kinit(self):
import params
hive_interactive_kinit_cmd = format("{kinit_path_local} -kt {params.hive_server2_keytab} {params.hive_principal}; ")
Execute(hive_interactive_kinit_cmd, user=params.hive_user)
llap_kinit_cmd = format("{kinit_path_local} -kt {params.hive_llap_keytab_file} {params.hive_llap_principal}; ")
Execute(llap_kinit_cmd, user=params.hive_user)
"""
Get llap app status data for LLAP Tech Preview code base.
"""
def _get_llap_app_status_info_in_llap_tp(self, app_name):
import status_params
LLAP_APP_STATUS_CMD_TIMEOUT = 0
llap_status_cmd = format("/usr/lib/hive/bin/hive --service llapstatus --name {app_name} --findAppTimeout {LLAP_APP_STATUS_CMD_TIMEOUT}")
code, output, error = shell.checked_call(llap_status_cmd, user=status_params.hive_user, stderr=subprocess.PIPE,
logoutput=False)
Logger.info("Received 'llapstatus' command 'output' : {0}".format(output))
if code == 0:
return self._make_valid_json(output)
else:
Logger.info("'LLAP status command' output : ", output)
Logger.info("'LLAP status command' error : ", error)
Logger.info("'LLAP status command' exit code : ", code)
raise Fail("Error getting LLAP app status. ")
"""
Get llap app status data for LLAP GA code base.
Parameters: 'percent_desired_instances_to_be_up' : A value b/w 0.0 and 1.0.
'total_timeout' : Total wait time while checking the status via llapstatus command
'refresh_rate' : Frequency of polling for llapstatus.
"""
def _get_llap_app_status_info_in_llap_ga(self, percent_desired_instances_to_be_up, total_timeout, refresh_rate):
import status_params
    # llapstatus command : llapstatus -w -r <percent containers to wait for to be Up> -i <refresh_rate> -t <total timeout for this command>
# -w : Watch mode waits until all LLAP daemons are running or subset of the nodes are running (threshold can be specified via -r option) (Default wait until all nodes are running)
# -r : When watch mode is enabled (-w), wait until the specified threshold of nodes are running (Default 1.0 which means 100% nodes are running)
# -i : Amount of time in seconds to wait until subsequent status checks in watch mode (Default: 1sec)
# -t : Exit watch mode if the desired state is not attained until the specified timeout (Default: 300sec)
#
# example : llapstatus -w -r 0.8 -i 2 -t 150
llap_status_cmd = format("/usr/lib/hive/bin/hive --service llapstatus -w -r {percent_desired_instances_to_be_up} -i {refresh_rate} -t {total_timeout}")
    Logger.info("\n\n\n\n\n")
Logger.info("LLAP status command : {0}".format(llap_status_cmd))
code, output, error = shell.checked_call(llap_status_cmd, user=status_params.hive_user, quiet=True, stderr=subprocess.PIPE,
logoutput=True)
if code == 0:
return self._make_valid_json(output)
else:
Logger.info("'LLAP status command' output : ", output)
Logger.info("'LLAP status command' error : ", error)
Logger.info("'LLAP status command' exit code : ", code)
raise Fail("Error getting LLAP app status. ")
"""
  Remove extra lines from the 'llapstatus' output (e.g., caused by MOTD logging) so that valid JSON data
  can be passed to the JSON converter.
"""
def _make_valid_json(self, output):
'''
Note: It is assumed right now that extra lines will be only at the start and not at the end.
Sample expected JSON to be passed for 'loads' is either of the form :
Case 'A':
{
"amInfo" : {
"appName" : "llap0",
"appType" : "org-apache-slider",
"appId" : "APP1",
"containerId" : "container_1466036628595_0010_01_000001",
"hostname" : "hostName",
"amWebUrl" : "http://hostName:port/"
},
"state" : "LAUNCHING",
....
"desiredInstances" : 1,
"liveInstances" : 0,
....
....
}
or
Case 'B':
{
"state" : "APP_NOT_FOUND"
}
'''
splits = output.split("\n")
len_splits = len(splits)
    if len_splits < 3:
      raise Fail("Malformed JSON data received from 'llapstatus' command. Exiting ....")
marker_idx = None # To detect where from to start reading for JSON data
for idx, split in enumerate(splits):
curr_elem = split.strip()
if idx+2 > len_splits:
raise Fail("Iterated over the received 'llapstatus' comamnd. Couldn't validate the received output for JSON parsing.")
next_elem = (splits[(idx + 1)]).strip()
if curr_elem == "{":
if next_elem == "\"amInfo\" : {" and (splits[len_splits-1]).strip() == '}':
# For Case 'A'
marker_idx = idx
          break
elif idx+3 == len_splits and next_elem.startswith('"state" : ') and (splits[idx + 2]).strip() == '}':
# For Case 'B'
marker_idx = idx
          break
# Remove extra logging from possible JSON output
if marker_idx is None:
raise Fail("Couldn't validate the received output for JSON parsing.")
else:
if marker_idx != 0:
del splits[0:marker_idx]
scanned_output = '\n'.join(splits)
llap_app_info = json.loads(scanned_output)
return llap_app_info
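  # Illustrative only (hypothetical llapstatus output, not captured from a real cluster):
  # an input such as
  #   WARNING: Use "yarn jar" to launch YARN applications.
  #   {
  #     "state" : "APP_NOT_FOUND"
  #   }
  # has its leading non-JSON line dropped by _make_valid_json() so that json.loads() succeeds
  # on the remaining text.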
"""
Checks llap app status. The states can be : 'COMPLETE', 'APP_NOT_FOUND', 'RUNNING_PARTIAL', 'RUNNING_ALL' & 'LAUNCHING'.
     if app is in 'APP_NOT_FOUND', 'RUNNING_PARTIAL' or 'LAUNCHING' state:
        we retry up to 'num_retries' times for the app to reach (1) 'RUNNING_ALL' or (2) 'RUNNING_PARTIAL'
        state with 80% or more 'desiredInstances' running, and return True
else :
Return False
Parameters: llap_app_name : deployed llap app name.
num_retries : Number of retries to check the LLAP app status.
"""
def check_llap_app_status_in_llap_tp(self, llap_app_name, num_retries, return_immediately_if_stopped=False):
curr_time = time.time()
num_retries = int(num_retries)
if num_retries <= 0:
Logger.info("Read 'num_retries' as : {0}. Setting it to : {1}".format(num_retries, 2))
num_retries = 2
if num_retries > 20000000000:
Logger.info("Read 'num_retries' as : {0}. Setting it to : {1}".format(num_retries, 1000))
num_retries = 1000
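    # For example (hypothetical value): num_retries = 20 combined with sleep_time = 2 below means
    # the status probe is retried every 2 seconds, i.e. roughly 40 seconds of waiting in the worst
    # case, in addition to the time each llapstatus call itself takes.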
@retry(times=num_retries, sleep_time=2, err_class=Fail)
def do_retries():
llap_app_info = self._get_llap_app_status_info_in_llap_tp(llap_app_name)
return self._verify_llap_app_status(llap_app_info, llap_app_name, return_immediately_if_stopped, curr_time)
try:
status = do_retries()
return status
    except Exception:
Logger.info("LLAP app '{0}' did not come up after a wait of {1} seconds.".format(llap_app_name,
time.time() - curr_time))
traceback.print_exc()
return False
def check_llap_app_status_in_llap_ga(self, llap_app_name, num_retries, return_immediately_if_stopped=False):
curr_time = time.time()
    total_timeout = int(num_retries) * 20 # Total wait time while checking the status via llapstatus command
Logger.debug("Calculated 'total_timeout' : {0} using config 'num_retries_for_checking_llap_status' : {1}".format(total_timeout, num_retries))
refresh_rate = 2 # Frequency of checking the llapstatus
percent_desired_instances_to_be_up = 80 # Out of 100.
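    # For example (hypothetical config value): num_retries_for_checking_llap_status = 30 gives
    # total_timeout = 600 seconds, so llapstatus polls every 2 seconds for up to 10 minutes while
    # waiting for at least 80% of the desired LLAP daemons to come up.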
llap_app_info = self._get_llap_app_status_info_in_llap_ga(percent_desired_instances_to_be_up/100.0, total_timeout, refresh_rate)
try:
return self._verify_llap_app_status(llap_app_info, llap_app_name, return_immediately_if_stopped, curr_time)
except Exception as e:
Logger.info(e.message)
return False
def get_log_folder(self):
import params
return params.hive_log_dir
def get_user(self):
import params
return params.hive_user
def _verify_llap_app_status(self, llap_app_info, llap_app_name, return_immediately_if_stopped, curr_time):
if llap_app_info is None or 'state' not in llap_app_info:
Logger.error("Malformed JSON data received for LLAP app. Exiting ....")
return False
# counters based on various states.
live_instances = 0
desired_instances = 0
percent_desired_instances_to_be_up = 80 # Used in 'RUNNING_PARTIAL' state.
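    # For example (hypothetical numbers): liveInstances = 4 and desiredInstances = 5 gives
    # percentInstancesUp = 80.0, which meets the 80% threshold checked below and is treated as success.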
if return_immediately_if_stopped and (llap_app_info['state'].upper() in ('APP_NOT_FOUND', 'COMPLETE')):
return False
if llap_app_info['state'].upper() == 'RUNNING_ALL':
Logger.info(
"LLAP app '{0}' in '{1}' state.".format(llap_app_name, llap_app_info['state']))
return True
elif llap_app_info['state'].upper() == 'RUNNING_PARTIAL':
# Check how many instances were up.
if 'liveInstances' in llap_app_info and 'desiredInstances' in llap_app_info:
live_instances = llap_app_info['liveInstances']
desired_instances = llap_app_info['desiredInstances']
else:
Logger.info(
"LLAP app '{0}' is in '{1}' state, but 'instances' information not available in JSON received. " \
"Exiting ....".format(llap_app_name, llap_app_info['state']))
Logger.info(llap_app_info)
return False
if desired_instances == 0:
Logger.info("LLAP app '{0}' desired instance are set to 0. Exiting ....".format(llap_app_name))
return False
percentInstancesUp = 0
if live_instances > 0:
percentInstancesUp = float(live_instances) / desired_instances * 100
if percentInstancesUp >= percent_desired_instances_to_be_up:
Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}' >= {3}% of Desired Instances : " \
"'{4}'.".format(llap_app_name, llap_app_info['state'],
llap_app_info['liveInstances'],
percent_desired_instances_to_be_up,
llap_app_info['desiredInstances']))
return True
else:
Logger.info("LLAP app '{0}' in '{1}' state. Live Instances : '{2}'. Desired Instances : " \
"'{3}' after {4} secs.".format(llap_app_name, llap_app_info['state'],
llap_app_info['liveInstances'],
llap_app_info['desiredInstances'],
time.time() - curr_time))
raise Fail("App state is RUNNING_PARTIAL. Live Instances : '{0}', Desired Instance : '{1}'".format(llap_app_info['liveInstances'],
llap_app_info['desiredInstances']))
elif llap_app_info['state'].upper() in ['APP_NOT_FOUND', 'LAUNCHING', 'COMPLETE']:
status_str = format("LLAP app '{0}' current state is {1}.".format(llap_app_name, llap_app_info['state']))
Logger.info(status_str)
raise Fail(status_str)
else: # Covers any unknown that we get.
Logger.info(
"LLAP app '{0}' current state is '{1}'. Expected : 'RUNNING'.".format(llap_app_name, llap_app_info['state']))
return False
@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY)
class HiveServerInteractiveWindows(HiveServerInteractive):
def status(self, env):
pass
if __name__ == "__main__":
HiveServerInteractive().execute()
|
en
| 0.779058
|
#!/usr/bin/env python Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # Python Imports # Ambari Commons & Resource Management imports # Imports needed for Rolling/Express Upgrade # Local Imports # Copy hive.tar.gz and tez.tar.gz used by Hive Interactive to HDFS # Do the security setup, internally calls do_kinit() # TODO : We need have conditional [re]start of LLAP once "status check command" for LLAP is ready. # Check status and based on that decide on [re]starting. # Start LLAP before Hive Server Interactive start. # TODO : test the workability of Ranger and Hive2 during upgrade # Stop Hive Interactive Server first # We are not doing 'llap' status check done here as part of status check for 'HSI', as 'llap' status # check is a heavy weight operation. # Recursively check all existing gmetad pid files # If all validations passed successfully # Double check the dict before calling execute Custom command to Restart LLAP # Will exit with code 4 if need to run with "--force" to delete directories and registries. Controls the start of LLAP. Check llap app state # Call for cleaning up the earlier run(s) LLAP package folders. # Append params that are supported from Hive llap GA version. # Figure out the Slider Anti-affinity to be used. # YARN does not support anti-affinity, and therefore Slider implements AA by the means of exclusion lists, i.e, it # starts containers one by one and excludes the nodes it gets (adding a delay of ~2sec./machine). When the LLAP # container memory size configuration is more than half of YARN node memory, AA is implicit and should be avoided. # Setup the logger for the ga version only # Add the aux jars if they are specified. If empty, dont need to add this param. # Append args. # Append metaspace size to args. # E.g., output: # Prepared llap-slider-05Apr2016/run.sh for running LLAP on Slider #run_file_path = os.path.join(params.hive_user_home_dir, run_file_name) # We need to check the status of LLAP app to figure out it got # launched properly and is in running state. Then go ahead with Hive Interactive Server start. # Attempt to clean up the packaged application, or potentially rename it with a .bak # throw the original exception Checks and deletes previous run 'LLAP package' folders, ignoring three latest packages. Last three are are ignore for debugging/reference purposes. Helps in keeping check on disk space used. # Package name is like : llap-sliderYYYY-MM-DD-HH:MM:SS # Hardcoding it as of now, as no considerable use was found to provide an env param. # Ignore 'num_folders_to_retain' latest package folders. Does kinit and copies keytab for Hive/LLAP to HDFS. # Copy params.hive_llap_keytab_file to hdfs://<host>:<port>/user/<hive_user>/.slider/keytabs/<hive_user> , required by LLAP Get llap app status data for LLAP Tech Preview code base. 
Get llap app status data for LLAP GA code base. Parameters: 'percent_desired_instances_to_be_up' : A value b/w 0.0 and 1.0. 'total_timeout' : Total wait time while checking the status via llapstatus command 'refresh_rate' : Frequency of polling for llapstatus. # llapstatus comamnd : llapstatus -w -r <percent containers to wait for to be Up> -i <refresh_rate> -t <total timeout for this comand> # -w : Watch mode waits until all LLAP daemons are running or subset of the nodes are running (threshold can be specified via -r option) (Default wait until all nodes are running) # -r : When watch mode is enabled (-w), wait until the specified threshold of nodes are running (Default 1.0 which means 100% nodes are running) # -i : Amount of time in seconds to wait until subsequent status checks in watch mode (Default: 1sec) # -t : Exit watch mode if the desired state is not attained until the specified timeout (Default: 300sec) # # example : llapstatus -w -r 0.8 -i 2 -t 150 Remove extra lines from 'llapstatus' status output (eg: because of MOTD logging) so as to have a valid JSON data to be passed in to JSON converter. Note: It is assumed right now that extra lines will be only at the start and not at the end. Sample expected JSON to be passed for 'loads' is either of the form : Case 'A': { "amInfo" : { "appName" : "llap0", "appType" : "org-apache-slider", "appId" : "APP1", "containerId" : "container_1466036628595_0010_01_000001", "hostname" : "hostName", "amWebUrl" : "http://hostName:port/" }, "state" : "LAUNCHING", .... "desiredInstances" : 1, "liveInstances" : 0, .... .... } or Case 'B': { "state" : "APP_NOT_FOUND" } # To detect where from to start reading for JSON data # For Case 'A' # For Case 'B' # Remove extra logging from possible JSON output Checks llap app status. The states can be : 'COMPLETE', 'APP_NOT_FOUND', 'RUNNING_PARTIAL', 'RUNNING_ALL' & 'LAUNCHING'. if app is in 'APP_NOT_FOUND', 'RUNNING_PARTIAL' and 'LAUNCHING' state: we wait for 'num_times_to_wait' to have app in (1). 'RUNNING_ALL' or (2). 'RUNNING_PARTIAL' state with 80% or more 'desiredInstances' running and Return True else : Return False Parameters: llap_app_name : deployed llap app name. num_retries : Number of retries to check the LLAP app status. # Total wait time while checking the status via llapstatus command # Frequency of checking the llapstatus # Out of 100. # counters based on various states. # Used in 'RUNNING_PARTIAL' state. # Check how many instances were up. # Covers any unknown that we get.
| 1.327337
| 1
|
rayml/pipelines/components/estimators/classifiers/vowpal_wabbit_classifiers.py
|
gcode-ai/rayml
| 0
|
6627150
|
"""Vowpal Wabbit Classifiers."""
from abc import abstractmethod
from skopt.space import Integer, Real
from rayml.model_family import ModelFamily
from rayml.pipelines.components.estimators import Estimator
from rayml.problem_types import ProblemTypes
from rayml.utils.gen_utils import import_or_raise
class VowpalWabbitBaseClassifier(Estimator):
"""Vowpal Wabbit Base Classifier.
Args:
loss_function (str): Specifies the loss function to use. One of {"squared", "classic", "hinge", "logistic", "quantile"}. Defaults to "logistic".
learning_rate (float): Boosting learning rate. Defaults to 0.5.
decay_learning_rate (float): Decay factor for learning_rate. Defaults to 1.0.
power_t (float): Power on learning rate decay. Defaults to 0.5.
passes (int): Number of training passes. Defaults to 1.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
hyperparameter_ranges = {
"loss_function": ["squared", "classic", "hinge", "logistic"],
"learning_rate": Real(0.0000001, 10),
"decay_learning_rate": Real(0.0000001, 1.0),
"power_t": Real(0.01, 1.0),
"passes": Integer(1, 10),
}
""""""
model_family = ModelFamily.VOWPAL_WABBIT
"""ModelFamily.VOWPAL_WABBIT"""
_vowpal_wabbit_component = None
def __init__(
self,
loss_function="logistic",
learning_rate=0.5,
decay_learning_rate=1.0,
power_t=0.5,
passes=1,
random_seed=0,
**kwargs,
):
parameters = {
"loss_function": loss_function,
"learning_rate": learning_rate,
"decay_learning_rate": decay_learning_rate,
"power_t": power_t,
"passes": passes,
}
parameters.update(kwargs)
vw_class = self._get_component_obj_class()
vw_classifier = vw_class(**parameters)
super().__init__(
parameters=parameters, component_obj=vw_classifier, random_seed=random_seed
)
@abstractmethod
def _get_component_obj_class(self):
"""Get the appropriate Vowpal Wabbit class."""
@property
def feature_importance(self):
"""Feature importance for Vowpal Wabbit classifiers. This is not implemented."""
raise NotImplementedError(
"Feature importance is not implemented for the Vowpal Wabbit classifiers."
)
class VowpalWabbitBinaryClassifier(VowpalWabbitBaseClassifier):
"""Vowpal Wabbit Binary Classifier.
Args:
loss_function (str): Specifies the loss function to use. One of {"squared", "classic", "hinge", "logistic", "quantile"}. Defaults to "logistic".
learning_rate (float): Boosting learning rate. Defaults to 0.5.
decay_learning_rate (float): Decay factor for learning_rate. Defaults to 1.0.
power_t (float): Power on learning rate decay. Defaults to 0.5.
passes (int): Number of training passes. Defaults to 1.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Vowpal Wabbit Binary Classifier"
supported_problem_types = [
ProblemTypes.BINARY,
ProblemTypes.TIME_SERIES_BINARY,
]
"""[
ProblemTypes.BINARY,
ProblemTypes.TIME_SERIES_BINARY,
]"""
def _get_component_obj_class(self):
        vw_error_msg = "Vowpal Wabbit is not installed. Please install using `pip install vowpalwabbit`."
vw = import_or_raise("vowpalwabbit", error_msg=vw_error_msg)
vw_classifier = vw.sklearn_vw.VWClassifier
return vw_classifier
class VowpalWabbitMulticlassClassifier(VowpalWabbitBaseClassifier):
"""Vowpal Wabbit Multiclass Classifier.
Args:
loss_function (str): Specifies the loss function to use. One of {"squared", "classic", "hinge", "logistic", "quantile"}. Defaults to "logistic".
learning_rate (float): Boosting learning rate. Defaults to 0.5.
decay_learning_rate (float): Decay factor for learning_rate. Defaults to 1.0.
power_t (float): Power on learning rate decay. Defaults to 0.5.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Vowpal Wabbit Multiclass Classifier"
supported_problem_types = [
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
"""[
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_MULTICLASS,
]"""
def _get_component_obj_class(self):
        vw_error_msg = "Vowpal Wabbit is not installed. Please install using `pip install vowpalwabbit`."
vw = import_or_raise("vowpalwabbit.sklearn_vw", error_msg=vw_error_msg)
vw_classifier = vw.VWMultiClassifier
return vw_classifier
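# --- Illustrative usage sketch (not part of the original module) ---
# Assumes vowpalwabbit and pandas are installed; the feature/target values below are hypothetical
# placeholders, not taken from the rayml test suite.
#
#   import pandas as pd
#   from rayml.pipelines.components.estimators.classifiers.vowpal_wabbit_classifiers import (
#       VowpalWabbitBinaryClassifier,
#   )
#
#   X = pd.DataFrame({"f1": [0.1, 0.4, 0.8, 0.3], "f2": [1.0, 0.0, 1.0, 0.0]})
#   y = pd.Series([0, 1, 1, 0])
#
#   clf = VowpalWabbitBinaryClassifier(learning_rate=0.5, passes=2)
#   clf.fit(X, y)
#   preds = clf.predict(X)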
|
"""Vowpal Wabbit Classifiers."""
from abc import abstractmethod
from skopt.space import Integer, Real
from rayml.model_family import ModelFamily
from rayml.pipelines.components.estimators import Estimator
from rayml.problem_types import ProblemTypes
from rayml.utils.gen_utils import import_or_raise
class VowpalWabbitBaseClassifier(Estimator):
"""Vowpal Wabbit Base Classifier.
Args:
loss_function (str): Specifies the loss function to use. One of {"squared", "classic", "hinge", "logistic", "quantile"}. Defaults to "logistic".
learning_rate (float): Boosting learning rate. Defaults to 0.5.
decay_learning_rate (float): Decay factor for learning_rate. Defaults to 1.0.
power_t (float): Power on learning rate decay. Defaults to 0.5.
passes (int): Number of training passes. Defaults to 1.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
hyperparameter_ranges = {
"loss_function": ["squared", "classic", "hinge", "logistic"],
"learning_rate": Real(0.0000001, 10),
"decay_learning_rate": Real(0.0000001, 1.0),
"power_t": Real(0.01, 1.0),
"passes": Integer(1, 10),
}
""""""
model_family = ModelFamily.VOWPAL_WABBIT
"""ModelFamily.VOWPAL_WABBIT"""
_vowpal_wabbit_component = None
def __init__(
self,
loss_function="logistic",
learning_rate=0.5,
decay_learning_rate=1.0,
power_t=0.5,
passes=1,
random_seed=0,
**kwargs,
):
parameters = {
"loss_function": loss_function,
"learning_rate": learning_rate,
"decay_learning_rate": decay_learning_rate,
"power_t": power_t,
"passes": passes,
}
parameters.update(kwargs)
vw_class = self._get_component_obj_class()
vw_classifier = vw_class(**parameters)
super().__init__(
parameters=parameters, component_obj=vw_classifier, random_seed=random_seed
)
@abstractmethod
def _get_component_obj_class(self):
"""Get the appropriate Vowpal Wabbit class."""
@property
def feature_importance(self):
"""Feature importance for Vowpal Wabbit classifiers. This is not implemented."""
raise NotImplementedError(
"Feature importance is not implemented for the Vowpal Wabbit classifiers."
)
class VowpalWabbitBinaryClassifier(VowpalWabbitBaseClassifier):
"""Vowpal Wabbit Binary Classifier.
Args:
loss_function (str): Specifies the loss function to use. One of {"squared", "classic", "hinge", "logistic", "quantile"}. Defaults to "logistic".
learning_rate (float): Boosting learning rate. Defaults to 0.5.
decay_learning_rate (float): Decay factor for learning_rate. Defaults to 1.0.
power_t (float): Power on learning rate decay. Defaults to 0.5.
passes (int): Number of training passes. Defaults to 1.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Vowpal Wabbit Binary Classifier"
supported_problem_types = [
ProblemTypes.BINARY,
ProblemTypes.TIME_SERIES_BINARY,
]
"""[
ProblemTypes.BINARY,
ProblemTypes.TIME_SERIES_BINARY,
]"""
def _get_component_obj_class(self):
        vw_error_msg = "Vowpal Wabbit is not installed. Please install using `pip install vowpalwabbit`."
vw = import_or_raise("vowpalwabbit", error_msg=vw_error_msg)
vw_classifier = vw.sklearn_vw.VWClassifier
return vw_classifier
class VowpalWabbitMulticlassClassifier(VowpalWabbitBaseClassifier):
"""Vowpal Wabbit Multiclass Classifier.
Args:
loss_function (str): Specifies the loss function to use. One of {"squared", "classic", "hinge", "logistic", "quantile"}. Defaults to "logistic".
learning_rate (float): Boosting learning rate. Defaults to 0.5.
decay_learning_rate (float): Decay factor for learning_rate. Defaults to 1.0.
power_t (float): Power on learning rate decay. Defaults to 0.5.
random_seed (int): Seed for the random number generator. Defaults to 0.
"""
name = "Vowpal Wabbit Multiclass Classifier"
supported_problem_types = [
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_MULTICLASS,
]
"""[
ProblemTypes.MULTICLASS,
ProblemTypes.TIME_SERIES_MULTICLASS,
]"""
def _get_component_obj_class(self):
        vw_error_msg = "Vowpal Wabbit is not installed. Please install using `pip install vowpalwabbit`."
vw = import_or_raise("vowpalwabbit.sklearn_vw", error_msg=vw_error_msg)
vw_classifier = vw.VWMultiClassifier
return vw_classifier
|
en
| 0.647025
|
Vowpal Wabbit Classifiers. Vowpal Wabbit Base Classifier. Args: loss_function (str): Specifies the loss function to use. One of {"squared", "classic", "hinge", "logistic", "quantile"}. Defaults to "logistic". learning_rate (float): Boosting learning rate. Defaults to 0.5. decay_learning_rate (float): Decay factor for learning_rate. Defaults to 1.0. power_t (float): Power on learning rate decay. Defaults to 0.5. passes (int): Number of training passes. Defaults to 1. random_seed (int): Seed for the random number generator. Defaults to 0. ModelFamily.VOWPAL_WABBIT Get the appropriate Vowpal Wabbit class. Feature importance for Vowpal Wabbit classifiers. This is not implemented. Vowpal Wabbit Binary Classifier. Args: loss_function (str): Specifies the loss function to use. One of {"squared", "classic", "hinge", "logistic", "quantile"}. Defaults to "logistic". learning_rate (float): Boosting learning rate. Defaults to 0.5. decay_learning_rate (float): Decay factor for learning_rate. Defaults to 1.0. power_t (float): Power on learning rate decay. Defaults to 0.5. passes (int): Number of training passes. Defaults to 1. random_seed (int): Seed for the random number generator. Defaults to 0. [ ProblemTypes.BINARY, ProblemTypes.TIME_SERIES_BINARY, ] Vowpal Wabbit Multiclass Classifier. Args: loss_function (str): Specifies the loss function to use. One of {"squared", "classic", "hinge", "logistic", "quantile"}. Defaults to "logistic". learning_rate (float): Boosting learning rate. Defaults to 0.5. decay_learning_rate (float): Decay factor for learning_rate. Defaults to 1.0. power_t (float): Power on learning rate decay. Defaults to 0.5. random_seed (int): Seed for the random number generator. Defaults to 0. [ ProblemTypes.MULTICLASS, ProblemTypes.TIME_SERIES_MULTICLASS, ]
| 2.48788
| 2
|